author     James Bottomley <JBottomley@Parallels.com>  2012-05-21 07:17:30 -0400
committer  James Bottomley <JBottomley@Parallels.com>  2012-05-21 07:17:30 -0400
commit     e34693336564f02b3e2cc09d8b872aef22a154e9 (patch)
tree       09f51f10f9406042f9176e39b4dc8de850ba712e
parent     76b311fdbdd2e16e5d39cd496a67aa1a1b948914 (diff)
parent     de2eb4d5c5c25e8fb75d1e19092f24b83cb7d8d5 (diff)
Merge tag 'isci-for-3.5' into misc
isci update for 3.5

1/ Rework remote-node-context (RNC) handling for proper management of
   the silicon state machine in error handling and hot-plug conditions.
   Further details below, suffice to say if the RNC is mismanaged the
   silicon state machines may lock up.

2/ Refactor the initialization code to be reused for suspend/resume
   support

3/ Miscellaneous bug fixes to address discovery issues and hardware
   compatibility.

RNC rework details from Jeff Skirvin:

In the controller, devices as they appear on a SAS domain (or
direct-attached SATA devices) are represented by memory structures known
as "Remote Node Contexts" (RNCs). These structures are transferred from
main memory to the controller using a set of register commands; these
commands include setting up the context ("posting"), removing the
context ("invalidating"), and commands to control the scheduling of
commands and connections to that remote device ("suspensions" and
"resumptions"). There is a similar path to control RNC scheduling from
the protocol engine, which interprets the results of command and data
transmission and reception.

In general, the controller chooses among non-suspended RNCs to find one
that has work requiring scheduling the transmission of command and data
frames to a target. Likewise, when a target tries to return data back to
the initiator, the state of the RNC is used by the controller to
determine how to treat the incoming request. As an example, if the RNC
is in the state "TX/RX Suspended", incoming SSP connection requests from
the target will be rejected by the controller hardware. When an RNC is
"TX Suspended", it will not be selected by the controller hardware to
start outgoing command or data operations (with certain priority-based
exceptions).

As mentioned above, there are two sources for management of the RNC
states: commands from driver software, and the result of transmission
and reception conditions of commands and data signaled by the controller
hardware. As an example of the latter, if an outgoing SSP command ends
with a OPEN_REJECT(BAD_DESTINATION) status, the RNC state will
transition to the "TX Suspended" state, and this is signaled by the
controller hardware in the status to the completion of the pending
command as well as signaled in a controller hardware event. Examples of
the former are included in the patch changelogs.

Driver software is required to suspend the RNC in a "TX/RX Suspended"
condition before any outstanding commands can be terminated. Failure to
guarantee this can lead to a complete hardware hang condition. Earlier
versions of the driver software did not guarantee that an RNC was
correctly managed before I/O termination, and so operated in an unsafe
way.

Further, the driver performed unnecessary contortions to preserve the
remote device command state and so was more complicated than it needed
to be. A simplifying driver assumption is that once an I/O has entered
the error handler path without having completed in the target, the
requirement on the driver is that all use of the sas_task must end.
Beyond that, recovery of operation is dependent on libsas and other
components to reset, rediscover and reconfigure the device before normal
operation can restart. In the driver, this simplifying assumption meant
that the RNC management could be reduced to entry into the suspended
state, terminating the targeted I/O request, and resuming the RNC as
needed for device-specific management such as an SSP Abort Task or LUN
Reset Management request.
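The simplified error-handler ordering described in the last paragraph (suspend
the RNC, terminate the affected request, then resume the RNC only for
device-specific task management such as an Abort Task or LUN Reset) can be
sketched in plain C as follows. This is an illustrative sketch only, not the
isci driver's actual code: the type and function names (remote_node_context,
rnc_suspend, terminate_request, rnc_resume, isci_abort_task_sketch) are
hypothetical stand-ins for the real state-machine handlers.

    /* Hypothetical sketch of the RNC handling order described above; the
     * names below are illustrative, not the isci driver's real symbols. */
    #include <stdbool.h>

    enum rnc_state {
        RNC_READY,
        RNC_TX_SUSPENDED,
        RNC_TX_RX_SUSPENDED,
    };

    struct remote_node_context {
        enum rnc_state state;
    };

    /* Ask the hardware to stop scheduling the device (register command). */
    static void rnc_suspend(struct remote_node_context *rnc)
    {
        rnc->state = RNC_TX_RX_SUSPENDED;
    }

    /* Allow the hardware to schedule the device again. */
    static void rnc_resume(struct remote_node_context *rnc)
    {
        rnc->state = RNC_READY;
    }

    /* Tear down one outstanding request; only safe while the RNC is
     * suspended, otherwise the silicon state machine can hang (the hazard
     * the rework addresses). */
    static void terminate_request(struct remote_node_context *rnc, int tag)
    {
        (void)rnc;
        (void)tag;
    }

    /* Error-handler flow reduced to its essential ordering: suspend,
     * terminate, then resume only if a task-management frame (Abort Task,
     * LUN Reset) still needs to reach the device. */
    void isci_abort_task_sketch(struct remote_node_context *rnc, int tag,
                                bool send_tmf)
    {
        rnc_suspend(rnc);
        terminate_request(rnc, tag);
        if (send_tmf)
            rnc_resume(rnc);
    }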
-rw-r--r--Documentation/ABI/testing/sysfs-bus-hsi19
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv12m.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml2
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt (renamed from Documentation/devicetree/bindings/ata/calxeda-sata.txt)5
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt2
-rw-r--r--Documentation/networking/ip-sysctl.txt4
-rw-r--r--Documentation/power/freezing-of-tasks.txt37
-rw-r--r--Documentation/security/keys.txt14
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/rtc.h8
-rw-r--r--arch/alpha/kernel/core_tsunami.c1
-rw-r--r--arch/alpha/kernel/sys_marvel.c2
-rw-r--r--arch/arm/Kconfig9
-rw-r--r--arch/arm/boot/dts/msm8660-surf.dts4
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts2
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig1
-rw-r--r--arch/arm/configs/mini2440_defconfig2
-rw-r--r--arch/arm/configs/u8500_defconfig9
-rw-r--r--arch/arm/include/asm/thread_info.h7
-rw-r--r--arch/arm/include/asm/tls.h4
-rw-r--r--arch/arm/kernel/irq.c6
-rw-r--r--arch/arm/kernel/ptrace.c24
-rw-r--r--arch/arm/kernel/signal.c55
-rw-r--r--arch/arm/kernel/smp.c32
-rw-r--r--arch/arm/kernel/smp_twd.c6
-rw-r--r--arch/arm/kernel/sys_arm.c2
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c1
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c5
-rw-r--r--arch/arm/mach-at91/clock.c1
-rw-r--r--arch/arm/mach-at91/include/mach/at91_pmc.h2
-rw-r--r--arch/arm/mach-at91/setup.c2
-rw-r--r--arch/arm/mach-bcmring/core.c4
-rw-r--r--arch/arm/mach-exynos/Kconfig3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c24
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c26
-rw-r--r--arch/arm/mach-exynos/common.c14
-rw-r--r--arch/arm/mach-exynos/dev-dwmci.c13
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c1
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c5
-rw-r--r--arch/arm/mach-imx/imx27-dt.c6
-rw-r--r--arch/arm/mach-imx/mm-imx5.c2
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c1
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c25
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c2
-rw-r--r--arch/arm/mach-omap1/mux.c1
-rw-r--r--arch/arm/mach-omap1/timer.c4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c12
-rw-r--r--arch/arm/mach-omap2/board-generic.c2
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c13
-rw-r--r--arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c9
-rw-r--r--arch/arm/mach-omap2/serial.c124
-rw-r--r--arch/arm/mach-omap2/twl-common.c37
-rw-r--r--arch/arm/mach-omap2/twl-common.h10
-rw-r--r--arch/arm/mach-orion5x/mpp.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h7
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c21
-rw-r--r--arch/arm/mach-pxa/pxa27x.c6
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig8
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c2
-rw-r--r--arch/arm/mach-sa1100/generic.c2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c22
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c22
-rw-r--r--arch/arm/mach-shmobile/headsmp.S56
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c4
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c8
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c7
-rw-r--r--arch/arm/mach-shmobile/timer.c9
-rw-r--r--arch/arm/mach-u300/core.c6
-rw-r--r--arch/arm/mach-u300/i2c.c9
-rw-r--r--arch/arm/mach-u300/include/mach/irqs.h150
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/mbox-db5500.c2
-rw-r--r--arch/arm/mach-ux500/platsmp.c2
-rw-r--r--arch/arm/mm/abort-ev6.S17
-rw-r--r--arch/arm/mm/cache-l2x0.c25
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm/plat-omap/dma.c14
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h4
-rw-r--r--arch/arm/plat-omap/sram.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/sdhci.h28
-rw-r--r--arch/arm/vfp/vfpmodule.c99
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c53
-rw-r--r--arch/hexagon/kernel/dma.c1
-rw-r--r--arch/hexagon/kernel/process.c6
-rw-r--r--arch/hexagon/kernel/ptrace.c1
-rw-r--r--arch/hexagon/kernel/smp.c8
-rw-r--r--arch/hexagon/kernel/time.c1
-rw-r--r--arch/hexagon/kernel/vdso.c1
-rw-r--r--arch/ia64/kernel/perfmon.c18
-rw-r--r--arch/ia64/kvm/kvm-ia64.c2
-rw-r--r--arch/m68k/configs/m5275evb_defconfig1
-rw-r--r--arch/m68k/platform/520x/config.c6
-rw-r--r--arch/m68k/platform/523x/config.c6
-rw-r--r--arch/m68k/platform/5249/config.c6
-rw-r--r--arch/m68k/platform/527x/config.c8
-rw-r--r--arch/m68k/platform/528x/config.c6
-rw-r--r--arch/m68k/platform/532x/config.c6
-rw-r--r--arch/m68k/platform/68EZ328/Makefile6
-rw-r--r--arch/m68k/platform/68VZ328/Makefile9
-rw-r--r--arch/m68k/platform/68VZ328/bootlogo.h (renamed from arch/m68k/platform/68EZ328/bootlogo.h)2
-rw-r--r--arch/m68k/platform/coldfire/device.c8
-rw-r--r--arch/mips/ath79/dev-wmac.c2
-rw-r--r--arch/mips/include/asm/mach-jz4740/irq.h2
-rw-r--r--arch/mips/include/asm/mmu_context.h6
-rw-r--r--arch/mips/kernel/signal.c27
-rw-r--r--arch/mips/kernel/signal32.c20
-rw-r--r--arch/mips/kernel/signal_n32.c10
-rw-r--r--arch/parisc/include/asm/hardware.h3
-rw-r--r--arch/parisc/include/asm/page.h6
-rw-r--r--arch/parisc/include/asm/pdc.h7
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/spinlock.h2
-rw-r--r--arch/parisc/kernel/pdc_cons.c3
-rw-r--r--arch/parisc/kernel/time.c1
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi43
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi10
-rw-r--r--arch/powerpc/include/asm/exception-64s.h7
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/mpic.h18
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h1
-rw-r--r--arch/powerpc/include/asm/reg_booke.h5
-rw-r--r--arch/powerpc/kernel/entry_64.S62
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/irq.c27
-rw-r--r--arch/powerpc/kernel/machine_kexec.c7
-rw-r--r--arch/powerpc/kernel/setup_32.c3
-rw-r--r--arch/powerpc/kernel/traps.c10
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c22
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/net/bpf_jit.h8
-rw-r--r--arch/powerpc/net/bpf_jit_64.S108
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c26
-rw-r--r--arch/powerpc/platforms/85xx/common.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c11
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c13
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c8
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c2
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c9
-rw-r--r--arch/powerpc/platforms/powermac/pic.c6
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig4
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c2
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c3
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c61
-rw-r--r--arch/powerpc/sysdev/mpic.c54
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c12
-rw-r--r--arch/powerpc/sysdev/scom.c1
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c7
-rw-r--r--arch/sh/include/asm/atomic.h2
-rw-r--r--arch/sh/mm/fault_32.c2
-rw-r--r--arch/sparc/kernel/central.c2
-rw-r--r--arch/sparc/kernel/leon_smp.c3
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c7
-rw-r--r--arch/sparc/mm/ultra.S6
-rw-r--r--arch/tile/include/asm/pci.h4
-rw-r--r--arch/tile/kernel/pci.c4
-rw-r--r--arch/tile/kernel/single_step.c4
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/boot/compressed/head_32.S14
-rw-r--r--arch/x86/boot/compressed/head_64.S22
-rw-r--r--arch/x86/boot/compressed/relocs.c2
-rw-r--r--arch/x86/boot/tools/build.c15
-rw-r--r--arch/x86/ia32/ia32_aout.c35
-rw-r--r--arch/x86/include/asm/posix_types.h6
-rw-r--r--arch/x86/include/asm/sigcontext.h2
-rw-r--r--arch/x86/include/asm/siginfo.h8
-rw-r--r--arch/x86/include/asm/unistd.h6
-rw-r--r--arch/x86/include/asm/word-at-a-time.h33
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/kernel/acpi/sleep.c4
-rw-r--r--arch/x86/kernel/acpi/sleep.h4
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S4
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S4
-rw-r--r--arch/x86/kernel/apic/apic.c34
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c7
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c6
-rw-r--r--arch/x86/kernel/cpu/amd.c29
-rw-r--r--arch/x86/kernel/cpu/common.c9
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c8
-rw-r--r--arch/x86/kernel/i387.c1
-rw-r--r--arch/x86/kernel/kvm.c9
-rw-r--r--arch/x86/kernel/microcode_amd.c12
-rw-r--r--arch/x86/kernel/microcode_core.c10
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/setup_percpu.c14
-rw-r--r--arch/x86/kernel/x86_init.c1
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--arch/x86/platform/geode/net5501.c2
-rw-r--r--arch/x86/platform/mrst/mrst.c4
-rw-r--r--arch/x86/xen/enlighten.c46
-rw-r--r--arch/x86/xen/mmu.c7
-rw-r--r--arch/x86/xen/smp.c15
-rw-r--r--arch/x86/xen/xen-asm.S2
-rw-r--r--arch/xtensa/include/asm/hardirq.h3
-rw-r--r--arch/xtensa/include/asm/io.h1
-rw-r--r--arch/xtensa/kernel/signal.c1
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c3
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/reboot.c3
-rw-r--r--drivers/acpi/scan.c17
-rw-r--r--drivers/acpi/sleep.c52
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/libata-scsi.c38
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/base/regmap/regmap.c4
-rw-r--r--drivers/bcma/sprom.c7
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/talitos.c20
-rw-r--r--drivers/dma/Kconfig5
-rw-r--r--drivers/dma/amba-pl08x.c1
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/imx-dma.c9
-rw-r--r--drivers/dma/mxs-dma.c10
-rw-r--r--drivers/dma/pl330.c25
-rw-r--r--drivers/dma/ste_dma40.c323
-rw-r--r--drivers/dma/ste_dma40_ll.h2
-rw-r--r--drivers/firmware/efivars.c196
-rw-r--r--drivers/gpio/gpio-omap.c9
-rw-r--r--drivers/gpio/gpio-pch.c57
-rw-r--r--drivers/gpio/gpio-pxa.c21
-rw-r--r--drivers/gpio/gpio-samsung.c18
-rw-r--r--drivers/gpu/drm/drm_bufs.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c34
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c29
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c199
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-tivo.c2
-rw-r--r--drivers/hsi/clients/hsi_char.c2
-rw-r--r--drivers/hsi/hsi.c223
-rw-r--r--drivers/hwmon/ad7314.c12
-rw-r--r--drivers/hwmon/ads1015.c33
-rw-r--r--drivers/hwmon/coretemp.c6
-rw-r--r--drivers/hwmon/fam15h_power.c42
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c8
-rw-r--r--drivers/i2c/busses/i2c-pnx.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c8
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/input/misc/Kconfig3
-rw-r--r--drivers/input/misc/twl6040-vibra.c4
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/leds/leds-atmel-pwm.c2
-rw-r--r--drivers/leds/leds-netxbig.c4
-rw-r--r--drivers/leds/leds-ns2.c2
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/bitmap.h3
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/md/dm-mpath.c4
-rw-r--r--drivers/md/dm-raid.c4
-rw-r--r--drivers/md/dm-thin.c16
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/media/common/tuners/xc5000.c39
-rw-r--r--drivers/media/common/tuners/xc5000.h1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c6
-rw-r--r--drivers/media/rc/winbond-cir.c1
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/mt9m032.c5
-rw-r--r--drivers/mfd/Kconfig11
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/omap-usb-host.c45
-rw-r--r--drivers/mfd/rc5t583.c39
-rw-r--r--drivers/mfd/twl6040-core.c114
-rw-r--r--drivers/mmc/card/block.c56
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mmc/core/bus.c24
-rw-r--r--drivers/mmc/core/cd-gpio.c1
-rw-r--r--drivers/mmc/core/core.c64
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c3
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c1
-rw-r--r--drivers/net/arcnet/arc-rimi.c8
-rw-r--r--drivers/net/bonding/bond_3ad.c18
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_main.c16
-rw-r--r--drivers/net/caif/caif_hsi.c9
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dummy.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c12
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c92
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c52
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h7
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c6
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c62
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c99
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c24
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c39
-rw-r--r--drivers/net/ethernet/marvell/sky2.c31
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c28
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c10
-rw-r--r--drivers/net/ethernet/realtek/r8169.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c38
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/macvtap.c43
-rw-r--r--drivers/net/phy/icplus.c12
-rw-r--r--drivers/net/ppp/ppp_generic.c15
-rw-r--r--drivers/net/usb/asix.c4
-rw-r--r--drivers/net/usb/cdc_ether.c14
-rw-r--r--drivers/net/usb/qmi_wwan.c30
-rw-r--r--drivers/net/usb/smsc75xx.c36
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/virtio_net.c5
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c10
-rw-r--r--drivers/net/wireless/b43/main.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c64
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h1
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h18
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c1
-rw-r--r--drivers/net/wireless/wl1251/main.c1
-rw-r--r--drivers/net/wireless/wl1251/sdio.c2
-rw-r--r--drivers/parisc/sba_iommu.c1
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pinctrl/core.c25
-rw-r--r--drivers/platform/x86/acerhdf.c67
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c2
-rw-r--r--drivers/regulator/core.c5
-rw-r--r--drivers/regulator/max8997.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-mpc5121.c3
-rw-r--r--drivers/s390/net/qeth_core_main.c6
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/isci/host.c703
-rw-r--r--drivers/scsi/isci/host.h124
-rw-r--r--drivers/scsi/isci/init.c212
-rw-r--r--drivers/scsi/isci/phy.c76
-rw-r--r--drivers/scsi/isci/phy.h9
-rw-r--r--drivers/scsi/isci/port.c68
-rw-r--r--drivers/scsi/isci/port.h11
-rw-r--r--drivers/scsi/isci/port_config.c18
-rw-r--r--drivers/scsi/isci/probe_roms.c12
-rw-r--r--drivers/scsi/isci/probe_roms.h2
-rw-r--r--drivers/scsi/isci/registers.h8
-rw-r--r--drivers/scsi/isci/remote_device.c576
-rw-r--r--drivers/scsi/isci/remote_device.h63
-rw-r--r--drivers/scsi/isci/remote_node_context.c393
-rw-r--r--drivers/scsi/isci/remote_node_context.h43
-rw-r--r--drivers/scsi/isci/request.c715
-rw-r--r--drivers/scsi/isci/request.h125
-rw-r--r--drivers/scsi/isci/scu_completion_codes.h2
-rw-r--r--drivers/scsi/isci/task.c800
-rw-r--r--drivers/scsi/isci/task.h132
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c30
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h6
-rw-r--r--drivers/scsi/libfc/fc_lport.c12
-rw-r--r--drivers/scsi/libsas/sas_ata.c33
-rw-r--r--drivers/scsi/libsas/sas_discover.c61
-rw-r--r--drivers/scsi/libsas/sas_event.c24
-rw-r--r--drivers/scsi/libsas/sas_expander.c56
-rw-r--r--drivers/scsi/libsas/sas_init.c11
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/libsas/sas_phy.c21
-rw-r--r--drivers/scsi/libsas/sas_port.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/virtio_scsi.c24
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-bcm63xx.c163
-rw-r--r--drivers/spi/spi-bfin-sport.c21
-rw-r--r--drivers/spi/spi-bfin5xx.c14
-rw-r--r--drivers/spi/spi-ep93xx.c24
-rw-r--r--drivers/spi/spi-pl022.c58
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c1
-rw-r--r--drivers/staging/octeon/ethernet.c1
-rw-r--r--drivers/staging/ozwpan/ozpd.c2
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c20
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c8
-rw-r--r--drivers/staging/zcache/Kconfig2
-rw-r--r--drivers/target/target_core_tpg.c22
-rw-r--r--drivers/tty/amiserial.c4
-rw-r--r--drivers/tty/serial/clps711x.c14
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/pmac_zilog.c6
-rw-r--r--drivers/tty/vt/keyboard.c26
-rw-r--r--drivers/usb/class/cdc-wdm.c7
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/core/message.c6
-rw-r--r--drivers/usb/dwc3/core.c6
-rw-r--r--drivers/usb/dwc3/ep0.c12
-rw-r--r--drivers/usb/gadget/at91_udc.c8
-rw-r--r--drivers/usb/gadget/dummy_hcd.c1
-rw-r--r--drivers/usb/gadget/f_fs.c3
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c1
-rw-r--r--drivers/usb/gadget/file_storage.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c25
-rw-r--r--drivers/usb/gadget/g_ffs.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c17
-rw-r--r--drivers/usb/gadget/udc-core.c6
-rw-r--r--drivers/usb/gadget/uvc.h2
-rw-r--r--drivers/usb/gadget/uvc_queue.c4
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c7
-rw-r--r--drivers/usb/host/ehci-hcd.c9
-rw-r--r--drivers/usb/host/ehci-omap.c39
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-tegra.c377
-rw-r--r--drivers/usb/host/ohci-at91.c12
-rw-r--r--drivers/usb/misc/usbtest.c9
-rw-r--r--drivers/usb/misc/yurex.c10
-rw-r--r--drivers/usb/musb/davinci.c3
-rw-r--r--drivers/usb/musb/musb_core.c40
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/omap2430.c31
-rw-r--r--drivers/usb/otg/gpio_vbus.c15
-rw-r--r--drivers/usb/serial/cp210x.c9
-rw-r--r--drivers/usb/serial/sierra.c6
-rw-r--r--drivers/uwb/hwa-rc.c3
-rw-r--r--drivers/uwb/neh.c12
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.c5
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/bfin-lq035q1-fb.c1
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/video/xen-fbfront.c27
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/xen/Kconfig22
-rw-r--r--drivers/xen/events.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/grant-table.c13
-rw-r--r--drivers/xen/manage.c1
-rw-r--r--drivers/xen/xen-acpi-processor.c5
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c69
-rw-r--r--fs/aio.c16
-rw-r--r--fs/autofs4/autofs_i.h12
-rw-r--r--fs/autofs4/dev-ioctl.c3
-rw-r--r--fs/autofs4/inode.c4
-rw-r--r--fs/autofs4/waitq.c22
-rw-r--r--fs/binfmt_aout.c32
-rw-r--r--fs/binfmt_elf.c23
-rw-r--r--fs/binfmt_elf_fdpic.c18
-rw-r--r--fs/binfmt_flat.c12
-rw-r--r--fs/binfmt_som.c12
-rw-r--r--fs/btrfs/backref.c27
-rw-r--r--fs/btrfs/ctree.c28
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c40
-rw-r--r--fs/btrfs/disk-io.h3
-rw-r--r--fs/btrfs/extent-tree.c17
-rw-r--r--fs/btrfs/extent_io.c60
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/file.c9
-rw-r--r--fs/btrfs/inode.c54
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/ioctl.h4
-rw-r--r--fs/btrfs/reada.c48
-rw-r--r--fs/btrfs/relocation.c4
-rw-r--r--fs/btrfs/scrub.c22
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/transaction.c6
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/cifs/cifsfs.c16
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/connect.c33
-rw-r--r--fs/cifs/dir.c17
-rw-r--r--fs/cifs/file.c3
-rw-r--r--fs/dcache.c26
-rw-r--r--fs/dlm/lock.c12
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/gfs2/lock_dlm.c10
-rw-r--r--fs/hfsplus/catalog.c4
-rw-r--r--fs/hfsplus/dir.c11
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/namei.c4
-rw-r--r--fs/nfs/blocklayout/blocklayout.c4
-rw-r--r--fs/nfs/client.c5
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/idmap.c4
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/namespace.c93
-rw-r--r--fs/nfs/nfs4_fs.h11
-rw-r--r--fs/nfs/nfs4filelayoutdev.c2
-rw-r--r--fs/nfs/nfs4namespace.c86
-rw-r--r--fs/nfs/nfs4proc.c186
-rw-r--r--fs/nfs/nfs4state.c31
-rw-r--r--fs/nfs/nfs4xdr.c53
-rw-r--r--fs/nfs/objlayout/objlayout.c2
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/super.c12
-rw-r--r--fs/nfs/write.c5
-rw-r--r--fs/nfsd/nfs4recover.c2
-rw-r--r--fs/pipe.c31
-rw-r--r--fs/proc/task_mmu.c15
-rw-r--r--include/acpi/actypes.h7
-rw-r--r--include/asm-generic/siginfo.h14
-rw-r--r--include/asm-generic/statfs.h2
-rw-r--r--include/linux/efi.h13
-rw-r--r--include/linux/etherdevice.h11
-rw-r--r--include/linux/gpio-pxa.h4
-rw-r--r--include/linux/hsi/hsi.h31
-rw-r--r--include/linux/i2c/twl.h12
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/mfd/db5500-prcmu.h88
-rw-r--r--include/linux/mfd/rc5t583.h47
-rw-r--r--include/linux/mfd/twl6040.h27
-rw-r--r--include/linux/mm.h27
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/netdevice.h9
-rw-r--r--include/linux/netfilter_bridge.h9
-rw-r--r--include/linux/nfs_xdr.h7
-rw-r--r--include/linux/pinctrl/machine.h4
-rw-r--r--include/linux/pipe_fs_i.h1
-rw-r--r--include/linux/seqlock.h23
-rw-r--r--include/linux/skbuff.h11
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/otg.h1
-rw-r--r--include/linux/vm_event_item.h5
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/dst.h7
-rw-r--r--include/net/ip6_fib.h48
-rw-r--r--include/net/ip_vs.h4
-rw-r--r--include/net/red.h6
-rw-r--r--include/net/sctp/sctp.h13
-rw-r--r--include/net/sock.h5
-rw-r--r--include/scsi/libsas.h40
-rw-r--r--include/scsi/sas.h1
-rw-r--r--include/scsi/sas_ata.h4
-rw-r--r--init/do_mounts.c2
-rw-r--r--init/main.c25
-rw-r--r--kernel/compat.c63
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/irq/debug.h38
-rw-r--r--kernel/power/swap.c28
-rw-r--r--kernel/rcutree.c1
-rw-r--r--kernel/sched/core.c22
-rw-r--r--kernel/sched/fair.c18
-rw-r--r--kernel/sched/features.h1
-rw-r--r--kernel/time/tick-broadcast.c13
-rw-r--r--kernel/trace/trace.c8
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_output.c5
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/memblock.c7
-rw-r--r--mm/memcontrol.c23
-rw-r--r--mm/mempolicy.c11
-rw-r--r--mm/migrate.c16
-rw-r--r--mm/mmap.c59
-rw-r--r--mm/nobootmem.c13
-rw-r--r--mm/nommu.c41
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/percpu.c22
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/vmscan.c11
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/ax25/af_ax25.c9
-rw-r--r--net/bluetooth/hci_core.c27
-rw-r--r--net/bluetooth/hci_event.c3
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/br_netfilter.c8
-rw-r--r--net/caif/chnl_net.c9
-rw-r--r--net/core/dev.c56
-rw-r--r--net/core/drop_monitor.c89
-rw-r--r--net/core/net_namespace.c33
-rw-r--r--net/core/pktgen.c10
-rw-r--r--net/ieee802154/6lowpan.c40
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_input.c14
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp_diag.c9
-rw-r--r--net/ipv6/addrconf.c9
-rw-r--r--net/ipv6/ip6_fib.c9
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/route.c71
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_ip.c8
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rx.c10
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c11
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c56
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c38
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c5
-rw-r--r--net/netfilter/xt_CT.c2
-rw-r--r--net/openvswitch/datapath.c27
-rw-r--r--net/openvswitch/flow.c3
-rw-r--r--net/phonet/pn_dev.c21
-rw-r--r--net/sched/sch_gred.c7
-rw-r--r--net/sched/sch_netem.c6
-rw-r--r--net/sctp/output.c4
-rw-r--r--net/sctp/transport.c17
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c7
-rw-r--r--net/sunrpc/clnt.c50
-rw-r--r--net/sunrpc/rpc_pipe.c3
-rw-r--r--net/sunrpc/sunrpc_syms.c17
-rw-r--r--net/wireless/util.c2
-rw-r--r--scripts/mod/file2alias.c4
-rw-r--r--sound/core/vmaster.c1
-rw-r--r--sound/last.c2
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c2
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_intel.c20
-rw-r--r--sound/pci/hda/patch_conexant.c35
-rw-r--r--sound/pci/hda/patch_realtek.c66
-rw-r--r--sound/pci/hda/patch_sigmatel.c5
-rw-r--r--sound/pci/rme9652/hdsp.c1
-rw-r--r--sound/soc/blackfin/bf5xx-ssm2602.c2
-rw-r--r--sound/soc/codecs/Kconfig3
-rw-r--r--sound/soc/codecs/cs42l73.c2
-rw-r--r--sound/soc/codecs/tlv320aic23.c4
-rw-r--r--sound/soc/codecs/twl6040.c3
-rw-r--r--sound/soc/codecs/wm8350.c11
-rw-r--r--sound/soc/codecs/wm8994.c276
-rw-r--r--sound/soc/codecs/wm_hubs.c15
-rw-r--r--sound/soc/omap/Kconfig2
-rw-r--r--sound/soc/omap/omap-pcm.c4
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c2
-rw-r--r--sound/soc/sh/fsi.c7
-rw-r--r--sound/soc/sh/migor.c2
-rw-r--r--sound/soc/soc-core.c7
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--tools/perf/Makefile4
-rw-r--r--tools/perf/builtin-report.c17
-rw-r--r--tools/perf/builtin-test.c30
-rw-r--r--tools/perf/util/parse-events.l2
-rw-r--r--tools/perf/util/symbol.c13
-rwxr-xr-xtools/testing/ktest/ktest.pl12
749 files changed, 8598 insertions, 6602 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-hsi b/Documentation/ABI/testing/sysfs-bus-hsi
new file mode 100644
index 000000000000..1b1b282a99e1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-hsi
@@ -0,0 +1,19 @@
+What:           /sys/bus/hsi
+Date:           April 2012
+KernelVersion:  3.4
+Contact:        Carlos Chinea <carlos.chinea@nokia.com>
+Description:
+                High Speed Synchronous Serial Interface (HSI) is a
+                serial interface mainly used for connecting application
+                engines (APE) with cellular modem engines (CMT) in cellular
+                handsets.
+                The bus will be populated with devices (hsi_clients) representing
+                the protocols available in the system. Bus drivers implement
+                those protocols.
+
+What:           /sys/bus/hsi/devices/.../modalias
+Date:           April 2012
+KernelVersion:  3.4
+Contact:        Carlos Chinea <carlos.chinea@nokia.com>
+Description:    Stores the same MODALIAS value emitted by uevent
+                Format: hsi:<hsi_client device name>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
index 3fd3ce5df270..5274c24d11e0 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
@@ -1,6 +1,6 @@
 <refentry id="V4L2-PIX-FMT-NV12M">
   <refmeta>
-    <refentrytitle>V4L2_PIX_FMT_NV12M ('NV12M')</refentrytitle>
+    <refentrytitle>V4L2_PIX_FMT_NV12M ('NM12')</refentrytitle>
     &manvol;
   </refmeta>
   <refnamediv>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
index 9957863daf18..60308f1eefdf 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
@@ -1,6 +1,6 @@
 <refentry id="V4L2-PIX-FMT-YUV420M">
   <refmeta>
-    <refentrytitle>V4L2_PIX_FMT_YUV420M ('YU12M')</refentrytitle>
+    <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle>
     &manvol;
   </refmeta>
   <refnamediv>
diff --git a/Documentation/devicetree/bindings/ata/calxeda-sata.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 79caa5651f53..8bb8a76d42e8 100644
--- a/Documentation/devicetree/bindings/ata/calxeda-sata.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller
 
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci"
+- compatible : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts : <interrupt mapping for SATA IRQ>
 - reg : <registers mapping>
 
@@ -14,4 +14,3 @@ Example:
     reg = <0xffe08000 0x1000>;
     interrupts = <115>;
 };
-
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 2c3cd413f042..9cc44449508d 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -3,6 +3,8 @@
 Required properties:
 - compatible : "fsl,sgtl5000".
 
+- reg : the I2C address of the device
+
 Example:
 
 codec: sgtl5000@0a {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index bd80ba5847d2..1619a8c80873 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
     (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
     if it is <= 0.
     Possible values are [-31, 31], inclusive.
-    Default: 2
+    Default: 1
 
 tcp_allowed_congestion_control - STRING
     Show/set the congestion control choices available to non-privileged
@@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
     net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
     automatic tuning of that socket's receive buffer size, in which
     case this value is ignored.
-    Default: between 87380B and 4MB, depending on RAM size.
+    Default: between 87380B and 6MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
     Enable select acknowledgments (SACKS).
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index ec715cd78fbb..6ec291ea1c78 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -9,7 +9,7 @@ architectures).
 
 II. How does it work?
 
-There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+There are three per-task flags used for that, PF_NOFREEZE, PF_FROZEN
 and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
 PF_NOFREEZE unset (all user space processes and some kernel threads) are
 regarded as 'freezable' and treated in a special way before the system enters a
@@ -17,30 +17,31 @@ suspend state as well as before a hibernation image is created (in what follows
 we only consider hibernation, but the description also applies to suspend).
 
 Namely, as the first step of the hibernation procedure the function
-freeze_processes() (defined in kernel/power/process.c) is called. It executes
-try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
-either wakes them up, if they are kernel threads, or sends fake signals to them,
-if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called __refrigerator() (defined in
-kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
-to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
-Then, we say that the task is 'frozen' and therefore the set of functions
-handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
-User space processes are generally frozen before kernel threads.
+freeze_processes() (defined in kernel/power/process.c) is called. A system-wide
+variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate
+whether the system is to undergo a freezing operation. And freeze_processes()
+sets this variable. After this, it executes try_to_freeze_tasks() that sends a
+fake signal to all user space processes, and wakes up all the kernel threads.
+All freezable tasks must react to that by calling try_to_freeze(), which
+results in a call to __refrigerator() (defined in kernel/freezer.c), which sets
+the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes
+it loop until PF_FROZEN is cleared for it. Then, we say that the task is
+'frozen' and therefore the set of functions handling this mechanism is referred
+to as 'the freezer' (these functions are defined in kernel/power/process.c,
+kernel/freezer.c & include/linux/freezer.h). User space processes are generally
+frozen before kernel threads.
 
 __refrigerator() must not be called directly. Instead, use the
 try_to_freeze() function (defined in include/linux/freezer.h), that checks
-the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
-flag is set.
+if the task is to be frozen and makes the task enter __refrigerator().
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
 explicitly in suitable places or use the wait_event_freezable() or
 wait_event_freezable_timeout() macros (defined in include/linux/freezer.h)
-that combine interruptible sleep with checking if TIF_FREEZE is set and calling
-try_to_freeze(). The main loop of a freezable kernel thread may look like the
-following one:
+that combine interruptible sleep with checking if the task is to be frozen and
+calling try_to_freeze(). The main loop of a freezable kernel thread may look
+like the following one:
 
     set_freezable();
     do {
@@ -53,7 +54,7 @@ following one:
 (from drivers/usb/core/hub.c::hub_thread()).
 
 If a freezable kernel thread fails to call try_to_freeze() after the freezer has
-set TIF_FREEZE for it, the freezing of tasks will fail and the entire
+initiated a freezing operation, the freezing of tasks will fail and the entire
 hibernation operation will be cancelled. For this reason, freezable kernel
 threads must call try_to_freeze() somewhere or use one of the
 wait_event_freezable() and wait_event_freezable_timeout() macros.
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 787717091421..d389acd31e19 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -123,7 +123,7 @@ KEY SERVICE OVERVIEW
 
 The key service provides a number of features besides keys:
 
- (*) The key service defines two special key types:
+ (*) The key service defines three special key types:
 
      (+) "keyring"
 
@@ -137,6 +137,18 @@ The key service provides a number of features besides keys:
      blobs of data. These can be created, updated and read by userspace,
      and aren't intended for use by kernel services.
 
+     (+) "logon"
+
+     Like a "user" key, a "logon" key has a payload that is an arbitrary
+     blob of data. It is intended as a place to store secrets which are
+     accessible to the kernel but not to userspace programs.
+
+     The description can be arbitrary, but must be prefixed with a non-zero
+     length string that describes the key "subclass". The subclass is
+     separated from the rest of the description by a ':'. "logon" keys can
+     be created and updated from userspace, but the payload is only
+     readable from kernel space.
+
 (*) Each process subscribes to three keyrings: a thread-specific keyring, a
     process-specific keyring, and a session-specific keyring.
 
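As a side note on the "logon" key type documented in the hunk above, a minimal
userspace sketch using libkeyutils (link with -lkeyutils) could look like the
following; the "example:server1" description and the payload are placeholders,
not anything the kernel defines, and the read-back is expected to fail because
only kernel services may read "logon" key payloads.

    /* Minimal sketch of creating a "logon" key from userspace; description
     * and payload are placeholders. Build with: cc demo.c -lkeyutils */
    #include <stdio.h>
    #include <keyutils.h>

    int main(void)
    {
        const char secret[] = "s3kr1t";
        char buf[64];

        /* The description must be "<subclass>:<rest>", e.g. "example:server1". */
        key_serial_t key = add_key("logon", "example:server1",
                                   secret, sizeof(secret) - 1,
                                   KEY_SPEC_SESSION_KEYRING);
        if (key == -1) {
            perror("add_key");
            return 1;
        }

        /* Unlike "user" keys, the payload cannot be read back from userspace. */
        if (keyctl_read(key, buf, sizeof(buf)) == -1)
            perror("keyctl_read (expected to fail for a logon key)");

        return 0;
    }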
diff --git a/MAINTAINERS b/MAINTAINERS
index c004782a1359..879520452133 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1969,10 +1969,9 @@ S: Maintained
 F: drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M: Dave Jones <davej@redhat.com>
+M: Rafael J. Wysocki <rjw@sisk.pl>
 L: cpufreq@vger.kernel.org
-W: http://www.codemonkey.org.uk/projects/cpufreq/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
+L: linux-pm@vger.kernel.org
 S: Maintained
 F: drivers/cpufreq/
 F: include/linux/cpufreq.h
@@ -3593,6 +3592,7 @@ S: Supported
 F: drivers/net/wireless/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
+M: Johannes Berg <johannes.berg@intel.com>
 M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
 M: Intel Linux Wireless <ilw@linux.intel.com>
 L: linux-wireless@vger.kernel.org
@@ -4037,6 +4037,7 @@ F: Documentation/scsi/53c700.txt
 F: drivers/scsi/53c700*
 
 LED SUBSYSTEM
+M: Bryan Wu <bryan.wu@canonical.com>
 M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 F: drivers/leds/
@@ -5892,11 +5893,11 @@ F: Documentation/scsi/st.txt
 F: drivers/scsi/st*
 
 SCTP PROTOCOL
-M: Vlad Yasevich <vladislav.yasevich@hp.com>
+M: Vlad Yasevich <vyasevich@gmail.com>
 M: Sridhar Samudrala <sri@us.ibm.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net
-S: Supported
+S: Maintained
 F: Documentation/networking/sctp.txt
 F: include/linux/sctp.h
 F: include/net/sctp/
@@ -7587,8 +7588,8 @@ F: Documentation/filesystems/xfs.txt
 F: fs/xfs/
 
 XILINX AXI ETHERNET DRIVER
-M: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
-M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
+M: Anirudha Sarangi <anirudh@xilinx.com>
+M: John Linn <John.Linn@xilinx.com>
 S: Maintained
 F: drivers/net/ethernet/xilinx/xilinx_axienet*
 
diff --git a/Makefile b/Makefile
index f6578f47e21e..48bd1f50dcc3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 56a4df952fb0..22e58a99f38b 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK
 
 config VGA_HOSE
     bool
-    depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI
+    depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI)
     default y
     help
       Support VGA on an arbitrary hose; needed for several platforms
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
index 1f7fba671ae6..d70408d36677 100644
--- a/arch/alpha/include/asm/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
@@ -1,14 +1,10 @@
 #ifndef _ALPHA_RTC_H
 #define _ALPHA_RTC_H
 
-#if defined(CONFIG_ALPHA_GENERIC)
+#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
+ || defined(CONFIG_ALPHA_GENERIC)
 # define get_rtc_time alpha_mv.rtc_get_time
 # define set_rtc_time alpha_mv.rtc_set_time
-#else
-# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP)
-#  define get_rtc_time marvel_get_rtc_time
-#  define set_rtc_time marvel_set_rtc_time
-# endif
 #endif
 
 #include <asm-generic/rtc.h>
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index 5e7c28f92f19..61893d7bdda5 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -11,6 +11,7 @@
 #include <asm/core_tsunami.h>
 #undef __EXTERN_INLINE
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/sched.h>
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 14a4b6a7cf59..407accc80877 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,7 +317,7 @@ marvel_init_irq(void)
 }
 
 static int
-marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
     struct pci_controller *hose = dev->sysdata;
     struct io7_port *io7_port = hose->sysdata;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d40342c..36586dba6fa6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+    bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+    depends on CPU_V6
+    help
+      Executing a SWP instruction to read-only memory does not set bit 11
+      of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+      treat the access as a read, preventing a COW from occurring and
+      causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
     bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
     depends on CPU_V6 || CPU_V6K
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 15ded0deaa79..45bc4bb04e57 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -10,7 +10,7 @@
     intc: interrupt-controller@02080000 {
         compatible = "qcom,msm-8660-qgic";
         interrupt-controller;
-        #interrupt-cells = <1>;
+        #interrupt-cells = <3>;
         reg = < 0x02080000 0x1000 >,
               < 0x02081000 0x1000 >;
     };
@@ -19,6 +19,6 @@
         compatible = "qcom,msm-hsuart", "qcom,msm-uart";
         reg = <0x19c40000 0x1000>,
               <0x19c00000 0x1000>;
-        interrupts = <195>;
+        interrupts = <0 195 0x0>;
     };
 };
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 0b32925f2147..e2fe3195c0d1 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -173,7 +173,7 @@
         mmc@5000 {
             compatible = "arm,primecell";
             reg = < 0x5000 0x1000>;
-            interrupts = <22>;
+            interrupts = <22 34>;
         };
         kmi@6000 {
             compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 166461073b78..7e8175269064 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
         mmc@b000 {
             compatible = "arm,primecell";
             reg = <0xb000 0x1000>;
-            interrupts = <23>;
+            interrupts = <23 34>;
         };
     };
 };
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index b5ac644e12af..6b31cb60daab 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -112,6 +112,7 @@ CONFIG_WATCHDOG=y
 CONFIG_IMX2_WDT=y
 CONFIG_MFD_MC13XXX=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_MC13783=y
 CONFIG_REGULATOR_MC13892=y
 CONFIG_FB=y
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index 42da9183acc8..082175c54e7c 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -14,6 +14,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_ARCH_S3C24XX=y
+# CONFIG_CPU_S3C2410 is not set
+CONFIG_CPU_S3C2440=y
 CONFIG_S3C_ADC=y
 CONFIG_S3C24XX_PWM=y
 CONFIG_MACH_MINI2440=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 889d73ac1ae1..7e84f453e8a6 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -8,8 +8,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_U8500=y
-CONFIG_UX500_SOC_DB5500=y
-CONFIG_UX500_SOC_DB8500=y
 CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
 CONFIG_MACH_U5500=y
@@ -39,7 +37,6 @@ CONFIG_CAIF=y
39CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 37CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
40CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
41CONFIG_BLK_DEV_RAM_SIZE=65536 39CONFIG_BLK_DEV_RAM_SIZE=65536
42CONFIG_MISC_DEVICES=y
43CONFIG_AB8500_PWM=y 40CONFIG_AB8500_PWM=y
44CONFIG_SENSORS_BH1780=y 41CONFIG_SENSORS_BH1780=y
45CONFIG_NETDEVICES=y 42CONFIG_NETDEVICES=y
@@ -65,16 +62,18 @@ CONFIG_SERIAL_AMBA_PL011=y
65CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 62CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
66CONFIG_HW_RANDOM=y 63CONFIG_HW_RANDOM=y
67CONFIG_HW_RANDOM_NOMADIK=y 64CONFIG_HW_RANDOM_NOMADIK=y
68CONFIG_I2C=y
69CONFIG_I2C_NOMADIK=y
70CONFIG_SPI=y 65CONFIG_SPI=y
71CONFIG_SPI_PL022=y 66CONFIG_SPI_PL022=y
72CONFIG_GPIO_STMPE=y 67CONFIG_GPIO_STMPE=y
73CONFIG_GPIO_TC3589X=y 68CONFIG_GPIO_TC3589X=y
69CONFIG_POWER_SUPPLY=y
70CONFIG_AB8500_BM=y
71CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y
74CONFIG_MFD_STMPE=y 72CONFIG_MFD_STMPE=y
75CONFIG_MFD_TC3589X=y 73CONFIG_MFD_TC3589X=y
76CONFIG_AB5500_CORE=y 74CONFIG_AB5500_CORE=y
77CONFIG_AB8500_CORE=y 75CONFIG_AB8500_CORE=y
76CONFIG_REGULATOR=y
78CONFIG_REGULATOR_AB8500=y 77CONFIG_REGULATOR_AB8500=y
79# CONFIG_HID_SUPPORT is not set 78# CONFIG_HID_SUPPORT is not set
80CONFIG_USB_GADGET=y 79CONFIG_USB_GADGET=y
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..0f04d84582e1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
118extern void vfp_sync_hwstate(struct thread_info *); 118extern void vfp_sync_hwstate(struct thread_info *);
119extern void vfp_flush_hwstate(struct thread_info *); 119extern void vfp_flush_hwstate(struct thread_info *);
120 120
121struct user_vfp;
122struct user_vfp_exc;
123
124extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
125 struct user_vfp_exc __user *);
126extern int vfp_restore_user_hwstate(struct user_vfp __user *,
127 struct user_vfp_exc __user *);
121#endif 128#endif
122 129
123/* 130/*
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb0f61c..73409e6c0251 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
7 7
8 .macro set_tls_v6k, tp, tmp1, tmp2 8 .macro set_tls_v6k, tp, tmp1, tmp2
9 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register 9 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
10 mov \tmp1, #0
11 mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
10 .endm 12 .endm
11 13
12 .macro set_tls_v6, tp, tmp1, tmp2 14 .macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
15 mov \tmp2, #0xffff0fff 17 mov \tmp2, #0xffff0fff
16 tst \tmp1, #HWCAP_TLS @ hardware TLS available? 18 tst \tmp1, #HWCAP_TLS @ hardware TLS available?
17 mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register 19 mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
20 movne \tmp1, #0
21 mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
18 streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 22 streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
19 .endm 23 .endm
20 24
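[Note on the hunk above] The set_tls macros now zero CP15 c13,c0,2 (TPIDRURW, the user read/write thread ID register) whenever the read-only TLS register is written, so a value left behind by the previous task cannot leak to the next one. On ARMv6K and later, user space can read both registers directly; an illustrative sketch of the accessors using standard CP15 inline assembly:

static inline unsigned long read_tpidrurw(void)
{
        unsigned long val;

        /* CP15 c13, c0, 2: user read/write thread ID register */
        asm volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (val));
        return val;
}

static inline unsigned long read_tpidruro(void)
{
        unsigned long val;

        /* CP15 c13, c0, 3: user read-only thread ID (the TLS pointer) */
        asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (val));
        return val;
}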
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbfed662..8349d4e97e2b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
155 } 155 }
156 156
157 c = irq_data_get_irq_chip(d); 157 c = irq_data_get_irq_chip(d);
158 if (c->irq_set_affinity) 158 if (!c->irq_set_affinity)
159 c->irq_set_affinity(d, affinity, true);
160 else
161 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 159 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
160 else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
161 cpumask_copy(d->affinity, affinity);
162 162
163 return ret; 163 return ret;
164} 164}
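[Note on the hunk above] With this migrate_one_irq() change, the cached d->affinity is only updated when the chip callback reports IRQ_SET_MASK_OK, so a chip that cannot honour the request no longer leaves a stale mask behind. A minimal sketch of such a callback for a hypothetical chip (the hardware routing step is elided):

#include <linux/irq.h>
#include <linux/cpumask.h>

static int demo_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val, bool force)
{
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* program the interrupt controller to route d->hwirq to 'cpu' */

        return IRQ_SET_MASK_OK;     /* caller may now copy mask_val */
}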
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 80abafb9bf33..9650c143afc1 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
906 return ret; 906 return ret;
907} 907}
908 908
909#ifdef __ARMEB__
910#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
911#else
912#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
913#endif
914
915asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) 909asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
916{ 910{
917 unsigned long ip; 911 unsigned long ip;
918 912
919 /* 913 if (why)
920 * Save IP. IP is used to denote syscall entry/exit:
921 * IP = 0 -> entry, = 1 -> exit
922 */
923 ip = regs->ARM_ip;
924 regs->ARM_ip = why;
925
926 if (!ip)
927 audit_syscall_exit(regs); 914 audit_syscall_exit(regs);
928 else 915 else
929 audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0, 916 audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
930 regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); 917 regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
931 918
932 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 919 if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
936 923
937 current_thread_info()->syscall = scno; 924 current_thread_info()->syscall = scno;
938 925
926 /*
927 * IP is used to denote syscall entry/exit:
928 * IP = 0 -> entry, =1 -> exit
929 */
930 ip = regs->ARM_ip;
931 regs->ARM_ip = why;
932
939 /* the 0x80 provides a way for the tracing parent to distinguish 933 /* the 0x80 provides a way for the tracing parent to distinguish
940 between a syscall stop and SIGTRAP delivery */ 934 between a syscall stop and SIGTRAP delivery */
941 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 935 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532fc8aa4..d68d1b694680 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
180 180
181static int preserve_vfp_context(struct vfp_sigframe __user *frame) 181static int preserve_vfp_context(struct vfp_sigframe __user *frame)
182{ 182{
183 struct thread_info *thread = current_thread_info();
184 struct vfp_hard_struct *h = &thread->vfpstate.hard;
185 const unsigned long magic = VFP_MAGIC; 183 const unsigned long magic = VFP_MAGIC;
186 const unsigned long size = VFP_STORAGE_SIZE; 184 const unsigned long size = VFP_STORAGE_SIZE;
187 int err = 0; 185 int err = 0;
188 186
189 vfp_sync_hwstate(thread);
190 __put_user_error(magic, &frame->magic, err); 187 __put_user_error(magic, &frame->magic, err);
191 __put_user_error(size, &frame->size, err); 188 __put_user_error(size, &frame->size, err);
192 189
193 /* 190 if (err)
194 * Copy the floating point registers. There can be unused 191 return -EFAULT;
195 * registers see asm/hwcap.h for details.
196 */
197 err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
198 sizeof(h->fpregs));
199 /*
200 * Copy the status and control register.
201 */
202 __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
203
204 /*
205 * Copy the exception registers.
206 */
207 __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
208 __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
209 __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
210 192
211 return err ? -EFAULT : 0; 193 return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
212} 194}
213 195
214static int restore_vfp_context(struct vfp_sigframe __user *frame) 196static int restore_vfp_context(struct vfp_sigframe __user *frame)
215{ 197{
216 struct thread_info *thread = current_thread_info();
217 struct vfp_hard_struct *h = &thread->vfpstate.hard;
218 unsigned long magic; 198 unsigned long magic;
219 unsigned long size; 199 unsigned long size;
220 unsigned long fpexc;
221 int err = 0; 200 int err = 0;
222 201
223 __get_user_error(magic, &frame->magic, err); 202 __get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
228 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) 207 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
229 return -EINVAL; 208 return -EINVAL;
230 209
231 vfp_flush_hwstate(thread); 210 return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
232
233 /*
234 * Copy the floating point registers. There can be unused
235 * registers see asm/hwcap.h for details.
236 */
237 err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
238 sizeof(h->fpregs));
239 /*
240 * Copy the status and control register.
241 */
242 __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
243
244 /*
245 * Sanitise and restore the exception registers.
246 */
247 __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
248 /* Ensure the VFP is enabled. */
249 fpexc |= FPEXC_EN;
250 /* Ensure FPINST2 is invalid and the exception flag is cleared. */
251 fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
252 h->fpexc = fpexc;
253
254 __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
255 __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
256
257 return err ? -EFAULT : 0;
258} 211}
259 212
260#endif 213#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..8f4644659777 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
251 struct mm_struct *mm = &init_mm; 251 struct mm_struct *mm = &init_mm;
252 unsigned int cpu = smp_processor_id(); 252 unsigned int cpu = smp_processor_id();
253 253
254 printk("CPU%u: Booted secondary processor\n", cpu);
255
256 /* 254 /*
257 * All kernel threads share the same mm context; grab a 255 * All kernel threads share the same mm context; grab a
258 * reference and switch to it. 256 * reference and switch to it.
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
264 enter_lazy_tlb(mm, current); 262 enter_lazy_tlb(mm, current);
265 local_flush_tlb_all(); 263 local_flush_tlb_all();
266 264
265 printk("CPU%u: Booted secondary processor\n", cpu);
266
267 cpu_init(); 267 cpu_init();
268 preempt_disable(); 268 preempt_disable();
269 trace_hardirqs_off(); 269 trace_hardirqs_off();
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
510 local_fiq_disable(); 510 local_fiq_disable();
511 local_irq_disable(); 511 local_irq_disable();
512 512
513#ifdef CONFIG_HOTPLUG_CPU
514 platform_cpu_kill(cpu);
515#endif
516
517 while (1) 513 while (1)
518 cpu_relax(); 514 cpu_relax();
519} 515}
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
576 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 572 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
577} 573}
578 574
575#ifdef CONFIG_HOTPLUG_CPU
576static void smp_kill_cpus(cpumask_t *mask)
577{
578 unsigned int cpu;
579 for_each_cpu(cpu, mask)
580 platform_cpu_kill(cpu);
581}
582#else
583static void smp_kill_cpus(cpumask_t *mask) { }
584#endif
585
579void smp_send_stop(void) 586void smp_send_stop(void)
580{ 587{
581 unsigned long timeout; 588 unsigned long timeout;
589 struct cpumask mask;
582 590
583 if (num_online_cpus() > 1) { 591 cpumask_copy(&mask, cpu_online_mask);
584 struct cpumask mask; 592 cpumask_clear_cpu(smp_processor_id(), &mask);
585 cpumask_copy(&mask, cpu_online_mask); 593 smp_cross_call(&mask, IPI_CPU_STOP);
586 cpumask_clear_cpu(smp_processor_id(), &mask);
587
588 smp_cross_call(&mask, IPI_CPU_STOP);
589 }
590 594
591 /* Wait up to one second for other CPUs to stop */ 595 /* Wait up to one second for other CPUs to stop */
592 timeout = USEC_PER_SEC; 596 timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
595 599
596 if (num_online_cpus() > 1) 600 if (num_online_cpus() > 1)
597 pr_warning("SMP: failed to stop secondary CPUs\n"); 601 pr_warning("SMP: failed to stop secondary CPUs\n");
602
603 smp_kill_cpus(&mask);
598} 604}
599 605
600/* 606/*
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 5b150afb995b..fef42b21cecb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -118,14 +118,10 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
118 * The twd clock events must be reprogrammed to account for the new 118 * The twd clock events must be reprogrammed to account for the new
119 * frequency. The timer is local to a cpu, so cross-call to the 119 * frequency. The timer is local to a cpu, so cross-call to the
120 * changing cpu. 120 * changing cpu.
121 *
122 * Only wait for it to finish, if the cpu is active to avoid
123 * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
124 * booting of that cpu.
125 */ 121 */
126 if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) 122 if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
127 smp_call_function_single(freqs->cpu, twd_update_frequency, 123 smp_call_function_single(freqs->cpu, twd_update_frequency,
128 NULL, cpu_active(freqs->cpu)); 124 NULL, 1);
129 125
130 return NOTIFY_OK; 126 return NOTIFY_OK;
131} 127}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index d2b177905cdb..76cbb055dd05 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
115 "Ir" (THREAD_START_SP - sizeof(regs)), 115 "Ir" (THREAD_START_SP - sizeof(regs)),
116 "r" (&regs), 116 "r" (&regs),
117 "Ir" (sizeof(regs)) 117 "Ir" (sizeof(regs))
118 : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); 118 : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
119 119
120 out: 120 out:
121 return ret; 121 return ret;
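[Note on the hunk above] The kernel_execve() fix adds r8 and r9 to the clobber list of the inline assembly that rebuilds the user register frame; without them the compiler may keep live values in those registers across the asm statement and see them silently destroyed. As a generic illustration of the rule (not the execve code itself): any register an asm block scribbles on must appear either as an output operand or in the clobber list.

static inline unsigned long add_with_scratch(unsigned long a, unsigned long b)
{
        unsigned long res;

        /* r8 is used as scratch inside the asm, so it is listed as a
         * clobber; otherwise the compiler may assume it survives */
        asm volatile("mov r8, %1\n\t"
                     "add %0, r8, %2"
                     : "=r" (res)
                     : "r" (a), "r" (b)
                     : "r8");
        return res;
}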
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 99ce5c955e39..05774e5b1cba 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -1173,7 +1173,6 @@ void __init at91_add_device_serial(void)
1173 printk(KERN_INFO "AT91: No default serial console defined.\n"); 1173 printk(KERN_INFO "AT91: No default serial console defined.\n");
1174} 1174}
1175#else 1175#else
1176void __init __deprecated at91_init_serial(struct at91_uart_config *config) {}
1177void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} 1176void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
1178void __init at91_set_serial_console(unsigned portnr) {} 1177void __init at91_set_serial_console(unsigned portnr) {}
1179void __init at91_add_device_serial(void) {} 1178void __init at91_add_device_serial(void) {}
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index dd7f782b0b91..104ca40d8d18 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/clockchips.h> 25#include <linux/clockchips.h>
26#include <linux/export.h>
26 27
27#include <asm/mach/time.h> 28#include <asm/mach/time.h>
28 29
@@ -176,6 +177,7 @@ static struct clock_event_device clkevt = {
176}; 177};
177 178
178void __iomem *at91_st_base; 179void __iomem *at91_st_base;
180EXPORT_SYMBOL_GPL(at91_st_base);
179 181
180void __init at91rm9200_ioremap_st(u32 addr) 182void __init at91rm9200_ioremap_st(u32 addr)
181{ 183{
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 11cbaa8946fe..b2e4fe21f346 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -117,7 +117,7 @@ static struct i2c_board_info __initdata ek_i2c_devices[] = {
117}; 117};
118 118
119#define EK_FLASH_BASE AT91_CHIPSELECT_0 119#define EK_FLASH_BASE AT91_CHIPSELECT_0
120#define EK_FLASH_SIZE SZ_2M 120#define EK_FLASH_SIZE SZ_8M
121 121
122static struct physmap_flash_data ek_flash_data = { 122static struct physmap_flash_data ek_flash_data = {
123 .width = 2, 123 .width = 2,
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c3f994462864..065fed342424 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -85,8 +85,6 @@ static struct resource dm9000_resource[] = {
85 .flags = IORESOURCE_MEM 85 .flags = IORESOURCE_MEM
86 }, 86 },
87 [2] = { 87 [2] = {
88 .start = AT91_PIN_PC11,
89 .end = AT91_PIN_PC11,
90 .flags = IORESOURCE_IRQ 88 .flags = IORESOURCE_IRQ
91 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, 89 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
92 } 90 }
@@ -130,6 +128,8 @@ static struct sam9_smc_config __initdata dm9000_smc_config = {
130 128
131static void __init ek_add_device_dm9000(void) 129static void __init ek_add_device_dm9000(void)
132{ 130{
131 struct resource *r = &dm9000_resource[2];
132
133 /* Configure chip-select 2 (DM9000) */ 133 /* Configure chip-select 2 (DM9000) */
134 sam9_smc_configure(0, 2, &dm9000_smc_config); 134 sam9_smc_configure(0, 2, &dm9000_smc_config);
135 135
@@ -139,6 +139,7 @@ static void __init ek_add_device_dm9000(void)
139 /* Configure Interrupt pin as input, no pull-up */ 139 /* Configure Interrupt pin as input, no pull-up */
140 at91_set_gpio_input(AT91_PIN_PC11, 0); 140 at91_set_gpio_input(AT91_PIN_PC11, 0);
141 141
142 r->start = r->end = gpio_to_irq(AT91_PIN_PC11);
142 platform_device_register(&dm9000_device); 143 platform_device_register(&dm9000_device);
143} 144}
144#else 145#else
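[Note on the hunk above] The sam9261ek board stops hard-coding the DM9000 interrupt as the raw GPIO number and instead fills the IORESOURCE_IRQ resource at runtime with gpio_to_irq(), which stays correct even when the GPIO-to-IRQ mapping is not 1:1. A condensed sketch of the pattern; the pin name comes from the diff, everything else is illustrative:

#include <linux/gpio.h>
#include <linux/ioport.h>

static struct resource demo_eth_resources[] = {
        [0] = {
                .start = 0x30000000,            /* example chip-select base */
                .end   = 0x30000003,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
                /* .start/.end filled in at init time */
                .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
        },
};

static void __init demo_eth_init(void)
{
        struct resource *r = &demo_eth_resources[1];

        r->start = r->end = gpio_to_irq(AT91_PIN_PC11);
        /* platform_device_register(&demo_eth_device); */
}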
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index a0f4d7424cdc..6b692824c988 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -35,6 +35,7 @@
35#include "generic.h" 35#include "generic.h"
36 36
37void __iomem *at91_pmc_base; 37void __iomem *at91_pmc_base;
38EXPORT_SYMBOL_GPL(at91_pmc_base);
38 39
39/* 40/*
40 * There's a lot more which can be done with clocks, including cpufreq 41 * There's a lot more which can be done with clocks, including cpufreq
diff --git a/arch/arm/mach-at91/include/mach/at91_pmc.h b/arch/arm/mach-at91/include/mach/at91_pmc.h
index 36604782a78f..ea2c57a86ca6 100644
--- a/arch/arm/mach-at91/include/mach/at91_pmc.h
+++ b/arch/arm/mach-at91/include/mach/at91_pmc.h
@@ -25,7 +25,7 @@ extern void __iomem *at91_pmc_base;
25#define at91_pmc_write(field, value) \ 25#define at91_pmc_write(field, value) \
26 __raw_writel(value, at91_pmc_base + field) 26 __raw_writel(value, at91_pmc_base + field)
27#else 27#else
28.extern at91_aic_base 28.extern at91_pmc_base
29#endif 29#endif
30 30
31#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ 31#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 97cc04dc8073..f44a2e7272e3 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -54,6 +54,7 @@ void __init at91_init_interrupts(unsigned int *priority)
54} 54}
55 55
56void __iomem *at91_ramc_base[2]; 56void __iomem *at91_ramc_base[2];
57EXPORT_SYMBOL_GPL(at91_ramc_base);
57 58
58void __init at91_ioremap_ramc(int id, u32 addr, u32 size) 59void __init at91_ioremap_ramc(int id, u32 addr, u32 size)
59{ 60{
@@ -292,6 +293,7 @@ void __init at91_ioremap_rstc(u32 base_addr)
292} 293}
293 294
294void __iomem *at91_matrix_base; 295void __iomem *at91_matrix_base;
296EXPORT_SYMBOL_GPL(at91_matrix_base);
295 297
296void __init at91_ioremap_matrix(u32 base_addr) 298void __init at91_ioremap_matrix(u32 base_addr)
297{ 299{
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 22e4e0a28ad1..adbfb1994582 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -52,8 +52,8 @@
52#include <mach/csp/chipcHw_inline.h> 52#include <mach/csp/chipcHw_inline.h>
53#include <mach/csp/tmrHw_reg.h> 53#include <mach/csp/tmrHw_reg.h>
54 54
55static AMBA_APB_DEVICE(uartA, "uarta", MM_ADDR_IO_UARTA, { IRQ_UARTA }, NULL); 55static AMBA_APB_DEVICE(uartA, "uartA", 0, MM_ADDR_IO_UARTA, {IRQ_UARTA}, NULL);
56static AMBA_APB_DEVICE(uartB, "uartb", MM_ADDR_IO_UARTB, { IRQ_UARTB }, NULL); 56static AMBA_APB_DEVICE(uartB, "uartB", 0, MM_ADDR_IO_UARTB, {IRQ_UARTB}, NULL);
57 57
58static struct clk pll1_clk = { 58static struct clk pll1_clk = {
59 .name = "PLL1", 59 .name = "PLL1",
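[Note on the hunk above] The bcmring fix tracks a change in the AMBA_APB_DEVICE() macro from <linux/amba/bus.h>, which now takes a peripheral id argument between the bus id and the base address; passing 0 tells the AMBA bus code to read the id from the peripheral's identification registers at probe time. A hedged one-line usage sketch with illustrative values:

/* periphid 0: let the bus read the id from the device's ID registers */
static AMBA_APB_DEVICE(demo_uart, "demo-uart", 0, 0x101f1000, { 12 }, NULL);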
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e81c35f936b5..b8df521fb68e 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -232,6 +232,9 @@ config MACH_ARMLEX4210
232config MACH_UNIVERSAL_C210 232config MACH_UNIVERSAL_C210
233 bool "Mobile UNIVERSAL_C210 Board" 233 bool "Mobile UNIVERSAL_C210 Board"
234 select CPU_EXYNOS4210 234 select CPU_EXYNOS4210
235 select S5P_HRT
236 select CLKSRC_MMIO
237 select HAVE_SCHED_CLOCK
235 select S5P_GPIO_INT 238 select S5P_GPIO_INT
236 select S5P_DEV_FIMC0 239 select S5P_DEV_FIMC0
237 select S5P_DEV_FIMC1 240 select S5P_DEV_FIMC1
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index df54c2a92225..6efd1e5919fd 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -497,25 +497,25 @@ static struct clk exynos4_init_clocks_off[] = {
497 .ctrlbit = (1 << 3), 497 .ctrlbit = (1 << 3),
498 }, { 498 }, {
499 .name = "hsmmc", 499 .name = "hsmmc",
500 .devname = "s3c-sdhci.0", 500 .devname = "exynos4-sdhci.0",
501 .parent = &exynos4_clk_aclk_133.clk, 501 .parent = &exynos4_clk_aclk_133.clk,
502 .enable = exynos4_clk_ip_fsys_ctrl, 502 .enable = exynos4_clk_ip_fsys_ctrl,
503 .ctrlbit = (1 << 5), 503 .ctrlbit = (1 << 5),
504 }, { 504 }, {
505 .name = "hsmmc", 505 .name = "hsmmc",
506 .devname = "s3c-sdhci.1", 506 .devname = "exynos4-sdhci.1",
507 .parent = &exynos4_clk_aclk_133.clk, 507 .parent = &exynos4_clk_aclk_133.clk,
508 .enable = exynos4_clk_ip_fsys_ctrl, 508 .enable = exynos4_clk_ip_fsys_ctrl,
509 .ctrlbit = (1 << 6), 509 .ctrlbit = (1 << 6),
510 }, { 510 }, {
511 .name = "hsmmc", 511 .name = "hsmmc",
512 .devname = "s3c-sdhci.2", 512 .devname = "exynos4-sdhci.2",
513 .parent = &exynos4_clk_aclk_133.clk, 513 .parent = &exynos4_clk_aclk_133.clk,
514 .enable = exynos4_clk_ip_fsys_ctrl, 514 .enable = exynos4_clk_ip_fsys_ctrl,
515 .ctrlbit = (1 << 7), 515 .ctrlbit = (1 << 7),
516 }, { 516 }, {
517 .name = "hsmmc", 517 .name = "hsmmc",
518 .devname = "s3c-sdhci.3", 518 .devname = "exynos4-sdhci.3",
519 .parent = &exynos4_clk_aclk_133.clk, 519 .parent = &exynos4_clk_aclk_133.clk,
520 .enable = exynos4_clk_ip_fsys_ctrl, 520 .enable = exynos4_clk_ip_fsys_ctrl,
521 .ctrlbit = (1 << 8), 521 .ctrlbit = (1 << 8),
@@ -1202,7 +1202,7 @@ static struct clksrc_clk exynos4_clk_sclk_uart3 = {
1202static struct clksrc_clk exynos4_clk_sclk_mmc0 = { 1202static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
1203 .clk = { 1203 .clk = {
1204 .name = "sclk_mmc", 1204 .name = "sclk_mmc",
1205 .devname = "s3c-sdhci.0", 1205 .devname = "exynos4-sdhci.0",
1206 .parent = &exynos4_clk_dout_mmc0.clk, 1206 .parent = &exynos4_clk_dout_mmc0.clk,
1207 .enable = exynos4_clksrc_mask_fsys_ctrl, 1207 .enable = exynos4_clksrc_mask_fsys_ctrl,
1208 .ctrlbit = (1 << 0), 1208 .ctrlbit = (1 << 0),
@@ -1213,7 +1213,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
1213static struct clksrc_clk exynos4_clk_sclk_mmc1 = { 1213static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
1214 .clk = { 1214 .clk = {
1215 .name = "sclk_mmc", 1215 .name = "sclk_mmc",
1216 .devname = "s3c-sdhci.1", 1216 .devname = "exynos4-sdhci.1",
1217 .parent = &exynos4_clk_dout_mmc1.clk, 1217 .parent = &exynos4_clk_dout_mmc1.clk,
1218 .enable = exynos4_clksrc_mask_fsys_ctrl, 1218 .enable = exynos4_clksrc_mask_fsys_ctrl,
1219 .ctrlbit = (1 << 4), 1219 .ctrlbit = (1 << 4),
@@ -1224,7 +1224,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
1224static struct clksrc_clk exynos4_clk_sclk_mmc2 = { 1224static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
1225 .clk = { 1225 .clk = {
1226 .name = "sclk_mmc", 1226 .name = "sclk_mmc",
1227 .devname = "s3c-sdhci.2", 1227 .devname = "exynos4-sdhci.2",
1228 .parent = &exynos4_clk_dout_mmc2.clk, 1228 .parent = &exynos4_clk_dout_mmc2.clk,
1229 .enable = exynos4_clksrc_mask_fsys_ctrl, 1229 .enable = exynos4_clksrc_mask_fsys_ctrl,
1230 .ctrlbit = (1 << 8), 1230 .ctrlbit = (1 << 8),
@@ -1235,7 +1235,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
1235static struct clksrc_clk exynos4_clk_sclk_mmc3 = { 1235static struct clksrc_clk exynos4_clk_sclk_mmc3 = {
1236 .clk = { 1236 .clk = {
1237 .name = "sclk_mmc", 1237 .name = "sclk_mmc",
1238 .devname = "s3c-sdhci.3", 1238 .devname = "exynos4-sdhci.3",
1239 .parent = &exynos4_clk_dout_mmc3.clk, 1239 .parent = &exynos4_clk_dout_mmc3.clk,
1240 .enable = exynos4_clksrc_mask_fsys_ctrl, 1240 .enable = exynos4_clksrc_mask_fsys_ctrl,
1241 .ctrlbit = (1 << 12), 1241 .ctrlbit = (1 << 12),
@@ -1340,10 +1340,10 @@ static struct clk_lookup exynos4_clk_lookup[] = {
1340 CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk), 1340 CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk),
1341 CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk), 1341 CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk),
1342 CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk), 1342 CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk),
1343 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk), 1343 CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
1344 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), 1344 CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
1345 CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk), 1345 CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
1346 CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), 1346 CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
1347 CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0), 1347 CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0),
1348 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0), 1348 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0),
1349 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1), 1349 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1),
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index d013982d0f8e..7ac6ff4c46bd 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -455,25 +455,25 @@ static struct clk exynos5_init_clocks_off[] = {
455 .ctrlbit = (1 << 20), 455 .ctrlbit = (1 << 20),
456 }, { 456 }, {
457 .name = "hsmmc", 457 .name = "hsmmc",
458 .devname = "s3c-sdhci.0", 458 .devname = "exynos4-sdhci.0",
459 .parent = &exynos5_clk_aclk_200.clk, 459 .parent = &exynos5_clk_aclk_200.clk,
460 .enable = exynos5_clk_ip_fsys_ctrl, 460 .enable = exynos5_clk_ip_fsys_ctrl,
461 .ctrlbit = (1 << 12), 461 .ctrlbit = (1 << 12),
462 }, { 462 }, {
463 .name = "hsmmc", 463 .name = "hsmmc",
464 .devname = "s3c-sdhci.1", 464 .devname = "exynos4-sdhci.1",
465 .parent = &exynos5_clk_aclk_200.clk, 465 .parent = &exynos5_clk_aclk_200.clk,
466 .enable = exynos5_clk_ip_fsys_ctrl, 466 .enable = exynos5_clk_ip_fsys_ctrl,
467 .ctrlbit = (1 << 13), 467 .ctrlbit = (1 << 13),
468 }, { 468 }, {
469 .name = "hsmmc", 469 .name = "hsmmc",
470 .devname = "s3c-sdhci.2", 470 .devname = "exynos4-sdhci.2",
471 .parent = &exynos5_clk_aclk_200.clk, 471 .parent = &exynos5_clk_aclk_200.clk,
472 .enable = exynos5_clk_ip_fsys_ctrl, 472 .enable = exynos5_clk_ip_fsys_ctrl,
473 .ctrlbit = (1 << 14), 473 .ctrlbit = (1 << 14),
474 }, { 474 }, {
475 .name = "hsmmc", 475 .name = "hsmmc",
476 .devname = "s3c-sdhci.3", 476 .devname = "exynos4-sdhci.3",
477 .parent = &exynos5_clk_aclk_200.clk, 477 .parent = &exynos5_clk_aclk_200.clk,
478 .enable = exynos5_clk_ip_fsys_ctrl, 478 .enable = exynos5_clk_ip_fsys_ctrl,
479 .ctrlbit = (1 << 15), 479 .ctrlbit = (1 << 15),
@@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = {
678 .name = "dma", 678 .name = "dma",
679 .devname = "dma-pl330.1", 679 .devname = "dma-pl330.1",
680 .enable = exynos5_clk_ip_fsys_ctrl, 680 .enable = exynos5_clk_ip_fsys_ctrl,
681 .ctrlbit = (1 << 1), 681 .ctrlbit = (1 << 2),
682}; 682};
683 683
684static struct clk exynos5_clk_mdma1 = { 684static struct clk exynos5_clk_mdma1 = {
@@ -813,7 +813,7 @@ static struct clksrc_clk exynos5_clk_sclk_uart3 = {
813static struct clksrc_clk exynos5_clk_sclk_mmc0 = { 813static struct clksrc_clk exynos5_clk_sclk_mmc0 = {
814 .clk = { 814 .clk = {
815 .name = "sclk_mmc", 815 .name = "sclk_mmc",
816 .devname = "s3c-sdhci.0", 816 .devname = "exynos4-sdhci.0",
817 .parent = &exynos5_clk_dout_mmc0.clk, 817 .parent = &exynos5_clk_dout_mmc0.clk,
818 .enable = exynos5_clksrc_mask_fsys_ctrl, 818 .enable = exynos5_clksrc_mask_fsys_ctrl,
819 .ctrlbit = (1 << 0), 819 .ctrlbit = (1 << 0),
@@ -824,7 +824,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc0 = {
824static struct clksrc_clk exynos5_clk_sclk_mmc1 = { 824static struct clksrc_clk exynos5_clk_sclk_mmc1 = {
825 .clk = { 825 .clk = {
826 .name = "sclk_mmc", 826 .name = "sclk_mmc",
827 .devname = "s3c-sdhci.1", 827 .devname = "exynos4-sdhci.1",
828 .parent = &exynos5_clk_dout_mmc1.clk, 828 .parent = &exynos5_clk_dout_mmc1.clk,
829 .enable = exynos5_clksrc_mask_fsys_ctrl, 829 .enable = exynos5_clksrc_mask_fsys_ctrl,
830 .ctrlbit = (1 << 4), 830 .ctrlbit = (1 << 4),
@@ -835,7 +835,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc1 = {
835static struct clksrc_clk exynos5_clk_sclk_mmc2 = { 835static struct clksrc_clk exynos5_clk_sclk_mmc2 = {
836 .clk = { 836 .clk = {
837 .name = "sclk_mmc", 837 .name = "sclk_mmc",
838 .devname = "s3c-sdhci.2", 838 .devname = "exynos4-sdhci.2",
839 .parent = &exynos5_clk_dout_mmc2.clk, 839 .parent = &exynos5_clk_dout_mmc2.clk,
840 .enable = exynos5_clksrc_mask_fsys_ctrl, 840 .enable = exynos5_clksrc_mask_fsys_ctrl,
841 .ctrlbit = (1 << 8), 841 .ctrlbit = (1 << 8),
@@ -846,7 +846,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc2 = {
846static struct clksrc_clk exynos5_clk_sclk_mmc3 = { 846static struct clksrc_clk exynos5_clk_sclk_mmc3 = {
847 .clk = { 847 .clk = {
848 .name = "sclk_mmc", 848 .name = "sclk_mmc",
849 .devname = "s3c-sdhci.3", 849 .devname = "exynos4-sdhci.3",
850 .parent = &exynos5_clk_dout_mmc3.clk, 850 .parent = &exynos5_clk_dout_mmc3.clk,
851 .enable = exynos5_clksrc_mask_fsys_ctrl, 851 .enable = exynos5_clksrc_mask_fsys_ctrl,
852 .ctrlbit = (1 << 12), 852 .ctrlbit = (1 << 12),
@@ -990,10 +990,10 @@ static struct clk_lookup exynos5_clk_lookup[] = {
990 CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk), 990 CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk),
991 CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk), 991 CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk),
992 CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk), 992 CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk),
993 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), 993 CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk),
994 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), 994 CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
995 CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), 995 CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
996 CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), 996 CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
997 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0), 997 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0),
998 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1), 998 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1),
999 CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1), 999 CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1),
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 8614aab47cc0..5ccd6e80a607 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -326,6 +326,11 @@ static void __init exynos4_map_io(void)
326 s3c_fimc_setname(2, "exynos4-fimc"); 326 s3c_fimc_setname(2, "exynos4-fimc");
327 s3c_fimc_setname(3, "exynos4-fimc"); 327 s3c_fimc_setname(3, "exynos4-fimc");
328 328
329 s3c_sdhci_setname(0, "exynos4-sdhci");
330 s3c_sdhci_setname(1, "exynos4-sdhci");
331 s3c_sdhci_setname(2, "exynos4-sdhci");
332 s3c_sdhci_setname(3, "exynos4-sdhci");
333
329 /* The I2C bus controllers are directly compatible with s3c2440 */ 334 /* The I2C bus controllers are directly compatible with s3c2440 */
330 s3c_i2c0_setname("s3c2440-i2c"); 335 s3c_i2c0_setname("s3c2440-i2c");
331 s3c_i2c1_setname("s3c2440-i2c"); 336 s3c_i2c1_setname("s3c2440-i2c");
@@ -344,6 +349,11 @@ static void __init exynos5_map_io(void)
344 s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC; 349 s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
345 s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC; 350 s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;
346 351
352 s3c_sdhci_setname(0, "exynos4-sdhci");
353 s3c_sdhci_setname(1, "exynos4-sdhci");
354 s3c_sdhci_setname(2, "exynos4-sdhci");
355 s3c_sdhci_setname(3, "exynos4-sdhci");
356
347 /* The I2C bus controllers are directly compatible with s3c2440 */ 357 /* The I2C bus controllers are directly compatible with s3c2440 */
348 s3c_i2c0_setname("s3c2440-i2c"); 358 s3c_i2c0_setname("s3c2440-i2c");
349 s3c_i2c1_setname("s3c2440-i2c"); 359 s3c_i2c1_setname("s3c2440-i2c");
@@ -537,7 +547,9 @@ void __init exynos5_init_irq(void)
537{ 547{
538 int irq; 548 int irq;
539 549
540 gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU); 550#ifdef CONFIG_OF
551 of_irq_init(exynos4_dt_irq_match);
552#endif
541 553
542 for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) { 554 for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
543 combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq), 555 combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
diff --git a/arch/arm/mach-exynos/dev-dwmci.c b/arch/arm/mach-exynos/dev-dwmci.c
index b025db4bf602..79035018fb74 100644
--- a/arch/arm/mach-exynos/dev-dwmci.c
+++ b/arch/arm/mach-exynos/dev-dwmci.c
@@ -16,6 +16,7 @@
16#include <linux/dma-mapping.h> 16#include <linux/dma-mapping.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/ioport.h>
19#include <linux/mmc/dw_mmc.h> 20#include <linux/mmc/dw_mmc.h>
20 21
21#include <plat/devs.h> 22#include <plat/devs.h>
@@ -33,16 +34,8 @@ static int exynos4_dwmci_init(u32 slot_id, irq_handler_t handler, void *data)
33} 34}
34 35
35static struct resource exynos4_dwmci_resource[] = { 36static struct resource exynos4_dwmci_resource[] = {
36 [0] = { 37 [0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K),
37 .start = EXYNOS4_PA_DWMCI, 38 [1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI),
38 .end = EXYNOS4_PA_DWMCI + SZ_4K - 1,
39 .flags = IORESOURCE_MEM,
40 },
41 [1] = {
42 .start = IRQ_DWMCI,
43 .end = IRQ_DWMCI,
44 .flags = IORESOURCE_IRQ,
45 }
46}; 39};
47 40
48static struct dw_mci_board exynos4_dwci_pdata = { 41static struct dw_mci_board exynos4_dwci_pdata = {
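[Note on the hunk above] The dev-dwmci.c resources are rewritten with the DEFINE_RES_MEM()/DEFINE_RES_IRQ() helpers from <linux/ioport.h>, which set .start, .end and .flags in one initialiser; note that DEFINE_RES_MEM() takes a base and a size, not an end address. A sketch of the helper form for a hypothetical device:

#include <linux/ioport.h>

#define DEMO_PA_BASE    0x12550000      /* hypothetical register base */
#define DEMO_IRQ        77              /* hypothetical interrupt */

static struct resource demo_resources[] = {
        /* equivalent to start = base, end = base + 0x1000 - 1,
         * flags = IORESOURCE_MEM */
        [0] = DEFINE_RES_MEM(DEMO_PA_BASE, 0x1000),
        /* equivalent to start = end = irq, flags = IORESOURCE_IRQ */
        [1] = DEFINE_RES_IRQ(DEMO_IRQ),
};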
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b4f1f902ce6d..ed90aef404c3 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -112,6 +112,7 @@ static struct s3c_sdhci_platdata nuri_hsmmc0_data __initdata = {
112 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | 112 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
113 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 113 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
114 MMC_CAP_ERASE), 114 MMC_CAP_ERASE),
115 .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
115 .cd_type = S3C_SDHCI_CD_PERMANENT, 116 .cd_type = S3C_SDHCI_CD_PERMANENT,
116 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 117 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
117}; 118};
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 7ebf79c2ab34..a34036eb8ba2 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -40,6 +40,7 @@
40#include <plat/pd.h> 40#include <plat/pd.h>
41#include <plat/regs-fb-v4.h> 41#include <plat/regs-fb-v4.h>
42#include <plat/fimc-core.h> 42#include <plat/fimc-core.h>
43#include <plat/s5p-time.h>
43#include <plat/camport.h> 44#include <plat/camport.h>
44#include <plat/mipi_csis.h> 45#include <plat/mipi_csis.h>
45 46
@@ -747,6 +748,7 @@ static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = {
747 .max_width = 8, 748 .max_width = 8,
748 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | 749 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
749 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), 750 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
751 .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
750 .cd_type = S3C_SDHCI_CD_PERMANENT, 752 .cd_type = S3C_SDHCI_CD_PERMANENT,
751 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 753 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
752}; 754};
@@ -1062,6 +1064,7 @@ static void __init universal_map_io(void)
1062 exynos_init_io(NULL, 0); 1064 exynos_init_io(NULL, 0);
1063 s3c24xx_init_clocks(24000000); 1065 s3c24xx_init_clocks(24000000);
1064 s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); 1066 s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
1067 s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
1065} 1068}
1066 1069
1067static void s5p_tv_setup(void) 1070static void s5p_tv_setup(void)
@@ -1112,7 +1115,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
1112 .map_io = universal_map_io, 1115 .map_io = universal_map_io,
1113 .handle_irq = gic_handle_irq, 1116 .handle_irq = gic_handle_irq,
1114 .init_machine = universal_machine_init, 1117 .init_machine = universal_machine_init,
1115 .timer = &exynos4_timer, 1118 .timer = &s5p_timer,
1116 .reserve = &universal_reserve, 1119 .reserve = &universal_reserve,
1117 .restart = exynos4_restart, 1120 .restart = exynos4_restart,
1118MACHINE_END 1121MACHINE_END
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index 861ceb8232d6..ed38d03c61f2 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -35,7 +35,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = {
35static int __init imx27_avic_add_irq_domain(struct device_node *np, 35static int __init imx27_avic_add_irq_domain(struct device_node *np,
36 struct device_node *interrupt_parent) 36 struct device_node *interrupt_parent)
37{ 37{
38 irq_domain_add_simple(np, 0); 38 irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL);
39 return 0; 39 return 0;
40} 40}
41 41
@@ -44,7 +44,9 @@ static int __init imx27_gpio_add_irq_domain(struct device_node *np,
44{ 44{
45 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; 45 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
46 46
47 irq_domain_add_simple(np, gpio_irq_base); 47 gpio_irq_base -= 32;
48 irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
49 NULL);
48 50
49 return 0; 51 return 0;
50} 52}
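[Note on the hunk above] The imx27 DT code moves from the removed irq_domain_add_simple() helper to irq_domain_add_legacy(), which registers a fixed-size domain with a known mapping between hardware irq numbers and pre-allocated Linux irqs. Its arguments are the DT node, the number of interrupts, the first Linux irq, the first hardware irq, the domain ops, and an opaque host_data pointer. A minimal sketch for a hypothetical 32-line controller whose Linux irqs start at DEMO_IRQ_BASE:

#include <linux/irqdomain.h>
#include <linux/of.h>

#define DEMO_IRQ_BASE   96      /* hypothetical pre-allocated irq base */

static int __init demo_intc_add_irq_domain(struct device_node *np,
                                           struct device_node *parent)
{
        /* hwirqs 0..31 map 1:1 onto Linux irqs DEMO_IRQ_BASE..DEMO_IRQ_BASE+31 */
        irq_domain_add_legacy(np, 32, DEMO_IRQ_BASE, 0,
                              &irq_domain_simple_ops, NULL);
        return 0;
}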
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 05250aed61fb..e10f3914fcfe 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -35,7 +35,7 @@ static void imx5_idle(void)
35 } 35 }
36 clk_enable(gpc_dvfs_clk); 36 clk_enable(gpc_dvfs_clk);
37 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 37 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
38 if (tzic_enable_wake() != 0) 38 if (!tzic_enable_wake())
39 cpu_do_idle(); 39 cpu_do_idle();
40 clk_disable(gpc_dvfs_clk); 40 clk_disable(gpc_dvfs_clk);
41} 41}
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 1c672d9e6656..f7fe1b9f3170 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/kexec.h>
17#include <asm/mach/arch.h> 18#include <asm/mach/arch.h>
18#include <asm/mach/map.h> 19#include <asm/mach/map.h>
19#include <mach/bridge-regs.h> 20#include <mach/bridge-regs.h>
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 962e71169750..fb3496a52ef4 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -17,6 +17,7 @@
17#include <linux/irqdomain.h> 17#include <linux/irqdomain.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/of_irq.h>
20#include <linux/of_platform.h> 21#include <linux/of_platform.h>
21#include <linux/memblock.h> 22#include <linux/memblock.h>
22 23
@@ -49,10 +50,22 @@ static void __init msm8x60_map_io(void)
49 msm_map_msm8x60_io(); 50 msm_map_msm8x60_io();
50} 51}
51 52
53#ifdef CONFIG_OF
54static struct of_device_id msm_dt_gic_match[] __initdata = {
55 { .compatible = "qcom,msm-8660-qgic", .data = gic_of_init },
56 {}
57};
58#endif
59
52static void __init msm8x60_init_irq(void) 60static void __init msm8x60_init_irq(void)
53{ 61{
54 gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, 62 if (!of_have_populated_dt())
55 (void *)MSM_QGIC_CPU_BASE); 63 gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
64 (void *)MSM_QGIC_CPU_BASE);
65#ifdef CONFIG_OF
66 else
67 of_irq_init(msm_dt_gic_match);
68#endif
56 69
57 /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ 70 /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
58 writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); 71 writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
@@ -73,16 +86,8 @@ static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
73 {} 86 {}
74}; 87};
75 88
76static struct of_device_id msm_dt_gic_match[] __initdata = {
77 { .compatible = "qcom,msm-8660-qgic", },
78 {}
79};
80
81static void __init msm8x60_dt_init(void) 89static void __init msm8x60_dt_init(void)
82{ 90{
83 irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS,
84 GIC_SPI_START);
85
86 if (of_machine_is_compatible("qcom,msm8660-surf")) { 91 if (of_machine_is_compatible("qcom,msm8660-surf")) {
87 printk(KERN_INFO "Init surf UART registers\n"); 92 printk(KERN_INFO "Init surf UART registers\n");
88 msm8x60_init_uart12dm(); 93 msm8x60_init_uart12dm();
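[Note on the hunk above] With this msm8x60 change the GIC is initialised through of_irq_init() on device-tree boots: the match table carries the controller's init function in its .data member, and of_irq_init() walks the tree, orders interrupt controllers parent-first, and invokes that function for each match. A trimmed sketch of the pattern; apart from gic_of_init() and gic_init() (from the GIC header, <asm/hardware/gic.h> in this era) the names are illustrative:

#include <linux/of.h>
#include <linux/of_irq.h>

static void __iomem *demo_gic_dist_base;
static void __iomem *demo_gic_cpu_base;

#ifdef CONFIG_OF
static struct of_device_id demo_irq_match[] __initdata = {
        { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
        { /* sentinel */ }
};
#endif

static void __init demo_init_irq(void)
{
        if (!of_have_populated_dt())
                gic_init(0, 29, demo_gic_dist_base, demo_gic_cpu_base);
#ifdef CONFIG_OF
        else
                of_irq_init(demo_irq_match);    /* DT boot path */
#endif
}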
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index fcce7ff37630..cfd98b186fcc 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
48 struct irq_chip *irq_chip = NULL; 48 struct irq_chip *irq_chip = NULL;
49 int gpio, irq_num, fiq_count; 49 int gpio, irq_num, fiq_count;
50 50
51 irq_desc = irq_to_desc(IH_GPIO_BASE); 51 irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
52 if (irq_desc) 52 if (irq_desc)
53 irq_chip = irq_desc->irq_data.chip; 53 irq_chip = irq_desc->irq_data.chip;
54 54
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index 087dba0df47e..e9cc52d4cb28 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -27,6 +27,7 @@
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29 29
30#include <mach/hardware.h>
30 31
31#include <plat/mux.h> 32#include <plat/mux.h>
32 33
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 6e90665a7c47..fb202af01d0d 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -47,9 +47,9 @@ static int omap1_dm_timer_set_src(struct platform_device *pdev,
47 int n = (pdev->id - 1) << 1; 47 int n = (pdev->id - 1) << 1;
48 u32 l; 48 u32 l;
49 49
50 l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); 50 l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
51 l |= source << n; 51 l |= source << n;
52 __raw_writel(l, MOD_CONF_CTRL_1); 52 omap_writel(l, MOD_CONF_CTRL_1);
53 53
54 return 0; 54 return 0;
55} 55}
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index a39fc4bbd2b8..130ab00c09a2 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -20,6 +20,7 @@
20#include <linux/usb/otg.h> 20#include <linux/usb/otg.h>
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/i2c/twl.h> 22#include <linux/i2c/twl.h>
23#include <linux/mfd/twl6040.h>
23#include <linux/gpio_keys.h> 24#include <linux/gpio_keys.h>
24#include <linux/regulator/machine.h> 25#include <linux/regulator/machine.h>
25#include <linux/regulator/fixed.h> 26#include <linux/regulator/fixed.h>
@@ -560,7 +561,7 @@ static struct regulator_init_data sdp4430_vusim = {
560 }, 561 },
561}; 562};
562 563
563static struct twl4030_codec_data twl6040_codec = { 564static struct twl6040_codec_data twl6040_codec = {
564 /* single-step ramp for headset and handsfree */ 565 /* single-step ramp for headset and handsfree */
565 .hs_left_step = 0x0f, 566 .hs_left_step = 0x0f,
566 .hs_right_step = 0x0f, 567 .hs_right_step = 0x0f,
@@ -568,7 +569,7 @@ static struct twl4030_codec_data twl6040_codec = {
568 .hf_right_step = 0x1d, 569 .hf_right_step = 0x1d,
569}; 570};
570 571
571static struct twl4030_vibra_data twl6040_vibra = { 572static struct twl6040_vibra_data twl6040_vibra = {
572 .vibldrv_res = 8, 573 .vibldrv_res = 8,
573 .vibrdrv_res = 3, 574 .vibrdrv_res = 3,
574 .viblmotor_res = 10, 575 .viblmotor_res = 10,
@@ -577,16 +578,14 @@ static struct twl4030_vibra_data twl6040_vibra = {
577 .vddvibr_uV = 0, /* fixed volt supply - VBAT */ 578 .vddvibr_uV = 0, /* fixed volt supply - VBAT */
578}; 579};
579 580
580static struct twl4030_audio_data twl6040_audio = { 581static struct twl6040_platform_data twl6040_data = {
581 .codec = &twl6040_codec, 582 .codec = &twl6040_codec,
582 .vibra = &twl6040_vibra, 583 .vibra = &twl6040_vibra,
583 .audpwron_gpio = 127, 584 .audpwron_gpio = 127,
584 .naudint_irq = OMAP44XX_IRQ_SYS_2N,
585 .irq_base = TWL6040_CODEC_IRQ_BASE, 585 .irq_base = TWL6040_CODEC_IRQ_BASE,
586}; 586};
587 587
588static struct twl4030_platform_data sdp4430_twldata = { 588static struct twl4030_platform_data sdp4430_twldata = {
589 .audio = &twl6040_audio,
590 /* Regulators */ 589 /* Regulators */
591 .vusim = &sdp4430_vusim, 590 .vusim = &sdp4430_vusim,
592 .vaux1 = &sdp4430_vaux1, 591 .vaux1 = &sdp4430_vaux1,
@@ -617,7 +616,8 @@ static int __init omap4_i2c_init(void)
617 TWL_COMMON_REGULATOR_VCXIO | 616 TWL_COMMON_REGULATOR_VCXIO |
618 TWL_COMMON_REGULATOR_VUSB | 617 TWL_COMMON_REGULATOR_VUSB |
619 TWL_COMMON_REGULATOR_CLK32KG); 618 TWL_COMMON_REGULATOR_CLK32KG);
620 omap4_pmic_init("twl6030", &sdp4430_twldata); 619 omap4_pmic_init("twl6030", &sdp4430_twldata,
620 &twl6040_data, OMAP44XX_IRQ_SYS_2N);
621 omap_register_i2c_bus(2, 400, NULL, 0); 621 omap_register_i2c_bus(2, 400, NULL, 0);
622 omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, 622 omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
623 ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); 623 ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
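[Note on the hunk above] After the twl6040 MFD conversion, the audio block is no longer described by twl4030_audio_data inside the TWL6030 platform data; the board passes a separate struct twl6040_platform_data and its interrupt line to omap4_pmic_init(), which (judging from the calls in this diff) now takes four arguments. A hedged sketch of a board-level call; omap4_pmic_init() and OMAP44XX_IRQ_SYS_2N come from the OMAP board support headers as used above, the demo_* names are illustrative:

#include <linux/mfd/twl6040.h>

static struct twl6040_codec_data demo_codec = {
        .hs_left_step   = 0x0f,
        .hs_right_step  = 0x0f,
};

static struct twl6040_platform_data demo_twl6040_data = {
        .codec          = &demo_codec,
        .audpwron_gpio  = 127,
        .irq_base       = TWL6040_CODEC_IRQ_BASE,
};

static struct twl4030_platform_data demo_twldata;      /* regulators etc. */

static int __init demo_i2c_init(void)
{
        /* new signature: PMIC name, TWL core pdata, twl6040 pdata, twl6040 IRQ */
        omap4_pmic_init("twl6030", &demo_twldata,
                        &demo_twl6040_data, OMAP44XX_IRQ_SYS_2N);
        return 0;
}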
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 74e1687b5170..098d183a0086 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -137,7 +137,7 @@ static struct twl4030_platform_data sdp4430_twldata = {
137 137
138static void __init omap4_i2c_init(void) 138static void __init omap4_i2c_init(void)
139{ 139{
140 omap4_pmic_init("twl6030", &sdp4430_twldata); 140 omap4_pmic_init("twl6030", &sdp4430_twldata, NULL, 0);
141} 141}
142 142
143static void __init omap4_init(void) 143static void __init omap4_init(void)
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 930c0d380435..740cee9369ba 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = {
641 641
642static void __init igep_init(void) 642static void __init igep_init(void)
643{ 643{
644 regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); 644 regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
645 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 645 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
646 646
647 /* Get IGEP2 hardware revision */ 647 /* Get IGEP2 hardware revision */
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index d8c0e89f0126..1b782ba53433 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -25,6 +25,7 @@
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/usb/otg.h> 26#include <linux/usb/otg.h>
27#include <linux/i2c/twl.h> 27#include <linux/i2c/twl.h>
28#include <linux/mfd/twl6040.h>
28#include <linux/regulator/machine.h> 29#include <linux/regulator/machine.h>
29#include <linux/regulator/fixed.h> 30#include <linux/regulator/fixed.h>
30#include <linux/wl12xx.h> 31#include <linux/wl12xx.h>
@@ -284,7 +285,7 @@ static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
284 return 0; 285 return 0;
285} 286}
286 287
287static struct twl4030_codec_data twl6040_codec = { 288static struct twl6040_codec_data twl6040_codec = {
288 /* single-step ramp for headset and handsfree */ 289 /* single-step ramp for headset and handsfree */
289 .hs_left_step = 0x0f, 290 .hs_left_step = 0x0f,
290 .hs_right_step = 0x0f, 291 .hs_right_step = 0x0f,
@@ -292,17 +293,14 @@ static struct twl4030_codec_data twl6040_codec = {
292 .hf_right_step = 0x1d, 293 .hf_right_step = 0x1d,
293}; 294};
294 295
295static struct twl4030_audio_data twl6040_audio = { 296static struct twl6040_platform_data twl6040_data = {
296 .codec = &twl6040_codec, 297 .codec = &twl6040_codec,
297 .audpwron_gpio = 127, 298 .audpwron_gpio = 127,
298 .naudint_irq = OMAP44XX_IRQ_SYS_2N,
299 .irq_base = TWL6040_CODEC_IRQ_BASE, 299 .irq_base = TWL6040_CODEC_IRQ_BASE,
300}; 300};
301 301
302/* Panda board uses the common PMIC configuration */ 302/* Panda board uses the common PMIC configuration */
303static struct twl4030_platform_data omap4_panda_twldata = { 303static struct twl4030_platform_data omap4_panda_twldata;
304 .audio = &twl6040_audio,
305};
306 304
307/* 305/*
308 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM 306 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
@@ -326,7 +324,8 @@ static int __init omap4_panda_i2c_init(void)
326 TWL_COMMON_REGULATOR_VCXIO | 324 TWL_COMMON_REGULATOR_VCXIO |
327 TWL_COMMON_REGULATOR_VUSB | 325 TWL_COMMON_REGULATOR_VUSB |
328 TWL_COMMON_REGULATOR_CLK32KG); 326 TWL_COMMON_REGULATOR_CLK32KG);
329 omap4_pmic_init("twl6030", &omap4_panda_twldata); 327 omap4_pmic_init("twl6030", &omap4_panda_twldata,
328 &twl6040_data, OMAP44XX_IRQ_SYS_2N);
330 omap_register_i2c_bus(2, 400, NULL, 0); 329 omap_register_i2c_bus(2, 400, NULL, 0);
331 /* 330 /*
332 * Bus 3 is attached to the DVI port where devices like the pico DLP 331 * Bus 3 is attached to the DVI port where devices like the pico DLP
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
index 1e2d3322f33e..c88420de1151 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
@@ -941,10 +941,10 @@
941#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) 941#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
942#define OMAP4_DSI1_LANEENABLE_SHIFT 24 942#define OMAP4_DSI1_LANEENABLE_SHIFT 24
943#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) 943#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
944#define OMAP4_DSI2_PIPD_SHIFT 19 944#define OMAP4_DSI1_PIPD_SHIFT 19
945#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) 945#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
946#define OMAP4_DSI1_PIPD_SHIFT 14 946#define OMAP4_DSI2_PIPD_SHIFT 14
947#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) 947#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
948 948
949/* CONTROL_MCBSPLP */ 949/* CONTROL_MCBSPLP */
950#define OMAP4_ALBCTRLRX_FSX_SHIFT 31 950#define OMAP4_ALBCTRLRX_FSX_SHIFT 31
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2c27fdb61e66..7144ae651d3d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1422,6 +1422,9 @@ static int _ocp_softreset(struct omap_hwmod *oh)
1422 goto dis_opt_clks; 1422 goto dis_opt_clks;
1423 _write_sysconfig(v, oh); 1423 _write_sysconfig(v, oh);
1424 1424
1425 if (oh->class->sysc->srst_udelay)
1426 udelay(oh->class->sysc->srst_udelay);
1427
1425 if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) 1428 if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
1426 omap_test_timeout((omap_hwmod_read(oh, 1429 omap_test_timeout((omap_hwmod_read(oh,
1427 oh->class->sysc->syss_offs) 1430 oh->class->sysc->syss_offs)
@@ -1903,10 +1906,20 @@ void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs)
1903 */ 1906 */
1904int omap_hwmod_softreset(struct omap_hwmod *oh) 1907int omap_hwmod_softreset(struct omap_hwmod *oh)
1905{ 1908{
1906 if (!oh) 1909 u32 v;
1910 int ret;
1911
1912 if (!oh || !(oh->_sysc_cache))
1907 return -EINVAL; 1913 return -EINVAL;
1908 1914
1909 return _ocp_softreset(oh); 1915 v = oh->_sysc_cache;
1916 ret = _set_softreset(oh, &v);
1917 if (ret)
1918 goto error;
1919 _write_sysconfig(v, oh);
1920
1921error:
1922 return ret;
1910} 1923}
1911 1924
1912/** 1925/**
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index a5409ce3f323..a6bde34e443a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -1000,7 +1000,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
1000 .flags = OMAP_FIREWALL_L4, 1000 .flags = OMAP_FIREWALL_L4,
1001 } 1001 }
1002 }, 1002 },
1003 .flags = OCPIF_SWSUP_IDLE,
1004 .user = OCP_USER_MPU | OCP_USER_SDMA, 1003 .user = OCP_USER_MPU | OCP_USER_SDMA,
1005}; 1004};
1006 1005
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index c4f56cb60d7d..04a3885f4475 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -1049,7 +1049,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
1049 .slave = &omap2430_dss_venc_hwmod, 1049 .slave = &omap2430_dss_venc_hwmod,
1050 .clk = "dss_ick", 1050 .clk = "dss_ick",
1051 .addr = omap2_dss_venc_addrs, 1051 .addr = omap2_dss_venc_addrs,
1052 .flags = OCPIF_SWSUP_IDLE,
1053 .user = OCP_USER_MPU | OCP_USER_SDMA, 1052 .user = OCP_USER_MPU | OCP_USER_SDMA,
1054}; 1053};
1055 1054
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 34b9766d1d23..db86ce90c69f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1676,7 +1676,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
1676 .flags = OMAP_FIREWALL_L4, 1676 .flags = OMAP_FIREWALL_L4,
1677 } 1677 }
1678 }, 1678 },
1679 .flags = OCPIF_SWSUP_IDLE,
1680 .user = OCP_USER_MPU | OCP_USER_SDMA, 1679 .user = OCP_USER_MPU | OCP_USER_SDMA,
1681}; 1680};
1682 1681
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index cc9bd106a854..6abc75753e42 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2594,6 +2594,15 @@ static struct omap_hwmod omap44xx_ipu_hwmod = {
2594static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = { 2594static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
2595 .rev_offs = 0x0000, 2595 .rev_offs = 0x0000,
2596 .sysc_offs = 0x0010, 2596 .sysc_offs = 0x0010,
2597 /*
2598 * ISS needs 100 OCP clk cycles delay after a softreset before
2599 * accessing sysconfig again.
2600 * The lowest frequency at the moment for L3 bus is 100 MHz, so
2601 * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
2602 *
2603 * TODO: Indicate errata when available.
2604 */
2605 .srst_udelay = 2,
2597 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | 2606 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
2598 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), 2607 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
2599 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 2608 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
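
The 2 us value follows directly from the comment above: 100 OCP clock cycles at the minimum 100 MHz L3 clock take 100 / 100,000,000 s = 1 us, and doubling that for margin gives .srst_udelay = 2, which the _ocp_softreset() path (see the omap_hwmod.c hunk earlier in this series) applies via udelay() before SYSCONFIG/SYSSTATUS is touched again.
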
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 0cdd359a128e..9fc2f44188cb 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -108,8 +108,14 @@ static void omap_uart_set_noidle(struct platform_device *pdev)
108static void omap_uart_set_smartidle(struct platform_device *pdev) 108static void omap_uart_set_smartidle(struct platform_device *pdev)
109{ 109{
110 struct omap_device *od = to_omap_device(pdev); 110 struct omap_device *od = to_omap_device(pdev);
111 u8 idlemode;
111 112
112 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART); 113 if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
114 idlemode = HWMOD_IDLEMODE_SMART_WKUP;
115 else
116 idlemode = HWMOD_IDLEMODE_SMART;
117
118 omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
113} 119}
114 120
115#else 121#else
@@ -120,124 +126,8 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
120#endif /* CONFIG_PM */ 126#endif /* CONFIG_PM */
121 127
122#ifdef CONFIG_OMAP_MUX 128#ifdef CONFIG_OMAP_MUX
123static struct omap_device_pad default_uart1_pads[] __initdata = {
124 {
125 .name = "uart1_cts.uart1_cts",
126 .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
127 },
128 {
129 .name = "uart1_rts.uart1_rts",
130 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
131 },
132 {
133 .name = "uart1_tx.uart1_tx",
134 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
135 },
136 {
137 .name = "uart1_rx.uart1_rx",
138 .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
139 .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
140 .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
141 },
142};
143
144static struct omap_device_pad default_uart2_pads[] __initdata = {
145 {
146 .name = "uart2_cts.uart2_cts",
147 .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
148 },
149 {
150 .name = "uart2_rts.uart2_rts",
151 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
152 },
153 {
154 .name = "uart2_tx.uart2_tx",
155 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
156 },
157 {
158 .name = "uart2_rx.uart2_rx",
159 .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
160 .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
161 .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
162 },
163};
164
165static struct omap_device_pad default_uart3_pads[] __initdata = {
166 {
167 .name = "uart3_cts_rctx.uart3_cts_rctx",
168 .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
169 },
170 {
171 .name = "uart3_rts_sd.uart3_rts_sd",
172 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
173 },
174 {
175 .name = "uart3_tx_irtx.uart3_tx_irtx",
176 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
177 },
178 {
179 .name = "uart3_rx_irrx.uart3_rx_irrx",
180 .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
181 .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
182 .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
183 },
184};
185
186static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {
187 {
188 .name = "gpmc_wait2.uart4_tx",
189 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
190 },
191 {
192 .name = "gpmc_wait3.uart4_rx",
193 .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
194 .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
195 .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
196 },
197};
198
199static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {
200 {
201 .name = "uart4_tx.uart4_tx",
202 .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
203 },
204 {
205 .name = "uart4_rx.uart4_rx",
206 .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
207 .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
208 .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
209 },
210};
211
212static void omap_serial_fill_default_pads(struct omap_board_data *bdata) 129static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
213{ 130{
214 switch (bdata->id) {
215 case 0:
216 bdata->pads = default_uart1_pads;
217 bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads);
218 break;
219 case 1:
220 bdata->pads = default_uart2_pads;
221 bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads);
222 break;
223 case 2:
224 bdata->pads = default_uart3_pads;
225 bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads);
226 break;
227 case 3:
228 if (cpu_is_omap44xx()) {
229 bdata->pads = default_omap4_uart4_pads;
230 bdata->pads_cnt =
231 ARRAY_SIZE(default_omap4_uart4_pads);
232 } else if (cpu_is_omap3630()) {
233 bdata->pads = default_omap36xx_uart4_pads;
234 bdata->pads_cnt =
235 ARRAY_SIZE(default_omap36xx_uart4_pads);
236 }
237 break;
238 default:
239 break;
240 }
241} 131}
242#else 132#else
243static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {} 133static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
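
With the default pad tables gone, a board that still wants UART wakeup/remux behaviour has to supply its own pads. A hypothetical sketch modelled on the removed uart3 entries (the names and the commented omap_serial_init_port() call are illustrative, not part of this series):

	/* Hypothetical board-side pads, modelled on the removed defaults. */
	static struct omap_device_pad board_uart3_pads[] __initdata = {
		{
			.name	= "uart3_rx_irrx.uart3_rx_irrx",
			.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
			.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
			.idle	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
		},
	};

	static struct omap_board_data board_uart3_bdata __initdata = {
		.id	  = 2,
		.pads	  = board_uart3_pads,
		.pads_cnt = ARRAY_SIZE(board_uart3_pads),
	};

	/* e.g. omap_serial_init_port(&board_uart3_bdata, NULL); */
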
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4b57757bf9d1..7a7b89304c48 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -37,6 +37,16 @@ static struct i2c_board_info __initdata pmic_i2c_board_info = {
37 .flags = I2C_CLIENT_WAKE, 37 .flags = I2C_CLIENT_WAKE,
38}; 38};
39 39
40static struct i2c_board_info __initdata omap4_i2c1_board_info[] = {
41 {
42 .addr = 0x48,
43 .flags = I2C_CLIENT_WAKE,
44 },
45 {
46 I2C_BOARD_INFO("twl6040", 0x4b),
47 },
48};
49
40void __init omap_pmic_init(int bus, u32 clkrate, 50void __init omap_pmic_init(int bus, u32 clkrate,
41 const char *pmic_type, int pmic_irq, 51 const char *pmic_type, int pmic_irq,
42 struct twl4030_platform_data *pmic_data) 52 struct twl4030_platform_data *pmic_data)
@@ -49,14 +59,31 @@ void __init omap_pmic_init(int bus, u32 clkrate,
49 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); 59 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
50} 60}
51 61
62void __init omap4_pmic_init(const char *pmic_type,
63 struct twl4030_platform_data *pmic_data,
64 struct twl6040_platform_data *twl6040_data, int twl6040_irq)
65{
66	/* PMIC part */
67 strncpy(omap4_i2c1_board_info[0].type, pmic_type,
68 sizeof(omap4_i2c1_board_info[0].type));
69 omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
70 omap4_i2c1_board_info[0].platform_data = pmic_data;
71
72 /* TWL6040 audio IC part */
73 omap4_i2c1_board_info[1].irq = twl6040_irq;
74 omap4_i2c1_board_info[1].platform_data = twl6040_data;
75
76 omap_register_i2c_bus(1, 400, omap4_i2c1_board_info, 2);
77
78}
79
52void __init omap_pmic_late_init(void) 80void __init omap_pmic_late_init(void)
53{ 81{
54	/* Init the OMAP TWL parameters (if PMIC has been registered) */	82	/* Init the OMAP TWL parameters (if PMIC has been registered) */
55 if (!pmic_i2c_board_info.irq) 83 if (pmic_i2c_board_info.irq)
56 return; 84 omap3_twl_init();
57 85 if (omap4_i2c1_board_info[0].irq)
58 omap3_twl_init(); 86 omap4_twl_init();
59 omap4_twl_init();
60} 87}
61 88
62#if defined(CONFIG_ARCH_OMAP3) 89#if defined(CONFIG_ARCH_OMAP3)
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 275dde8cb27a..09627483a57f 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -29,6 +29,7 @@
29 29
30 30
31struct twl4030_platform_data; 31struct twl4030_platform_data;
32struct twl6040_platform_data;
32 33
33void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, 34void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
34 struct twl4030_platform_data *pmic_data); 35 struct twl4030_platform_data *pmic_data);
@@ -46,12 +47,9 @@ static inline void omap3_pmic_init(const char *pmic_type,
46 omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data); 47 omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
47} 48}
48 49
49static inline void omap4_pmic_init(const char *pmic_type, 50void omap4_pmic_init(const char *pmic_type,
50 struct twl4030_platform_data *pmic_data) 51 struct twl4030_platform_data *pmic_data,
51{ 52 struct twl6040_platform_data *audio_data, int twl6040_irq);
52 /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
53 omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
54}
55 53
56void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, 54void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
57 u32 pdata_flags, u32 regulators_flags); 55 u32 pdata_flags, u32 regulators_flags);
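
A hypothetical board-file call showing how the audio IC data now rides along with the PMIC data (the data structures and the OMAP44XX_IRQ_SYS_2N choice are illustrative, not taken from this series):

	static struct twl4030_platform_data board_twldata;		/* PMIC config */
	static struct twl6040_platform_data board_twl6040_data;	/* audio IC config */

	static void __init board_pmic_init(void)
	{
		/* registers 0x48 (PMIC) and 0x4b (twl6040) on I2C1 at 400 kHz */
		omap4_pmic_init("twl6030", &board_twldata,
				&board_twl6040_data, OMAP44XX_IRQ_SYS_2N);
	}
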
diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h
index eac68978a2c2..db70e79a1198 100644
--- a/arch/arm/mach-orion5x/mpp.h
+++ b/arch/arm/mach-orion5x/mpp.h
@@ -65,8 +65,8 @@
65#define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) 65#define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1)
66 66
67#define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) 67#define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1)
68#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) 68#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1)
69#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) 69#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1)
70 70
71#define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) 71#define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1)
72#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) 72#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1)
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
index c54cef25895c..cbf51ae81855 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
@@ -17,6 +17,7 @@
17 * 17 *
18 * bit 23 - Input/Output (PXA2xx specific) 18 * bit 23 - Input/Output (PXA2xx specific)
19 * bit 24 - Wakeup Enable(PXA2xx specific) 19 * bit 24 - Wakeup Enable(PXA2xx specific)
20 * bit 25 - Keep Output (PXA2xx specific)
20 */ 21 */
21 22
22#define MFP_DIR_IN (0x0 << 23) 23#define MFP_DIR_IN (0x0 << 23)
@@ -25,6 +26,12 @@
25#define MFP_DIR(x) (((x) >> 23) & 0x1) 26#define MFP_DIR(x) (((x) >> 23) & 0x1)
26 27
27#define MFP_LPM_CAN_WAKEUP (0x1 << 24) 28#define MFP_LPM_CAN_WAKEUP (0x1 << 24)
29
30/*
31 * MFP_LPM_KEEP_OUTPUT must be specified for pins that need to
32 * retain their last output level (low or high).
33 * Note: MFP_LPM_KEEP_OUTPUT has no effect on pins configured for input.
34 */
28#define MFP_LPM_KEEP_OUTPUT (0x1 << 25) 35#define MFP_LPM_KEEP_OUTPUT (0x1 << 25)
29 36
30#define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE) 37#define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE)
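
A hypothetical PXA27x board entry using the documented flag so a GPIO keeps driving its last level across low-power mode (the GPIO20_GPIO macro and ARRAY_AND_SIZE helper are assumed here for illustration):

	static unsigned long board_lpm_pin_config[] __initdata = {
		/* keep e.g. a reset line at whatever level it was last driven to */
		GPIO20_GPIO | MFP_LPM_KEEP_OUTPUT,
	};

	static void __init board_init_pins(void)
	{
		pxa2xx_mfp_config(ARRAY_AND_SIZE(board_lpm_pin_config));
	}
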
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index b0a842887780..ef0426a159d4 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -33,6 +33,8 @@
33#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) 33#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
34#define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5)) 34#define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5))
35#define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c) 35#define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c)
36#define GPSR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18)
37#define GPCR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24)
36 38
37#define PWER_WE35 (1 << 24) 39#define PWER_WE35 (1 << 24)
38 40
@@ -348,6 +350,7 @@ static inline void pxa27x_mfp_init(void) {}
348#ifdef CONFIG_PM 350#ifdef CONFIG_PM
349static unsigned long saved_gafr[2][4]; 351static unsigned long saved_gafr[2][4];
350static unsigned long saved_gpdr[4]; 352static unsigned long saved_gpdr[4];
353static unsigned long saved_gplr[4];
351static unsigned long saved_pgsr[4]; 354static unsigned long saved_pgsr[4];
352 355
353static int pxa2xx_mfp_suspend(void) 356static int pxa2xx_mfp_suspend(void)
@@ -366,14 +369,26 @@ static int pxa2xx_mfp_suspend(void)
366 } 369 }
367 370
368 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { 371 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
369
370 saved_gafr[0][i] = GAFR_L(i); 372 saved_gafr[0][i] = GAFR_L(i);
371 saved_gafr[1][i] = GAFR_U(i); 373 saved_gafr[1][i] = GAFR_U(i);
372 saved_gpdr[i] = GPDR(i * 32); 374 saved_gpdr[i] = GPDR(i * 32);
375 saved_gplr[i] = GPLR(i * 32);
373 saved_pgsr[i] = PGSR(i); 376 saved_pgsr[i] = PGSR(i);
374 377
375 GPDR(i * 32) = gpdr_lpm[i]; 378 GPSR(i * 32) = PGSR(i);
379 GPCR(i * 32) = ~PGSR(i);
380 }
381
382 /* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */
383 for (i = 0; i < pxa_last_gpio; i++) {
384 if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) ||
385 ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
386 (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i))))
387 GPDR(i) |= GPIO_bit(i);
388 else
389 GPDR(i) &= ~GPIO_bit(i);
376 } 390 }
391
377 return 0; 392 return 0;
378} 393}
379 394
@@ -384,6 +399,8 @@ static void pxa2xx_mfp_resume(void)
384 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { 399 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
385 GAFR_L(i) = saved_gafr[0][i]; 400 GAFR_L(i) = saved_gafr[0][i];
386 GAFR_U(i) = saved_gafr[1][i]; 401 GAFR_U(i) = saved_gafr[1][i];
402 GPSR(i * 32) = saved_gplr[i];
403 GPCR(i * 32) = ~saved_gplr[i];
387 GPDR(i * 32) = saved_gpdr[i]; 404 GPDR(i * 32) = saved_gpdr[i];
388 PGSR(i) = saved_pgsr[i]; 405 PGSR(i) = saved_pgsr[i];
389 } 406 }
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 6bce78edce7a..4726c246dcdc 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -421,8 +421,11 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
421 pxa_register_device(&pxa27x_device_i2c_power, info); 421 pxa_register_device(&pxa27x_device_i2c_power, info);
422} 422}
423 423
424static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = {
425 .gpio_set_wake = gpio_set_wake,
426};
427
424static struct platform_device *devices[] __initdata = { 428static struct platform_device *devices[] __initdata = {
425 &pxa_device_gpio,
426 &pxa27x_device_udc, 429 &pxa27x_device_udc,
427 &pxa_device_pmu, 430 &pxa_device_pmu,
428 &pxa_device_i2s, 431 &pxa_device_i2s,
@@ -458,6 +461,7 @@ static int __init pxa27x_init(void)
458 register_syscore_ops(&pxa2xx_mfp_syscore_ops); 461 register_syscore_ops(&pxa2xx_mfp_syscore_ops);
459 register_syscore_ops(&pxa2xx_clock_syscore_ops); 462 register_syscore_ops(&pxa2xx_clock_syscore_ops);
460 463
464 pxa_register_device(&pxa_device_gpio, &pxa27x_gpio_info);
461 ret = platform_add_devices(devices, ARRAY_SIZE(devices)); 465 ret = platform_add_devices(devices, ARRAY_SIZE(devices));
462 } 466 }
463 467
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 0f3a327ebcaa..b34287ab5afd 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -111,10 +111,6 @@ config S3C24XX_SETUP_TS
111 help 111 help
112 Compile in platform device definition for Samsung TouchScreen. 112 Compile in platform device definition for Samsung TouchScreen.
113 113
114# cpu-specific sections
115
116if CPU_S3C2410
117
118config S3C2410_DMA 114config S3C2410_DMA
119 bool 115 bool
120 depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442) 116 depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
@@ -127,6 +123,10 @@ config S3C2410_PM
127 help 123 help
128 Power Management code common to S3C2410 and better 124 Power Management code common to S3C2410 and better
129 125
126# cpu-specific sections
127
128if CPU_S3C2410
129
130config S3C24XX_SIMTEC_NOR 130config S3C24XX_SIMTEC_NOR
131 bool 131 bool
132 help 132 help
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index a8933de3d627..32395664e879 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -25,6 +25,7 @@
25#include <linux/gpio_keys.h> 25#include <linux/gpio_keys.h>
26#include <linux/input.h> 26#include <linux/input.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <linux/mmc/host.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
29 30
30#include <asm/hardware/vic.h> 31#include <asm/hardware/vic.h>
@@ -765,6 +766,7 @@ static void __init goni_pmic_init(void)
765/* MoviNAND */ 766/* MoviNAND */
766static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = { 767static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = {
767 .max_width = 4, 768 .max_width = 4,
769 .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
768 .cd_type = S3C_SDHCI_CD_PERMANENT, 770 .cd_type = S3C_SDHCI_CD_PERMANENT,
769}; 771};
770 772
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 7c524b4e415d..16be4c56abe3 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -306,7 +306,7 @@ void sa11x0_register_irda(struct irda_platform_data *irda)
306} 306}
307 307
308static struct resource sa1100_rtc_resources[] = { 308static struct resource sa1100_rtc_resources[] = {
309 DEFINE_RES_MEM(0x90010000, 0x9001003f), 309 DEFINE_RES_MEM(0x90010000, 0x40),
310 DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"), 310 DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
311 DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"), 311 DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
312}; 312};
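
DEFINE_RES_MEM(start, size) takes a size, not an end address: 0x40 bytes starting at 0x90010000 cover 0x90010000..0x9001003f, whereas the old second argument of 0x9001003f had been declaring a resource of roughly 2.25 GiB.
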
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index cb224a344af0..0891ec6e27f5 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = {
365}; 365};
366 366
367/* SDHI0 */ 367/* SDHI0 */
368static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg)
369{
370 struct device *dev = arg;
371 struct sh_mobile_sdhi_info *info = dev->platform_data;
372 struct tmio_mmc_data *pdata = info->pdata;
373
374 tmio_mmc_cd_wakeup(pdata);
375
376 return IRQ_HANDLED;
377}
378
379static struct sh_mobile_sdhi_info sdhi0_info = { 368static struct sh_mobile_sdhi_info sdhi0_info = {
380 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 369 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
381 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 370 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
382 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 371 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
383 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 372 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
384 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, 373 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
374 .cd_gpio = GPIO_PORT251,
385}; 375};
386 376
387static struct resource sdhi0_resources[] = { 377static struct resource sdhi0_resources[] = {
@@ -557,7 +547,6 @@ static void __init ag5evm_init(void)
557 lcd_backlight_reset(); 547 lcd_backlight_reset();
558 548
559 /* enable SDHI0 on CN15 [SD I/F] */ 549 /* enable SDHI0 on CN15 [SD I/F] */
560 gpio_request(GPIO_FN_SDHICD0, NULL);
561 gpio_request(GPIO_FN_SDHIWP0, NULL); 550 gpio_request(GPIO_FN_SDHIWP0, NULL);
562 gpio_request(GPIO_FN_SDHICMD0, NULL); 551 gpio_request(GPIO_FN_SDHICMD0, NULL);
563 gpio_request(GPIO_FN_SDHICLK0, NULL); 552 gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -566,13 +555,6 @@ static void __init ag5evm_init(void)
566 gpio_request(GPIO_FN_SDHID0_1, NULL); 555 gpio_request(GPIO_FN_SDHID0_1, NULL);
567 gpio_request(GPIO_FN_SDHID0_0, NULL); 556 gpio_request(GPIO_FN_SDHID0_0, NULL);
568 557
569 if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd,
570 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
571 "sdhi0 cd", &sdhi0_device.dev))
572 sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
573 else
574 pr_warn("Unable to setup SDHI0 GPIO IRQ\n");
575
576 /* enable SDHI1 on CN4 [WLAN I/F] */ 558 /* enable SDHI1 on CN4 [WLAN I/F] */
577 gpio_request(GPIO_FN_SDHICLK1, NULL); 559 gpio_request(GPIO_FN_SDHICLK1, NULL);
578 gpio_request(GPIO_FN_SDHICMD1_PU, NULL); 560 gpio_request(GPIO_FN_SDHICMD1_PU, NULL);
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index f49e28abe0ab..8c6202bb6aeb 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
1011} 1011}
1012 1012
1013/* SDHI0 */ 1013/* SDHI0 */
1014static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
1015{
1016 struct device *dev = arg;
1017 struct sh_mobile_sdhi_info *info = dev->platform_data;
1018 struct tmio_mmc_data *pdata = info->pdata;
1019
1020 tmio_mmc_cd_wakeup(pdata);
1021
1022 return IRQ_HANDLED;
1023}
1024
1025static struct sh_mobile_sdhi_info sdhi0_info = { 1014static struct sh_mobile_sdhi_info sdhi0_info = {
1026 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 1015 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
1027 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 1016 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
1017 .tmio_flags = TMIO_MMC_USE_GPIO_CD,
1028 .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 1018 .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
1019 .cd_gpio = GPIO_PORT172,
1029}; 1020};
1030 1021
1031static struct resource sdhi0_resources[] = { 1022static struct resource sdhi0_resources[] = {
@@ -1384,7 +1375,6 @@ static void __init mackerel_init(void)
1384{ 1375{
1385 u32 srcr4; 1376 u32 srcr4;
1386 struct clk *clk; 1377 struct clk *clk;
1387 int ret;
1388 1378
1389 /* External clock source */ 1379 /* External clock source */
1390 clk_set_rate(&sh7372_dv_clki_clk, 27000000); 1380 clk_set_rate(&sh7372_dv_clki_clk, 27000000);
@@ -1481,7 +1471,6 @@ static void __init mackerel_init(void)
1481 irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); 1471 irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
1482 1472
1483 /* enable SDHI0 */ 1473 /* enable SDHI0 */
1484 gpio_request(GPIO_FN_SDHICD0, NULL);
1485 gpio_request(GPIO_FN_SDHIWP0, NULL); 1474 gpio_request(GPIO_FN_SDHIWP0, NULL);
1486 gpio_request(GPIO_FN_SDHICMD0, NULL); 1475 gpio_request(GPIO_FN_SDHICMD0, NULL);
1487 gpio_request(GPIO_FN_SDHICLK0, NULL); 1476 gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -1490,13 +1479,6 @@ static void __init mackerel_init(void)
1490 gpio_request(GPIO_FN_SDHID0_1, NULL); 1479 gpio_request(GPIO_FN_SDHID0_1, NULL);
1491 gpio_request(GPIO_FN_SDHID0_0, NULL); 1480 gpio_request(GPIO_FN_SDHID0_0, NULL);
1492 1481
1493 ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
1494 IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
1495 if (!ret)
1496 sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
1497 else
1498 pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
1499
1500#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1482#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1501 /* enable SDHI1 */ 1483 /* enable SDHI1 */
1502 gpio_request(GPIO_FN_SDHICMD1, NULL); 1484 gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 6ac015c89206..b202c1272526 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -16,6 +16,59 @@
16 16
17 __CPUINIT 17 __CPUINIT
18 18
19/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks!
20 *
21 * The secondary kernel init calls v7_flush_dcache_all before it enables
22 * the L1; however, the L1 comes out of reset in an undefined state, so
23 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
24 * of cache lines with uninitialized data and uninitialized tags to get
25 * written out to memory, which does really unpleasant things to the main
26 * processor. We fix this by performing an invalidate, rather than a
27 * clean + invalidate, before jumping into the kernel.
28 *
29 * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
30 * to be called for both secondary cores startup and primary core resume
31 * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S.
32 */
33ENTRY(v7_invalidate_l1)
34 mov r0, #0
35 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
36 mcr p15, 2, r0, c0, c0, 0
37 mrc p15, 1, r0, c0, c0, 0
38
39 ldr r1, =0x7fff
40 and r2, r1, r0, lsr #13
41
42 ldr r1, =0x3ff
43
44 and r3, r1, r0, lsr #3 @ NumWays - 1
45 add r2, r2, #1 @ NumSets
46
47 and r0, r0, #0x7
48 add r0, r0, #4 @ SetShift
49
50 clz r1, r3 @ WayShift
51 add r4, r3, #1 @ NumWays
521: sub r2, r2, #1 @ NumSets--
53 mov r3, r4 @ Temp = NumWays
542: subs r3, r3, #1 @ Temp--
55 mov r5, r3, lsl r1
56 mov r6, r2, lsl r0
57 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
58 mcr p15, 0, r5, c7, c6, 2
59 bgt 2b
60 cmp r2, #0
61 bgt 1b
62 dsb
63 isb
64 mov pc, lr
65ENDPROC(v7_invalidate_l1)
66
67ENTRY(shmobile_invalidate_start)
68 bl v7_invalidate_l1
69 b secondary_startup
70ENDPROC(shmobile_invalidate_start)
71
19/* 72/*
20 * Reset vector for secondary CPUs. 73 * Reset vector for secondary CPUs.
21 * This will be mapped at address 0 by SBAR register. 74 * This will be mapped at address 0 by SBAR register.
@@ -24,4 +77,5 @@
24 .align 12 77 .align 12
25ENTRY(shmobile_secondary_vector) 78ENTRY(shmobile_secondary_vector)
26 ldr pc, 1f 79 ldr pc, 1f
271: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET 801: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET
81ENDPROC(shmobile_secondary_vector)
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 83ad3fe0a75f..c85e6ecda606 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -4,7 +4,6 @@
4extern void shmobile_earlytimer_init(void); 4extern void shmobile_earlytimer_init(void);
5extern struct sys_timer shmobile_timer; 5extern struct sys_timer shmobile_timer;
6struct twd_local_timer; 6struct twd_local_timer;
7void shmobile_twd_init(struct twd_local_timer *twd_local_timer);
8extern void shmobile_setup_console(void); 7extern void shmobile_setup_console(void);
9extern void shmobile_secondary_vector(void); 8extern void shmobile_secondary_vector(void);
10extern int shmobile_platform_cpu_kill(unsigned int cpu); 9extern int shmobile_platform_cpu_kill(unsigned int cpu);
@@ -82,5 +81,6 @@ extern int r8a7779_platform_cpu_kill(unsigned int cpu);
82extern void r8a7779_secondary_init(unsigned int cpu); 81extern void r8a7779_secondary_init(unsigned int cpu);
83extern int r8a7779_boot_secondary(unsigned int cpu); 82extern int r8a7779_boot_secondary(unsigned int cpu);
84extern void r8a7779_smp_prepare_cpus(void); 83extern void r8a7779_smp_prepare_cpus(void);
84extern void r8a7779_register_twd(void);
85 85
86#endif /* __ARCH_MACH_COMMON_H */ 86#endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 12c6f529ab89..e98e46f6cf55 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -262,10 +262,14 @@ void __init r8a7779_add_standard_devices(void)
262 ARRAY_SIZE(r8a7779_late_devices)); 262 ARRAY_SIZE(r8a7779_late_devices));
263} 263}
264 264
265/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_ARM_TWD */
266void __init __weak r8a7779_register_twd(void) { }
267
265static void __init r8a7779_earlytimer_init(void) 268static void __init r8a7779_earlytimer_init(void)
266{ 269{
267 r8a7779_clock_init(); 270 r8a7779_clock_init();
268 shmobile_earlytimer_init(); 271 shmobile_earlytimer_init();
272 r8a7779_register_twd();
269} 273}
270 274
271void __init r8a7779_add_early_devices(void) 275void __init r8a7779_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 5bebffc10455..04a0dfe75493 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -688,10 +688,14 @@ void __init sh73a0_add_standard_devices(void)
688 ARRAY_SIZE(sh73a0_late_devices)); 688 ARRAY_SIZE(sh73a0_late_devices));
689} 689}
690 690
691/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_ARM_TWD */
692void __init __weak sh73a0_register_twd(void) { }
693
691static void __init sh73a0_earlytimer_init(void) 694static void __init sh73a0_earlytimer_init(void)
692{ 695{
693 sh73a0_clock_init(); 696 sh73a0_clock_init();
694 shmobile_earlytimer_init(); 697 shmobile_earlytimer_init();
698 sh73a0_register_twd();
695} 699}
696 700
697void __init sh73a0_add_early_devices(void) 701void __init sh73a0_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b62e19d4c9af..6d1d0238cbf7 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -64,8 +64,15 @@ static void __iomem *scu_base_addr(void)
64static DEFINE_SPINLOCK(scu_lock); 64static DEFINE_SPINLOCK(scu_lock);
65static unsigned long tmp; 65static unsigned long tmp;
66 66
67#ifdef CONFIG_HAVE_ARM_TWD
67static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); 68static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
68 69
70void __init r8a7779_register_twd(void)
71{
72 twd_local_timer_register(&twd_local_timer);
73}
74#endif
75
69static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) 76static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
70{ 77{
71 void __iomem *scu_base = scu_base_addr(); 78 void __iomem *scu_base = scu_base_addr();
@@ -84,7 +91,6 @@ unsigned int __init r8a7779_get_core_count(void)
84{ 91{
85 void __iomem *scu_base = scu_base_addr(); 92 void __iomem *scu_base = scu_base_addr();
86 93
87 shmobile_twd_init(&twd_local_timer);
88 return scu_get_core_count(scu_base); 94 return scu_get_core_count(scu_base);
89} 95}
90 96
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 14ad8b052f1a..e36c41c4ab40 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -42,7 +42,13 @@ static void __iomem *scu_base_addr(void)
42static DEFINE_SPINLOCK(scu_lock); 42static DEFINE_SPINLOCK(scu_lock);
43static unsigned long tmp; 43static unsigned long tmp;
44 44
45#ifdef CONFIG_HAVE_ARM_TWD
45static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); 46static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
47void __init sh73a0_register_twd(void)
48{
49 twd_local_timer_register(&twd_local_timer);
50}
51#endif
46 52
47static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) 53static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
48{ 54{
@@ -62,7 +68,6 @@ unsigned int __init sh73a0_get_core_count(void)
62{ 68{
63 void __iomem *scu_base = scu_base_addr(); 69 void __iomem *scu_base = scu_base_addr();
64 70
65 shmobile_twd_init(&twd_local_timer);
66 return scu_get_core_count(scu_base); 71 return scu_get_core_count(scu_base);
67} 72}
68 73
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index 2fba5f3d1c8a..8b79e7917a23 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -46,15 +46,6 @@ static void __init shmobile_timer_init(void)
46{ 46{
47} 47}
48 48
49void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer)
50{
51#ifdef CONFIG_HAVE_ARM_TWD
52 int err = twd_local_timer_register(twd_local_timer);
53 if (err)
54 pr_err("twd_local_timer_register failed %d\n", err);
55#endif
56}
57
58struct sys_timer shmobile_timer = { 49struct sys_timer shmobile_timer = {
59 .init = shmobile_timer_init, 50 .init = shmobile_timer_init,
60}; 51};
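
The per-SoC replacement for the removed helper follows the weak-symbol pattern already visible in the setup-r8a7779.c and setup-sh73a0.c hunks above; a generic sketch with a placeholder soc_register_twd() name:

	/* setup-<soc>.c: no-op default for !CONFIG_SMP / !CONFIG_HAVE_ARM_TWD builds */
	void __init __weak soc_register_twd(void) { }

	/* smp-<soc>.c: the real registration, built only when the TWD exists */
	#ifdef CONFIG_HAVE_ARM_TWD
	static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);

	void __init soc_register_twd(void)
	{
		twd_local_timer_register(&twd_local_timer);
	}
	#endif
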
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 1621ad07d284..33339745d432 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1667,8 +1667,10 @@ void __init u300_init_irq(void)
1667 1667
1668 for (i = 0; i < U300_VIC_IRQS_END; i++) 1668 for (i = 0; i < U300_VIC_IRQS_END; i++)
1669 set_bit(i, (unsigned long *) &mask[0]); 1669 set_bit(i, (unsigned long *) &mask[0]);
1670 vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]); 1670 vic_init((void __iomem *) U300_INTCON0_VBASE, IRQ_U300_INTCON0_START,
1671 vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]); 1671 mask[0], mask[0]);
1672 vic_init((void __iomem *) U300_INTCON1_VBASE, IRQ_U300_INTCON1_START,
1673 mask[1], mask[1]);
1672} 1674}
1673 1675
1674 1676
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
index a38f80238ea9..cb04bd6ab3e7 100644
--- a/arch/arm/mach-u300/i2c.c
+++ b/arch/arm/mach-u300/i2c.c
@@ -146,9 +146,6 @@ static struct ab3100_platform_data ab3100_plf_data = {
146 .min_uV = 1800000, 146 .min_uV = 1800000,
147 .max_uV = 1800000, 147 .max_uV = 1800000,
148 .valid_modes_mask = REGULATOR_MODE_NORMAL, 148 .valid_modes_mask = REGULATOR_MODE_NORMAL,
149 .valid_ops_mask =
150 REGULATOR_CHANGE_VOLTAGE |
151 REGULATOR_CHANGE_STATUS,
152 .always_on = 1, 149 .always_on = 1,
153 .boot_on = 1, 150 .boot_on = 1,
154 }, 151 },
@@ -160,9 +157,6 @@ static struct ab3100_platform_data ab3100_plf_data = {
160 .min_uV = 2500000, 157 .min_uV = 2500000,
161 .max_uV = 2500000, 158 .max_uV = 2500000,
162 .valid_modes_mask = REGULATOR_MODE_NORMAL, 159 .valid_modes_mask = REGULATOR_MODE_NORMAL,
163 .valid_ops_mask =
164 REGULATOR_CHANGE_VOLTAGE |
165 REGULATOR_CHANGE_STATUS,
166 .always_on = 1, 160 .always_on = 1,
167 .boot_on = 1, 161 .boot_on = 1,
168 }, 162 },
@@ -230,8 +224,7 @@ static struct ab3100_platform_data ab3100_plf_data = {
230 .max_uV = 1800000, 224 .max_uV = 1800000,
231 .valid_modes_mask = REGULATOR_MODE_NORMAL, 225 .valid_modes_mask = REGULATOR_MODE_NORMAL,
232 .valid_ops_mask = 226 .valid_ops_mask =
233 REGULATOR_CHANGE_VOLTAGE | 227 REGULATOR_CHANGE_VOLTAGE,
234 REGULATOR_CHANGE_STATUS,
235 .always_on = 1, 228 .always_on = 1,
236 .boot_on = 1, 229 .boot_on = 1,
237 }, 230 },
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index ee78a26707eb..ec09c1e07b1a 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -12,101 +12,101 @@
12#ifndef __MACH_IRQS_H 12#ifndef __MACH_IRQS_H
13#define __MACH_IRQS_H 13#define __MACH_IRQS_H
14 14
15#define IRQ_U300_INTCON0_START 0 15#define IRQ_U300_INTCON0_START 1
16#define IRQ_U300_INTCON1_START 32 16#define IRQ_U300_INTCON1_START 33
17/* These are on INTCON0 - 30 lines */ 17/* These are on INTCON0 - 30 lines */
18#define IRQ_U300_IRQ0_EXT 0 18#define IRQ_U300_IRQ0_EXT 1
19#define IRQ_U300_IRQ1_EXT 1 19#define IRQ_U300_IRQ1_EXT 2
20#define IRQ_U300_DMA 2 20#define IRQ_U300_DMA 3
21#define IRQ_U300_VIDEO_ENC_0 3 21#define IRQ_U300_VIDEO_ENC_0 4
22#define IRQ_U300_VIDEO_ENC_1 4 22#define IRQ_U300_VIDEO_ENC_1 5
23#define IRQ_U300_AAIF_RX 5 23#define IRQ_U300_AAIF_RX 6
24#define IRQ_U300_AAIF_TX 6 24#define IRQ_U300_AAIF_TX 7
25#define IRQ_U300_AAIF_VGPIO 7 25#define IRQ_U300_AAIF_VGPIO 8
26#define IRQ_U300_AAIF_WAKEUP 8 26#define IRQ_U300_AAIF_WAKEUP 9
27#define IRQ_U300_PCM_I2S0_FRAME 9 27#define IRQ_U300_PCM_I2S0_FRAME 10
28#define IRQ_U300_PCM_I2S0_FIFO 10 28#define IRQ_U300_PCM_I2S0_FIFO 11
29#define IRQ_U300_PCM_I2S1_FRAME 11 29#define IRQ_U300_PCM_I2S1_FRAME 12
30#define IRQ_U300_PCM_I2S1_FIFO 12 30#define IRQ_U300_PCM_I2S1_FIFO 13
31#define IRQ_U300_XGAM_GAMCON 13 31#define IRQ_U300_XGAM_GAMCON 14
32#define IRQ_U300_XGAM_CDI 14 32#define IRQ_U300_XGAM_CDI 15
33#define IRQ_U300_XGAM_CDICON 15 33#define IRQ_U300_XGAM_CDICON 16
34#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) 34#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
35/* MMIACC not used on the DB3210 or DB3350 chips */ 35/* MMIACC not used on the DB3210 or DB3350 chips */
36#define IRQ_U300_XGAM_MMIACC 16 36#define IRQ_U300_XGAM_MMIACC 17
37#endif 37#endif
38#define IRQ_U300_XGAM_PDI 17 38#define IRQ_U300_XGAM_PDI 18
39#define IRQ_U300_XGAM_PDICON 18 39#define IRQ_U300_XGAM_PDICON 19
40#define IRQ_U300_XGAM_GAMEACC 19 40#define IRQ_U300_XGAM_GAMEACC 20
41#define IRQ_U300_XGAM_MCIDCT 20 41#define IRQ_U300_XGAM_MCIDCT 21
42#define IRQ_U300_APEX 21 42#define IRQ_U300_APEX 22
43#define IRQ_U300_UART0 22 43#define IRQ_U300_UART0 23
44#define IRQ_U300_SPI 23 44#define IRQ_U300_SPI 24
45#define IRQ_U300_TIMER_APP_OS 24 45#define IRQ_U300_TIMER_APP_OS 25
46#define IRQ_U300_TIMER_APP_DD 25 46#define IRQ_U300_TIMER_APP_DD 26
47#define IRQ_U300_TIMER_APP_GP1 26 47#define IRQ_U300_TIMER_APP_GP1 27
48#define IRQ_U300_TIMER_APP_GP2 27 48#define IRQ_U300_TIMER_APP_GP2 28
49#define IRQ_U300_TIMER_OS 28 49#define IRQ_U300_TIMER_OS 29
50#define IRQ_U300_TIMER_MS 29 50#define IRQ_U300_TIMER_MS 30
51#define IRQ_U300_KEYPAD_KEYBF 30 51#define IRQ_U300_KEYPAD_KEYBF 31
52#define IRQ_U300_KEYPAD_KEYBR 31 52#define IRQ_U300_KEYPAD_KEYBR 32
53/* These are on INTCON1 - 32 lines */ 53/* These are on INTCON1 - 32 lines */
54#define IRQ_U300_GPIO_PORT0 32 54#define IRQ_U300_GPIO_PORT0 33
55#define IRQ_U300_GPIO_PORT1 33 55#define IRQ_U300_GPIO_PORT1 34
56#define IRQ_U300_GPIO_PORT2 34 56#define IRQ_U300_GPIO_PORT2 35
57 57
58#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) || \ 58#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) || \
59 defined(CONFIG_MACH_U300_BS335) 59 defined(CONFIG_MACH_U300_BS335)
60/* These are for DB3150, DB3200 and DB3350 */ 60/* These are for DB3150, DB3200 and DB3350 */
61#define IRQ_U300_WDOG 35 61#define IRQ_U300_WDOG 36
62#define IRQ_U300_EVHIST 36 62#define IRQ_U300_EVHIST 37
63#define IRQ_U300_MSPRO 37 63#define IRQ_U300_MSPRO 38
64#define IRQ_U300_MMCSD_MCIINTR0 38 64#define IRQ_U300_MMCSD_MCIINTR0 39
65#define IRQ_U300_MMCSD_MCIINTR1 39 65#define IRQ_U300_MMCSD_MCIINTR1 40
66#define IRQ_U300_I2C0 40 66#define IRQ_U300_I2C0 41
67#define IRQ_U300_I2C1 41 67#define IRQ_U300_I2C1 42
68#define IRQ_U300_RTC 42 68#define IRQ_U300_RTC 43
69#define IRQ_U300_NFIF 43 69#define IRQ_U300_NFIF 44
70#define IRQ_U300_NFIF2 44 70#define IRQ_U300_NFIF2 45
71#endif 71#endif
72 72
73/* DB3150 and DB3200 have only 45 IRQs */ 73/* DB3150 and DB3200 have only 45 IRQs */
74#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) 74#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
75#define U300_VIC_IRQS_END 45 75#define U300_VIC_IRQS_END 46
76#endif 76#endif
77 77
78/* The DB3350-specific interrupt lines */ 78/* The DB3350-specific interrupt lines */
79#ifdef CONFIG_MACH_U300_BS335 79#ifdef CONFIG_MACH_U300_BS335
80#define IRQ_U300_ISP_F0 45 80#define IRQ_U300_ISP_F0 46
81#define IRQ_U300_ISP_F1 46 81#define IRQ_U300_ISP_F1 47
82#define IRQ_U300_ISP_F2 47 82#define IRQ_U300_ISP_F2 48
83#define IRQ_U300_ISP_F3 48 83#define IRQ_U300_ISP_F3 49
84#define IRQ_U300_ISP_F4 49 84#define IRQ_U300_ISP_F4 50
85#define IRQ_U300_GPIO_PORT3 50 85#define IRQ_U300_GPIO_PORT3 51
86#define IRQ_U300_SYSCON_PLL_LOCK 51 86#define IRQ_U300_SYSCON_PLL_LOCK 52
87#define IRQ_U300_UART1 52 87#define IRQ_U300_UART1 53
88#define IRQ_U300_GPIO_PORT4 53 88#define IRQ_U300_GPIO_PORT4 54
89#define IRQ_U300_GPIO_PORT5 54 89#define IRQ_U300_GPIO_PORT5 55
90#define IRQ_U300_GPIO_PORT6 55 90#define IRQ_U300_GPIO_PORT6 56
91#define U300_VIC_IRQS_END 56 91#define U300_VIC_IRQS_END 57
92#endif 92#endif
93 93
94/* The DB3210-specific interrupt lines */ 94/* The DB3210-specific interrupt lines */
95#ifdef CONFIG_MACH_U300_BS365 95#ifdef CONFIG_MACH_U300_BS365
96#define IRQ_U300_GPIO_PORT3 35 96#define IRQ_U300_GPIO_PORT3 36
97#define IRQ_U300_GPIO_PORT4 36 97#define IRQ_U300_GPIO_PORT4 37
98#define IRQ_U300_WDOG 37 98#define IRQ_U300_WDOG 38
99#define IRQ_U300_EVHIST 38 99#define IRQ_U300_EVHIST 39
100#define IRQ_U300_MSPRO 39 100#define IRQ_U300_MSPRO 40
101#define IRQ_U300_MMCSD_MCIINTR0 40 101#define IRQ_U300_MMCSD_MCIINTR0 41
102#define IRQ_U300_MMCSD_MCIINTR1 41 102#define IRQ_U300_MMCSD_MCIINTR1 42
103#define IRQ_U300_I2C0 42 103#define IRQ_U300_I2C0 43
104#define IRQ_U300_I2C1 43 104#define IRQ_U300_I2C1 44
105#define IRQ_U300_RTC 44 105#define IRQ_U300_RTC 45
106#define IRQ_U300_NFIF 45 106#define IRQ_U300_NFIF 46
107#define IRQ_U300_NFIF2 46 107#define IRQ_U300_NFIF2 47
108#define IRQ_U300_SYSCON_PLL_LOCK 47 108#define IRQ_U300_SYSCON_PLL_LOCK 48
109#define U300_VIC_IRQS_END 48 109#define U300_VIC_IRQS_END 49
110#endif 110#endif
111 111
112/* Maximum 8*7 GPIO lines */ 112/* Maximum 8*7 GPIO lines */
@@ -117,6 +117,6 @@
117#define IRQ_U300_GPIO_END (U300_VIC_IRQS_END) 117#define IRQ_U300_GPIO_END (U300_VIC_IRQS_END)
118#endif 118#endif
119 119
120#define NR_IRQS (IRQ_U300_GPIO_END) 120#define NR_IRQS (IRQ_U300_GPIO_END - IRQ_U300_INTCON0_START)
121 121
122#endif 122#endif
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 880d02ec89d4..ef7099eea0f2 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -17,6 +17,7 @@ config UX500_SOC_DB5500
17config UX500_SOC_DB8500 17config UX500_SOC_DB8500
18 bool 18 bool
19 select MFD_DB8500_PRCMU 19 select MFD_DB8500_PRCMU
20 select REGULATOR
20 select REGULATOR_DB8500_PRCMU 21 select REGULATOR_DB8500_PRCMU
21 select CPU_FREQ_TABLE if CPU_FREQ 22 select CPU_FREQ_TABLE if CPU_FREQ
22 23
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
index 2b2d51caf9d8..0127490218cd 100644
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ b/arch/arm/mach-ux500/mbox-db5500.c
@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
168 return sprintf(buf, "0x%X\n", mbox_value); 168 return sprintf(buf, "0x%X\n", mbox_value);
169} 169}
170 170
171static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo); 171static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
172 172
173static int mbox_show(struct seq_file *s, void *data) 173static int mbox_show(struct seq_file *s, void *data)
174{ 174{
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index d2058ef8345f..eff5842f6232 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -99,7 +99,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
99 */ 99 */
100 write_pen_release(cpu_logical_map(cpu)); 100 write_pen_release(cpu_logical_map(cpu));
101 101
102 gic_raise_softirq(cpumask_of(cpu), 1); 102 smp_send_reschedule(cpu);
103 103
104 timeout = jiffies + (1 * HZ); 104 timeout = jiffies + (1 * HZ);
105 while (time_before(jiffies, timeout)) { 105 while (time_before(jiffies, timeout)) {
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f87..80741992a9fc 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
26 mrc p15, 0, r1, c5, c0, 0 @ get FSR 26 mrc p15, 0, r1, c5, c0, 0 @ get FSR
27 mrc p15, 0, r0, c6, c0, 0 @ get FAR 27 mrc p15, 0, r0, c6, c0, 0 @ get FAR
28/* 28/*
29 * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103). 29 * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
30 * The test below covers all the write situations, including Java bytecodes
31 */ 30 */
32 bic r1, r1, #1 << 11 @ clear bit 11 of FSR 31#ifdef CONFIG_ARM_ERRATA_326103
32 ldr ip, =0x4107b36
33 mrc p15, 0, r3, c0, c0, 0 @ get processor id
34 teq ip, r3, lsr #4 @ r0 ARM1136?
35 bne do_DataAbort
33 tst r5, #PSR_J_BIT @ Java? 36 tst r5, #PSR_J_BIT @ Java?
37 tsteq r5, #PSR_T_BIT @ Thumb?
34 bne do_DataAbort 38 bne do_DataAbort
35 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 39 bic r1, r1, #1 << 11 @ clear bit 11 of FSR
36 ldreq r3, [r4] @ read aborted ARM instruction 40 ldr r3, [r4] @ read aborted ARM instruction
37#ifdef CONFIG_CPU_ENDIAN_BE8 41#ifdef CONFIG_CPU_ENDIAN_BE8
38 reveq r3, r3 42 rev r3, r3
39#endif 43#endif
40 do_ldrd_abort tmp=ip, insn=r3 44 do_ldrd_abort tmp=ip, insn=r3
41 tst r3, #1 << 20 @ L = 0 -> write 45 tst r3, #1 << 20 @ L = 0 -> write
42 orreq r1, r1, #1 << 11 @ yes. 46 orreq r1, r1, #1 << 11 @ yes.
47#endif
43 b do_DataAbort 48 b do_DataAbort
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f4..2a8e380501e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
32static DEFINE_RAW_SPINLOCK(l2x0_lock); 32static DEFINE_RAW_SPINLOCK(l2x0_lock);
33static u32 l2x0_way_mask; /* Bitmask of active ways */ 33static u32 l2x0_way_mask; /* Bitmask of active ways */
34static u32 l2x0_size; 34static u32 l2x0_size;
35static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
35 36
36struct l2x0_regs l2x0_saved_regs; 37struct l2x0_regs l2x0_saved_regs;
37 38
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
61{ 62{
62 void __iomem *base = l2x0_base; 63 void __iomem *base = l2x0_base;
63 64
64#ifdef CONFIG_PL310_ERRATA_753970 65 writel_relaxed(0, base + sync_reg_offset);
65 /* write to an unmmapped register */
66 writel_relaxed(0, base + L2X0_DUMMY_REG);
67#else
68 writel_relaxed(0, base + L2X0_CACHE_SYNC);
69#endif
70 cache_wait(base + L2X0_CACHE_SYNC, 1); 66 cache_wait(base + L2X0_CACHE_SYNC, 1);
71} 67}
72 68
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
85} 81}
86 82
87#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 83#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
84static inline void debug_writel(unsigned long val)
85{
86 if (outer_cache.set_debug)
87 outer_cache.set_debug(val);
88}
88 89
89#define debug_writel(val) outer_cache.set_debug(val) 90static void pl310_set_debug(unsigned long val)
90
91static void l2x0_set_debug(unsigned long val)
92{ 91{
93 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 92 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
94} 93}
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
98{ 97{
99} 98}
100 99
101#define l2x0_set_debug NULL 100#define pl310_set_debug NULL
102#endif 101#endif
103 102
104#ifdef CONFIG_PL310_ERRATA_588369 103#ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
331 else 330 else
332 ways = 8; 331 ways = 8;
333 type = "L310"; 332 type = "L310";
333#ifdef CONFIG_PL310_ERRATA_753970
334 /* Unmapped register. */
335 sync_reg_offset = L2X0_DUMMY_REG;
336#endif
337 outer_cache.set_debug = pl310_set_debug;
334 break; 338 break;
335 case L2X0_CACHE_ID_PART_L210: 339 case L2X0_CACHE_ID_PART_L210:
336 ways = (aux >> 13) & 0xf; 340 ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
379 outer_cache.flush_all = l2x0_flush_all; 383 outer_cache.flush_all = l2x0_flush_all;
380 outer_cache.inv_all = l2x0_inv_all; 384 outer_cache.inv_all = l2x0_inv_all;
381 outer_cache.disable = l2x0_disable; 385 outer_cache.disable = l2x0_disable;
382 outer_cache.set_debug = l2x0_set_debug;
383 386
384 printk(KERN_INFO "%s cache controller enabled\n", type); 387 printk(KERN_INFO "%s cache controller enabled\n", type);
385 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", 388 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1d..8f5813bbffb5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
293#endif 293#endif
294 294
295#ifndef CONFIG_SPARSEMEM 295#ifndef CONFIG_SPARSEMEM
296static void arm_memory_present(void) 296static void __init arm_memory_present(void)
297{ 297{
298} 298}
299#else 299#else
300static void arm_memory_present(void) 300static void __init arm_memory_present(void)
301{ 301{
302 struct memblock_region *reg; 302 struct memblock_region *reg;
303 303
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff91..2c7cf2f9c837 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
618 } 618 }
619} 619}
620 620
621static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 621static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
622 unsigned long phys, const struct mem_type *type) 622 unsigned long end, unsigned long phys, const struct mem_type *type)
623{ 623{
624 pud_t *pud = pud_offset(pgd, addr); 624 pud_t *pud = pud_offset(pgd, addr);
625 unsigned long next; 625 unsigned long next;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ecdb3da0dea9..c58d896cd5c3 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -916,6 +916,13 @@ void omap_start_dma(int lch)
916 l |= OMAP_DMA_CCR_BUFFERING_DISABLE; 916 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
917 l |= OMAP_DMA_CCR_EN; 917 l |= OMAP_DMA_CCR_EN;
918 918
919 /*
920 * As dma_write() uses IO accessors which are weakly ordered, there
921 * is no guarantee that data in coherent DMA memory will be visible
922 * to the DMA device. Add a memory barrier here to ensure that any
923 * such data is visible prior to enabling DMA.
924 */
925 mb();
919 p->dma_write(l, CCR, lch); 926 p->dma_write(l, CCR, lch);
920 927
921 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 928 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
965 p->dma_write(l, CCR, lch); 972 p->dma_write(l, CCR, lch);
966 } 973 }
967 974
975 /*
976 * Ensure that data transferred by DMA is visible to any access
977 * after DMA has been disabled. This is important for coherent
978 * DMA regions.
979 */
980 mb();
981
968 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 982 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
969 int next_lch, cur_lch = lch; 983 int next_lch, cur_lch = lch;
970 char dma_chan_link_map[dma_lch_count]; 984 char dma_chan_link_map[dma_lch_count];
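
For a driver this means the usual "fill a coherent buffer, then start the channel" sequence no longer needs a barrier of its own; a minimal sketch (the function and its arguments are illustrative):

	/* buf is assumed to come from dma_alloc_coherent() */
	static void push_buffer(int lch, void *buf, const void *payload, size_t len)
	{
		memcpy(buf, payload, len);	/* CPU writes into coherent DMA memory */

		/*
		 * omap_start_dma() now issues mb() before enabling the channel,
		 * so the payload written above is visible to the DMA engine first.
		 */
		omap_start_dma(lch);
	}
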
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 8070145ccb98..3f26db4ee8e6 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -305,6 +305,7 @@ struct omap_hwmod_sysc_fields {
305 * @rev_offs: IP block revision register offset (from module base addr) 305 * @rev_offs: IP block revision register offset (from module base addr)
306 * @sysc_offs: OCP_SYSCONFIG register offset (from module base addr) 306 * @sysc_offs: OCP_SYSCONFIG register offset (from module base addr)
307 * @syss_offs: OCP_SYSSTATUS register offset (from module base addr) 307 * @syss_offs: OCP_SYSSTATUS register offset (from module base addr)
308 * @srst_udelay: Delay needed after doing a softreset in usecs
308 * @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART} 309 * @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART}
309 * @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported 310 * @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported
310 * @clockact: the default value of the module CLOCKACTIVITY bits 311 * @clockact: the default value of the module CLOCKACTIVITY bits
@@ -330,9 +331,10 @@ struct omap_hwmod_class_sysconfig {
330 u16 sysc_offs; 331 u16 sysc_offs;
331 u16 syss_offs; 332 u16 syss_offs;
332 u16 sysc_flags; 333 u16 sysc_flags;
334 struct omap_hwmod_sysc_fields *sysc_fields;
335 u8 srst_udelay;
333 u8 idlemodes; 336 u8 idlemodes;
334 u8 clockact; 337 u8 clockact;
335 struct omap_hwmod_sysc_fields *sysc_fields;
336}; 338};
337 339
338/** 340/**
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index eec98afa0f83..f9a8c5341ee9 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -348,7 +348,6 @@ u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
348 sdrc_actim_ctrl_b_1, sdrc_mr_1); 348 sdrc_actim_ctrl_b_1, sdrc_mr_1);
349} 349}
350 350
351#ifdef CONFIG_PM
352void omap3_sram_restore_context(void) 351void omap3_sram_restore_context(void)
353{ 352{
354 omap_sram_ceil = omap_sram_base + omap_sram_size; 353 omap_sram_ceil = omap_sram_base + omap_sram_size;
@@ -358,17 +357,18 @@ void omap3_sram_restore_context(void)
358 omap3_sram_configure_core_dpll_sz); 357 omap3_sram_configure_core_dpll_sz);
359 omap_push_sram_idle(); 358 omap_push_sram_idle();
360} 359}
361#endif /* CONFIG_PM */
362
363#endif /* CONFIG_ARCH_OMAP3 */
364 360
365static inline int omap34xx_sram_init(void) 361static inline int omap34xx_sram_init(void)
366{ 362{
367#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
368 omap3_sram_restore_context(); 363 omap3_sram_restore_context();
369#endif
370 return 0; 364 return 0;
371} 365}
366#else
367static inline int omap34xx_sram_init(void)
368{
369 return 0;
370}
371#endif /* CONFIG_ARCH_OMAP3 */
372 372
373static inline int am33xx_sram_init(void) 373static inline int am33xx_sram_init(void)
374{ 374{
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 317e246ffc56..e834c5ef437c 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -18,6 +18,8 @@
18#ifndef __PLAT_S3C_SDHCI_H 18#ifndef __PLAT_S3C_SDHCI_H
19#define __PLAT_S3C_SDHCI_H __FILE__ 19#define __PLAT_S3C_SDHCI_H __FILE__
20 20
21#include <plat/devs.h>
22
21struct platform_device; 23struct platform_device;
22struct mmc_host; 24struct mmc_host;
23struct mmc_card; 25struct mmc_card;
@@ -356,4 +358,30 @@ static inline void exynos4_default_sdhci3(void) { }
356 358
357#endif /* CONFIG_EXYNOS4_SETUP_SDHCI */ 359#endif /* CONFIG_EXYNOS4_SETUP_SDHCI */
358 360
361static inline void s3c_sdhci_setname(int id, char *name)
362{
363 switch (id) {
364#ifdef CONFIG_S3C_DEV_HSMMC
365 case 0:
366 s3c_device_hsmmc0.name = name;
367 break;
368#endif
369#ifdef CONFIG_S3C_DEV_HSMMC1
370 case 1:
371 s3c_device_hsmmc1.name = name;
372 break;
373#endif
374#ifdef CONFIG_S3C_DEV_HSMMC2
375 case 2:
376 s3c_device_hsmmc2.name = name;
377 break;
378#endif
379#ifdef CONFIG_S3C_DEV_HSMMC3
380 case 3:
381 s3c_device_hsmmc3.name = name;
382 break;
383#endif
384 }
385}
386
359#endif /* __PLAT_S3C_SDHCI_H */ 387#endif /* __PLAT_S3C_SDHCI_H */
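
A hypothetical use from SoC setup code, renaming controller 2 before the platform device is registered (the "exynos4-sdhci" name is illustrative):

	/* must run before platform_add_devices() registers the hsmmc devices */
	s3c_sdhci_setname(2, "exynos4-sdhci");
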
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 858748eaa144..bc683b8219b5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -17,6 +17,8 @@
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/uaccess.h>
21#include <linux/user.h>
20 22
21#include <asm/cp15.h> 23#include <asm/cp15.h>
22#include <asm/cputype.h> 24#include <asm/cputype.h>
@@ -529,6 +531,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
529} 531}
530 532
531/* 533/*
534 * Save the current VFP state into the provided structures and prepare
535 * for entry into a new function (signal handler).
536 */
537int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
538 struct user_vfp_exc __user *ufp_exc)
539{
540 struct thread_info *thread = current_thread_info();
541 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
542 int err = 0;
543
544 /* Ensure that the saved hwstate is up-to-date. */
545 vfp_sync_hwstate(thread);
546
547 /*
548 * Copy the floating point registers. There can be unused
549	 * registers; see asm/hwcap.h for details.
550 */
551 err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
552 sizeof(hwstate->fpregs));
553 /*
554 * Copy the status and control register.
555 */
556 __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
557
558 /*
559 * Copy the exception registers.
560 */
561 __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
562 __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
563 __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
564
565 if (err)
566 return -EFAULT;
567
568 /* Ensure that VFP is disabled. */
569 vfp_flush_hwstate(thread);
570
571 /*
572 * As per the PCS, clear the length and stride bits for function
573 * entry.
574 */
575 hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
576
577 /*
578 * Disable VFP in the hwstate so that we can detect if it gets
579 * used.
580 */
581 hwstate->fpexc &= ~FPEXC_EN;
582 return 0;
583}
584
585/* Sanitise and restore the current VFP state from the provided structures. */
586int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
587 struct user_vfp_exc __user *ufp_exc)
588{
589 struct thread_info *thread = current_thread_info();
590 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
591 unsigned long fpexc;
592 int err = 0;
593
594 /*
595 * If VFP has been used, then disable it to avoid corrupting
596 * the new thread state.
597 */
598 if (hwstate->fpexc & FPEXC_EN)
599 vfp_flush_hwstate(thread);
600
601 /*
602 * Copy the floating point registers. There can be unused
603	 * registers; see asm/hwcap.h for details.
604 */
605 err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
606 sizeof(hwstate->fpregs));
607 /*
608 * Copy the status and control register.
609 */
610 __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
611
612 /*
613 * Sanitise and restore the exception registers.
614 */
615 __get_user_error(fpexc, &ufp_exc->fpexc, err);
616
617 /* Ensure the VFP is enabled. */
618 fpexc |= FPEXC_EN;
619
620 /* Ensure FPINST2 is invalid and the exception flag is cleared. */
621 fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
622 hwstate->fpexc = fpexc;
623
624 __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
625 __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
626
627 return err ? -EFAULT : 0;
628}
629
630/*
532 * VFP hardware can lose all context when a CPU goes offline. 631 * VFP hardware can lose all context when a CPU goes offline.
533 * As we will be running in SMP mode with CPU hotplug, we will save the 632 * As we will be running in SMP mode with CPU hotplug, we will save the
534 * hardware state at every thread switch. We clear our held state when 633 * hardware state at every thread switch. We clear our held state when
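
The two helpers added above are intended to be called from the ARM signal-frame code when a signal frame is built and torn down. The following is a minimal sketch of such a caller, assuming a hypothetical frame layout carrying a struct user_vfp and a struct user_vfp_exc (the real layout and call sites in arch/arm/kernel/signal.c differ; the helper prototypes are assumed visible via the VFP/user headers):

#include <linux/user.h>         /* struct user_vfp, struct user_vfp_exc */

/* Hypothetical signal-frame fragment; names are illustrative only. */
struct example_vfp_frame {
        struct user_vfp         ufp;
        struct user_vfp_exc     ufp_exc;
};

static int example_preserve_vfp(struct example_vfp_frame __user *frame)
{
        /*
         * Flushes live VFP state into thread->vfpstate, copies it out to
         * user space, then disables VFP and clears the FPSCR LEN/STRIDE
         * bits so the handler starts in the state the PCS requires.
         */
        return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int example_restore_vfp(struct example_vfp_frame __user *frame)
{
        /*
         * Copies the registers back and sanitises FPEXC (EN forced on,
         * EX/FP2V cleared) before the thread resumes.
         */
        return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
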
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 1633a6f306c0..85038f54354d 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -38,7 +38,7 @@ static struct platform_device rtc_device = {
38 .name = "rtc-bfin", 38 .name = "rtc-bfin",
39 .id = -1, 39 .id = -1,
40}; 40};
41#endif 41#endif /* CONFIG_RTC_DRV_BFIN */
42 42
43#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 43#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
44#ifdef CONFIG_SERIAL_BFIN_UART0 44#ifdef CONFIG_SERIAL_BFIN_UART0
@@ -100,7 +100,7 @@ static struct platform_device bfin_uart0_device = {
100 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ 100 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
101 }, 101 },
102}; 102};
103#endif 103#endif /* CONFIG_SERIAL_BFIN_UART0 */
104#ifdef CONFIG_SERIAL_BFIN_UART1 104#ifdef CONFIG_SERIAL_BFIN_UART1
105static struct resource bfin_uart1_resources[] = { 105static struct resource bfin_uart1_resources[] = {
106 { 106 {
@@ -148,7 +148,7 @@ static struct platform_device bfin_uart1_device = {
148 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ 148 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
149 }, 149 },
150}; 150};
151#endif 151#endif /* CONFIG_SERIAL_BFIN_UART1 */
152#ifdef CONFIG_SERIAL_BFIN_UART2 152#ifdef CONFIG_SERIAL_BFIN_UART2
153static struct resource bfin_uart2_resources[] = { 153static struct resource bfin_uart2_resources[] = {
154 { 154 {
@@ -196,8 +196,8 @@ static struct platform_device bfin_uart2_device = {
196 .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ 196 .platform_data = &bfin_uart2_peripherals, /* Passed to driver */
197 }, 197 },
198}; 198};
199#endif 199#endif /* CONFIG_SERIAL_BFIN_UART2 */
200#endif 200#endif /* CONFIG_SERIAL_BFIN */
201 201
202#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 202#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
203#ifdef CONFIG_BFIN_SIR0 203#ifdef CONFIG_BFIN_SIR0
@@ -224,7 +224,7 @@ static struct platform_device bfin_sir0_device = {
224 .num_resources = ARRAY_SIZE(bfin_sir0_resources), 224 .num_resources = ARRAY_SIZE(bfin_sir0_resources),
225 .resource = bfin_sir0_resources, 225 .resource = bfin_sir0_resources,
226}; 226};
227#endif 227#endif /* CONFIG_BFIN_SIR0 */
228#ifdef CONFIG_BFIN_SIR1 228#ifdef CONFIG_BFIN_SIR1
229static struct resource bfin_sir1_resources[] = { 229static struct resource bfin_sir1_resources[] = {
230 { 230 {
@@ -249,7 +249,7 @@ static struct platform_device bfin_sir1_device = {
249 .num_resources = ARRAY_SIZE(bfin_sir1_resources), 249 .num_resources = ARRAY_SIZE(bfin_sir1_resources),
250 .resource = bfin_sir1_resources, 250 .resource = bfin_sir1_resources,
251}; 251};
252#endif 252#endif /* CONFIG_BFIN_SIR1 */
253#ifdef CONFIG_BFIN_SIR2 253#ifdef CONFIG_BFIN_SIR2
254static struct resource bfin_sir2_resources[] = { 254static struct resource bfin_sir2_resources[] = {
255 { 255 {
@@ -274,8 +274,8 @@ static struct platform_device bfin_sir2_device = {
274 .num_resources = ARRAY_SIZE(bfin_sir2_resources), 274 .num_resources = ARRAY_SIZE(bfin_sir2_resources),
275 .resource = bfin_sir2_resources, 275 .resource = bfin_sir2_resources,
276}; 276};
277#endif 277#endif /* CONFIG_BFIN_SIR2 */
278#endif 278#endif /* CONFIG_BFIN_SIR */
279 279
280#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 280#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
281#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 281#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
@@ -311,7 +311,7 @@ static struct platform_device bfin_sport0_uart_device = {
311 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ 311 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
312 }, 312 },
313}; 313};
314#endif 314#endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */
315#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART 315#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
316static struct resource bfin_sport1_uart_resources[] = { 316static struct resource bfin_sport1_uart_resources[] = {
317 { 317 {
@@ -345,7 +345,7 @@ static struct platform_device bfin_sport1_uart_device = {
345 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ 345 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
346 }, 346 },
347}; 347};
348#endif 348#endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */
349#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART 349#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
350static struct resource bfin_sport2_uart_resources[] = { 350static struct resource bfin_sport2_uart_resources[] = {
351 { 351 {
@@ -379,7 +379,7 @@ static struct platform_device bfin_sport2_uart_device = {
379 .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ 379 .platform_data = &bfin_sport2_peripherals, /* Passed to driver */
380 }, 380 },
381}; 381};
382#endif 382#endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */
383#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART 383#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
384static struct resource bfin_sport3_uart_resources[] = { 384static struct resource bfin_sport3_uart_resources[] = {
385 { 385 {
@@ -413,8 +413,8 @@ static struct platform_device bfin_sport3_uart_device = {
413 .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ 413 .platform_data = &bfin_sport3_peripherals, /* Passed to driver */
414 }, 414 },
415}; 415};
416#endif 416#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */
417#endif 417#endif /* CONFIG_SERIAL_BFIN_SPORT */
418 418
419#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 419#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
420static unsigned short bfin_can_peripherals[] = { 420static unsigned short bfin_can_peripherals[] = {
@@ -452,7 +452,7 @@ static struct platform_device bfin_can_device = {
452 .platform_data = &bfin_can_peripherals, /* Passed to driver */ 452 .platform_data = &bfin_can_peripherals, /* Passed to driver */
453 }, 453 },
454}; 454};
455#endif 455#endif /* CONFIG_CAN_BFIN */
456 456
457/* 457/*
458 * USB-LAN EzExtender board 458 * USB-LAN EzExtender board
@@ -488,7 +488,7 @@ static struct platform_device smc91x_device = {
488 .platform_data = &smc91x_info, 488 .platform_data = &smc91x_info,
489 }, 489 },
490}; 490};
491#endif 491#endif /* CONFIG_SMC91X */
492 492
493#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 493#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
494/* all SPI peripherals info goes here */ 494/* all SPI peripherals info goes here */
@@ -518,7 +518,8 @@ static struct flash_platform_data bfin_spi_flash_data = {
518static struct bfin5xx_spi_chip spi_flash_chip_info = { 518static struct bfin5xx_spi_chip spi_flash_chip_info = {
519 .enable_dma = 0, /* use dma transfer with this chip*/ 519 .enable_dma = 0, /* use dma transfer with this chip*/
520}; 520};
521#endif 521#endif /* CONFIG_MTD_M25P80 */
522#endif /* CONFIG_SPI_BFIN5XX */
522 523
523#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) 524#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
524#include <linux/spi/ad7879.h> 525#include <linux/spi/ad7879.h>
@@ -535,7 +536,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
535 .gpio_export = 1, /* Export GPIO to gpiolib */ 536 .gpio_export = 1, /* Export GPIO to gpiolib */
536 .gpio_base = -1, /* Dynamic allocation */ 537 .gpio_base = -1, /* Dynamic allocation */
537}; 538};
538#endif 539#endif /* CONFIG_TOUCHSCREEN_AD7879 */
539 540
540#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 541#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
541#include <asm/bfin-lq035q1.h> 542#include <asm/bfin-lq035q1.h>
@@ -564,7 +565,7 @@ static struct platform_device bfin_lq035q1_device = {
564 .platform_data = &bfin_lq035q1_data, 565 .platform_data = &bfin_lq035q1_data,
565 }, 566 },
566}; 567};
567#endif 568#endif /* CONFIG_FB_BFIN_LQ035Q1 */
568 569
569static struct spi_board_info bf538_spi_board_info[] __initdata = { 570static struct spi_board_info bf538_spi_board_info[] __initdata = {
570#if defined(CONFIG_MTD_M25P80) \ 571#if defined(CONFIG_MTD_M25P80) \
@@ -579,7 +580,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
579 .controller_data = &spi_flash_chip_info, 580 .controller_data = &spi_flash_chip_info,
580 .mode = SPI_MODE_3, 581 .mode = SPI_MODE_3,
581 }, 582 },
582#endif 583#endif /* CONFIG_MTD_M25P80 */
583#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 584#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
584 { 585 {
585 .modalias = "ad7879", 586 .modalias = "ad7879",
@@ -590,7 +591,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
590 .chip_select = 1, 591 .chip_select = 1,
591 .mode = SPI_CPHA | SPI_CPOL, 592 .mode = SPI_CPHA | SPI_CPOL,
592 }, 593 },
593#endif 594#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */
594#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 595#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
595 { 596 {
596 .modalias = "bfin-lq035q1-spi", 597 .modalias = "bfin-lq035q1-spi",
@@ -599,7 +600,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
599 .chip_select = 2, 600 .chip_select = 2,
600 .mode = SPI_CPHA | SPI_CPOL, 601 .mode = SPI_CPHA | SPI_CPOL,
601 }, 602 },
602#endif 603#endif /* CONFIG_FB_BFIN_LQ035Q1 */
603#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 604#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
604 { 605 {
605 .modalias = "spidev", 606 .modalias = "spidev",
@@ -607,7 +608,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
607 .bus_num = 0, 608 .bus_num = 0,
608 .chip_select = 1, 609 .chip_select = 1,
609 }, 610 },
610#endif 611#endif /* CONFIG_SPI_SPIDEV */
611}; 612};
612 613
613/* SPI (0) */ 614/* SPI (0) */
@@ -716,8 +717,6 @@ static struct platform_device bf538_spi_master2 = {
716 }, 717 },
717}; 718};
718 719
719#endif /* spi master and devices */
720
721#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 720#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
722static struct resource bfin_twi0_resource[] = { 721static struct resource bfin_twi0_resource[] = {
723 [0] = { 722 [0] = {
@@ -759,8 +758,8 @@ static struct platform_device i2c_bfin_twi1_device = {
759 .num_resources = ARRAY_SIZE(bfin_twi1_resource), 758 .num_resources = ARRAY_SIZE(bfin_twi1_resource),
760 .resource = bfin_twi1_resource, 759 .resource = bfin_twi1_resource,
761}; 760};
762#endif 761#endif /* CONFIG_BF542 */
763#endif 762#endif /* CONFIG_I2C_BLACKFIN_TWI */
764 763
765#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 764#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
766#include <linux/gpio_keys.h> 765#include <linux/gpio_keys.h>
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 37302218ca4a..0f2367cc5493 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -22,6 +22,7 @@
22#include <linux/bootmem.h> 22#include <linux/bootmem.h>
23#include <linux/genalloc.h> 23#include <linux/genalloc.h>
24#include <asm/dma-mapping.h> 24#include <asm/dma-mapping.h>
25#include <linux/module.h>
25 26
26struct dma_map_ops *dma_ops; 27struct dma_map_ops *dma_ops;
27EXPORT_SYMBOL(dma_ops); 28EXPORT_SYMBOL(dma_ops);
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 18c4f0b0f4ba..ff02821bfb7e 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Process creation support for Hexagon 2 * Process creation support for Hexagon
3 * 3 *
4 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 4 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and 7 * it under the terms of the GNU General Public License version 2 and
@@ -88,7 +88,7 @@ void (*idle_sleep)(void) = default_idle;
88void cpu_idle(void) 88void cpu_idle(void)
89{ 89{
90 while (1) { 90 while (1) {
91 tick_nohz_stop_sched_tick(1); 91 tick_nohz_idle_enter();
92 local_irq_disable(); 92 local_irq_disable();
93 while (!need_resched()) { 93 while (!need_resched()) {
94 idle_sleep(); 94 idle_sleep();
@@ -97,7 +97,7 @@ void cpu_idle(void)
97 local_irq_disable(); 97 local_irq_disable();
98 } 98 }
99 local_irq_enable(); 99 local_irq_enable();
100 tick_nohz_restart_sched_tick(); 100 tick_nohz_idle_exit();
101 schedule(); 101 schedule();
102 } 102 }
103} 103}
diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
index 32342de1a79c..96c3b2c4dbad 100644
--- a/arch/hexagon/kernel/ptrace.c
+++ b/arch/hexagon/kernel/ptrace.c
@@ -28,6 +28,7 @@
28#include <linux/ptrace.h> 28#include <linux/ptrace.h>
29#include <linux/regset.h> 29#include <linux/regset.h>
30#include <linux/user.h> 30#include <linux/user.h>
31#include <linux/elf.h>
31 32
32#include <asm/user.h> 33#include <asm/user.h>
33 34
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 9b44a9e2d05a..1298141874a3 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SMP support for Hexagon 2 * SMP support for Hexagon
3 * 3 *
4 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 4 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and 7 * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/smp.h> 29#include <linux/smp.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/cpu.h>
31 32
32#include <asm/time.h> /* timer_interrupt */ 33#include <asm/time.h> /* timer_interrupt */
33#include <asm/hexagon_vm.h> 34#include <asm/hexagon_vm.h>
@@ -177,7 +178,12 @@ void __cpuinit start_secondary(void)
177 178
178 printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); 179 printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
179 180
181 notify_cpu_starting(cpu);
182
183 ipi_call_lock();
180 set_cpu_online(cpu, true); 184 set_cpu_online(cpu, true);
185 ipi_call_unlock();
186
181 local_irq_enable(); 187 local_irq_enable();
182 188
183 cpu_idle(); 189 cpu_idle();
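
The smp.c hunk brings Hexagon's secondary bring-up in line with the common convention: fire the CPU_STARTING notifiers before the CPU is marked online, and hold ipi_call_lock() across set_cpu_online() so the online mask cannot change under a concurrent smp_call_function() setup. A simplified sketch of the resulting ordering (not the full start_secondary()):

#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

/* Illustrative tail of a secondary-CPU startup path. */
static void __cpuinit example_secondary_tail(unsigned int cpu)
{
        notify_cpu_starting(cpu);       /* CPU_STARTING notifier chain */

        ipi_call_lock();                /* exclude cross-call setup */
        set_cpu_online(cpu, true);
        ipi_call_unlock();

        local_irq_enable();
        cpu_idle();                     /* does not return */
}
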
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index 6bee15c9c113..5d9b33b67935 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -28,6 +28,7 @@
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_address.h> 29#include <linux/of_address.h>
30#include <linux/of_irq.h> 30#include <linux/of_irq.h>
31#include <linux/module.h>
31 32
32#include <asm/timer-regs.h> 33#include <asm/timer-regs.h>
33#include <asm/hexagon_vm.h> 34#include <asm/hexagon_vm.h>
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index f212a453b527..5d39f42f7085 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/binfmts.h>
24 25
25#include <asm/vdso.h> 26#include <asm/vdso.h>
26 27
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9d0fd7d5bb82..f00ba025375d 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
604 spin_unlock(&(x)->ctx_lock); 604 spin_unlock(&(x)->ctx_lock);
605} 605}
606 606
607static inline unsigned int
608pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
609{
610 return do_munmap(mm, addr, len);
611}
612
613static inline unsigned long 607static inline unsigned long
614pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec) 608pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
615{ 609{
@@ -1458,8 +1452,9 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1458 * a PROTECT_CTX() section. 1452 * a PROTECT_CTX() section.
1459 */ 1453 */
1460static int 1454static int
1461pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size) 1455pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1462{ 1456{
1457 struct task_struct *task = current;
1463 int r; 1458 int r;
1464 1459
1465 /* sanity checks */ 1460 /* sanity checks */
@@ -1473,13 +1468,8 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
1473 /* 1468 /*
1474 * does the actual unmapping 1469 * does the actual unmapping
1475 */ 1470 */
1476 down_write(&task->mm->mmap_sem); 1471 r = vm_munmap((unsigned long)vaddr, size);
1477 1472
1478 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1479
1480 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1481
1482 up_write(&task->mm->mmap_sem);
1483 if (r !=0) { 1473 if (r !=0) {
1484 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); 1474 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1485 } 1475 }
@@ -1945,7 +1935,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
1945 * because some VM function reenables interrupts. 1935 * because some VM function reenables interrupts.
1946 * 1936 *
1947 */ 1937 */
1948 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); 1938 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
1949 1939
1950 return 0; 1940 return 0;
1951} 1941}
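
The perfmon hunks are part of a tree-wide conversion: open-coded mmap_sem/do_munmap() sequences on current->mm become vm_munmap(), which takes the semaphore itself and always operates on the current process. The pattern, sketched outside of perfmon with illustrative names:

#include <linux/mm.h>
#include <linux/sched.h>

/* Old style: the caller locks current->mm->mmap_sem around do_munmap(). */
static int example_unmap_old(void *vaddr, unsigned long size)
{
        int r;

        down_write(&current->mm->mmap_sem);
        r = do_munmap(current->mm, (unsigned long)vaddr, size);
        up_write(&current->mm->mmap_sem);
        return r;
}

/* New style: vm_munmap() handles the locking and uses current->mm. */
static int example_unmap_new(void *vaddr, unsigned long size)
{
        return vm_munmap((unsigned long)vaddr, size);
}
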
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f5104b7c52cd..463fb3bbe11e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1174,7 +1174,7 @@ out:
1174 1174
1175bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) 1175bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
1176{ 1176{
1177 return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); 1177 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
1178} 1178}
1179 1179
1180int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 1180int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index 33c32aeca12b..a1230e82bb1e 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -49,7 +49,6 @@ CONFIG_BLK_DEV_RAM=y
49CONFIG_NETDEVICES=y 49CONFIG_NETDEVICES=y
50CONFIG_NET_ETHERNET=y 50CONFIG_NET_ETHERNET=y
51CONFIG_FEC=y 51CONFIG_FEC=y
52CONFIG_FEC2=y
53# CONFIG_NETDEV_1000 is not set 52# CONFIG_NETDEV_1000 is not set
54# CONFIG_NETDEV_10000 is not set 53# CONFIG_NETDEV_10000 is not set
55CONFIG_PPP=y 54CONFIG_PPP=y
diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c
index 235947844f27..09df4b89e8be 100644
--- a/arch/m68k/platform/520x/config.c
+++ b/arch/m68k/platform/520x/config.c
@@ -22,7 +22,7 @@
22 22
23/***************************************************************************/ 23/***************************************************************************/
24 24
25#ifdef CONFIG_SPI_COLDFIRE_QSPI 25#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
26 26
27static void __init m520x_qspi_init(void) 27static void __init m520x_qspi_init(void)
28{ 28{
@@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void)
35 writew(par, MCF_GPIO_PAR_UART); 35 writew(par, MCF_GPIO_PAR_UART);
36} 36}
37 37
38#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 38#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
39 39
40/***************************************************************************/ 40/***************************************************************************/
41 41
@@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size)
79 mach_sched_init = hw_timer_init; 79 mach_sched_init = hw_timer_init;
80 m520x_uarts_init(); 80 m520x_uarts_init();
81 m520x_fec_init(); 81 m520x_fec_init();
82#ifdef CONFIG_SPI_COLDFIRE_QSPI 82#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
83 m520x_qspi_init(); 83 m520x_qspi_init();
84#endif 84#endif
85} 85}
diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/523x/config.c
index c8b405d5a961..d47dfd8f50a2 100644
--- a/arch/m68k/platform/523x/config.c
+++ b/arch/m68k/platform/523x/config.c
@@ -22,7 +22,7 @@
22 22
23/***************************************************************************/ 23/***************************************************************************/
24 24
25#ifdef CONFIG_SPI_COLDFIRE_QSPI 25#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
26 26
27static void __init m523x_qspi_init(void) 27static void __init m523x_qspi_init(void)
28{ 28{
@@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void)
36 writew(par, MCFGPIO_PAR_TIMER); 36 writew(par, MCFGPIO_PAR_TIMER);
37} 37}
38 38
39#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 39#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
40 40
41/***************************************************************************/ 41/***************************************************************************/
42 42
@@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size)
58{ 58{
59 mach_sched_init = hw_timer_init; 59 mach_sched_init = hw_timer_init;
60 m523x_fec_init(); 60 m523x_fec_init();
61#ifdef CONFIG_SPI_COLDFIRE_QSPI 61#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
62 m523x_qspi_init(); 62 m523x_qspi_init();
63#endif 63#endif
64} 64}
diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/5249/config.c
index bbf05135bb98..300e729a58d0 100644
--- a/arch/m68k/platform/5249/config.c
+++ b/arch/m68k/platform/5249/config.c
@@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = {
51 51
52/***************************************************************************/ 52/***************************************************************************/
53 53
54#ifdef CONFIG_SPI_COLDFIRE_QSPI 54#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
55 55
56static void __init m5249_qspi_init(void) 56static void __init m5249_qspi_init(void)
57{ 57{
@@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void)
61 mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); 61 mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
62} 62}
63 63
64#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 64#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
65 65
66/***************************************************************************/ 66/***************************************************************************/
67 67
@@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
90#ifdef CONFIG_M5249C3 90#ifdef CONFIG_M5249C3
91 m5249_smc91x_init(); 91 m5249_smc91x_init();
92#endif 92#endif
93#ifdef CONFIG_SPI_COLDFIRE_QSPI 93#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
94 m5249_qspi_init(); 94 m5249_qspi_init();
95#endif 95#endif
96} 96}
diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c
index 7ed848c3b848..b3cb378c5e94 100644
--- a/arch/m68k/platform/527x/config.c
+++ b/arch/m68k/platform/527x/config.c
@@ -23,7 +23,7 @@
23 23
24/***************************************************************************/ 24/***************************************************************************/
25 25
26#ifdef CONFIG_SPI_COLDFIRE_QSPI 26#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
27 27
28static void __init m527x_qspi_init(void) 28static void __init m527x_qspi_init(void)
29{ 29{
@@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void)
42#endif 42#endif
43} 43}
44 44
45#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 45#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
46 46
47/***************************************************************************/ 47/***************************************************************************/
48 48
@@ -74,9 +74,7 @@ static void __init m527x_fec_init(void)
74 writew(par | 0xf00, MCF_IPSBAR + 0x100082); 74 writew(par | 0xf00, MCF_IPSBAR + 0x100082);
75 v = readb(MCF_IPSBAR + 0x100078); 75 v = readb(MCF_IPSBAR + 0x100078);
76 writeb(v | 0xc0, MCF_IPSBAR + 0x100078); 76 writeb(v | 0xc0, MCF_IPSBAR + 0x100078);
77#endif
78 77
79#ifdef CONFIG_FEC2
80 /* Set multi-function pins to ethernet mode for fec1 */ 78 /* Set multi-function pins to ethernet mode for fec1 */
81 par = readw(MCF_IPSBAR + 0x100082); 79 par = readw(MCF_IPSBAR + 0x100082);
82 writew(par | 0xa0, MCF_IPSBAR + 0x100082); 80 writew(par | 0xa0, MCF_IPSBAR + 0x100082);
@@ -92,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
92 mach_sched_init = hw_timer_init; 90 mach_sched_init = hw_timer_init;
93 m527x_uarts_init(); 91 m527x_uarts_init();
94 m527x_fec_init(); 92 m527x_fec_init();
95#ifdef CONFIG_SPI_COLDFIRE_QSPI 93#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
96 m527x_qspi_init(); 94 m527x_qspi_init();
97#endif 95#endif
98} 96}
diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/528x/config.c
index d4492926614c..c5f11ba49be5 100644
--- a/arch/m68k/platform/528x/config.c
+++ b/arch/m68k/platform/528x/config.c
@@ -24,7 +24,7 @@
24 24
25/***************************************************************************/ 25/***************************************************************************/
26 26
27#ifdef CONFIG_SPI_COLDFIRE_QSPI 27#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
28 28
29static void __init m528x_qspi_init(void) 29static void __init m528x_qspi_init(void)
30{ 30{
@@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void)
32 __raw_writeb(0x07, MCFGPIO_PQSPAR); 32 __raw_writeb(0x07, MCFGPIO_PQSPAR);
33} 33}
34 34
35#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 35#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
36 36
37/***************************************************************************/ 37/***************************************************************************/
38 38
@@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size)
98 mach_sched_init = hw_timer_init; 98 mach_sched_init = hw_timer_init;
99 m528x_uarts_init(); 99 m528x_uarts_init();
100 m528x_fec_init(); 100 m528x_fec_init();
101#ifdef CONFIG_SPI_COLDFIRE_QSPI 101#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
102 m528x_qspi_init(); 102 m528x_qspi_init();
103#endif 103#endif
104} 104}
diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/532x/config.c
index 2bec3477b739..37082d02f2bd 100644
--- a/arch/m68k/platform/532x/config.c
+++ b/arch/m68k/platform/532x/config.c
@@ -30,7 +30,7 @@
30 30
31/***************************************************************************/ 31/***************************************************************************/
32 32
33#ifdef CONFIG_SPI_COLDFIRE_QSPI 33#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
34 34
35static void __init m532x_qspi_init(void) 35static void __init m532x_qspi_init(void)
36{ 36{
@@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void)
38 writew(0x01f0, MCF_GPIO_PAR_QSPI); 38 writew(0x01f0, MCF_GPIO_PAR_QSPI);
39} 39}
40 40
41#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 41#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
42 42
43/***************************************************************************/ 43/***************************************************************************/
44 44
@@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size)
77 mach_sched_init = hw_timer_init; 77 mach_sched_init = hw_timer_init;
78 m532x_uarts_init(); 78 m532x_uarts_init();
79 m532x_fec_init(); 79 m532x_fec_init();
80#ifdef CONFIG_SPI_COLDFIRE_QSPI 80#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
81 m532x_qspi_init(); 81 m532x_qspi_init();
82#endif 82#endif
83 83
diff --git a/arch/m68k/platform/68EZ328/Makefile b/arch/m68k/platform/68EZ328/Makefile
index ee97735a242c..b44d799b1115 100644
--- a/arch/m68k/platform/68EZ328/Makefile
+++ b/arch/m68k/platform/68EZ328/Makefile
@@ -3,9 +3,3 @@
3# 3#
4 4
5obj-y := config.o 5obj-y := config.o
6
7extra-y := bootlogo.rh
8
9$(obj)/bootlogo.rh: $(src)/bootlogo.h
10 perl $(src)/../68328/bootlogo.pl < $(src)/bootlogo.h \
11 > $(obj)/bootlogo.rh
diff --git a/arch/m68k/platform/68VZ328/Makefile b/arch/m68k/platform/68VZ328/Makefile
index 447ffa0fd7c7..a49d75e65489 100644
--- a/arch/m68k/platform/68VZ328/Makefile
+++ b/arch/m68k/platform/68VZ328/Makefile
@@ -3,14 +3,9 @@
3# 3#
4 4
5obj-y := config.o 5obj-y := config.o
6logo-$(UCDIMM) := bootlogo.rh 6extra-$(DRAGEN2):= screen.h
7logo-$(DRAGEN2) := screen.h
8extra-y := $(logo-y)
9
10$(obj)/bootlogo.rh: $(src)/../68EZ328/bootlogo.h
11 perl $(src)/bootlogo.pl < $(src)/../68328/bootlogo.h > $(obj)/bootlogo.rh
12 7
13$(obj)/screen.h: $(src)/screen.xbm $(src)/xbm2lcd.pl 8$(obj)/screen.h: $(src)/screen.xbm $(src)/xbm2lcd.pl
14 perl $(src)/xbm2lcd.pl < $(src)/screen.xbm > $(obj)/screen.h 9 perl $(src)/xbm2lcd.pl < $(src)/screen.xbm > $(obj)/screen.h
15 10
16clean-files := $(obj)/screen.h $(obj)/bootlogo.rh 11clean-files := $(obj)/screen.h
diff --git a/arch/m68k/platform/68EZ328/bootlogo.h b/arch/m68k/platform/68VZ328/bootlogo.h
index e842bdae5839..b38e2b255142 100644
--- a/arch/m68k/platform/68EZ328/bootlogo.h
+++ b/arch/m68k/platform/68VZ328/bootlogo.h
@@ -1,6 +1,6 @@
1#define splash_width 640 1#define splash_width 640
2#define splash_height 480 2#define splash_height 480
3static unsigned char splash_bits[] = { 3unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
4 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 4 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 5 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 6 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c
index fa50c48292ff..3aa77ddea89d 100644
--- a/arch/m68k/platform/coldfire/device.c
+++ b/arch/m68k/platform/coldfire/device.c
@@ -114,14 +114,14 @@ static struct resource mcf_fec1_resources[] = {
114 114
115static struct platform_device mcf_fec1 = { 115static struct platform_device mcf_fec1 = {
116 .name = "fec", 116 .name = "fec",
117 .id = 0, 117 .id = 1,
118 .num_resources = ARRAY_SIZE(mcf_fec1_resources), 118 .num_resources = ARRAY_SIZE(mcf_fec1_resources),
119 .resource = mcf_fec1_resources, 119 .resource = mcf_fec1_resources,
120}; 120};
121#endif /* MCFFEC_BASE1 */ 121#endif /* MCFFEC_BASE1 */
122#endif /* CONFIG_FEC */ 122#endif /* CONFIG_FEC */
123 123
124#ifdef CONFIG_SPI_COLDFIRE_QSPI 124#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
125/* 125/*
126 * The ColdFire QSPI module is an SPI protocol hardware block used 126 * The ColdFire QSPI module is an SPI protocol hardware block used
127 * on a number of different ColdFire CPUs. 127 * on a number of different ColdFire CPUs.
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = {
274 .resource = mcf_qspi_resources, 274 .resource = mcf_qspi_resources,
275 .dev.platform_data = &mcf_qspi_data, 275 .dev.platform_data = &mcf_qspi_data,
276}; 276};
277#endif /* CONFIG_SPI_COLDFIRE_QSPI */ 277#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
278 278
279static struct platform_device *mcf_devices[] __initdata = { 279static struct platform_device *mcf_devices[] __initdata = {
280 &mcf_uart, 280 &mcf_uart,
@@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = {
284 &mcf_fec1, 284 &mcf_fec1,
285#endif 285#endif
286#endif 286#endif
287#ifdef CONFIG_SPI_COLDFIRE_QSPI 287#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
288 &mcf_qspi, 288 &mcf_qspi,
289#endif 289#endif
290}; 290};
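
All of the ColdFire "#ifdef CONFIG_SPI_COLDFIRE_QSPI" guards above become IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI). IS_ENABLED() (from <linux/kconfig.h>) evaluates to 1 for both built-in (=y) and modular (=m) configurations, so the QSPI platform devices are now registered when the SPI driver is built as a module as well. The shape of such a guard, sketched with a hypothetical board hook:

#include <linux/kconfig.h>
#include <linux/init.h>

/*
 * IS_ENABLED(CONFIG_FOO) is 1 when CONFIG_FOO=y or CONFIG_FOO=m and 0
 * otherwise, whereas "#ifdef CONFIG_FOO" only matches the =y case.
 */
static void __init example_config_bsp(void)
{
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
        example_qspi_init();            /* hypothetical pin-mux hook */
#endif
}
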
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index e21507052066..9c717bf98ffe 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)
58 58
59static int ar933x_wmac_reset(void) 59static int ar933x_wmac_reset(void)
60{ 60{
61 ath79_device_reset_clear(AR933X_RESET_WMAC);
62 ath79_device_reset_set(AR933X_RESET_WMAC); 61 ath79_device_reset_set(AR933X_RESET_WMAC);
62 ath79_device_reset_clear(AR933X_RESET_WMAC);
63 63
64 return 0; 64 return 0;
65} 65}
diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h
index a865c983c70a..5ad1a9c113c6 100644
--- a/arch/mips/include/asm/mach-jz4740/irq.h
+++ b/arch/mips/include/asm/mach-jz4740/irq.h
@@ -45,7 +45,7 @@
45#define JZ4740_IRQ_LCD JZ4740_IRQ(30) 45#define JZ4740_IRQ_LCD JZ4740_IRQ(30)
46 46
47/* 2nd-level interrupts */ 47/* 2nd-level interrupts */
48#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) 48#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x))
49 49
50#define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) 50#define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x))
51#define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) 51#define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x))
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 73c0d45798de..9b02cfba7449 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
37 write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ 37 write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
38 } while (0) 38 } while (0)
39 39
40
41static inline unsigned long get_current_pgd(void)
42{
43 return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
44}
45
46#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ 40#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
47 41
48/* 42/*
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 185ca00c4c84..d5a338a1739c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
257 return -EFAULT; 257 return -EFAULT;
258 sigdelsetmask(&newset, ~_BLOCKABLE); 258 sigdelsetmask(&newset, ~_BLOCKABLE);
259 259
260 spin_lock_irq(&current->sighand->siglock);
261 current->saved_sigmask = current->blocked; 260 current->saved_sigmask = current->blocked;
262 current->blocked = newset; 261 set_current_blocked(&newset);
263 recalc_sigpending();
264 spin_unlock_irq(&current->sighand->siglock);
265 262
266 current->state = TASK_INTERRUPTIBLE; 263 current->state = TASK_INTERRUPTIBLE;
267 schedule(); 264 schedule();
@@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
286 return -EFAULT; 283 return -EFAULT;
287 sigdelsetmask(&newset, ~_BLOCKABLE); 284 sigdelsetmask(&newset, ~_BLOCKABLE);
288 285
289 spin_lock_irq(&current->sighand->siglock);
290 current->saved_sigmask = current->blocked; 286 current->saved_sigmask = current->blocked;
291 current->blocked = newset; 287 set_current_blocked(&newset);
292 recalc_sigpending();
293 spin_unlock_irq(&current->sighand->siglock);
294 288
295 current->state = TASK_INTERRUPTIBLE; 289 current->state = TASK_INTERRUPTIBLE;
296 schedule(); 290 schedule();
@@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
362 goto badframe; 356 goto badframe;
363 357
364 sigdelsetmask(&blocked, ~_BLOCKABLE); 358 sigdelsetmask(&blocked, ~_BLOCKABLE);
365 spin_lock_irq(&current->sighand->siglock); 359 set_current_blocked(&blocked);
366 current->blocked = blocked;
367 recalc_sigpending();
368 spin_unlock_irq(&current->sighand->siglock);
369 360
370 sig = restore_sigcontext(&regs, &frame->sf_sc); 361 sig = restore_sigcontext(&regs, &frame->sf_sc);
371 if (sig < 0) 362 if (sig < 0)
@@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
401 goto badframe; 392 goto badframe;
402 393
403 sigdelsetmask(&set, ~_BLOCKABLE); 394 sigdelsetmask(&set, ~_BLOCKABLE);
404 spin_lock_irq(&current->sighand->siglock); 395 set_current_blocked(&set);
405 current->blocked = set;
406 recalc_sigpending();
407 spin_unlock_irq(&current->sighand->siglock);
408 396
409 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); 397 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
410 if (sig < 0) 398 if (sig < 0)
@@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
580 if (ret) 568 if (ret)
581 return ret; 569 return ret;
582 570
583 spin_lock_irq(&current->sighand->siglock); 571 block_sigmask(ka, sig);
584 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
585 if (!(ka->sa.sa_flags & SA_NODEFER))
586 sigaddset(&current->blocked, sig);
587 recalc_sigpending();
588 spin_unlock_irq(&current->sighand->siglock);
589 572
590 return ret; 573 return ret;
591} 574}
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 06b5da392e24..ac3b8d89aae5 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
290 return -EFAULT; 290 return -EFAULT;
291 sigdelsetmask(&newset, ~_BLOCKABLE); 291 sigdelsetmask(&newset, ~_BLOCKABLE);
292 292
293 spin_lock_irq(&current->sighand->siglock);
294 current->saved_sigmask = current->blocked; 293 current->saved_sigmask = current->blocked;
295 current->blocked = newset; 294 set_current_blocked(&newset);
296 recalc_sigpending();
297 spin_unlock_irq(&current->sighand->siglock);
298 295
299 current->state = TASK_INTERRUPTIBLE; 296 current->state = TASK_INTERRUPTIBLE;
300 schedule(); 297 schedule();
@@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
318 return -EFAULT; 315 return -EFAULT;
319 sigdelsetmask(&newset, ~_BLOCKABLE); 316 sigdelsetmask(&newset, ~_BLOCKABLE);
320 317
321 spin_lock_irq(&current->sighand->siglock);
322 current->saved_sigmask = current->blocked; 318 current->saved_sigmask = current->blocked;
323 current->blocked = newset; 319 set_current_blocked(&newset);
324 recalc_sigpending();
325 spin_unlock_irq(&current->sighand->siglock);
326 320
327 current->state = TASK_INTERRUPTIBLE; 321 current->state = TASK_INTERRUPTIBLE;
328 schedule(); 322 schedule();
@@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
488 goto badframe; 482 goto badframe;
489 483
490 sigdelsetmask(&blocked, ~_BLOCKABLE); 484 sigdelsetmask(&blocked, ~_BLOCKABLE);
491 spin_lock_irq(&current->sighand->siglock); 485 set_current_blocked(&blocked);
492 current->blocked = blocked;
493 recalc_sigpending();
494 spin_unlock_irq(&current->sighand->siglock);
495 486
496 sig = restore_sigcontext32(&regs, &frame->sf_sc); 487 sig = restore_sigcontext32(&regs, &frame->sf_sc);
497 if (sig < 0) 488 if (sig < 0)
@@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
529 goto badframe; 520 goto badframe;
530 521
531 sigdelsetmask(&set, ~_BLOCKABLE); 522 sigdelsetmask(&set, ~_BLOCKABLE);
532 spin_lock_irq(&current->sighand->siglock); 523 set_current_blocked(&set);
533 current->blocked = set;
534 recalc_sigpending();
535 spin_unlock_irq(&current->sighand->siglock);
536 524
537 sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); 525 sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
538 if (sig < 0) 526 if (sig < 0)
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index ae29e894ab8d..86eb4b04631c 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
93 sigset_from_compat(&newset, &uset); 93 sigset_from_compat(&newset, &uset);
94 sigdelsetmask(&newset, ~_BLOCKABLE); 94 sigdelsetmask(&newset, ~_BLOCKABLE);
95 95
96 spin_lock_irq(&current->sighand->siglock);
97 current->saved_sigmask = current->blocked; 96 current->saved_sigmask = current->blocked;
98 current->blocked = newset; 97 set_current_blocked(&newset);
99 recalc_sigpending();
100 spin_unlock_irq(&current->sighand->siglock);
101 98
102 current->state = TASK_INTERRUPTIBLE; 99 current->state = TASK_INTERRUPTIBLE;
103 schedule(); 100 schedule();
@@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
121 goto badframe; 118 goto badframe;
122 119
123 sigdelsetmask(&set, ~_BLOCKABLE); 120 sigdelsetmask(&set, ~_BLOCKABLE);
124 spin_lock_irq(&current->sighand->siglock); 121 set_current_blocked(&set);
125 current->blocked = set;
126 recalc_sigpending();
127 spin_unlock_irq(&current->sighand->siglock);
128 122
129 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); 123 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
130 if (sig < 0) 124 if (sig < 0)
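
The three MIPS signal files above all make the same substitution: the hand-rolled siglock/recalc_sigpending() sequence becomes set_current_blocked(), and handle_signal() uses block_sigmask(), which additionally ORs in the handler's sa_mask and the signal itself unless SA_NODEFER is set. The two equivalent forms, sketched with illustrative wrappers:

#include <linux/sched.h>
#include <linux/signal.h>

/* Old, open-coded form repeated at every call site. */
static void example_block_old(sigset_t *newset)
{
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = *newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}

/* New form: the locked update and pending re-check live in kernel/signal.c. */
static void example_block_new(sigset_t *newset)
{
        set_current_blocked(newset);
}
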
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
index 4e9626836bab..d1d864b81bae 100644
--- a/arch/parisc/include/asm/hardware.h
+++ b/arch/parisc/include/asm/hardware.h
@@ -2,7 +2,6 @@
2#define _PARISC_HARDWARE_H 2#define _PARISC_HARDWARE_H
3 3
4#include <linux/mod_devicetable.h> 4#include <linux/mod_devicetable.h>
5#include <asm/pdc.h>
6 5
7#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID 6#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
8#define HVERSION_ANY_ID PA_HVERSION_ANY_ID 7#define HVERSION_ANY_ID PA_HVERSION_ANY_ID
@@ -95,12 +94,14 @@ struct bc_module {
95#define HPHW_MC 15 94#define HPHW_MC 15
96#define HPHW_FAULTY 31 95#define HPHW_FAULTY 31
97 96
97struct parisc_device_id;
98 98
99/* hardware.c: */ 99/* hardware.c: */
100extern const char *parisc_hardware_description(struct parisc_device_id *id); 100extern const char *parisc_hardware_description(struct parisc_device_id *id);
101extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); 101extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
102 102
103struct pci_dev; 103struct pci_dev;
104struct hardware_path;
104 105
105/* drivers.c: */ 106/* drivers.c: */
106extern struct parisc_device *alloc_pa_dev(unsigned long hpa, 107extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index a84cc1f925f6..4e0e7dbf0f3f 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -160,5 +160,11 @@ extern int npmem_ranges;
160 160
161#include <asm-generic/memory_model.h> 161#include <asm-generic/memory_model.h>
162#include <asm-generic/getorder.h> 162#include <asm-generic/getorder.h>
163#include <asm/pdc.h>
164
165#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
166
167/* DEFINITION OF THE ZERO-PAGE (PAG0) */
168/* based on work by Jason Eckhardt (jason@equator.com) */
163 169
164#endif /* _PARISC_PAGE_H */ 170#endif /* _PARISC_PAGE_H */
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 4ca510b3c6f8..7f0f2d23059d 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -343,8 +343,6 @@
343 343
344#ifdef __KERNEL__ 344#ifdef __KERNEL__
345 345
346#include <asm/page.h> /* for __PAGE_OFFSET */
347
348extern int pdc_type; 346extern int pdc_type;
349 347
350/* Values for pdc_type */ 348/* Values for pdc_type */
@@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) {
677 675
678#endif /* __KERNEL__ */ 676#endif /* __KERNEL__ */
679 677
680#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
681
682/* DEFINITION OF THE ZERO-PAGE (PAG0) */
683/* based on work by Jason Eckhardt (jason@equator.com) */
684
685/* flags of the device_path */ 678/* flags of the device_path */
686#define PF_AUTOBOOT 0x80 679#define PF_AUTOBOOT 0x80
687#define PF_AUTOSEARCH 0x40 680#define PF_AUTOSEARCH 0x40
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 22dadeb58695..ee99f2339356 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -44,6 +44,8 @@ struct vm_area_struct;
44 44
45#endif /* !__ASSEMBLY__ */ 45#endif /* !__ASSEMBLY__ */
46 46
47#include <asm/page.h>
48
47#define pte_ERROR(e) \ 49#define pte_ERROR(e) \
48 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 50 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
49#define pmd_ERROR(e) \ 51#define pmd_ERROR(e) \
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 804aa28ab1d6..3516e0b27044 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_SPINLOCK_H 1#ifndef __ASM_SPINLOCK_H
2#define __ASM_SPINLOCK_H 2#define __ASM_SPINLOCK_H
3 3
4#include <asm/barrier.h>
5#include <asm/ldcw.h>
4#include <asm/processor.h> 6#include <asm/processor.h>
5#include <asm/spinlock_types.h> 7#include <asm/spinlock_types.h>
6 8
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 4f004596a6e7..47341aa208f2 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -50,6 +50,7 @@
50#include <linux/init.h> 50#include <linux/init.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/tty.h> 52#include <linux/tty.h>
53#include <asm/page.h> /* for PAGE0 */
53#include <asm/pdc.h> /* for iodc_call() proto and friends */ 54#include <asm/pdc.h> /* for iodc_call() proto and friends */
54 55
55static DEFINE_SPINLOCK(pdc_console_lock); 56static DEFINE_SPINLOCK(pdc_console_lock);
@@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
104 105
105static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) 106static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
106{ 107{
107 if (!tty->count) { 108 if (tty->count == 1) {
108 del_timer_sync(&pdc_console_timer); 109 del_timer_sync(&pdc_console_timer);
109 tty_port_tty_set(&tty_port, NULL); 110 tty_port_tty_set(&tty_port, NULL);
110 } 111 }
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 7c0774397b89..70e105d62423 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -29,6 +29,7 @@
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/page.h>
32#include <asm/param.h> 33#include <asm/param.h>
33#include <asm/pdc.h> 34#include <asm/pdc.h>
34#include <asm/led.h> 35#include <asm/led.h>
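
The parisc hunks untangle a header loop: asm/pdc.h no longer needs asm/page.h (PAGE0 moves next to __PAGE_OFFSET in asm/page.h), and asm/hardware.h drops its pdc.h include in favour of forward declarations for the types it only names through pointers, with the users (pdc_cons.c, time.c, spinlock.h, pgtable.h) including what they actually need. The general pattern, sketched as an illustrative header rather than the parisc one verbatim:

/* example_hardware.h -- illustrative only. */
#ifndef _EXAMPLE_HARDWARE_H
#define _EXAMPLE_HARDWARE_H

/*
 * A forward declaration is enough when a type is only used through a
 * pointer; including the defining header here would recreate the cycle.
 */
struct parisc_device_id;
struct hardware_path;

extern const char *parisc_hardware_description(struct parisc_device_id *id);

#endif /* _EXAMPLE_HARDWARE_H */
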
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
new file mode 100644
index 000000000000..1cf0b77b1efe
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
@@ -0,0 +1,43 @@
1/*
2 * PQ3 MPIC Message (Group B) device tree stub [ controller @ offset 0x42400 ]
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35message@42400 {
36 compatible = "fsl,mpic-v3.1-msgr";
37 reg = <0x42400 0x200>;
38 interrupts = <
39 0xb4 2 0 0
40 0xb5 2 0 0
41 0xb6 2 0 0
42 0xb7 2 0 0>;
43};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
index fdedf7b1fe0f..71c30eb10056 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -53,6 +53,16 @@ timer@41100 {
53 3 0 3 0>; 53 3 0 3 0>;
54}; 54};
55 55
56message@41400 {
57 compatible = "fsl,mpic-v3.1-msgr";
58 reg = <0x41400 0x200>;
59 interrupts = <
60 0xb0 2 0 0
61 0xb1 2 0 0
62 0xb2 2 0 0
63 0xb3 2 0 0>;
64};
65
56msi@41600 { 66msi@41600 {
57 compatible = "fsl,mpic-msi"; 67 compatible = "fsl,mpic-msi";
58 reg = <0x41600 0x80>; 68 reg = <0x41600 0x80>;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a30..d58fc4e4149c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv: \
288/* Exception addition: Hard disable interrupts */ 288/* Exception addition: Hard disable interrupts */
289#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) 289#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
290 290
291/* Exception addition: Keep interrupt state */
292#define ENABLE_INTS \
293 ld r11,PACAKMSR(r13); \
294 ld r12,_MSR(r1); \
295 rlwimi r11,r12,0,MSR_EE; \
296 mtmsrd r11,1
297
298#define ADD_NVGPRS \ 291#define ADD_NVGPRS \
299 bl .save_nvgprs 292 bl .save_nvgprs
300 293
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af92ced1..0e40843a1c6e 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19 19
20 20
21/* Define a way to iterate across irqs. */
22#define for_each_irq(i) \
23 for ((i) = 0; (i) < NR_IRQS; ++(i))
24
25extern atomic_t ppc_n_lost_interrupts; 21extern atomic_t ppc_n_lost_interrupts;
26 22
27/* This number is used when no interrupt has been assigned */ 23/* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index c65b9294376e..c9f698a994be 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -275,9 +275,6 @@ struct mpic
275 unsigned int isu_mask; 275 unsigned int isu_mask;
276 /* Number of sources */ 276 /* Number of sources */
277 unsigned int num_sources; 277 unsigned int num_sources;
278 /* default senses array */
279 unsigned char *senses;
280 unsigned int senses_count;
281 278
282 /* vector numbers used for internal sources (ipi/timers) */ 279 /* vector numbers used for internal sources (ipi/timers) */
283 unsigned int ipi_vecs[4]; 280 unsigned int ipi_vecs[4];
@@ -415,21 +412,6 @@ extern struct mpic *mpic_alloc(struct device_node *node,
415extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, 412extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
416 phys_addr_t phys_addr); 413 phys_addr_t phys_addr);
417 414
418/* Set default sense codes
419 *
420 * @mpic: controller
421 * @senses: array of sense codes
422 * @count: size of above array
423 *
424 * Optionally provide an array (indexed on hardware interrupt numbers
425 * for this MPIC) of default sense codes for the chip. Those are linux
426 * sense codes IRQ_TYPE_*
427 *
428 * The driver gets ownership of the pointer, don't dispose of it or
429 * anything like that. __init only.
430 */
431extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
432
433 415
434/* Initialize the controller. After this has been called, none of the above 416/* Initialize the controller. After this has been called, none of the above
435 * should be called again for this mpic 417 * should be called again for this mpic
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 3ec37dc9003e..326d33ca55cd 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <asm/smp.h>
16 17
17struct mpic_msgr { 18struct mpic_msgr {
18 u32 __iomem *base; 19 u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b86faa9107da..8a97aa7289d3 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -15,11 +15,6 @@
15#ifndef __ASM_POWERPC_REG_BOOKE_H__ 15#ifndef __ASM_POWERPC_REG_BOOKE_H__
16#define __ASM_POWERPC_REG_BOOKE_H__ 16#define __ASM_POWERPC_REG_BOOKE_H__
17 17
18#ifdef CONFIG_BOOKE_WDT
19extern u32 booke_wdt_enabled;
20extern u32 booke_wdt_period;
21#endif /* CONFIG_BOOKE_WDT */
22
23/* Machine State Register (MSR) Fields */ 18/* Machine State Register (MSR) Fields */
24#define MSR_GS (1<<28) /* Guest state */ 19#define MSR_GS (1<<28) /* Guest state */
25#define MSR_UCLE (1<<26) /* User-mode cache lock enable */ 20#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f4..ef2074c3e906 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite)
588fast_exc_return_irq: 588fast_exc_return_irq:
589restore: 589restore:
590 /* 590 /*
591 * This is the main kernel exit path, we first check if we 591 * This is the main kernel exit path. First we check if we
592 * have to change our interrupt state. 592 * are about to re-enable interrupts
593 */ 593 */
594 ld r5,SOFTE(r1) 594 ld r5,SOFTE(r1)
595 lbz r6,PACASOFTIRQEN(r13) 595 lbz r6,PACASOFTIRQEN(r13)
596 cmpwi cr1,r5,0 596 cmpwi cr0,r5,0
597 cmpw cr0,r5,r6 597 beq restore_irq_off
598 beq cr0,4f
599 598
600 /* We do, handle disable first, which is easy */ 599 /* We are enabling, were we already enabled ? Yes, just return */
601 bne cr1,3f; 600 cmpwi cr0,r6,1
602 li r0,0 601 beq cr0,do_restore
603 stb r0,PACASOFTIRQEN(r13);
604 TRACE_DISABLE_INTS
605 b 4f
606 602
6073: /* 603 /*
608 * We are about to soft-enable interrupts (we are hard disabled 604 * We are about to soft-enable interrupts (we are hard disabled
609 * at this point). We check if there's anything that needs to 605 * at this point). We check if there's anything that needs to
610 * be replayed first. 606 * be replayed first.
@@ -626,7 +622,7 @@ restore_no_replay:
626 /* 622 /*
627 * Final return path. BookE is handled in a different file 623 * Final return path. BookE is handled in a different file
628 */ 624 */
6294: 625do_restore:
630#ifdef CONFIG_PPC_BOOK3E 626#ifdef CONFIG_PPC_BOOK3E
631 b .exception_return_book3e 627 b .exception_return_book3e
632#else 628#else
@@ -700,6 +696,25 @@ fast_exception_return:
700#endif /* CONFIG_PPC_BOOK3E */ 696#endif /* CONFIG_PPC_BOOK3E */
701 697
702 /* 698 /*
699 * We are returning to a context with interrupts soft disabled.
700 *
701 * However, we may also about to hard enable, so we need to
702 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
703 * or that bit can get out of sync and bad things will happen
704 */
705restore_irq_off:
706 ld r3,_MSR(r1)
707 lbz r7,PACAIRQHAPPENED(r13)
708 andi. r0,r3,MSR_EE
709 beq 1f
710 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
711 stb r7,PACAIRQHAPPENED(r13)
7121: li r0,0
713 stb r0,PACASOFTIRQEN(r13);
714 TRACE_DISABLE_INTS
715 b do_restore
716
717 /*
703 * Something did happen, check if a re-emit is needed 718 * Something did happen, check if a re-emit is needed
704 * (this also clears paca->irq_happened) 719 * (this also clears paca->irq_happened)
705 */ 720 */
@@ -748,6 +763,9 @@ restore_check_irq_replay:
748#endif /* CONFIG_PPC_BOOK3E */ 763#endif /* CONFIG_PPC_BOOK3E */
7491: b .ret_from_except /* What else to do here ? */ 7641: b .ret_from_except /* What else to do here ? */
750 765
766
767
7683:
751do_work: 769do_work:
752#ifdef CONFIG_PREEMPT 770#ifdef CONFIG_PREEMPT
753 andi. r0,r3,MSR_PR /* Returning to user mode? */ 771 andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -767,16 +785,6 @@ do_work:
767 SOFT_DISABLE_INTS(r3,r4) 785 SOFT_DISABLE_INTS(r3,r4)
7681: bl .preempt_schedule_irq 7861: bl .preempt_schedule_irq
769 787
770 /* Hard-disable interrupts again (and update PACA) */
771#ifdef CONFIG_PPC_BOOK3E
772 wrteei 0
773#else
774 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
775 mtmsrd r10,1
776#endif /* CONFIG_PPC_BOOK3E */
777 li r0,PACA_IRQ_HARD_DIS
778 stb r0,PACAIRQHAPPENED(r13)
779
780 /* Re-test flags and eventually loop */ 788 /* Re-test flags and eventually loop */
781 clrrdi r9,r1,THREAD_SHIFT 789 clrrdi r9,r1,THREAD_SHIFT
782 ld r4,TI_FLAGS(r9) 790 ld r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@ do_work:
787user_work: 795user_work:
788#endif /* CONFIG_PREEMPT */ 796#endif /* CONFIG_PREEMPT */
789 797
790 /* Enable interrupts */
791#ifdef CONFIG_PPC_BOOK3E
792 wrteei 1
793#else
794 ori r10,r10,MSR_EE
795 mtmsrd r10,1
796#endif /* CONFIG_PPC_BOOK3E */
797
798 andi. r0,r4,_TIF_NEED_RESCHED 798 andi. r0,r4,_TIF_NEED_RESCHED
799 beq 1f 799 beq 1f
800 bl .restore_interrupts 800 bl .restore_interrupts
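
The entry_64.S rework above splits the exit path into an explicit restore_irq_off case: when returning to a soft-disabled context, PACASOFTIRQEN is cleared, and if the saved MSR has EE set the PACA_IRQ_HARD_DIS bit is cleared as well so the two pieces of lazy-interrupt state cannot drift apart. Below is a minimal userspace sketch of that bookkeeping; the struct and constant are illustrative stand-ins for the PACA fields, not the kernel definitions.

    #include <stdbool.h>
    #include <stdint.h>

    #define PACA_IRQ_HARD_DIS	0x01	/* illustrative value */

    struct paca_model {
    	uint8_t soft_enabled;	/* PACASOFTIRQEN */
    	uint8_t irq_happened;	/* PACAIRQHAPPENED */
    };

    /* Mirrors restore_irq_off: returning to a soft-disabled context. */
    void restore_irq_off_model(struct paca_model *paca, bool msr_ee)
    {
    	if (msr_ee)	/* hard-enabled on return: HARD_DIS must not linger */
    		paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
    	paca->soft_enabled = 0;
    }
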
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb458..8f880bc77c56 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
768 std r3,_DAR(r1) 768 std r3,_DAR(r1)
769 std r4,_DSISR(r1) 769 std r4,_DSISR(r1)
770 bl .save_nvgprs 770 bl .save_nvgprs
771 DISABLE_INTS
771 addi r3,r1,STACK_FRAME_OVERHEAD 772 addi r3,r1,STACK_FRAME_OVERHEAD
772 ENABLE_INTS
773 bl .alignment_exception 773 bl .alignment_exception
774 b .ret_from_except 774 b .ret_from_except
775 775
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b2354ca6..641da9e868ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
229 */ 229 */
230 if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) 230 if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
231 __hard_irq_disable(); 231 __hard_irq_disable();
232#ifdef CONFIG_TRACE_IRQFLAG
233 else {
234 /*
235 * We should already be hard disabled here. We had bugs
236 * where that wasn't the case so let's dbl check it and
237 * warn if we are wrong. Only do that when IRQ tracing
238 * is enabled as mfmsr() can be costly.
239 */
240 if (WARN_ON(mfmsr() & MSR_EE))
241 __hard_irq_disable();
242 }
243#endif /* CONFIG_TRACE_IRQFLAG */
244
232 set_soft_enabled(0); 245 set_soft_enabled(0);
233 246
234 /* 247 /*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
260 * if they are currently disabled. This is typically called before 273 * if they are currently disabled. This is typically called before
261 * schedule() or do_signal() when returning to userspace. We do it 274 * schedule() or do_signal() when returning to userspace. We do it
262 * in C to avoid the burden of dealing with lockdep etc... 275 * in C to avoid the burden of dealing with lockdep etc...
276 *
277 * NOTE: This is called with interrupts hard disabled but not marked
278 * as such in paca->irq_happened, so we need to resync this.
263 */ 279 */
264void restore_interrupts(void) 280void restore_interrupts(void)
265{ 281{
266 if (irqs_disabled()) 282 if (irqs_disabled()) {
283 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
267 local_irq_enable(); 284 local_irq_enable();
285 } else
286 __hard_irq_enable();
268} 287}
269 288
270#endif /* CONFIG_PPC64 */ 289#endif /* CONFIG_PPC64 */
@@ -330,14 +349,10 @@ void migrate_irqs(void)
330 349
331 alloc_cpumask_var(&mask, GFP_KERNEL); 350 alloc_cpumask_var(&mask, GFP_KERNEL);
332 351
333 for_each_irq(irq) { 352 for_each_irq_desc(irq, desc) {
334 struct irq_data *data; 353 struct irq_data *data;
335 struct irq_chip *chip; 354 struct irq_chip *chip;
336 355
337 desc = irq_to_desc(irq);
338 if (!desc)
339 continue;
340
341 data = irq_desc_get_irq_data(desc); 356 data = irq_desc_get_irq_data(desc);
342 if (irqd_is_per_cpu(data)) 357 if (irqd_is_per_cpu(data))
343 continue; 358 continue;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
23 23
24void machine_kexec_mask_interrupts(void) { 24void machine_kexec_mask_interrupts(void) {
25 unsigned int i; 25 unsigned int i;
26 struct irq_desc *desc;
26 27
27 for_each_irq(i) { 28 for_each_irq_desc(i, desc) {
28 struct irq_desc *desc = irq_to_desc(i);
29 struct irq_chip *chip; 29 struct irq_chip *chip;
30 30
31 if (!desc)
32 continue;
33
34 chip = irq_desc_get_chip(desc); 31 chip = irq_desc_get_chip(desc);
35 if (!chip) 32 if (!chip)
36 continue; 33 continue;
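
Both migrate_irqs() and machine_kexec_mask_interrupts() switch from for_each_irq() plus an explicit irq_to_desc()/NULL check to for_each_irq_desc(), which only visits allocated descriptors. A rough standalone model of why the NULL test disappears from the call sites, with a plain array standing in for the sparse descriptor table:

    #include <stdio.h>

    struct desc_model { int irq; const char *name; };

    #define NR_SLOTS 8
    static struct desc_model *slots[NR_SLOTS];	/* sparse: most entries NULL */

    /* Iterator that skips unallocated slots, like for_each_irq_desc(). */
    #define for_each_desc_model(i, d) \
    	for ((i) = 0; (i) < NR_SLOTS; (i)++) \
    		if (!((d) = slots[(i)])) ; else

    int main(void)
    {
    	struct desc_model d3 = { 3, "uart" };
    	struct desc_model *d;
    	int i;

    	slots[3] = &d3;
    	for_each_desc_model(i, d)	/* no per-iteration NULL check needed */
    		printf("irq %d: %s\n", d->irq, d->name);
    	return 0;
    }
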
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9825f29d1faf..ec8a53fa9e8f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -150,6 +150,9 @@ notrace void __init machine_init(u64 dt_ptr)
150} 150}
151 151
152#ifdef CONFIG_BOOKE_WDT 152#ifdef CONFIG_BOOKE_WDT
153extern u32 booke_wdt_enabled;
154extern u32 booke_wdt_period;
155
153/* Checks wdt=x and wdt_period=xx command-line option */ 156/* Checks wdt=x and wdt_period=xx command-line option */
154notrace int __init early_parse_wdt(char *p) 157notrace int __init early_parse_wdt(char *p)
155{ 158{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e247..158972341a2d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
248 addr, regs->nip, regs->link, code); 248 addr, regs->nip, regs->link, code);
249 } 249 }
250 250
251 if (!arch_irq_disabled_regs(regs)) 251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
252 local_irq_enable(); 252 local_irq_enable();
253 253
254 memset(&info, 0, sizeof(info)); 254 memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1019 return; 1019 return;
1020 } 1020 }
1021 1021
1022 local_irq_enable(); 1022 /* We restore the interrupt state now */
1023 if (!arch_irq_disabled_regs(regs))
1024 local_irq_enable();
1023 1025
1024#ifdef CONFIG_MATH_EMULATION 1026#ifdef CONFIG_MATH_EMULATION
1025 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1027 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
1069{ 1071{
1070 int sig, code, fixed = 0; 1072 int sig, code, fixed = 0;
1071 1073
1074 /* We restore the interrupt state now */
1075 if (!arch_irq_disabled_regs(regs))
1076 local_irq_enable();
1077
1072 /* we don't implement logging of alignment exceptions */ 1078 /* we don't implement logging of alignment exceptions */
1073 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1079 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1074 fixed = fix_alignment(regs); 1080 fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a529f2..c3beaeef3f60 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
258 !(memslot->userspace_addr & (s - 1))) { 258 !(memslot->userspace_addr & (s - 1))) {
259 start &= ~(s - 1); 259 start &= ~(s - 1);
260 pgsize = s; 260 pgsize = s;
261 get_page(hpage);
262 put_page(page);
261 page = hpage; 263 page = hpage;
262 } 264 }
263 } 265 }
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
281 err = 0; 283 err = 0;
282 284
283 out: 285 out:
284 if (got) { 286 if (got)
285 if (PageHuge(page))
286 page = compound_head(page);
287 put_page(page); 287 put_page(page);
288 }
289 return err; 288 return err;
290 289
291 up_err: 290 up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
678 SetPageDirty(page); 677 SetPageDirty(page);
679 678
680 out_put: 679 out_put:
681 if (page) 680 if (page) {
682 put_page(page); 681 /*
682 * We drop pages[0] here, not page because page might
683 * have been set to the head page of a compound, but
684 * we have to drop the reference on the correct tail
685 * page to match the get inside gup()
686 */
687 put_page(pages[0]);
688 }
683 return ret; 689 return ret;
684 690
685 out_unlock: 691 out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
979 pa = *physp; 985 pa = *physp;
980 } 986 }
981 page = pfn_to_page(pa >> PAGE_SHIFT); 987 page = pfn_to_page(pa >> PAGE_SHIFT);
988 get_page(page);
982 } else { 989 } else {
983 hva = gfn_to_hva_memslot(memslot, gfn); 990 hva = gfn_to_hva_memslot(memslot, gfn);
984 npages = get_user_pages_fast(hva, 1, 1, pages); 991 npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
991 page = compound_head(page); 998 page = compound_head(page);
992 psize <<= compound_order(page); 999 psize <<= compound_order(page);
993 } 1000 }
994 if (!kvm->arch.using_mmu_notifiers)
995 get_page(page);
996 offset = gpa & (psize - 1); 1001 offset = gpa & (psize - 1);
997 if (nb_ret) 1002 if (nb_ret)
998 *nb_ret = psize - offset; 1003 *nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
1003{ 1008{
1004 struct page *page = virt_to_page(va); 1009 struct page *page = virt_to_page(va);
1005 1010
1006 page = compound_head(page);
1007 put_page(page); 1011 put_page(page);
1008} 1012}
1009 1013
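
The mmu_hv changes all follow one rule: release exactly the reference you took. kvmppc_get_guest_page() now pins the huge page before dropping the pin on the original page, the fault path drops pages[0] (the tail page returned by get_user_pages) rather than the possibly-substituted head page, and the unpin paths stop translating through compound_head(). A tiny model of the swap-then-release ordering, with stand-in get/put helpers rather than the kernel ones:

    struct page_model { int refcount; };

    void get_page_model(struct page_model *p) { p->refcount++; }
    void put_page_model(struct page_model *p) { p->refcount--; }

    /* Switch our pin from 'page' to 'hpage' without ever holding zero
     * references on the page we intend to keep using.
     */
    struct page_model *switch_pin(struct page_model *page,
    			      struct page_model *hpage)
    {
    	get_page_model(hpage);	/* take the new reference first */
    	put_page_model(page);	/* then drop the old one */
    	return hpage;
    }
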
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5099dd..108d1f580177 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
1192 continue; 1192 continue;
1193 pfn = physp[j] >> PAGE_SHIFT; 1193 pfn = physp[j] >> PAGE_SHIFT;
1194 page = pfn_to_page(pfn); 1194 page = pfn_to_page(pfn);
1195 if (PageHuge(page))
1196 page = compound_head(page);
1197 SetPageDirty(page); 1195 SetPageDirty(page);
1198 put_page(page); 1196 put_page(page);
1199 } 1197 }
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e9a691..5c3cf2d04e41 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
48/* 48/*
49 * Assembly helpers from arch/powerpc/net/bpf_jit.S: 49 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
50 */ 50 */
51extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; 51#define DECLARE_LOAD_FUNC(func) \
52 extern u8 func[], func##_negative_offset[], func##_positive_offset[]
53
54DECLARE_LOAD_FUNC(sk_load_word);
55DECLARE_LOAD_FUNC(sk_load_half);
56DECLARE_LOAD_FUNC(sk_load_byte);
57DECLARE_LOAD_FUNC(sk_load_byte_msh);
52 58
53#define FUNCTION_DESCR_SIZE 24 59#define FUNCTION_DESCR_SIZE 24
54 60
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
31 * then branch directly to slow_path_XXX if required. (In fact, could 31 * then branch directly to slow_path_XXX if required. (In fact, could
32 * load a spare GPR with the address of slow_path_generic and pass size 32 * load a spare GPR with the address of slow_path_generic and pass size
33 * as an argument, making the call site a mtlr, li and bllr.) 33 * as an argument, making the call site a mtlr, li and bllr.)
34 *
35 * Technically, the "is addr < 0" check is unnecessary & slowing down
36 * the ABS path, as it's statically checked on generation.
37 */ 34 */
38 .globl sk_load_word 35 .globl sk_load_word
39sk_load_word: 36sk_load_word:
40 cmpdi r_addr, 0 37 cmpdi r_addr, 0
41 blt bpf_error 38 blt bpf_slow_path_word_neg
39 .globl sk_load_word_positive_offset
40sk_load_word_positive_offset:
42 /* Are we accessing past headlen? */ 41 /* Are we accessing past headlen? */
43 subi r_scratch1, r_HL, 4 42 subi r_scratch1, r_HL, 4
44 cmpd r_scratch1, r_addr 43 cmpd r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
51 .globl sk_load_half 50 .globl sk_load_half
52sk_load_half: 51sk_load_half:
53 cmpdi r_addr, 0 52 cmpdi r_addr, 0
54 blt bpf_error 53 blt bpf_slow_path_half_neg
54 .globl sk_load_half_positive_offset
55sk_load_half_positive_offset:
55 subi r_scratch1, r_HL, 2 56 subi r_scratch1, r_HL, 2
56 cmpd r_scratch1, r_addr 57 cmpd r_scratch1, r_addr
57 blt bpf_slow_path_half 58 blt bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
61 .globl sk_load_byte 62 .globl sk_load_byte
62sk_load_byte: 63sk_load_byte:
63 cmpdi r_addr, 0 64 cmpdi r_addr, 0
64 blt bpf_error 65 blt bpf_slow_path_byte_neg
66 .globl sk_load_byte_positive_offset
67sk_load_byte_positive_offset:
65 cmpd r_HL, r_addr 68 cmpd r_HL, r_addr
66 ble bpf_slow_path_byte 69 ble bpf_slow_path_byte
67 lbzx r_A, r_D, r_addr 70 lbzx r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
69 72
70/* 73/*
71 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) 74 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
72 * r_addr is the offset value, already known positive 75 * r_addr is the offset value
73 */ 76 */
74 .globl sk_load_byte_msh 77 .globl sk_load_byte_msh
75sk_load_byte_msh: 78sk_load_byte_msh:
79 cmpdi r_addr, 0
80 blt bpf_slow_path_byte_msh_neg
81 .globl sk_load_byte_msh_positive_offset
82sk_load_byte_msh_positive_offset:
76 cmpd r_HL, r_addr 83 cmpd r_HL, r_addr
77 ble bpf_slow_path_byte_msh 84 ble bpf_slow_path_byte_msh
78 lbzx r_X, r_D, r_addr 85 lbzx r_X, r_D, r_addr
79 rlwinm r_X, r_X, 2, 32-4-2, 31-2 86 rlwinm r_X, r_X, 2, 32-4-2, 31-2
80 blr 87 blr
81 88
82bpf_error:
83 /* Entered with cr0 = lt */
84 li r3, 0
85 /* Generated code will 'blt epilogue', returning 0. */
86 blr
87
88/* Call out to skb_copy_bits: 89/* Call out to skb_copy_bits:
89 * We'll need to back up our volatile regs first; we have 90 * We'll need to back up our volatile regs first; we have
90 * local variable space at r1+(BPF_PPC_STACK_BASIC). 91 * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
136 lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) 137 lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
137 rlwinm r_X, r_X, 2, 32-4-2, 31-2 138 rlwinm r_X, r_X, 2, 32-4-2, 31-2
138 blr 139 blr
140
141/* Call out to bpf_internal_load_pointer_neg_helper:
142 * We'll need to back up our volatile regs first; we have
143 * local variable space at r1+(BPF_PPC_STACK_BASIC).
144 * Allocate a new stack frame here to remain ABI-compliant in
145 * stashing LR.
146 */
147#define sk_negative_common(SIZE) \
148 mflr r0; \
149 std r0, 16(r1); \
150 /* R3 goes in parameter space of caller's frame */ \
151 std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
152 std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
153 std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
154 stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
155 /* R3 = r_skb, as passed */ \
156 mr r4, r_addr; \
157 li r5, SIZE; \
158 bl bpf_internal_load_pointer_neg_helper; \
159 /* R3 != 0 on success */ \
160 addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
161 ld r0, 16(r1); \
162 ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
163 ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
164 mtlr r0; \
165 cmpldi r3, 0; \
166 beq bpf_error_slow; /* cr0 = EQ */ \
167 mr r_addr, r3; \
168 ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
169 /* Great success! */
170
171bpf_slow_path_word_neg:
172 lis r_scratch1,-32 /* SKF_LL_OFF */
173 cmpd r_addr, r_scratch1 /* addr < SKF_* */
174 blt bpf_error /* cr0 = LT */
175 .globl sk_load_word_negative_offset
176sk_load_word_negative_offset:
177 sk_negative_common(4)
178 lwz r_A, 0(r_addr)
179 blr
180
181bpf_slow_path_half_neg:
182 lis r_scratch1,-32 /* SKF_LL_OFF */
183 cmpd r_addr, r_scratch1 /* addr < SKF_* */
184 blt bpf_error /* cr0 = LT */
185 .globl sk_load_half_negative_offset
186sk_load_half_negative_offset:
187 sk_negative_common(2)
188 lhz r_A, 0(r_addr)
189 blr
190
191bpf_slow_path_byte_neg:
192 lis r_scratch1,-32 /* SKF_LL_OFF */
193 cmpd r_addr, r_scratch1 /* addr < SKF_* */
194 blt bpf_error /* cr0 = LT */
195 .globl sk_load_byte_negative_offset
196sk_load_byte_negative_offset:
197 sk_negative_common(1)
198 lbz r_A, 0(r_addr)
199 blr
200
201bpf_slow_path_byte_msh_neg:
202 lis r_scratch1,-32 /* SKF_LL_OFF */
203 cmpd r_addr, r_scratch1 /* addr < SKF_* */
204 blt bpf_error /* cr0 = LT */
205 .globl sk_load_byte_msh_negative_offset
206sk_load_byte_msh_negative_offset:
207 sk_negative_common(1)
208 lbz r_X, 0(r_addr)
209 rlwinm r_X, r_X, 2, 32-4-2, 31-2
210 blr
211
212bpf_error_slow:
213 /* fabricate a cr0 = lt */
214 li r_scratch1, -1
215 cmpdi r_scratch1, 0
216bpf_error:
217 /* Entered with cr0 = lt */
218 li r3, 0
219 /* Generated code will 'blt epilogue', returning 0. */
220 blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6c..2dc8b1484845 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
127 PPC_BLR(); 127 PPC_BLR();
128} 128}
129 129
130#define CHOOSE_LOAD_FUNC(K, func) \
131 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
132
130/* Assemble the body code between the prologue & epilogue. */ 133/* Assemble the body code between the prologue & epilogue. */
131static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, 134static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
132 struct codegen_context *ctx, 135 struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
391 394
392 /*** Absolute loads from packet header/data ***/ 395 /*** Absolute loads from packet header/data ***/
393 case BPF_S_LD_W_ABS: 396 case BPF_S_LD_W_ABS:
394 func = sk_load_word; 397 func = CHOOSE_LOAD_FUNC(K, sk_load_word);
395 goto common_load; 398 goto common_load;
396 case BPF_S_LD_H_ABS: 399 case BPF_S_LD_H_ABS:
397 func = sk_load_half; 400 func = CHOOSE_LOAD_FUNC(K, sk_load_half);
398 goto common_load; 401 goto common_load;
399 case BPF_S_LD_B_ABS: 402 case BPF_S_LD_B_ABS:
400 func = sk_load_byte; 403 func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
401 common_load: 404 common_load:
402 /* 405 /* Load from [K]. */
403 * Load from [K]. Reference with the (negative)
404 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
405 */
406 ctx->seen |= SEEN_DATAREF; 406 ctx->seen |= SEEN_DATAREF;
407 if ((int)K < 0)
408 return -ENOTSUPP;
409 PPC_LI64(r_scratch1, func); 407 PPC_LI64(r_scratch1, func);
410 PPC_MTLR(r_scratch1); 408 PPC_MTLR(r_scratch1);
411 PPC_LI32(r_addr, K); 409 PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
429 common_load_ind: 427 common_load_ind:
430 /* 428 /*
431 * Load from [X + K]. Negative offsets are tested for 429 * Load from [X + K]. Negative offsets are tested for
432 * in the helper functions, and result in a 'ret 0'. 430 * in the helper functions.
433 */ 431 */
434 ctx->seen |= SEEN_DATAREF | SEEN_XREG; 432 ctx->seen |= SEEN_DATAREF | SEEN_XREG;
435 PPC_LI64(r_scratch1, func); 433 PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
443 break; 441 break;
444 442
445 case BPF_S_LDX_B_MSH: 443 case BPF_S_LDX_B_MSH:
446 /* 444 func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
447 * x86 version drops packet (RET 0) when K<0, whereas
448 * interpreter does allow K<0 (__load_pointer, special
449 * ancillary data). common_load returns ENOTSUPP if K<0,
450 * so we fall back to interpreter & filter works.
451 */
452 func = sk_load_byte_msh;
453 goto common_load; 445 goto common_load;
454 break; 446 break;
455 447
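
With the negative-offset helpers added in bpf_jit_64.S, the JIT can resolve the load helper at code-generation time instead of bailing out with -ENOTSUPP for K < 0. A self-contained sketch of the CHOOSE_LOAD_FUNC dispatch follows; the arrays stand in for the assembly entry points, and SKF_LL_OFF (-0x200000) is the value from <linux/filter.h>:

    #include <stdio.h>

    #define SKF_LL_OFF	(-0x200000)

    static unsigned char sk_load_word[1];			/* generic entry */
    static unsigned char sk_load_word_positive_offset[1];
    static unsigned char sk_load_word_negative_offset[1];

    #define CHOOSE_LOAD_FUNC(K, func) \
    	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) \
    		    : func##_positive_offset)

    int main(void)
    {
    	/* K >= 0: branch straight to the positive-offset fast path. */
    	printf("%d\n", CHOOSE_LOAD_FUNC(4, sk_load_word) == sk_load_word_positive_offset);
    	/* SKF_LL_OFF <= K < 0: negative-offset (ancillary/link-layer) helper. */
    	printf("%d\n", CHOOSE_LOAD_FUNC(-0x100, sk_load_word) == sk_load_word_negative_offset);
    	/* K below SKF_LL_OFF: generic entry, which rejects it at run time. */
    	printf("%d\n", CHOOSE_LOAD_FUNC(-0x300000, sk_load_word) == sk_load_word);
    	return 0;
    }
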
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 9fef5302adc1..67dac22b4363 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -21,6 +21,12 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
21 { .compatible = "fsl,qe", }, 21 { .compatible = "fsl,qe", },
22 { .compatible = "fsl,cpm2", }, 22 { .compatible = "fsl,cpm2", },
23 { .compatible = "fsl,srio", }, 23 { .compatible = "fsl,srio", },
24 /* So that the DMA channel nodes can be probed individually: */
25 { .compatible = "fsl,eloplus-dma", },
26 /* For the PMC driver */
27 { .compatible = "fsl,mpc8548-guts", },
28 /* Probably unnecessary? */
29 { .compatible = "gpio-leds", },
24 {}, 30 {},
25}; 31};
26 32
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 9a6f04406e0d..d208ebccb91c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -399,12 +399,6 @@ static int __init board_fixups(void)
399machine_arch_initcall(mpc8568_mds, board_fixups); 399machine_arch_initcall(mpc8568_mds, board_fixups);
400machine_arch_initcall(mpc8569_mds, board_fixups); 400machine_arch_initcall(mpc8569_mds, board_fixups);
401 401
402static struct of_device_id mpc85xx_ids[] = {
403 { .compatible = "fsl,mpc8548-guts", },
404 { .compatible = "gpio-leds", },
405 {},
406};
407
408static int __init mpc85xx_publish_devices(void) 402static int __init mpc85xx_publish_devices(void)
409{ 403{
410 if (machine_is(mpc8568_mds)) 404 if (machine_is(mpc8568_mds))
@@ -412,10 +406,7 @@ static int __init mpc85xx_publish_devices(void)
412 if (machine_is(mpc8569_mds)) 406 if (machine_is(mpc8569_mds))
413 simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio"); 407 simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
414 408
415 mpc85xx_common_publish_devices(); 409 return mpc85xx_common_publish_devices();
416 of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
417
418 return 0;
419} 410}
420 411
421machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); 412machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e74b7cde9aee..f700c81a1321 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -460,18 +460,7 @@ static void __init p1022_ds_setup_arch(void)
460 pr_info("Freescale P1022 DS reference board\n"); 460 pr_info("Freescale P1022 DS reference board\n");
461} 461}
462 462
463static struct of_device_id __initdata p1022_ds_ids[] = { 463machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
464 /* So that the DMA channel nodes can be probed individually: */
465 { .compatible = "fsl,eloplus-dma", },
466 {},
467};
468
469static int __init p1022_ds_publish_devices(void)
470{
471 mpc85xx_common_publish_devices();
472 return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
473}
474machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
475 464
476machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier); 465machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
477 466
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8e6867..85825b5401e5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
114 pr_devel("axon_msi: woff %x roff %x msi %x\n", 114 pr_devel("axon_msi: woff %x roff %x msi %x\n",
115 write_offset, msic->read_offset, msi); 115 write_offset, msic->read_offset, msi);
116 116
117 if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { 117 if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
118 generic_handle_irq(msi); 118 generic_handle_irq(msi);
119 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); 119 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
120 } else { 120 } else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
276 if (rc) 276 if (rc)
277 return rc; 277 return rc;
278 278
279 /* We rely on being able to stash a virq in a u16 */
280 BUILD_BUG_ON(NR_IRQS > 65536);
281
282 list_for_each_entry(entry, &dev->msi_list, list) { 279 list_for_each_entry(entry, &dev->msi_list, list) {
283 virq = irq_create_direct_mapping(msic->irq_domain); 280 virq = irq_create_direct_mapping(msic->irq_domain);
284 if (virq == NO_IRQ) { 281 if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
392 } 389 }
393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); 390 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
394 391
395 msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic); 392 /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
393 msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
396 if (!msic->irq_domain) { 394 if (!msic->irq_domain) {
397 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", 395 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
398 dn->full_name); 396 dn->full_name);
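
The axon_msi change replaces a compile-time BUILD_BUG_ON(NR_IRQS > 65536) with a runtime cap on the nomap irq_domain, which is where the invariant belongs once nr_irqs is dynamic: the MSI FIFO stores the virq in 16 bits, so any virq above 65535 would be truncated. A small sketch of that invariant (stand-in helper, not the driver code):

    #include <assert.h>
    #include <stdint.h>

    /* The FIFO entry only has 16 bits for the virq; the irq_domain is
     * created with a 65536 limit so this cast can never truncate.
     */
    uint16_t fifo_stash_virq(unsigned int virq)
    {
    	assert(virq < 65536);
    	return (uint16_t)virq;
    }
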
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af335cb..8c6dc42ecf65 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
248{ 248{
249 int i; 249 int i;
250 250
251 for (i = 1; i < NR_IRQS; i++) 251 for (i = 1; i < nr_irqs; i++)
252 beat_destruct_irq_plug(i); 252 beat_destruct_irq_plug(i);
253} 253}
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 996c5ff7824b..03685a329d7d 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -366,11 +366,20 @@ static void kw_i2c_timeout(unsigned long data)
366 unsigned long flags; 366 unsigned long flags;
367 367
368 spin_lock_irqsave(&host->lock, flags); 368 spin_lock_irqsave(&host->lock, flags);
369
370 /*
371 * If the timer is pending, that means we raced with the
372 * irq, in which case we just return
373 */
374 if (timer_pending(&host->timeout_timer))
375 goto skip;
376
369 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); 377 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
370 if (host->state != state_idle) { 378 if (host->state != state_idle) {
371 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; 379 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
372 add_timer(&host->timeout_timer); 380 add_timer(&host->timeout_timer);
373 } 381 }
382 skip:
374 spin_unlock_irqrestore(&host->lock, flags); 383 spin_unlock_irqrestore(&host->lock, flags);
375} 384}
376 385
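
The low_i2c change closes a race between the timeout callback and the interrupt handler: if the irq path re-armed the timer while the timeout was waiting on host->lock, timer_pending() is true and the stale timeout must not poke the state machine. A minimal model of that check, with a pthread mutex standing in for the host spinlock and a plain flag standing in for timer_pending():

    #include <pthread.h>
    #include <stdbool.h>

    struct host_model {
    	pthread_mutex_t lock;
    	bool timer_pending;	/* set again by the irq path when it re-arms */
    	int state;
    };

    void timeout_model(struct host_model *host)
    {
    	pthread_mutex_lock(&host->lock);
    	if (host->timer_pending)	/* raced with the irq handler: nothing to do */
    		goto out;
    	/* ... handle the timeout and possibly re-arm the timer ... */
    out:
    	pthread_mutex_unlock(&host->lock);
    }
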
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93de1d55..c4e630576ff2 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@ static int max_real_irqs;
57 57
58static DEFINE_RAW_SPINLOCK(pmac_pic_lock); 58static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
59 59
60#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 60/* The max irq number this driver deals with is 128; see max_irqs */
61static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 61static DECLARE_BITMAP(ppc_lost_interrupts, 128);
62static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 62static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
63static int pmac_irq_cascade = -1; 63static int pmac_irq_cascade = -1;
64static struct irq_domain *pmac_pic_host; 64static struct irq_domain *pmac_pic_host;
65 65
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f6d537..178a5f300bc9 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@ config PPC_SPLPAR
30 two or more partitions. 30 two or more partitions.
31 31
32config EEH 32config EEH
33 bool "PCI Extended Error Handling (EEH)" if EXPERT 33 bool
34 depends on PPC_PSERIES && PCI 34 depends on PPC_PSERIES && PCI
35 default y if !EXPERT 35 default y
36 36
37config PSERIES_MSI 37config PSERIES_MSI
38 bool 38 bool
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 309d38ef7322..a75e37dc41aa 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1076,7 +1076,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
1076 pr_debug("EEH: Adding device %s\n", pci_name(dev)); 1076 pr_debug("EEH: Adding device %s\n", pci_name(dev));
1077 1077
1078 dn = pci_device_to_OF_node(dev); 1078 dn = pci_device_to_OF_node(dev);
1079 edev = pci_dev_to_eeh_dev(dev); 1079 edev = of_node_to_eeh_dev(dn);
1080 if (edev->pdev == dev) { 1080 if (edev->pdev == dev) {
1081 pr_debug("EEH: Already referenced !\n"); 1081 pr_debug("EEH: Already referenced !\n");
1082 return; 1082 return;
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961e2ae7..10386b676d87 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@
51static intctl_cpm2_t __iomem *cpm2_intctl; 51static intctl_cpm2_t __iomem *cpm2_intctl;
52 52
53static struct irq_domain *cpm2_pic_host; 53static struct irq_domain *cpm2_pic_host;
54#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 54static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
55static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
56 55
57static const u_char irq_to_siureg[] = { 56static const u_char irq_to_siureg[] = {
58 1, 1, 1, 1, 1, 1, 1, 1, 57 1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416be310..b724622c3a0b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@
18extern int cpm_get_irq(struct pt_regs *regs); 18extern int cpm_get_irq(struct pt_regs *regs);
19 19
20static struct irq_domain *mpc8xx_pic_host; 20static struct irq_domain *mpc8xx_pic_host;
21#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 21static unsigned long mpc8xx_cached_irq_mask;
22static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
23static sysconf8xx_t __iomem *siu_reg; 22static sysconf8xx_t __iomem *siu_reg;
24 23
25int cpm_get_irq(struct pt_regs *regs); 24static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
25{
26 return 0x80000000 >> irqd_to_hwirq(d);
27}
26 28
27static void mpc8xx_unmask_irq(struct irq_data *d) 29static void mpc8xx_unmask_irq(struct irq_data *d)
28{ 30{
29 int bit, word; 31 mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
30 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); 32 out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
31
32 bit = irq_nr & 0x1f;
33 word = irq_nr >> 5;
34
35 ppc_cached_irq_mask[word] |= (1 << (31-bit));
36 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
37} 33}
38 34
39static void mpc8xx_mask_irq(struct irq_data *d) 35static void mpc8xx_mask_irq(struct irq_data *d)
40{ 36{
41 int bit, word; 37 mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
42 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); 38 out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
43
44 bit = irq_nr & 0x1f;
45 word = irq_nr >> 5;
46
47 ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
48 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
49} 39}
50 40
51static void mpc8xx_ack(struct irq_data *d) 41static void mpc8xx_ack(struct irq_data *d)
52{ 42{
53 int bit; 43 out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
54 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
55
56 bit = irq_nr & 0x1f;
57 out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
58} 44}
59 45
60static void mpc8xx_end_irq(struct irq_data *d) 46static void mpc8xx_end_irq(struct irq_data *d)
61{ 47{
62 int bit, word; 48 mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
63 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); 49 out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
64
65 bit = irq_nr & 0x1f;
66 word = irq_nr >> 5;
67
68 ppc_cached_irq_mask[word] |= (1 << (31-bit));
69 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
70} 50}
71 51
72static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) 52static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
73{ 53{
74 if (flow_type & IRQ_TYPE_EDGE_FALLING) { 54 /* only external IRQ senses are programmable */
75 irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); 55 if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
76 unsigned int siel = in_be32(&siu_reg->sc_siel); 56 unsigned int siel = in_be32(&siu_reg->sc_siel);
77 57 siel |= mpc8xx_irqd_to_bit(d);
78 /* only external IRQ senses are programmable */ 58 out_be32(&siu_reg->sc_siel, siel);
79 if ((hw & 1) == 0) { 59 __irq_set_handler_locked(d->irq, handle_edge_irq);
80 siel |= (0x80000000 >> hw);
81 out_be32(&siu_reg->sc_siel, siel);
82 __irq_set_handler_locked(d->irq, handle_edge_irq);
83 }
84 } 60 }
85 return 0; 61 return 0;
86} 62}
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
132 IRQ_TYPE_EDGE_FALLING, 108 IRQ_TYPE_EDGE_FALLING,
133 }; 109 };
134 110
111 if (intspec[0] > 0x1f)
112 return 0;
113
135 *out_hwirq = intspec[0]; 114 *out_hwirq = intspec[0];
136 if (intsize > 1 && intspec[1] < 4) 115 if (intsize > 1 && intspec[1] < 4)
137 *out_flags = map_pic_senses[intspec[1]]; 116 *out_flags = map_pic_senses[intspec[1]];
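
The mpc8xx_pic rework collapses the bit/word arithmetic into a single helper, mpc8xx_irqd_to_bit(), which works because the 8xx SIU has only 32 sources behind one 32-bit mask register numbered from the MSB. A standalone illustration of the mapping:

    #include <stdio.h>
    #include <stdint.h>

    /* hwirq 0 maps to 0x80000000, hwirq 31 to 0x00000001. */
    static uint32_t hwirq_to_bit(unsigned int hwirq)
    {
    	return 0x80000000u >> hwirq;
    }

    int main(void)
    {
    	uint32_t mask = 0;

    	mask |= hwirq_to_bit(0);	/* unmask hwirq 0 */
    	mask |= hwirq_to_bit(5);	/* unmask hwirq 5 */
    	printf("simask = 0x%08x\n", mask);	/* prints 0x84000000 */
    	return 0;
    }
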
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9ac71ebd2c40..395af1347749 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -604,18 +604,14 @@ static struct mpic *mpic_find(unsigned int irq)
604} 604}
605 605
606/* Determine if the linux irq is an IPI */ 606/* Determine if the linux irq is an IPI */
607static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) 607static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
608{ 608{
609 unsigned int src = virq_to_hw(irq);
610
611 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); 609 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
612} 610}
613 611
614/* Determine if the linux irq is a timer */ 612/* Determine if the linux irq is a timer */
615static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) 613static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
616{ 614{
617 unsigned int src = virq_to_hw(irq);
618
619 return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); 615 return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
620} 616}
621 617
@@ -876,21 +872,45 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
876 if (src >= mpic->num_sources) 872 if (src >= mpic->num_sources)
877 return -EINVAL; 873 return -EINVAL;
878 874
875 vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
876
877 /* We don't support "none" type */
879 if (flow_type == IRQ_TYPE_NONE) 878 if (flow_type == IRQ_TYPE_NONE)
880 if (mpic->senses && src < mpic->senses_count) 879 flow_type = IRQ_TYPE_DEFAULT;
881 flow_type = mpic->senses[src]; 880
882 if (flow_type == IRQ_TYPE_NONE) 881 /* Default: read HW settings */
883 flow_type = IRQ_TYPE_LEVEL_LOW; 882 if (flow_type == IRQ_TYPE_DEFAULT) {
883 switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
884 MPIC_INFO(VECPRI_SENSE_MASK))) {
885 case MPIC_INFO(VECPRI_SENSE_EDGE) |
886 MPIC_INFO(VECPRI_POLARITY_POSITIVE):
887 flow_type = IRQ_TYPE_EDGE_RISING;
888 break;
889 case MPIC_INFO(VECPRI_SENSE_EDGE) |
890 MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
891 flow_type = IRQ_TYPE_EDGE_FALLING;
892 break;
893 case MPIC_INFO(VECPRI_SENSE_LEVEL) |
894 MPIC_INFO(VECPRI_POLARITY_POSITIVE):
895 flow_type = IRQ_TYPE_LEVEL_HIGH;
896 break;
897 case MPIC_INFO(VECPRI_SENSE_LEVEL) |
898 MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
899 flow_type = IRQ_TYPE_LEVEL_LOW;
900 break;
901 }
902 }
884 903
904 /* Apply to irq desc */
885 irqd_set_trigger_type(d, flow_type); 905 irqd_set_trigger_type(d, flow_type);
886 906
907 /* Apply to HW */
887 if (mpic_is_ht_interrupt(mpic, src)) 908 if (mpic_is_ht_interrupt(mpic, src))
888 vecpri = MPIC_VECPRI_POLARITY_POSITIVE | 909 vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
889 MPIC_VECPRI_SENSE_EDGE; 910 MPIC_VECPRI_SENSE_EDGE;
890 else 911 else
891 vecpri = mpic_type_to_vecpri(mpic, flow_type); 912 vecpri = mpic_type_to_vecpri(mpic, flow_type);
892 913
893 vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
894 vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | 914 vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
895 MPIC_INFO(VECPRI_SENSE_MASK)); 915 MPIC_INFO(VECPRI_SENSE_MASK));
896 vnew |= vecpri; 916 vnew |= vecpri;
@@ -1026,7 +1046,7 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
1026 irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); 1046 irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
1027 1047
1028 /* Set default irq type */ 1048 /* Set default irq type */
1029 irq_set_irq_type(virq, IRQ_TYPE_NONE); 1049 irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
1030 1050
1031 /* If the MPIC was reset, then all vectors have already been 1051 /* If the MPIC was reset, then all vectors have already been
1032 * initialized. Otherwise, a per source lazy initialization 1052 * initialized. Otherwise, a per source lazy initialization
@@ -1417,12 +1437,6 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
1417 mpic->num_sources = isu_first + mpic->isu_size; 1437 mpic->num_sources = isu_first + mpic->isu_size;
1418} 1438}
1419 1439
1420void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
1421{
1422 mpic->senses = senses;
1423 mpic->senses_count = count;
1424}
1425
1426void __init mpic_init(struct mpic *mpic) 1440void __init mpic_init(struct mpic *mpic)
1427{ 1441{
1428 int i, cpu; 1442 int i, cpu;
@@ -1555,12 +1569,12 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1555 return; 1569 return;
1556 1570
1557 raw_spin_lock_irqsave(&mpic_lock, flags); 1571 raw_spin_lock_irqsave(&mpic_lock, flags);
1558 if (mpic_is_ipi(mpic, irq)) { 1572 if (mpic_is_ipi(mpic, src)) {
1559 reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & 1573 reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
1560 ~MPIC_VECPRI_PRIORITY_MASK; 1574 ~MPIC_VECPRI_PRIORITY_MASK;
1561 mpic_ipi_write(src - mpic->ipi_vecs[0], 1575 mpic_ipi_write(src - mpic->ipi_vecs[0],
1562 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 1576 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1563 } else if (mpic_is_tm(mpic, irq)) { 1577 } else if (mpic_is_tm(mpic, src)) {
1564 reg = mpic_tm_read(src - mpic->timer_vecs[0]) & 1578 reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
1565 ~MPIC_VECPRI_PRIORITY_MASK; 1579 ~MPIC_VECPRI_PRIORITY_MASK;
1566 mpic_tm_write(src - mpic->timer_vecs[0], 1580 mpic_tm_write(src - mpic->timer_vecs[0],
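
Instead of consulting a board-supplied senses[] table, mpic_set_irq_type() now treats IRQ_TYPE_NONE as IRQ_TYPE_DEFAULT and decodes the current sense/polarity from the vector/priority register it reads anyway. A hedged sketch of that decode; the two bit values below are illustrative placeholders for the MPIC_INFO() masks, not the hardware encoding:

    enum flow_model { EDGE_RISING, EDGE_FALLING, LEVEL_HIGH, LEVEL_LOW };

    #define VECPRI_SENSE_LEVEL	0x2	/* placeholder bit */
    #define VECPRI_POL_POSITIVE	0x1	/* placeholder bit */

    enum flow_model vecpri_to_flow(unsigned int vold)
    {
    	switch (vold & (VECPRI_SENSE_LEVEL | VECPRI_POL_POSITIVE)) {
    	case VECPRI_POL_POSITIVE:			return EDGE_RISING;
    	case 0:						return EDGE_FALLING;
    	case VECPRI_SENSE_LEVEL | VECPRI_POL_POSITIVE:	return LEVEL_HIGH;
    	default:					return LEVEL_LOW;
    	}
    }
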
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 6e7fa386e76a..483d8fa72e8b 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -27,6 +27,7 @@
27 27
28static struct mpic_msgr **mpic_msgrs; 28static struct mpic_msgr **mpic_msgrs;
29static unsigned int mpic_msgr_count; 29static unsigned int mpic_msgr_count;
30static DEFINE_RAW_SPINLOCK(msgrs_lock);
30 31
31static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value) 32static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
32{ 33{
@@ -56,12 +57,11 @@ struct mpic_msgr *mpic_msgr_get(unsigned int reg_num)
56 if (reg_num >= mpic_msgr_count) 57 if (reg_num >= mpic_msgr_count)
57 return ERR_PTR(-ENODEV); 58 return ERR_PTR(-ENODEV);
58 59
59 raw_spin_lock_irqsave(&msgr->lock, flags); 60 raw_spin_lock_irqsave(&msgrs_lock, flags);
60 if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) { 61 msgr = mpic_msgrs[reg_num];
61 msgr = mpic_msgrs[reg_num]; 62 if (msgr->in_use == MSGR_FREE)
62 msgr->in_use = MSGR_INUSE; 63 msgr->in_use = MSGR_INUSE;
63 } 64 raw_spin_unlock_irqrestore(&msgrs_lock, flags);
64 raw_spin_unlock_irqrestore(&msgr->lock, flags);
65 65
66 return msgr; 66 return msgr;
67} 67}
@@ -228,7 +228,7 @@ static __devinit int mpic_msgr_probe(struct platform_device *dev)
228 228
229 reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i; 229 reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
230 msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE; 230 msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
231 msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET; 231 msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET);
232 msgr->in_use = MSGR_FREE; 232 msgr->in_use = MSGR_FREE;
233 msgr->num = i; 233 msgr->num = i;
234 raw_spin_lock_init(&msgr->lock); 234 raw_spin_lock_init(&msgr->lock);
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 49a3ece1c6b3..702256a1ca11 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -22,6 +22,7 @@
22#include <linux/debugfs.h> 22#include <linux/debugfs.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/export.h> 24#include <linux/export.h>
25#include <asm/debug.h>
25#include <asm/prom.h> 26#include <asm/prom.h>
26#include <asm/scom.h> 27#include <asm/scom.h>
27 28
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204e3450..cd1d18db92c6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
188{ 188{
189 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); 189 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
190 unsigned int irq, virq; 190 unsigned int irq, virq;
191 struct irq_desc *desc;
191 192
192 /* If we used to be the default server, move to the new "boot_cpuid" */ 193 /* If we used to be the default server, move to the new "boot_cpuid" */
193 if (hw_cpu == xics_default_server) 194 if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
202 /* Allow IPIs again... */ 203 /* Allow IPIs again... */
203 icp_ops->set_priority(DEFAULT_PRIORITY); 204 icp_ops->set_priority(DEFAULT_PRIORITY);
204 205
205 for_each_irq(virq) { 206 for_each_irq_desc(virq, desc) {
206 struct irq_desc *desc;
207 struct irq_chip *chip; 207 struct irq_chip *chip;
208 long server; 208 long server;
209 unsigned long flags; 209 unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
212 /* We can't set affinity on ISA interrupts */ 212 /* We can't set affinity on ISA interrupts */
213 if (virq < NUM_ISA_INTERRUPTS) 213 if (virq < NUM_ISA_INTERRUPTS)
214 continue; 214 continue;
215 desc = irq_to_desc(virq);
216 /* We only need to migrate enabled IRQS */ 215 /* We only need to migrate enabled IRQS */
217 if (!desc || !desc->action) 216 if (!desc->action)
218 continue; 217 continue;
219 if (desc->irq_data.domain != xics_host) 218 if (desc->irq_data.domain != xics_host)
220 continue; 219 continue;
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 37f2f4a55231..f4c1c20bcdf6 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -11,7 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/cmpxchg.h> 12#include <asm/cmpxchg.h>
13 13
14#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) 14#define ATOMIC_INIT(i) { (i) }
15 15
16#define atomic_read(v) (*(volatile int *)&(v)->counter) 16#define atomic_read(v) (*(volatile int *)&(v)->counter)
17#define atomic_set(v,i) ((v)->counter = (i)) 17#define atomic_set(v,i) ((v)->counter = (i))
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 324eef93c900..e99b104d967a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -86,7 +86,7 @@ static noinline int vmalloc_fault(unsigned long address)
86 pte_t *pte_k; 86 pte_t *pte_k;
87 87
88 /* Make sure we are in vmalloc/module/P3 area: */ 88 /* Make sure we are in vmalloc/module/P3 area: */
89 if (!(address >= VMALLOC_START && address < P3_ADDR_MAX)) 89 if (!(address >= P3SEG && address < P3_ADDR_MAX))
90 return -1; 90 return -1;
91 91
92 /* 92 /*
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 38d48a59879c..9708851a8b9f 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -269,4 +269,4 @@ static int __init sunfire_init(void)
269 return 0; 269 return 0;
270} 270}
271 271
272subsys_initcall(sunfire_init); 272fs_initcall(sunfire_init);
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 1210fde18740..160cac9c4036 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -23,6 +23,7 @@
23#include <linux/pm.h> 23#include <linux/pm.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/gfp.h> 25#include <linux/gfp.h>
26#include <linux/cpu.h>
26 27
27#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
28#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
@@ -78,6 +79,8 @@ void __cpuinit leon_callin(void)
78 local_flush_tlb_all(); 79 local_flush_tlb_all();
79 leon_configure_cache_smp(); 80 leon_configure_cache_smp();
80 81
82 notify_cpu_starting(cpuid);
83
81 /* Get our local ticker going. */ 84 /* Get our local ticker going. */
82 smp_setup_percpu_timer(); 85 smp_setup_percpu_timer();
83 86
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 232df9949530..3ee51f189a55 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -566,15 +566,10 @@ out:
566 566
567SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) 567SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
568{ 568{
569 long ret;
570
571 if (invalid_64bit_range(addr, len)) 569 if (invalid_64bit_range(addr, len))
572 return -EINVAL; 570 return -EINVAL;
573 571
574 down_write(&current->mm->mmap_sem); 572 return vm_munmap(addr, len);
575 ret = do_munmap(current->mm, addr, len);
576 up_write(&current->mm->mmap_sem);
577 return ret;
578} 573}
579 574
580extern unsigned long do_mremap(unsigned long addr, 575extern unsigned long do_mremap(unsigned long addr,
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b57a5942ba64..874162a11ceb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
495 stx %o7, [%g1 + GR_SNAP_O7] 495 stx %o7, [%g1 + GR_SNAP_O7]
496 stx %i7, [%g1 + GR_SNAP_I7] 496 stx %i7, [%g1 + GR_SNAP_I7]
497 /* Don't try this at home kids... */ 497 /* Don't try this at home kids... */
498 rdpr %cwp, %g2 498 rdpr %cwp, %g3
499 sub %g2, 1, %g7 499 sub %g3, 1, %g7
500 wrpr %g7, %cwp 500 wrpr %g7, %cwp
501 mov %i7, %g7 501 mov %i7, %g7
502 wrpr %g2, %cwp 502 wrpr %g3, %cwp
503 stx %g7, [%g1 + GR_SNAP_RPC] 503 stx %g7, [%g1 + GR_SNAP_RPC]
504 sethi %hi(trap_block), %g7 504 sethi %hi(trap_block), %g7
505 or %g7, %lo(trap_block), %g7 505 or %g7, %lo(trap_block), %g7
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index 5d5a635530bd..32e6cbe8dff3 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -47,8 +47,8 @@ struct pci_controller {
47 */ 47 */
48#define PCI_DMA_BUS_IS_PHYS 1 48#define PCI_DMA_BUS_IS_PHYS 1
49 49
50int __devinit tile_pci_init(void); 50int __init tile_pci_init(void);
51int __devinit pcibios_init(void); 51int __init pcibios_init(void);
52 52
53static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} 53static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
54 54
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index a1bb59eecc18..b56d12bf5900 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -141,7 +141,7 @@ static int __devinit tile_init_irqs(int controller_id,
141 * 141 *
142 * Returns the number of controllers discovered. 142 * Returns the number of controllers discovered.
143 */ 143 */
144int __devinit tile_pci_init(void) 144int __init tile_pci_init(void)
145{ 145{
146 int i; 146 int i;
147 147
@@ -287,7 +287,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
287 * The controllers have been set up by the time we get here, by a call to 287 * The controllers have been set up by the time we get here, by a call to
288 * tile_pci_init. 288 * tile_pci_init.
289 */ 289 */
290int __devinit pcibios_init(void) 290int __init pcibios_init(void)
291{ 291{
292 int i; 292 int i;
293 293
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 9efbc1391b3c..89529c9f0605 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -346,12 +346,10 @@ void single_step_once(struct pt_regs *regs)
346 } 346 }
347 347
348 /* allocate a cache line of writable, executable memory */ 348 /* allocate a cache line of writable, executable memory */
349 down_write(&current->mm->mmap_sem); 349 buffer = (void __user *) vm_mmap(NULL, 0, 64,
350 buffer = (void __user *) do_mmap(NULL, 0, 64,
351 PROT_EXEC | PROT_READ | PROT_WRITE, 350 PROT_EXEC | PROT_READ | PROT_WRITE,
352 MAP_PRIVATE | MAP_ANONYMOUS, 351 MAP_PRIVATE | MAP_ANONYMOUS,
353 0); 352 0);
354 up_write(&current->mm->mmap_sem);
355 353
356 if (IS_ERR((void __force *)buffer)) { 354 if (IS_ERR((void __force *)buffer)) {
357 kfree(state); 355 kfree(state);
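
single_step.c is one of several call sites in this merge (sys_sparc_64.c above, ia32_aout.c further down) converted from open-coded down_write(&mm->mmap_sem)/do_mmap()/up_write() to vm_mmap(), which takes the semaphore internally. A rough model of the wrapper pattern, with a pthread rwlock standing in for mmap_sem; the names here are invented for the sketch:

    #include <pthread.h>

    static pthread_rwlock_t mmap_sem_model = PTHREAD_RWLOCK_INITIALIZER;

    /* Core mapping routine: caller must already hold the lock for writing. */
    static long do_mmap_model(unsigned long addr, unsigned long len)
    {
    	(void)len;
    	return (long)addr;	/* pretend the mapping landed at addr */
    }

    /* vm_mmap()-style wrapper: acquires and releases the lock itself. */
    long vm_mmap_model(unsigned long addr, unsigned long len)
    {
    	long ret;

    	pthread_rwlock_wrlock(&mmap_sem_model);
    	ret = do_mmap_model(addr, len);
    	pthread_rwlock_unlock(&mmap_sem_model);
    	return ret;
    }
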
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d14cc6b79ad..c9866b0b77d8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -81,7 +81,7 @@ config X86
81 select CLKEVT_I8253 81 select CLKEVT_I8253
82 select ARCH_HAVE_NMI_SAFE_CMPXCHG 82 select ARCH_HAVE_NMI_SAFE_CMPXCHG
83 select GENERIC_IOMAP 83 select GENERIC_IOMAP
84 select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC 84 select DCACHE_WORD_ACCESS
85 85
86config INSTRUCTION_DECODER 86config INSTRUCTION_DECODER
87 def_bool (KPROBES || PERF_EVENTS) 87 def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index a0559930a180..c85e3ac99bba 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,6 +33,9 @@
33 __HEAD 33 __HEAD
34ENTRY(startup_32) 34ENTRY(startup_32)
35#ifdef CONFIG_EFI_STUB 35#ifdef CONFIG_EFI_STUB
36 jmp preferred_addr
37
38 .balign 0x10
36 /* 39 /*
37 * We don't need the return address, so set up the stack so 40 * We don't need the return address, so set up the stack so
 38 * efi_main() can find its arguments. 41 * efi_main() can find its arguments.
@@ -41,12 +44,17 @@ ENTRY(startup_32)
41 44
42 call efi_main 45 call efi_main
43 cmpl $0, %eax 46 cmpl $0, %eax
44 je preferred_addr
45 movl %eax, %esi 47 movl %eax, %esi
46 call 1f 48 jne 2f
471: 491:
50 /* EFI init failed, so hang. */
51 hlt
52 jmp 1b
532:
54 call 3f
553:
48 popl %eax 56 popl %eax
49 subl $1b, %eax 57 subl $3b, %eax
50 subl BP_pref_address(%esi), %eax 58 subl BP_pref_address(%esi), %eax
51 add BP_code32_start(%esi), %eax 59 add BP_code32_start(%esi), %eax
52 leal preferred_addr(%eax), %eax 60 leal preferred_addr(%eax), %eax
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 558d76ce23bc..87e03a13d8e3 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -200,18 +200,28 @@ ENTRY(startup_64)
200 * entire text+data+bss and hopefully all of memory. 200 * entire text+data+bss and hopefully all of memory.
201 */ 201 */
202#ifdef CONFIG_EFI_STUB 202#ifdef CONFIG_EFI_STUB
203 pushq %rsi 203 /*
204 * The entry point for the PE/COFF executable is 0x210, so only
205 * legacy boot loaders will execute this jmp.
206 */
207 jmp preferred_addr
208
209 .org 0x210
204 mov %rcx, %rdi 210 mov %rcx, %rdi
205 mov %rdx, %rsi 211 mov %rdx, %rsi
206 call efi_main 212 call efi_main
207 popq %rsi
208 cmpq $0,%rax
209 je preferred_addr
210 movq %rax,%rsi 213 movq %rax,%rsi
211 call 1f 214 cmpq $0,%rax
215 jne 2f
2121: 2161:
217 /* EFI init failed, so hang. */
218 hlt
219 jmp 1b
2202:
221 call 3f
2223:
213 popq %rax 223 popq %rax
214 subq $1b, %rax 224 subq $3b, %rax
215 subq BP_pref_address(%rsi), %rax 225 subq BP_pref_address(%rsi), %rax
216 add BP_code32_start(%esi), %eax 226 add BP_code32_start(%esi), %eax
217 leaq preferred_addr(%rax), %rax 227 leaq preferred_addr(%rax), %rax
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index d3c0b0277666..fb7117a4ade1 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -403,13 +403,11 @@ static void print_absolute_symbols(void)
403 for (i = 0; i < ehdr.e_shnum; i++) { 403 for (i = 0; i < ehdr.e_shnum; i++) {
404 struct section *sec = &secs[i]; 404 struct section *sec = &secs[i];
405 char *sym_strtab; 405 char *sym_strtab;
406 Elf32_Sym *sh_symtab;
407 int j; 406 int j;
408 407
409 if (sec->shdr.sh_type != SHT_SYMTAB) { 408 if (sec->shdr.sh_type != SHT_SYMTAB) {
410 continue; 409 continue;
411 } 410 }
412 sh_symtab = sec->symtab;
413 sym_strtab = sec->link->strtab; 411 sym_strtab = sec->link->strtab;
414 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { 412 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
415 Elf32_Sym *sym; 413 Elf32_Sym *sym;
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ed549767a231..24443a332083 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -205,8 +205,13 @@ int main(int argc, char ** argv)
205 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); 205 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
206 206
207#ifdef CONFIG_X86_32 207#ifdef CONFIG_X86_32
208 /* Address of entry point */ 208 /*
209 put_unaligned_le32(i, &buf[pe_header + 0x28]); 209 * Address of entry point.
210 *
211 * The EFI stub entry point is +16 bytes from the start of
212 * the .text section.
213 */
214 put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
210 215
211 /* .text size */ 216 /* .text size */
212 put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); 217 put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
@@ -217,9 +222,11 @@ int main(int argc, char ** argv)
217 /* 222 /*
218 * Address of entry point. startup_32 is at the beginning and 223 * Address of entry point. startup_32 is at the beginning and
219 * the 64-bit entry point (startup_64) is always 512 bytes 224 * the 64-bit entry point (startup_64) is always 512 bytes
220 * after. 225 * after. The EFI stub entry point is 16 bytes after that, as
226 * the first instruction allows legacy loaders to jump over
227 * the EFI stub initialisation
221 */ 228 */
222 put_unaligned_le32(i + 512, &buf[pe_header + 0x28]); 229 put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
223 230
224 /* .text size */ 231 /* .text size */
225 put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); 232 put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
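
build.c now points the PE/COFF AddressOfEntryPoint at the EFI stub rather than at startup_32/startup_64: 16 bytes into .text on 32-bit, and 512 + 16 bytes in on 64-bit, past the jmp that legacy loaders execute. A small check of that arithmetic (the .text offset below is an invented example, not the real setup size):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int text_off = 0x4000;			/* hypothetical start of .text in the file */
    	unsigned int entry32 = text_off + 16;		/* EFI stub sits 16 bytes past startup_32 */
    	unsigned int entry64 = text_off + 512 + 16;	/* startup_64 is 512 bytes in, stub 16 more */

    	printf("32-bit AddressOfEntryPoint = 0x%x\n", entry32);
    	printf("64-bit AddressOfEntryPoint = 0x%x\n", entry64);
    	return 0;
    }
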
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index d511d951a052..07b3a68d2d29 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -119,9 +119,7 @@ static void set_brk(unsigned long start, unsigned long end)
119 end = PAGE_ALIGN(end); 119 end = PAGE_ALIGN(end);
120 if (end <= start) 120 if (end <= start)
121 return; 121 return;
122 down_write(&current->mm->mmap_sem); 122 vm_brk(start, end - start);
123 do_brk(start, end - start);
124 up_write(&current->mm->mmap_sem);
125} 123}
126 124
127#ifdef CORE_DUMP 125#ifdef CORE_DUMP
@@ -296,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
296 294
297 /* OK, This is the point of no return */ 295 /* OK, This is the point of no return */
298 set_personality(PER_LINUX); 296 set_personality(PER_LINUX);
299 set_thread_flag(TIF_IA32); 297 set_personality_ia32(false);
300 current->mm->context.ia32_compat = 1;
301 298
302 setup_new_exec(bprm); 299 setup_new_exec(bprm);
303 300
@@ -332,9 +329,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
332 pos = 32; 329 pos = 32;
333 map_size = ex.a_text+ex.a_data; 330 map_size = ex.a_text+ex.a_data;
334 331
335 down_write(&current->mm->mmap_sem); 332 error = vm_brk(text_addr & PAGE_MASK, map_size);
336 error = do_brk(text_addr & PAGE_MASK, map_size);
337 up_write(&current->mm->mmap_sem);
338 333
339 if (error != (text_addr & PAGE_MASK)) { 334 if (error != (text_addr & PAGE_MASK)) {
340 send_sig(SIGKILL, current, 0); 335 send_sig(SIGKILL, current, 0);
@@ -373,9 +368,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
373 if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { 368 if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
374 loff_t pos = fd_offset; 369 loff_t pos = fd_offset;
375 370
376 down_write(&current->mm->mmap_sem); 371 vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
377 do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
378 up_write(&current->mm->mmap_sem);
379 bprm->file->f_op->read(bprm->file, 372 bprm->file->f_op->read(bprm->file,
380 (char __user *)N_TXTADDR(ex), 373 (char __user *)N_TXTADDR(ex),
381 ex.a_text+ex.a_data, &pos); 374 ex.a_text+ex.a_data, &pos);
@@ -385,26 +378,22 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
385 goto beyond_if; 378 goto beyond_if;
386 } 379 }
387 380
388 down_write(&current->mm->mmap_sem); 381 error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
389 error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
390 PROT_READ | PROT_EXEC, 382 PROT_READ | PROT_EXEC,
391 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | 383 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
392 MAP_EXECUTABLE | MAP_32BIT, 384 MAP_EXECUTABLE | MAP_32BIT,
393 fd_offset); 385 fd_offset);
394 up_write(&current->mm->mmap_sem);
395 386
396 if (error != N_TXTADDR(ex)) { 387 if (error != N_TXTADDR(ex)) {
397 send_sig(SIGKILL, current, 0); 388 send_sig(SIGKILL, current, 0);
398 return error; 389 return error;
399 } 390 }
400 391
401 down_write(&current->mm->mmap_sem); 392 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
402 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
403 PROT_READ | PROT_WRITE | PROT_EXEC, 393 PROT_READ | PROT_WRITE | PROT_EXEC,
404 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | 394 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
405 MAP_EXECUTABLE | MAP_32BIT, 395 MAP_EXECUTABLE | MAP_32BIT,
406 fd_offset + ex.a_text); 396 fd_offset + ex.a_text);
407 up_write(&current->mm->mmap_sem);
408 if (error != N_DATADDR(ex)) { 397 if (error != N_DATADDR(ex)) {
409 send_sig(SIGKILL, current, 0); 398 send_sig(SIGKILL, current, 0);
410 return error; 399 return error;
@@ -476,9 +465,7 @@ static int load_aout_library(struct file *file)
476 error_time = jiffies; 465 error_time = jiffies;
477 } 466 }
478#endif 467#endif
479 down_write(&current->mm->mmap_sem); 468 vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
480 do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
481 up_write(&current->mm->mmap_sem);
482 469
483 file->f_op->read(file, (char __user *)start_addr, 470 file->f_op->read(file, (char __user *)start_addr,
484 ex.a_text + ex.a_data, &pos); 471 ex.a_text + ex.a_data, &pos);
@@ -490,12 +477,10 @@ static int load_aout_library(struct file *file)
490 goto out; 477 goto out;
491 } 478 }
492 /* Now use mmap to map the library into memory. */ 479 /* Now use mmap to map the library into memory. */
493 down_write(&current->mm->mmap_sem); 480 error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
494 error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
495 PROT_READ | PROT_WRITE | PROT_EXEC, 481 PROT_READ | PROT_WRITE | PROT_EXEC,
496 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT, 482 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
497 N_TXTOFF(ex)); 483 N_TXTOFF(ex));
498 up_write(&current->mm->mmap_sem);
499 retval = error; 484 retval = error;
500 if (error != start_addr) 485 if (error != start_addr)
501 goto out; 486 goto out;
@@ -503,9 +488,7 @@ static int load_aout_library(struct file *file)
503 len = PAGE_ALIGN(ex.a_text + ex.a_data); 488 len = PAGE_ALIGN(ex.a_text + ex.a_data);
504 bss = ex.a_text + ex.a_data + ex.a_bss; 489 bss = ex.a_text + ex.a_data + ex.a_bss;
505 if (bss > len) { 490 if (bss > len) {
506 down_write(&current->mm->mmap_sem); 491 error = vm_brk(start_addr + len, bss - len);
507 error = do_brk(start_addr + len, bss - len);
508 up_write(&current->mm->mmap_sem);
509 retval = error; 492 retval = error;
510 if (error != start_addr + len) 493 if (error != start_addr + len)
511 goto out; 494 goto out;
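All of the ia32_aout.c conversions above follow one pattern: open-coded down_write(&current->mm->mmap_sem) / do_brk()-do_mmap()-do_munmap() / up_write() sequences are replaced by the vm_brk(), vm_mmap() and vm_munmap() helpers, which take the semaphore internally. A minimal sketch of what such a helper wraps (illustrative only, assuming the locking is the only difference these callers care about):

/*
 * Sketch of a vm_brk()-style wrapper: same return value as do_brk(),
 * but the caller no longer touches mmap_sem directly.
 */
static unsigned long sketch_vm_brk(unsigned long addr, unsigned long len)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_brk(addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}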
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 3427b7798dbc..7ef7c3020e5c 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,9 +7,9 @@
7#else 7#else
8# ifdef __i386__ 8# ifdef __i386__
9# include "posix_types_32.h" 9# include "posix_types_32.h"
10# elif defined(__LP64__) 10# elif defined(__ILP32__)
11# include "posix_types_64.h"
12# else
13# include "posix_types_x32.h" 11# include "posix_types_x32.h"
12# else
13# include "posix_types_64.h"
14# endif 14# endif
15#endif 15#endif
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 4a085383af27..5ca71c065eef 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -257,7 +257,7 @@ struct sigcontext {
257 __u64 oldmask; 257 __u64 oldmask;
258 __u64 cr2; 258 __u64 cr2;
259 struct _fpstate __user *fpstate; /* zero when no FPU context */ 259 struct _fpstate __user *fpstate; /* zero when no FPU context */
260#ifndef __LP64__ 260#ifdef __ILP32__
261 __u32 __fpstate_pad; 261 __u32 __fpstate_pad;
262#endif 262#endif
263 __u64 reserved1[8]; 263 __u64 reserved1[8];
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h
index fc1aa5535646..34c47b3341c0 100644
--- a/arch/x86/include/asm/siginfo.h
+++ b/arch/x86/include/asm/siginfo.h
@@ -2,7 +2,13 @@
2#define _ASM_X86_SIGINFO_H 2#define _ASM_X86_SIGINFO_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 5# ifdef __ILP32__ /* x32 */
6typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
7# define __ARCH_SI_CLOCK_T __kernel_si_clock_t
8# define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8)))
9# else /* x86-64 */
10# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
11# endif
6#endif 12#endif
7 13
8#include <asm-generic/siginfo.h> 14#include <asm-generic/siginfo.h>
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 37cdc9d99bb1..4437001d8e3d 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -63,10 +63,10 @@
63#else 63#else
64# ifdef __i386__ 64# ifdef __i386__
65# include <asm/unistd_32.h> 65# include <asm/unistd_32.h>
66# elif defined(__LP64__) 66# elif defined(__ILP32__)
67# include <asm/unistd_64.h>
68# else
69# include <asm/unistd_x32.h> 67# include <asm/unistd_x32.h>
68# else
69# include <asm/unistd_64.h>
70# endif 70# endif
71#endif 71#endif
72 72
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 6fe6767b7124..e58f03b206c3 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
43 return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); 43 return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
44} 44}
45 45
46/*
47 * Load an unaligned word from kernel space.
48 *
49 * In the (very unlikely) case of the word being a page-crosser
50 * and the next page not being mapped, take the exception and
51 * return zeroes in the non-existing part.
52 */
53static inline unsigned long load_unaligned_zeropad(const void *addr)
54{
55 unsigned long ret, dummy;
56
57 asm(
58 "1:\tmov %2,%0\n"
59 "2:\n"
60 ".section .fixup,\"ax\"\n"
61 "3:\t"
62 "lea %2,%1\n\t"
63 "and %3,%1\n\t"
64 "mov (%1),%0\n\t"
65 "leal %2,%%ecx\n\t"
66 "andl %4,%%ecx\n\t"
67 "shll $3,%%ecx\n\t"
68 "shr %%cl,%0\n\t"
69 "jmp 2b\n"
70 ".previous\n"
71 _ASM_EXTABLE(1b, 3b)
72 :"=&r" (ret),"=&c" (dummy)
73 :"m" (*(unsigned long *)addr),
74 "i" (-sizeof(unsigned long)),
75 "i" (sizeof(unsigned long)-1));
76 return ret;
77}
78
46#endif /* _ASM_WORD_AT_A_TIME_H */ 79#endif /* _ASM_WORD_AT_A_TIME_H */
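load_unaligned_zeropad() is intended for word-at-a-time scanning where the final load may run past the end of the mapping; the fixup above returns the bytes that do exist with zeroes in the missing tail. A hedged usage sketch (the caller and its purpose are hypothetical; only load_unaligned_zeropad() and has_zero() come from the header):

/* Hypothetical caller: locate the word containing the terminating NUL
 * of a string, one word at a time.  If the last load crosses into an
 * unmapped page, the zero padding still makes has_zero() fire. */
static unsigned long sketch_find_nul_word(const char *s)
{
	unsigned long data, off = 0;

	for (;;) {
		data = load_unaligned_zeropad(s + off);
		if (has_zero(data))
			return off;	/* terminator is inside this word */
		off += sizeof(unsigned long);
	}
}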
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baaca8defec8..764b66a4cf89 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi;
195 195
196extern void x86_init_noop(void); 196extern void x86_init_noop(void);
197extern void x86_init_uint_noop(unsigned int unused); 197extern void x86_init_uint_noop(unsigned int unused);
198extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
199 198
200#endif 199#endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 103b6ab368d3..146a49c763a4 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
24static char temp_stack[4096]; 24static char temp_stack[4096];
25#endif 25#endif
26 26
27asmlinkage void acpi_enter_s3(void)
28{
29 acpi_enter_sleep_state(3, wake_sleep_flags);
30}
27/** 31/**
28 * acpi_suspend_lowlevel - save kernel state 32 * acpi_suspend_lowlevel - save kernel state
29 * 33 *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 416d4be13fef..d68677a2a010 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -3,12 +3,16 @@
3 */ 3 */
4 4
5#include <asm/trampoline.h> 5#include <asm/trampoline.h>
6#include <linux/linkage.h>
6 7
7extern unsigned long saved_video_mode; 8extern unsigned long saved_video_mode;
8extern long saved_magic; 9extern long saved_magic;
9 10
10extern int wakeup_pmode_return; 11extern int wakeup_pmode_return;
11 12
13extern u8 wake_sleep_flags;
14extern asmlinkage void acpi_enter_s3(void);
15
12extern unsigned long acpi_copy_wakeup_routine(unsigned long); 16extern unsigned long acpi_copy_wakeup_routine(unsigned long);
13extern void wakeup_long64(void); 17extern void wakeup_long64(void);
14 18
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..72610839f03b 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@ restore_registers:
74ENTRY(do_suspend_lowlevel) 74ENTRY(do_suspend_lowlevel)
75 call save_processor_state 75 call save_processor_state
76 call save_registers 76 call save_registers
77 pushl $3 77 call acpi_enter_s3
78 call acpi_enter_sleep_state
79 addl $4, %esp
80 78
81# In case of S3 failure, we'll emerge here. Jump 79# In case of S3 failure, we'll emerge here. Jump
82# to ret_point to recover 80# to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..014d1d28c397 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
71 movq %rsi, saved_rsi 71 movq %rsi, saved_rsi
72 72
73 addq $8, %rsp 73 addq $8, %rsp
74 movl $3, %edi 74 call acpi_enter_s3
75 xorl %eax, %eax
76 call acpi_enter_sleep_state
77 /* in case something went wrong, restore the machine status and go on */ 75 /* in case something went wrong, restore the machine status and go on */
78 jmp resume_point 76 jmp resume_point
79 77
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 11544d8f1e97..edc24480469f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
1637 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; 1637 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1638 1638
1639 /* The BIOS may have set up the APIC at some other address */ 1639 /* The BIOS may have set up the APIC at some other address */
1640 rdmsr(MSR_IA32_APICBASE, l, h); 1640 if (boot_cpu_data.x86 >= 6) {
1641 if (l & MSR_IA32_APICBASE_ENABLE) 1641 rdmsr(MSR_IA32_APICBASE, l, h);
1642 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; 1642 if (l & MSR_IA32_APICBASE_ENABLE)
1643 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1644 }
1643 1645
1644 pr_info("Found and enabled local APIC!\n"); 1646 pr_info("Found and enabled local APIC!\n");
1645 return 0; 1647 return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
1657 * MSR. This can only be done in software for Intel P6 or later 1659 * MSR. This can only be done in software for Intel P6 or later
1658 * and AMD K7 (Model > 1) or later. 1660 * and AMD K7 (Model > 1) or later.
1659 */ 1661 */
1660 rdmsr(MSR_IA32_APICBASE, l, h); 1662 if (boot_cpu_data.x86 >= 6) {
1661 if (!(l & MSR_IA32_APICBASE_ENABLE)) { 1663 rdmsr(MSR_IA32_APICBASE, l, h);
1662 pr_info("Local APIC disabled by BIOS -- reenabling.\n"); 1664 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1663 l &= ~MSR_IA32_APICBASE_BASE; 1665 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1664 l |= MSR_IA32_APICBASE_ENABLE | addr; 1666 l &= ~MSR_IA32_APICBASE_BASE;
1665 wrmsr(MSR_IA32_APICBASE, l, h); 1667 l |= MSR_IA32_APICBASE_ENABLE | addr;
1666 enabled_via_apicbase = 1; 1668 wrmsr(MSR_IA32_APICBASE, l, h);
1669 enabled_via_apicbase = 1;
1670 }
1667 } 1671 }
1668 return apic_verify(); 1672 return apic_verify();
1669} 1673}
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
2209 * FIXME! This will be wrong if we ever support suspend on 2213 * FIXME! This will be wrong if we ever support suspend on
2210 * SMP! We'll need to do this as part of the CPU restore! 2214 * SMP! We'll need to do this as part of the CPU restore!
2211 */ 2215 */
2212 rdmsr(MSR_IA32_APICBASE, l, h); 2216 if (boot_cpu_data.x86 >= 6) {
2213 l &= ~MSR_IA32_APICBASE_BASE; 2217 rdmsr(MSR_IA32_APICBASE, l, h);
2214 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; 2218 l &= ~MSR_IA32_APICBASE_BASE;
2215 wrmsr(MSR_IA32_APICBASE, l, h); 2219 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2220 wrmsr(MSR_IA32_APICBASE, l, h);
2221 }
2216 } 2222 }
2217 2223
2218 maxlvt = lapic_get_maxlvt(); 2224 maxlvt = lapic_get_maxlvt();
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 899803e03214..23e75422e013 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -207,8 +207,11 @@ static void __init map_csrs(void)
207 207
208static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) 208static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
209{ 209{
210 c->phys_proc_id = node; 210
211 per_cpu(cpu_llc_id, smp_processor_id()) = node; 211 if (c->phys_proc_id != node) {
212 c->phys_proc_id = node;
213 per_cpu(cpu_llc_id, smp_processor_id()) = node;
214 }
212} 215}
213 216
214static int __init numachip_system_init(void) 217static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db45e3a..991e315f4227 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
24{ 24{
25 if (x2apic_phys) 25 if (x2apic_phys)
26 return x2apic_enabled(); 26 return x2apic_enabled();
27 else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
28 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
29 x2apic_enabled()) {
30 printk(KERN_DEBUG "System requires x2apic physical mode\n");
31 return 1;
32 }
27 else 33 else
28 return 0; 34 return 0;
29} 35}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0a44b90602b0..146bb6218eec 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,7 +26,8 @@
26 * contact AMD for precise details and a CPU swap. 26 * contact AMD for precise details and a CPU swap.
27 * 27 *
28 * See http://www.multimania.com/poulot/k6bug.html 28 * See http://www.multimania.com/poulot/k6bug.html
29 * http://www.amd.com/K6/k6docs/revgd.html 29 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
30 * (Publication # 21266 Issue Date: August 1998)
30 * 31 *
31 * The following test is erm.. interesting. AMD neglected to up 32 * The following test is erm.. interesting. AMD neglected to up
32 * the chip setting when fixing the bug but they also tweaked some 33 * the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
94 "system stability may be impaired when more than 32 MB are used.\n"); 95 "system stability may be impaired when more than 32 MB are used.\n");
95 else 96 else
96 printk(KERN_CONT "probably OK (after B9730xxxx).\n"); 97 printk(KERN_CONT "probably OK (after B9730xxxx).\n");
97 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
98 } 98 }
99 99
100 /* K6 with old style WHCR */ 100 /* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
353 node = per_cpu(cpu_llc_id, cpu); 353 node = per_cpu(cpu_llc_id, cpu);
354 354
355 /* 355 /*
356 * If core numbers are inconsistent, it's likely a multi-fabric platform, 356 * On multi-fabric platform (e.g. Numascale NumaChip) a
357 * so invoke platform-specific handler 357 * platform-specific handler needs to be called to fixup some
358 * IDs of the CPU.
358 */ 359 */
359 if (c->phys_proc_id != node) 360 if (x86_cpuinit.fixup_cpu_id)
360 x86_cpuinit.fixup_cpu_id(c, node); 361 x86_cpuinit.fixup_cpu_id(c, node);
361 362
362 if (!node_online(node)) { 363 if (!node_online(node)) {
@@ -579,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
579 } 580 }
580 } 581 }
581 582
583 /* re-enable TopologyExtensions if switched off by BIOS */
584 if ((c->x86 == 0x15) &&
585 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
586 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
587 u64 val;
588
589 if (!rdmsrl_amd_safe(0xc0011005, &val)) {
590 val |= 1ULL << 54;
591 wrmsrl_amd_safe(0xc0011005, val);
592 rdmsrl(0xc0011005, val);
593 if (val & (1ULL << 54)) {
594 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
595 printk(KERN_INFO FW_INFO "CPU: Re-enabling "
596 "disabled Topology Extensions Support\n");
597 }
598 }
599 }
600
582 cpu_detect_cache_sizes(c); 601 cpu_detect_cache_sizes(c);
583 602
584 /* Multi core CPU? */ 603 /* Multi core CPU? */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e258362a3d..cf79302198a6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1163,15 +1163,6 @@ static void dbg_restore_debug_regs(void)
1163#endif /* ! CONFIG_KGDB */ 1163#endif /* ! CONFIG_KGDB */
1164 1164
1165/* 1165/*
1166 * Prints an error where the NUMA and configured core-number mismatch and the
1167 * platform didn't override this to fix it up
1168 */
1169void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
1170{
1171 pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
1172}
1173
1174/*
1175 * cpu_init() initializes state that is per-CPU. Some data is already 1166 * cpu_init() initializes state that is per-CPU. Some data is already
1176 * initialized (naturally) in the bootstrap process, such as the GDT 1167 * initialized (naturally) in the bootstrap process, such as the GDT
1177 * and IDT. We reload them nevertheless, this function acts as a 1168 * and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed98a64..b8f3653dddbc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
433 /* check if @slot is already used or the index is already disabled */ 433 /* check if @slot is already used or the index is already disabled */
434 ret = amd_get_l3_disable_slot(nb, slot); 434 ret = amd_get_l3_disable_slot(nb, slot);
435 if (ret >= 0) 435 if (ret >= 0)
436 return -EINVAL; 436 return -EEXIST;
437 437
438 if (index > nb->l3_cache.indices) 438 if (index > nb->l3_cache.indices)
439 return -EINVAL; 439 return -EINVAL;
440 440
441 /* check whether the other slot has disabled the same index already */ 441 /* check whether the other slot has disabled the same index already */
442 if (index == amd_get_l3_disable_slot(nb, !slot)) 442 if (index == amd_get_l3_disable_slot(nb, !slot))
443 return -EINVAL; 443 return -EEXIST;
444 444
445 amd_l3_disable_index(nb, cpu, slot, index); 445 amd_l3_disable_index(nb, cpu, slot, index);
446 446
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); 468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
469 if (err) { 469 if (err) {
470 if (err == -EEXIST) 470 if (err == -EEXIST)
471 printk(KERN_WARNING "L3 disable slot %d in use!\n", 471 pr_warning("L3 slot %d in use/index already disabled!\n",
472 slot); 472 slot);
473 return err; 473 return err;
474 } 474 }
475 return count; 475 return count;
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 7734bcbb5a3a..2d6e6498c176 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
235 if (tsk_used_math(tsk)) { 235 if (tsk_used_math(tsk)) {
236 if (HAVE_HWFP && tsk == current) 236 if (HAVE_HWFP && tsk == current)
237 unlazy_fpu(tsk); 237 unlazy_fpu(tsk);
238 tsk->thread.fpu.last_cpu = ~0;
238 return 0; 239 return 0;
239 } 240 }
240 241
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b8ba6e4a27e4..e554e5ad2fe8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
79 u32 token; 79 u32 token;
80 int cpu; 80 int cpu;
81 bool halted; 81 bool halted;
82 struct mm_struct *mm;
83}; 82};
84 83
85static struct kvm_task_sleep_head { 84static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
126 125
127 n.token = token; 126 n.token = token;
128 n.cpu = smp_processor_id(); 127 n.cpu = smp_processor_id();
129 n.mm = current->active_mm;
130 n.halted = idle || preempt_count() > 1; 128 n.halted = idle || preempt_count() > 1;
131 atomic_inc(&n.mm->mm_count);
132 init_waitqueue_head(&n.wq); 129 init_waitqueue_head(&n.wq);
133 hlist_add_head(&n.link, &b->list); 130 hlist_add_head(&n.link, &b->list);
134 spin_unlock(&b->lock); 131 spin_unlock(&b->lock);
@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
161static void apf_task_wake_one(struct kvm_task_sleep_node *n) 158static void apf_task_wake_one(struct kvm_task_sleep_node *n)
162{ 159{
163 hlist_del_init(&n->link); 160 hlist_del_init(&n->link);
164 if (!n->mm)
165 return;
166 mmdrop(n->mm);
167 if (n->halted) 161 if (n->halted)
168 smp_send_reschedule(n->cpu); 162 smp_send_reschedule(n->cpu);
169 else if (waitqueue_active(&n->wq)) 163 else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@ again:
207 * async PF was not yet handled. 201 * async PF was not yet handled.
208 * Add dummy entry for the token. 202 * Add dummy entry for the token.
209 */ 203 */
210 n = kmalloc(sizeof(*n), GFP_ATOMIC); 204 n = kzalloc(sizeof(*n), GFP_ATOMIC);
211 if (!n) { 205 if (!n) {
212 /* 206 /*
213 * Allocation failed! Busy wait while other cpu 207 * Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@ again:
219 } 213 }
220 n->token = token; 214 n->token = token;
221 n->cpu = smp_processor_id(); 215 n->cpu = smp_processor_id();
222 n->mm = NULL;
223 init_waitqueue_head(&n->wq); 216 init_waitqueue_head(&n->wq);
224 hlist_add_head(&n->link, &b->list); 217 hlist_add_head(&n->link, &b->list);
225 } else 218 } else
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 73465aab28f8..8a2ce8fd41c0 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
82{ 82{
83 struct cpuinfo_x86 *c = &cpu_data(cpu); 83 struct cpuinfo_x86 *c = &cpu_data(cpu);
84 84
85 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
86 pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
87 return -1;
88 }
89
90 csig->rev = c->microcode; 85 csig->rev = c->microcode;
91 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); 86 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
92 87
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
380 375
381struct microcode_ops * __init init_amd_microcode(void) 376struct microcode_ops * __init init_amd_microcode(void)
382{ 377{
378 struct cpuinfo_x86 *c = &cpu_data(0);
379
380 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
381 pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
382 return NULL;
383 }
384
383 patch = (void *)get_zeroed_page(GFP_KERNEL); 385 patch = (void *)get_zeroed_page(GFP_KERNEL);
384 if (!patch) 386 if (!patch)
385 return NULL; 387 return NULL;
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 87a0f8688301..c9bda6d6035c 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
419 if (err) 419 if (err)
420 return err; 420 return err;
421 421
422 if (microcode_init_cpu(cpu) == UCODE_ERROR) { 422 if (microcode_init_cpu(cpu) == UCODE_ERROR)
423 sysfs_remove_group(&dev->kobj, &mc_attr_group);
424 return -EINVAL; 423 return -EINVAL;
425 }
426 424
427 return err; 425 return err;
428} 426}
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
528 microcode_ops = init_intel_microcode(); 526 microcode_ops = init_intel_microcode();
529 else if (c->x86_vendor == X86_VENDOR_AMD) 527 else if (c->x86_vendor == X86_VENDOR_AMD)
530 microcode_ops = init_amd_microcode(); 528 microcode_ops = init_amd_microcode();
531 529 else
532 if (!microcode_ops) {
533 pr_err("no support for this CPU vendor\n"); 530 pr_err("no support for this CPU vendor\n");
531
532 if (!microcode_ops)
534 return -ENODEV; 533 return -ENODEV;
535 }
536 534
537 microcode_pdev = platform_device_register_simple("microcode", -1, 535 microcode_pdev = platform_device_register_simple("microcode", -1,
538 NULL, 0); 536 NULL, 0);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 733ca39f367e..43d8b48b23e6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
423 current_thread_info()->status |= TS_COMPAT; 423 current_thread_info()->status |= TS_COMPAT;
424 } 424 }
425} 425}
426EXPORT_SYMBOL_GPL(set_personality_ia32);
426 427
427unsigned long get_wchan(struct task_struct *p) 428unsigned long get_wchan(struct task_struct *p)
428{ 429{
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 71f4727da373..5a98aa272184 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
185#endif 185#endif
186 rc = -EINVAL; 186 rc = -EINVAL;
187 if (pcpu_chosen_fc != PCPU_FC_PAGE) { 187 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
188 const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
189 const size_t dyn_size = PERCPU_MODULE_RESERVE + 188 const size_t dyn_size = PERCPU_MODULE_RESERVE +
190 PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; 189 PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
190 size_t atom_size;
191 191
192 /*
193 * On 64bit, use PMD_SIZE for atom_size so that embedded
194 * percpu areas are aligned to PMD. This, in the future,
195 * can also allow using PMD mappings in vmalloc area. Use
196 * PAGE_SIZE on 32bit as vmalloc space is highly contended
197 * and large vmalloc area allocs can easily fail.
198 */
199#ifdef CONFIG_X86_64
200 atom_size = PMD_SIZE;
201#else
202 atom_size = PAGE_SIZE;
203#endif
192 rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, 204 rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
193 dyn_size, atom_size, 205 dyn_size, atom_size,
194 pcpu_cpu_distance, 206 pcpu_cpu_distance,
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index e9f265fd79ae..9cf71d0b2d37 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {
93struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 93struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
94 .early_percpu_clock_init = x86_init_noop, 94 .early_percpu_clock_init = x86_init_noop,
95 .setup_percpu_clockev = setup_secondary_APIC_clock, 95 .setup_percpu_clockev = setup_secondary_APIC_clock,
96 .fixup_cpu_id = x86_default_fixup_cpu_id,
97}; 96};
98 97
99static void default_nmi_init(void) { }; 98static void default_nmi_init(void) { };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4044ce0bf7c1..185a2b823a2d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6336,13 +6336,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
6336 if (npages && !old.rmap) { 6336 if (npages && !old.rmap) {
6337 unsigned long userspace_addr; 6337 unsigned long userspace_addr;
6338 6338
6339 down_write(&current->mm->mmap_sem); 6339 userspace_addr = vm_mmap(NULL, 0,
6340 userspace_addr = do_mmap(NULL, 0,
6341 npages * PAGE_SIZE, 6340 npages * PAGE_SIZE,
6342 PROT_READ | PROT_WRITE, 6341 PROT_READ | PROT_WRITE,
6343 map_flags, 6342 map_flags,
6344 0); 6343 0);
6345 up_write(&current->mm->mmap_sem);
6346 6344
6347 if (IS_ERR((void *)userspace_addr)) 6345 if (IS_ERR((void *)userspace_addr))
6348 return PTR_ERR((void *)userspace_addr); 6346 return PTR_ERR((void *)userspace_addr);
@@ -6366,10 +6364,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
6366 if (!user_alloc && !old.user_alloc && old.rmap && !npages) { 6364 if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
6367 int ret; 6365 int ret;
6368 6366
6369 down_write(&current->mm->mmap_sem); 6367 ret = vm_munmap(old.userspace_addr,
6370 ret = do_munmap(current->mm, old.userspace_addr,
6371 old.npages * PAGE_SIZE); 6368 old.npages * PAGE_SIZE);
6372 up_write(&current->mm->mmap_sem);
6373 if (ret < 0) 6369 if (ret < 0)
6374 printk(KERN_WARNING 6370 printk(KERN_WARNING
6375 "kvm_vm_ioctl_set_memory_region: " 6371 "kvm_vm_ioctl_set_memory_region: "
@@ -6585,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6585 kvm_inject_page_fault(vcpu, &fault); 6581 kvm_inject_page_fault(vcpu, &fault);
6586 } 6582 }
6587 vcpu->arch.apf.halted = false; 6583 vcpu->arch.apf.halted = false;
6584 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6588} 6585}
6589 6586
6590bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 6587bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 66d377e334f7..646e3b5b4bb6 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = {
63 .name = "net5501:1", 63 .name = "net5501:1",
64 .gpio = 6, 64 .gpio = 6,
65 .default_trigger = "default-on", 65 .default_trigger = "default-on",
66 .active_low = 1, 66 .active_low = 0,
67 }, 67 },
68}; 68};
69 69
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e0a37233c0af..e31bcd8f2eee 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -805,7 +805,7 @@ void intel_scu_devices_create(void)
805 } else 805 } else
806 i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); 806 i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
807 } 807 }
808 intel_scu_notifier_post(SCU_AVAILABLE, 0L); 808 intel_scu_notifier_post(SCU_AVAILABLE, NULL);
809} 809}
810EXPORT_SYMBOL_GPL(intel_scu_devices_create); 810EXPORT_SYMBOL_GPL(intel_scu_devices_create);
811 811
@@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void)
814{ 814{
815 int i; 815 int i;
816 816
817 intel_scu_notifier_post(SCU_DOWN, 0L); 817 intel_scu_notifier_post(SCU_DOWN, NULL);
818 818
819 for (i = 0; i < ipc_next_dev; i++) 819 for (i = 0; i < ipc_next_dev; i++)
820 platform_device_del(ipc_devs[i]); 820 platform_device_del(ipc_devs[i]);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4f51bebac02c..95dccce8e979 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -63,6 +63,7 @@
63#include <asm/stackprotector.h> 63#include <asm/stackprotector.h>
64#include <asm/hypervisor.h> 64#include <asm/hypervisor.h>
65#include <asm/mwait.h> 65#include <asm/mwait.h>
66#include <asm/pci_x86.h>
66 67
67#ifdef CONFIG_ACPI 68#ifdef CONFIG_ACPI
68#include <linux/acpi.h> 69#include <linux/acpi.h>
@@ -261,7 +262,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
261 262
262static bool __init xen_check_mwait(void) 263static bool __init xen_check_mwait(void)
263{ 264{
264#ifdef CONFIG_ACPI 265#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
266 !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
265 struct xen_platform_op op = { 267 struct xen_platform_op op = {
266 .cmd = XENPF_set_processor_pminfo, 268 .cmd = XENPF_set_processor_pminfo,
267 .u.set_pminfo.id = -1, 269 .u.set_pminfo.id = -1,
@@ -349,7 +351,6 @@ static void __init xen_init_cpuid_mask(void)
349 /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ 351 /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
350 if ((cx & xsave_mask) != xsave_mask) 352 if ((cx & xsave_mask) != xsave_mask)
351 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ 353 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
352
353 if (xen_check_mwait()) 354 if (xen_check_mwait())
354 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); 355 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
355} 356}
@@ -809,9 +810,40 @@ static void xen_io_delay(void)
809} 810}
810 811
811#ifdef CONFIG_X86_LOCAL_APIC 812#ifdef CONFIG_X86_LOCAL_APIC
813static unsigned long xen_set_apic_id(unsigned int x)
814{
815 WARN_ON(1);
816 return x;
817}
818static unsigned int xen_get_apic_id(unsigned long x)
819{
820 return ((x)>>24) & 0xFFu;
821}
812static u32 xen_apic_read(u32 reg) 822static u32 xen_apic_read(u32 reg)
813{ 823{
814 return 0; 824 struct xen_platform_op op = {
825 .cmd = XENPF_get_cpuinfo,
826 .interface_version = XENPF_INTERFACE_VERSION,
827 .u.pcpu_info.xen_cpuid = 0,
828 };
829 int ret = 0;
830
831 /* Shouldn't need this as APIC is turned off for PV, and we only
832 * get called on the bootup processor. But just in case. */
833 if (!xen_initial_domain() || smp_processor_id())
834 return 0;
835
836 if (reg == APIC_LVR)
837 return 0x10;
838
839 if (reg != APIC_ID)
840 return 0;
841
842 ret = HYPERVISOR_dom0_op(&op);
843 if (ret)
844 return 0;
845
846 return op.u.pcpu_info.apic_id << 24;
815} 847}
816 848
817static void xen_apic_write(u32 reg, u32 val) 849static void xen_apic_write(u32 reg, u32 val)
@@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
849 apic->icr_write = xen_apic_icr_write; 881 apic->icr_write = xen_apic_icr_write;
850 apic->wait_icr_idle = xen_apic_wait_icr_idle; 882 apic->wait_icr_idle = xen_apic_wait_icr_idle;
851 apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; 883 apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
884 apic->set_apic_id = xen_set_apic_id;
885 apic->get_apic_id = xen_get_apic_id;
852} 886}
853 887
854#endif 888#endif
@@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
1365 /* Make sure ACS will be enabled */ 1399 /* Make sure ACS will be enabled */
1366 pci_request_acs(); 1400 pci_request_acs();
1367 } 1401 }
1368 1402#ifdef CONFIG_PCI
1369 1403 /* PCI BIOS service won't work from a PV guest. */
1404 pci_probe &= ~PCI_PROBE_BIOS;
1405#endif
1370 xen_raw_console_write("about to get started...\n"); 1406 xen_raw_console_write("about to get started...\n");
1371 1407
1372 xen_setup_runstate_info(0); 1408 xen_setup_runstate_info(0);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e279479a6b..69f5857660ac 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
353{ 353{
354 if (val & _PAGE_PRESENT) { 354 if (val & _PAGE_PRESENT) {
355 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 355 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
356 unsigned long pfn = mfn_to_pfn(mfn);
357
356 pteval_t flags = val & PTE_FLAGS_MASK; 358 pteval_t flags = val & PTE_FLAGS_MASK;
357 val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; 359 if (unlikely(pfn == ~0))
360 val = flags & ~_PAGE_PRESENT;
361 else
362 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
358 } 363 }
359 364
360 return val; 365 return val;
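The mmu.c hunk guards against mfn_to_pfn() returning ~0, i.e. a machine frame for which no pseudo-physical page could be looked up. Without the check, that ~0 would be shifted back into the PTE and yield a garbage but still present entry; with it, the entry is rebuilt without _PAGE_PRESENT. A standalone sketch of just that decision (the types and macros below are stand-ins, not the kernel's):

typedef unsigned long sketch_pteval_t;
#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_PRESENT	0x1UL

static sketch_pteval_t sketch_rebuild_pte(unsigned long pfn, sketch_pteval_t flags)
{
	if (pfn == ~0UL)				/* no pfn backs this mfn   */
		return flags & ~SKETCH_PAGE_PRESENT;	/* make it not-present    */
	return ((sketch_pteval_t)pfn << SKETCH_PAGE_SHIFT) | flags;
}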
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 5fac6919b957..0503c0c493a9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void)
178static void __init xen_filter_cpu_maps(void) 178static void __init xen_filter_cpu_maps(void)
179{ 179{
180 int i, rc; 180 int i, rc;
181 unsigned int subtract = 0;
181 182
182 if (!xen_initial_domain()) 183 if (!xen_initial_domain())
183 return; 184 return;
@@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void)
192 } else { 193 } else {
193 set_cpu_possible(i, false); 194 set_cpu_possible(i, false);
194 set_cpu_present(i, false); 195 set_cpu_present(i, false);
196 subtract++;
195 } 197 }
196 } 198 }
199#ifdef CONFIG_HOTPLUG_CPU
200 /* This is akin to using 'nr_cpus' on the Linux command line.
201 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
202 * have up to X, while nr_cpu_ids is greater than X. This
203 * normally is not a problem, except when CPU hotplugging
204 * is involved and then there might be more than X CPUs
205 * in the guest - which will not work as there is no
206 * hypercall to expand the max number of VCPUs an already
207 * running guest has. So cap it up to X. */
208 if (subtract)
209 nr_cpu_ids = nr_cpu_ids - subtract;
210#endif
211
197} 212}
198 213
199static void __init xen_smp_prepare_boot_cpu(void) 214static void __init xen_smp_prepare_boot_cpu(void)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 79d7362ad6d1..3e45aa000718 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
96 96
97 /* check for unmasked and pending */ 97 /* check for unmasked and pending */
98 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending 98 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
99 jz 1f 99 jnz 1f
1002: call check_events 1002: call check_events
1011: 1011:
102ENDPATCH(xen_restore_fl_direct) 102ENDPATCH(xen_restore_fl_direct)
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
index 26664cef8f11..91695a135498 100644
--- a/arch/xtensa/include/asm/hardirq.h
+++ b/arch/xtensa/include/asm/hardirq.h
@@ -11,9 +11,6 @@
11#ifndef _XTENSA_HARDIRQ_H 11#ifndef _XTENSA_HARDIRQ_H
12#define _XTENSA_HARDIRQ_H 12#define _XTENSA_HARDIRQ_H
13 13
14void ack_bad_irq(unsigned int irq);
15#define ack_bad_irq ack_bad_irq
16
17#include <asm-generic/hardirq.h> 14#include <asm-generic/hardirq.h>
18 15
19#endif /* _XTENSA_HARDIRQ_H */ 16#endif /* _XTENSA_HARDIRQ_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index d04cd3a625fa..4beb43c087d3 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -14,6 +14,7 @@
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include <linux/bug.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18 19
19#include <linux/types.h> 20#include <linux/types.h>
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index b69b000349fc..d78869a00b11 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -496,6 +496,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
496 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 496 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
497 497
498 if (signr > 0) { 498 if (signr > 0) {
499 int ret;
499 500
500 /* Are we from a system call? */ 501 /* Are we from a system call? */
501 502
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 107f6f7be5e1..dd30f40af9f5 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -174,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
174 index = sctx->count[0] & 0x7f; 174 index = sctx->count[0] & 0x7f;
175 175
176 /* Update number of bytes */ 176 /* Update number of bytes */
177 if (!(sctx->count[0] += len)) 177 if ((sctx->count[0] += len) < len)
178 sctx->count[1]++; 178 sctx->count[1]++;
179 179
180 part_len = 128 - index; 180 part_len = 128 - index;
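The one-character-class change above fixes carry detection in the 128-bit byte counter: a wrapped addition does not necessarily land exactly on zero, so testing the sum against zero misses most overflows, while testing it against len catches every wrap. A small self-contained worked example (not part of the patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t count = UINT64_MAX;	/* plays the role of sctx->count[0] */
	uint64_t len = 2;

	count += len;			/* wraps around to 1                  */
	assert(count != 0);		/* old test would miss the carry      */
	assert(count < len);		/* new test detects it: bump count[1] */
	return 0;
}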
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index ab513a972c95..a716fede4f25 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,7 +74,8 @@ acpi_status acpi_reset(void)
74 74
75 /* Check if the reset register is supported */ 75 /* Check if the reset register is supported */
76 76
77 if (!reset_reg->address) { 77 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
78 !reset_reg->address) {
78 return_ACPI_STATUS(AE_NOT_EXIST); 79 return_ACPI_STATUS(AE_NOT_EXIST);
79 } 80 }
80 81
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 7049a7d27c4f..330bb4d75852 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
631 * We know a device's inferred power state when all the resources 631 * We know a device's inferred power state when all the resources
632 * required for a given D-state are 'on'. 632 * required for a given D-state are 'on'.
633 */ 633 */
634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) { 634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
635 list = &device->power.states[i].resources; 635 list = &device->power.states[i].resources;
636 if (list->count < 1) 636 if (list->count < 1)
637 continue; 637 continue;
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index c1d612435939..a6c77e8b37bd 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,7 +23,8 @@ void acpi_reboot(void)
23 /* Is the reset register supported? The spec says we should be 23 /* Is the reset register supported? The spec says we should be
24 * checking the bit width and bit offset, but Windows ignores 24 * checking the bit width and bit offset, but Windows ignores
25 * these fields */ 25 * these fields */
26 /* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */ 26 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
27 return;
27 28
28 reset_value = acpi_gbl_FADT.reset_value; 29 reset_value = acpi_gbl_FADT.reset_value;
29 30
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 767e2dcb9616..7417267e88fa 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
869 /* 869 /*
870 * Enumerate supported power management states 870 * Enumerate supported power management states
871 */ 871 */
872 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) { 872 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
873 struct acpi_device_power_state *ps = &device->power.states[i]; 873 struct acpi_device_power_state *ps = &device->power.states[i];
874 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' }; 874 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
875 875
@@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
884 acpi_bus_add_power_resource(ps->resources.handles[j]); 884 acpi_bus_add_power_resource(ps->resources.handles[j]);
885 } 885 }
886 886
887 /* The exist of _PR3 indicates D3Cold support */
888 if (i == ACPI_STATE_D3) {
889 status = acpi_get_handle(device->handle, object_name, &handle);
890 if (ACPI_SUCCESS(status))
891 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
892 }
893
894 /* Evaluate "_PSx" to see if we can do explicit sets */ 887 /* Evaluate "_PSx" to see if we can do explicit sets */
895 object_name[2] = 'S'; 888 object_name[2] = 'S';
896 status = acpi_get_handle(device->handle, object_name, &handle); 889 status = acpi_get_handle(device->handle, object_name, &handle);
897 if (ACPI_SUCCESS(status)) 890 if (ACPI_SUCCESS(status))
898 ps->flags.explicit_set = 1; 891 ps->flags.explicit_set = 1;
899 892
900 /* State is valid if we have some power control */ 893 /*
901 if (ps->resources.count || ps->flags.explicit_set) 894 * State is valid if there are means to put the device into it.
895 * D3hot is only valid if _PR3 present.
896 */
897 if (ps->resources.count ||
898 (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
902 ps->flags.valid = 1; 899 ps->flags.valid = 1;
903 900
904 ps->power = -1; /* Unknown - driver assigned */ 901 ps->power = -1; /* Unknown - driver assigned */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1d661b5c3287..eb6fd233764b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,23 +28,33 @@
28#include "internal.h" 28#include "internal.h"
29#include "sleep.h" 29#include "sleep.h"
30 30
31u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
31static unsigned int gts, bfs; 32static unsigned int gts, bfs;
32module_param(gts, uint, 0644); 33static int set_param_wake_flag(const char *val, struct kernel_param *kp)
33module_param(bfs, uint, 0644);
34MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
35MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
36
37static u8 wake_sleep_flags(void)
38{ 34{
39 u8 flags = ACPI_NO_OPTIONAL_METHODS; 35 int ret = param_set_int(val, kp);
40 36
41 if (gts) 37 if (ret)
42 flags |= ACPI_EXECUTE_GTS; 38 return ret;
43 if (bfs)
44 flags |= ACPI_EXECUTE_BFS;
45 39
46 return flags; 40 if (kp->arg == (const char *)&gts) {
41 if (gts)
42 wake_sleep_flags |= ACPI_EXECUTE_GTS;
43 else
44 wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
45 }
46 if (kp->arg == (const char *)&bfs) {
47 if (bfs)
48 wake_sleep_flags |= ACPI_EXECUTE_BFS;
49 else
50 wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
51 }
52 return ret;
47} 53}
54module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
55module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
56MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
48 58
49static u8 sleep_states[ACPI_S_STATE_COUNT]; 59static u8 sleep_states[ACPI_S_STATE_COUNT];
50 60
@@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
263{ 273{
264 acpi_status status = AE_OK; 274 acpi_status status = AE_OK;
265 u32 acpi_state = acpi_target_sleep_state; 275 u32 acpi_state = acpi_target_sleep_state;
266 u8 flags = wake_sleep_flags();
267 int error; 276 int error;
268 277
269 ACPI_FLUSH_CPU_CACHE(); 278 ACPI_FLUSH_CPU_CACHE();
@@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
271 switch (acpi_state) { 280 switch (acpi_state) {
272 case ACPI_STATE_S1: 281 case ACPI_STATE_S1:
273 barrier(); 282 barrier();
274 status = acpi_enter_sleep_state(acpi_state, flags); 283 status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
275 break; 284 break;
276 285
277 case ACPI_STATE_S3: 286 case ACPI_STATE_S3:
@@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
286 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 295 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
287 296
288 /* Reprogram control registers and execute _BFS */ 297 /* Reprogram control registers and execute _BFS */
289 acpi_leave_sleep_state_prep(acpi_state, flags); 298 acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
290 299
291 /* ACPI 3.0 specs (P62) says that it's the responsibility 300 /* ACPI 3.0 specs (P62) says that it's the responsibility
292 * of the OSPM to clear the status bit [ implying that the 301 * of the OSPM to clear the status bit [ implying that the
@@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void)
550 559
551static int acpi_hibernation_enter(void) 560static int acpi_hibernation_enter(void)
552{ 561{
553 u8 flags = wake_sleep_flags();
554 acpi_status status = AE_OK; 562 acpi_status status = AE_OK;
555 563
556 ACPI_FLUSH_CPU_CACHE(); 564 ACPI_FLUSH_CPU_CACHE();
557 565
558 /* This shouldn't return. If it returns, we have a problem */ 566 /* This shouldn't return. If it returns, we have a problem */
559 status = acpi_enter_sleep_state(ACPI_STATE_S4, flags); 567 status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
560 /* Reprogram control registers and execute _BFS */ 568 /* Reprogram control registers and execute _BFS */
561 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); 569 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
562 570
563 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 571 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
564} 572}
565 573
566static void acpi_hibernation_leave(void) 574static void acpi_hibernation_leave(void)
567{ 575{
568 u8 flags = wake_sleep_flags();
569
570 /* 576 /*
571 * If ACPI is not enabled by the BIOS and the boot kernel, we need to 577 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
572 * enable it here. 578 * enable it here.
573 */ 579 */
574 acpi_enable(); 580 acpi_enable();
575 /* Reprogram control registers and execute _BFS */ 581 /* Reprogram control registers and execute _BFS */
576 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); 582 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
577 /* Check the hardware signature */ 583 /* Check the hardware signature */
578 if (facs && s4_hardware_signature != facs->hardware_signature) { 584 if (facs && s4_hardware_signature != facs->hardware_signature) {
579 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 585 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void)
828 834
829static void acpi_power_off(void) 835static void acpi_power_off(void)
830{ 836{
831 u8 flags = wake_sleep_flags();
832
833 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 837 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
834 printk(KERN_DEBUG "%s called\n", __func__); 838 printk(KERN_DEBUG "%s called\n", __func__);
835 local_irq_disable(); 839 local_irq_disable();
836 acpi_enter_sleep_state(ACPI_STATE_S5, flags); 840 acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
837} 841}
838 842
839/* 843/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 79a1e9dd56d9..ebaf67e4b2bc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
394 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 394 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
395 { PCI_DEVICE(0x1b4b, 0x9125), 395 { PCI_DEVICE(0x1b4b, 0x9125),
396 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ 396 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
397 { PCI_DEVICE(0x1b4b, 0x917a),
398 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
397 { PCI_DEVICE(0x1b4b, 0x91a3), 399 { PCI_DEVICE(0x1b4b, 0x91a3),
398 .driver_data = board_ahci_yes_fbs }, 400 .driver_data = board_ahci_yes_fbs },
399 401
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 0c86c77764bc..9e419e1c2006 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = {
280 280
281static const struct of_device_id ahci_of_match[] = { 281static const struct of_device_id ahci_of_match[] = {
282 { .compatible = "calxeda,hb-ahci", }, 282 { .compatible = "calxeda,hb-ahci", },
283 { .compatible = "snps,spear-ahci", },
283 {}, 284 {},
284}; 285};
285MODULE_DEVICE_TABLE(of, ahci_of_match); 286MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28db50b57b91..23763a1ec570 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
95static void ata_dev_xfermask(struct ata_device *dev); 95static void ata_dev_xfermask(struct ata_device *dev);
96static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 96static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 97
98atomic_t ata_print_id = ATOMIC_INIT(1); 98atomic_t ata_print_id = ATOMIC_INIT(0);
99 99
100struct ata_force_param { 100struct ata_force_param {
101 const char *name; 101 const char *name;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c61316e9d2f7..d1fbd59ead16 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3501,7 +3501,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
3501 u64 now = get_jiffies_64(); 3501 u64 now = get_jiffies_64();
3502 int *trials = void_arg; 3502 int *trials = void_arg;
3503 3503
3504 if (ent->timestamp < now - min(now, interval)) 3504 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3505 (ent->timestamp < now - min(now, interval)))
3505 return -1; 3506 return -1;
3506 3507
3507 (*trials)++; 3508 (*trials)++;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 93dabdcd2cbe..22226350cd0c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3399 */ 3399 */
3400 shost->max_host_blocked = 1; 3400 shost->max_host_blocked = 1;
3401 3401
3402 rc = scsi_add_host(ap->scsi_host, &ap->tdev); 3402 rc = scsi_add_host_with_dma(ap->scsi_host,
3403 &ap->tdev, ap->host->dev);
3403 if (rc) 3404 if (rc)
3404 goto err_add; 3405 goto err_add;
3405 } 3406 }
@@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap)
3838} 3839}
3839EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3840EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3840 3841
3841int ata_sas_async_port_init(struct ata_port *ap) 3842/**
3843 * ata_sas_async_probe - simply schedule probing and return
3844 * @ap: Port to probe
3845 *
3846 * For batch scheduling of probe for sas attached ata devices, assumes
3847 * the port has already been through ata_sas_port_init()
3848 */
3849void ata_sas_async_probe(struct ata_port *ap)
3842{ 3850{
3843 int rc = ap->ops->port_start(ap); 3851 __ata_port_probe(ap);
3844 3852}
3845 if (!rc) { 3853EXPORT_SYMBOL_GPL(ata_sas_async_probe);
3846 ap->print_id = atomic_inc_return(&ata_print_id);
3847 __ata_port_probe(ap);
3848 }
3849 3854
3850 return rc; 3855int ata_sas_sync_probe(struct ata_port *ap)
3856{
3857 return ata_port_probe(ap);
3851} 3858}
3852EXPORT_SYMBOL_GPL(ata_sas_async_port_init); 3859EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
3860
3853 3861
3854/** 3862/**
3855 * ata_sas_port_init - Initialize a SATA device 3863 * ata_sas_port_init - Initialize a SATA device
@@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap)
3866{ 3874{
3867 int rc = ap->ops->port_start(ap); 3875 int rc = ap->ops->port_start(ap);
3868 3876
3869 if (!rc) { 3877 if (rc)
3870 ap->print_id = atomic_inc_return(&ata_print_id); 3878 return rc;
3871 rc = ata_port_probe(ap); 3879 ap->print_id = atomic_inc_return(&ata_print_id);
3872 } 3880 return 0;
3873
3874 return rc;
3875} 3881}
3876EXPORT_SYMBOL_GPL(ata_sas_port_init); 3882EXPORT_SYMBOL_GPL(ata_sas_port_init);
3877 3883
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index fc2db2a89a6b..3239517f4d90 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev)
943 943
944 return 0; 944 return 0;
945} 945}
946#endif
946 947
947static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume); 948static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
948#endif
949 949
950static struct platform_driver arasan_cf_driver = { 950static struct platform_driver arasan_cf_driver = {
951 .probe = arasan_cf_probe, 951 .probe = arasan_cf_probe,
@@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = {
953 .driver = { 953 .driver = {
954 .name = DRIVER_NAME, 954 .name = DRIVER_NAME,
955 .owner = THIS_MODULE, 955 .owner = THIS_MODULE,
956#ifdef CONFIG_PM
957 .pm = &arasan_cf_pm_ops, 956 .pm = &arasan_cf_pm_ops,
958#endif
959 }, 957 },
960}; 958};
961 959
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7a3f535e481c..bb80853ff27a 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
775 map->format.parse_val(val + i); 775 map->format.parse_val(val + i);
776 } else { 776 } else {
777 for (i = 0; i < val_count; i++) { 777 for (i = 0; i < val_count; i++) {
778 ret = regmap_read(map, reg + i, val + (i * val_bytes)); 778 unsigned int ival;
779 ret = regmap_read(map, reg + i, &ival);
779 if (ret != 0) 780 if (ret != 0)
780 return ret; 781 return ret;
782 memcpy(val + (i * val_bytes), &ival, val_bytes);
781 } 783 }
782 } 784 }
783 785
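
The regmap_bulk_read() change above exists because regmap_read() always stores a full unsigned int, while the caller's buffer is packed at val_bytes per element; bouncing through a local value and copying only val_bytes keeps neighbouring slots intact. Below is a minimal user-space sketch of that pattern — fake_read(), the buffer layout and the sizes are illustrative assumptions, not the regmap API.

/*
 * Minimal user-space sketch (not kernel code) of the pattern used above:
 * read a full-width value into a local, then copy only val_bytes of it
 * into the caller's packed buffer.
 */
#include <stdio.h>
#include <string.h>

/* stand-in for regmap_read(): always produces a full unsigned int */
static int fake_read(unsigned int reg, unsigned int *out)
{
	*out = 0xA0 + reg;		/* pretend register content */
	return 0;
}

int main(void)
{
	unsigned char buf[4];		/* packed buffer of 1-byte values */
	const size_t val_bytes = 1;	/* device uses 8-bit registers */
	unsigned int ival;
	size_t i;

	for (i = 0; i < sizeof(buf); i++) {
		if (fake_read(i, &ival))
			return 1;
		/* copy only val_bytes, so neighbouring slots stay untouched */
		memcpy(buf + i * val_bytes, &ival, val_bytes);
	}

	for (i = 0; i < sizeof(buf); i++)
		printf("reg %zu = 0x%02x\n", i, buf[i]);
	return 0;
}
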
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index cdcf75c0954f..3e2a6002aae6 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus)
404 return -EOPNOTSUPP; 404 return -EOPNOTSUPP;
405 405
406 if (!bcma_sprom_ext_available(bus)) { 406 if (!bcma_sprom_ext_available(bus)) {
407 bool sprom_onchip;
408
407 /* 409 /*
408 * External SPROM takes precedence so check 410 * External SPROM takes precedence so check
409 * on-chip OTP only when no external SPROM 411 * on-chip OTP only when no external SPROM
410 * is present. 412 * is present.
411 */ 413 */
412 if (bcma_sprom_onchip_available(bus)) { 414 sprom_onchip = bcma_sprom_onchip_available(bus);
415 if (sprom_onchip) {
413 /* determine offset */ 416 /* determine offset */
414 offset = bcma_sprom_onchip_offset(bus); 417 offset = bcma_sprom_onchip_offset(bus);
415 } 418 }
416 if (!offset) { 419 if (!offset || !sprom_onchip) {
417 /* 420 /*
418 * Maybe there is no SPROM on the device? 421 * Maybe there is no SPROM on the device?
419 * Now we ask the arch code if there is some sprom 422 * Now we ask the arch code if there is some sprom
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index abfaacaaf346..946166e13953 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
2297 return; 2297 return;
2298 } 2298 }
2299 2299
2300 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { 2300 if (!capable(CAP_SYS_ADMIN)) {
2301 retcode = ERR_PERM; 2301 retcode = ERR_PERM;
2302 goto fail; 2302 goto fail;
2303 } 2303 }
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 89860f34a7ec..4f66171c6683 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -416,7 +416,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
416 "discard-secure", "%d", 416 "discard-secure", "%d",
417 blkif->vbd.discard_secure); 417 blkif->vbd.discard_secure);
418 if (err) { 418 if (err) {
419 dev_warn(dev-dev, "writing discard-secure (%d)", err); 419 dev_warn(&dev->dev, "writing discard-secure (%d)", err);
420 return; 420 return;
421 } 421 }
422 } 422 }
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ae9edca7b56d..57fd867553d7 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = {
75 { USB_DEVICE(0x0CF3, 0x311D) }, 75 { USB_DEVICE(0x0CF3, 0x311D) },
76 { USB_DEVICE(0x13d3, 0x3375) }, 76 { USB_DEVICE(0x13d3, 0x3375) },
77 { USB_DEVICE(0x04CA, 0x3005) }, 77 { USB_DEVICE(0x04CA, 0x3005) },
78 { USB_DEVICE(0x13d3, 0x3362) },
79 { USB_DEVICE(0x0CF3, 0xE004) },
78 80
79 /* Atheros AR5BBU12 with sflash firmware */ 81 /* Atheros AR5BBU12 with sflash firmware */
80 { USB_DEVICE(0x0489, 0xE02C) }, 82 { USB_DEVICE(0x0489, 0xE02C) },
@@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
94 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 96 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
95 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 97 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
96 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 98 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
99 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
100 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
97 101
98 { } /* Terminating entry */ 102 { } /* Terminating entry */
99}; 103};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3311b812a0c6..9217121362e1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = {
101 { USB_DEVICE(0x0c10, 0x0000) }, 101 { USB_DEVICE(0x0c10, 0x0000) },
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0489, 0xe042) },
104 { USB_DEVICE(0x0a5c, 0x21e3) }, 105 { USB_DEVICE(0x0a5c, 0x21e3) },
105 { USB_DEVICE(0x0a5c, 0x21e6) }, 106 { USB_DEVICE(0x0a5c, 0x21e6) },
106 { USB_DEVICE(0x0a5c, 0x21e8) }, 107 { USB_DEVICE(0x0a5c, 0x21e8) },
107 { USB_DEVICE(0x0a5c, 0x21f3) }, 108 { USB_DEVICE(0x0a5c, 0x21f3) },
108 { USB_DEVICE(0x413c, 0x8197) }, 109 { USB_DEVICE(0x413c, 0x8197) },
109 110
111 /* Foxconn - Hon Hai */
112 { USB_DEVICE(0x0489, 0xe033) },
113
110 { } /* Terminating entry */ 114 { } /* Terminating entry */
111}; 115};
112 116
@@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = {
133 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 138 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
140 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
136 142
137 /* Atheros AR5BBU12 with sflash firmware */ 143 /* Atheros AR5BBU12 with sflash firmware */
138 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 144 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 0053d7ebb5ca..8f3f74ce8c7f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/gfp.h> 20#include <linux/gfp.h>
21#include <linux/module.h>
21 22
22#include <crypto/ctr.h> 23#include <crypto/ctr.h>
23#include <crypto/des.h> 24#include <crypto/des.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc641c796526..921039e56f87 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -124,6 +124,9 @@ struct talitos_private {
124 void __iomem *reg; 124 void __iomem *reg;
125 int irq[2]; 125 int irq[2];
126 126
127 /* SEC global registers lock */
128 spinlock_t reg_lock ____cacheline_aligned;
129
127 /* SEC version geometry (from device tree node) */ 130 /* SEC version geometry (from device tree node) */
128 unsigned int num_channels; 131 unsigned int num_channels;
129 unsigned int chfifo_len; 132 unsigned int chfifo_len;
@@ -412,6 +415,7 @@ static void talitos_done_##name(unsigned long data) \
412{ \ 415{ \
413 struct device *dev = (struct device *)data; \ 416 struct device *dev = (struct device *)data; \
414 struct talitos_private *priv = dev_get_drvdata(dev); \ 417 struct talitos_private *priv = dev_get_drvdata(dev); \
418 unsigned long flags; \
415 \ 419 \
416 if (ch_done_mask & 1) \ 420 if (ch_done_mask & 1) \
417 flush_channel(dev, 0, 0, 0); \ 421 flush_channel(dev, 0, 0, 0); \
@@ -427,8 +431,10 @@ static void talitos_done_##name(unsigned long data) \
427out: \ 431out: \
428 /* At this point, all completed channels have been processed */ \ 432 /* At this point, all completed channels have been processed */ \
429 /* Unmask done interrupts for channels completed later on. */ \ 433 /* Unmask done interrupts for channels completed later on. */ \
434 spin_lock_irqsave(&priv->reg_lock, flags); \
430 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 435 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
431 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ 436 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
437 spin_unlock_irqrestore(&priv->reg_lock, flags); \
432} 438}
433DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) 439DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
434DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) 440DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
@@ -619,22 +625,28 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
619 struct device *dev = data; \ 625 struct device *dev = data; \
620 struct talitos_private *priv = dev_get_drvdata(dev); \ 626 struct talitos_private *priv = dev_get_drvdata(dev); \
621 u32 isr, isr_lo; \ 627 u32 isr, isr_lo; \
628 unsigned long flags; \
622 \ 629 \
630 spin_lock_irqsave(&priv->reg_lock, flags); \
623 isr = in_be32(priv->reg + TALITOS_ISR); \ 631 isr = in_be32(priv->reg + TALITOS_ISR); \
624 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ 632 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
625 /* Acknowledge interrupt */ \ 633 /* Acknowledge interrupt */ \
626 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ 634 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
627 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ 635 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
628 \ 636 \
629 if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \ 637 if (unlikely(isr & ch_err_mask || isr_lo)) { \
630 talitos_error(dev, isr, isr_lo); \ 638 spin_unlock_irqrestore(&priv->reg_lock, flags); \
631 else \ 639 talitos_error(dev, isr & ch_err_mask, isr_lo); \
640 } \
641 else { \
632 if (likely(isr & ch_done_mask)) { \ 642 if (likely(isr & ch_done_mask)) { \
633 /* mask further done interrupts. */ \ 643 /* mask further done interrupts. */ \
634 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 644 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
635 /* done_task will unmask done interrupts at exit */ \ 645 /* done_task will unmask done interrupts at exit */ \
636 tasklet_schedule(&priv->done_task[tlet]); \ 646 tasklet_schedule(&priv->done_task[tlet]); \
637 } \ 647 } \
648 spin_unlock_irqrestore(&priv->reg_lock, flags); \
649 } \
638 \ 650 \
639 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ 651 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
640 IRQ_NONE; \ 652 IRQ_NONE; \
@@ -2719,6 +2731,8 @@ static int talitos_probe(struct platform_device *ofdev)
2719 2731
2720 priv->ofdev = ofdev; 2732 priv->ofdev = ofdev;
2721 2733
2734 spin_lock_init(&priv->reg_lock);
2735
2722 err = talitos_probe_irq(ofdev); 2736 err = talitos_probe_irq(ofdev);
2723 if (err) 2737 if (err)
2724 goto err_out; 2738 goto err_out;
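
The talitos hunks above put every read-modify-write of the interrupt mask registers behind the new reg_lock, because both the interrupt handler (which masks done interrupts before scheduling the tasklet) and the done tasklet (which unmasks them on exit) touch the same bits. A rough user-space analogue of that locking pattern follows; all names here are illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int imr = 0xf;		/* pretend interrupt-mask register */

static void irq_path(unsigned int ch_done_mask)
{
	pthread_mutex_lock(&reg_lock);
	imr &= ~ch_done_mask;		/* mask further done interrupts */
	pthread_mutex_unlock(&reg_lock);
	/* ...schedule the done work here... */
}

static void done_path(unsigned int ch_done_mask)
{
	/* ...process completed channels here... */
	pthread_mutex_lock(&reg_lock);
	imr |= ch_done_mask;		/* unmask for later completions */
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	irq_path(0x1);
	done_path(0x1);
	printf("imr = 0x%x\n", imr);
	return 0;
}
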
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da362d64f..ef378b5b17e4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@ config DW_DMAC
91 91
92config AT_HDMAC 92config AT_HDMAC
93 tristate "Atmel AHB DMA support" 93 tristate "Atmel AHB DMA support"
94 depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 94 depends on ARCH_AT91
95 select DMA_ENGINE 95 select DMA_ENGINE
96 help 96 help
97 Support the Atmel AHB DMA controller. This can be integrated in 97 Support the Atmel AHB DMA controller.
98 chips such as the Atmel AT91SAM9RL.
99 98
100config FSL_DMA 99config FSL_DMA
101 tristate "Freescale Elo and Elo Plus DMA support" 100 tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8ec31aa..3d704abd7912 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1429 * signal 1429 * signal
1430 */ 1430 */
1431 release_phy_channel(plchan); 1431 release_phy_channel(plchan);
1432 plchan->phychan_hold = 0;
1432 } 1433 }
1433 /* Dequeue jobs and free LLIs */ 1434 /* Dequeue jobs and free LLIs */
1434 if (plchan->at) { 1435 if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d204892..445fdf811695 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
221 221
222 vdbg_dump_regs(atchan); 222 vdbg_dump_regs(atchan);
223 223
224 /* clear any pending interrupt */
225 while (dma_readl(atdma, EBCISR))
226 cpu_relax();
227
228 channel_writel(atchan, SADDR, 0); 224 channel_writel(atchan, SADDR, 0);
229 channel_writel(atchan, DADDR, 0); 225 channel_writel(atchan, DADDR, 0);
230 channel_writel(atchan, CTRLA, 0); 226 channel_writel(atchan, CTRLA, 0);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2a5987..bb787d8e1529 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
571 if (desc->desc.callback) 571 if (desc->desc.callback)
572 desc->desc.callback(desc->desc.callback_param); 572 desc->desc.callback(desc->desc.callback_param);
573 573
574 dma_cookie_complete(&desc->desc); 574 /* If we are dealing with a cyclic descriptor keep it on ld_active
575 575 * and dont mark the descripor as complete.
576 /* If we are dealing with a cyclic descriptor keep it on ld_active */ 576 * Only in non-cyclic cases it would be marked as complete
577 */
577 if (imxdma_chan_is_doing_cyclic(imxdmac)) 578 if (imxdma_chan_is_doing_cyclic(imxdmac))
578 goto out; 579 goto out;
580 else
581 dma_cookie_complete(&desc->desc);
579 582
580 /* Free 2D slot if it was an interleaved transfer */ 583 /* Free 2D slot if it was an interleaved transfer */
581 if (imxdmac->enabled_2d) { 584 if (imxdmac->enabled_2d) {
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e10e08..655d4ce6ed0d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
201 201
202static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 202static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
203{ 203{
204 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
205
206 mxs_dma_enable_chan(mxs_chan);
207
208 return dma_cookie_assign(tx); 204 return dma_cookie_assign(tx);
209} 205}
210 206
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
558 554
559static void mxs_dma_issue_pending(struct dma_chan *chan) 555static void mxs_dma_issue_pending(struct dma_chan *chan)
560{ 556{
561 /* 557 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
562 * Nothing to do. We only have a single descriptor. 558
563 */ 559 mxs_dma_enable_chan(mxs_chan);
564} 560}
565 561
566static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 562static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf118be8..2ee6e23930ad 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
2225{ 2225{
2226 struct dma_pl330_dmac *pdmac; 2226 struct dma_pl330_dmac *pdmac;
2227 struct dma_pl330_desc *desc; 2227 struct dma_pl330_desc *desc;
2228 struct dma_pl330_chan *pch; 2228 struct dma_pl330_chan *pch = NULL;
2229 unsigned long flags; 2229 unsigned long flags;
2230 2230
2231 if (list_empty(list))
2232 return;
2233
2234 /* Finish off the work list */ 2231 /* Finish off the work list */
2235 list_for_each_entry(desc, list, node) { 2232 list_for_each_entry(desc, list, node) {
2236 dma_async_tx_callback callback; 2233 dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
2247 desc->pchan = NULL; 2244 desc->pchan = NULL;
2248 } 2245 }
2249 2246
2247 /* pch will be unset if list was empty */
2248 if (!pch)
2249 return;
2250
2250 pdmac = pch->dmac; 2251 pdmac = pch->dmac;
2251 2252
2252 spin_lock_irqsave(&pdmac->pool_lock, flags); 2253 spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
2257static inline void handle_cyclic_desc_list(struct list_head *list) 2258static inline void handle_cyclic_desc_list(struct list_head *list)
2258{ 2259{
2259 struct dma_pl330_desc *desc; 2260 struct dma_pl330_desc *desc;
2260 struct dma_pl330_chan *pch; 2261 struct dma_pl330_chan *pch = NULL;
2261 unsigned long flags; 2262 unsigned long flags;
2262 2263
2263 if (list_empty(list))
2264 return;
2265
2266 list_for_each_entry(desc, list, node) { 2264 list_for_each_entry(desc, list, node) {
2267 dma_async_tx_callback callback; 2265 dma_async_tx_callback callback;
2268 2266
@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
2274 callback(desc->txd.callback_param); 2272 callback(desc->txd.callback_param);
2275 } 2273 }
2276 2274
2275 /* pch will be unset if list was empty */
2276 if (!pch)
2277 return;
2278
2277 spin_lock_irqsave(&pch->lock, flags); 2279 spin_lock_irqsave(&pch->lock, flags);
2278 list_splice_tail_init(list, &pch->work_list); 2280 list_splice_tail_init(list, &pch->work_list);
2279 spin_unlock_irqrestore(&pch->lock, flags); 2281 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2926 INIT_LIST_HEAD(&pd->channels); 2928 INIT_LIST_HEAD(&pd->channels);
2927 2929
2928 /* Initialize channel parameters */ 2930 /* Initialize channel parameters */
2929 num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri, 2931 if (pdat)
2930 (u8)pi->pcfg.num_chan); 2932 num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
2933 else
2934 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
2935
2931 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); 2936 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
2932 2937
2933 for (i = 0; i < num_chan; i++) { 2938 for (i = 0; i < num_chan; i++) {
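
The pl330 hunks above drop the early list_empty() checks and instead let pch stay NULL when the loop body never runs, bailing out only afterwards. A stand-alone sketch of that sentinel pattern on a plain linked list is below; names and types are illustrative only.

#include <stdio.h>
#include <stddef.h>

struct desc {
	struct desc *next;
	int chan;		/* stand-in for desc->pchan */
};

static void consume(struct desc *head)
{
	struct desc *d;
	int chan = -1;		/* stays -1 if the list was empty */

	for (d = head; d; d = d->next)
		chan = d->chan;	/* remember the owning channel */

	if (chan < 0) {
		puts("empty list, nothing to free");
		return;
	}
	printf("freeing descriptors of channel %d\n", chan);
}

int main(void)
{
	struct desc b = { NULL, 7 }, a = { &b, 7 };

	consume(NULL);		/* empty case */
	consume(&a);		/* non-empty case */
	return 0;
}
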
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4bfa8d..2ed1ac3513f3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/amba/bus.h> 20#include <linux/amba/bus.h>
21#include <linux/regulator/consumer.h>
21 22
22#include <plat/ste_dma40.h> 23#include <plat/ste_dma40.h>
23 24
@@ -69,6 +70,22 @@ enum d40_command {
69}; 70};
70 71
71/* 72/*
73 * enum d40_events - The different Event Enables for the event lines.
74 *
75 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
76 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
77 * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
78 * @D40_ROUND_EVENTLINE: Status check for event line.
79 */
80
81enum d40_events {
82 D40_DEACTIVATE_EVENTLINE = 0,
83 D40_ACTIVATE_EVENTLINE = 1,
84 D40_SUSPEND_REQ_EVENTLINE = 2,
85 D40_ROUND_EVENTLINE = 3
86};
87
88/*
72 * These are the registers that has to be saved and later restored 89 * These are the registers that has to be saved and later restored
73 * when the DMA hw is powered off. 90 * when the DMA hw is powered off.
74 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 91 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
870} 887}
871#endif 888#endif
872 889
873static int d40_channel_execute_command(struct d40_chan *d40c, 890static int __d40_execute_command_phy(struct d40_chan *d40c,
874 enum d40_command command) 891 enum d40_command command)
875{ 892{
876 u32 status; 893 u32 status;
877 int i; 894 int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
880 unsigned long flags; 897 unsigned long flags;
881 u32 wmask; 898 u32 wmask;
882 899
900 if (command == D40_DMA_STOP) {
901 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
902 if (ret)
903 return ret;
904 }
905
883 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 906 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
884 907
885 if (d40c->phy_chan->num % 2 == 0) 908 if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
973 } 996 }
974 997
975 d40c->pending_tx = 0; 998 d40c->pending_tx = 0;
976 d40c->busy = false;
977} 999}
978 1000
979static void __d40_config_set_event(struct d40_chan *d40c, bool enable, 1001static void __d40_config_set_event(struct d40_chan *d40c,
980 u32 event, int reg) 1002 enum d40_events event_type, u32 event,
1003 int reg)
981{ 1004{
982 void __iomem *addr = chan_base(d40c) + reg; 1005 void __iomem *addr = chan_base(d40c) + reg;
983 int tries; 1006 int tries;
1007 u32 status;
1008
1009 switch (event_type) {
1010
1011 case D40_DEACTIVATE_EVENTLINE:
984 1012
985 if (!enable) {
986 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1013 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
987 | ~D40_EVENTLINE_MASK(event), addr); 1014 | ~D40_EVENTLINE_MASK(event), addr);
988 return; 1015 break;
989 } 1016
1017 case D40_SUSPEND_REQ_EVENTLINE:
1018 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1019 D40_EVENTLINE_POS(event);
1020
1021 if (status == D40_DEACTIVATE_EVENTLINE ||
1022 status == D40_SUSPEND_REQ_EVENTLINE)
1023 break;
990 1024
1025 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1026 | ~D40_EVENTLINE_MASK(event), addr);
1027
1028 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1029
1030 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1031 D40_EVENTLINE_POS(event);
1032
1033 cpu_relax();
1034 /*
1035 * Reduce the number of bus accesses while
1036 * waiting for the DMA to suspend.
1037 */
1038 udelay(3);
1039
1040 if (status == D40_DEACTIVATE_EVENTLINE)
1041 break;
1042 }
1043
1044 if (tries == D40_SUSPEND_MAX_IT) {
1045 chan_err(d40c,
1046 "unable to stop the event_line chl %d (log: %d)"
1047 "status %x\n", d40c->phy_chan->num,
1048 d40c->log_num, status);
1049 }
1050 break;
1051
1052 case D40_ACTIVATE_EVENTLINE:
991 /* 1053 /*
992 * The hardware sometimes doesn't register the enable when src and dst 1054 * The hardware sometimes doesn't register the enable when src and dst
993 * event lines are active on the same logical channel. Retry to ensure 1055 * event lines are active on the same logical channel. Retry to ensure
994 * it does. Usually only one retry is sufficient. 1056 * it does. Usually only one retry is sufficient.
995 */ 1057 */
996 tries = 100; 1058 tries = 100;
997 while (--tries) { 1059 while (--tries) {
998 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1060 writel((D40_ACTIVATE_EVENTLINE <<
999 | ~D40_EVENTLINE_MASK(event), addr); 1061 D40_EVENTLINE_POS(event)) |
1062 ~D40_EVENTLINE_MASK(event), addr);
1000 1063
1001 if (readl(addr) & D40_EVENTLINE_MASK(event)) 1064 if (readl(addr) & D40_EVENTLINE_MASK(event))
1002 break; 1065 break;
1003 } 1066 }
1004 1067
1005 if (tries != 99) 1068 if (tries != 99)
1006 dev_dbg(chan2dev(d40c), 1069 dev_dbg(chan2dev(d40c),
1007 "[%s] workaround enable S%cLNK (%d tries)\n", 1070 "[%s] workaround enable S%cLNK (%d tries)\n",
1008 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 1071 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1009 100 - tries); 1072 100 - tries);
1010 1073
1011 WARN_ON(!tries); 1074 WARN_ON(!tries);
1012} 1075 break;
1013 1076
1014static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 1077 case D40_ROUND_EVENTLINE:
1015{ 1078 BUG();
1016 unsigned long flags; 1079 break;
1017 1080
1018 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 1081 }
1082}
1019 1083
1084static void d40_config_set_event(struct d40_chan *d40c,
1085 enum d40_events event_type)
1086{
1020 /* Enable event line connected to device (or memcpy) */ 1087 /* Enable event line connected to device (or memcpy) */
1021 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 1088 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1022 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 1089 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1023 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1090 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1024 1091
1025 __d40_config_set_event(d40c, do_enable, event, 1092 __d40_config_set_event(d40c, event_type, event,
1026 D40_CHAN_REG_SSLNK); 1093 D40_CHAN_REG_SSLNK);
1027 } 1094 }
1028 1095
1029 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 1096 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
1030 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1097 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1031 1098
1032 __d40_config_set_event(d40c, do_enable, event, 1099 __d40_config_set_event(d40c, event_type, event,
1033 D40_CHAN_REG_SDLNK); 1100 D40_CHAN_REG_SDLNK);
1034 } 1101 }
1035
1036 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1037} 1102}
1038 1103
1039static u32 d40_chan_has_events(struct d40_chan *d40c) 1104static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
1047 return val; 1112 return val;
1048} 1113}
1049 1114
1115static int
1116__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1117{
1118 unsigned long flags;
1119 int ret = 0;
1120 u32 active_status;
1121 void __iomem *active_reg;
1122
1123 if (d40c->phy_chan->num % 2 == 0)
1124 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1125 else
1126 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1127
1128
1129 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1130
1131 switch (command) {
1132 case D40_DMA_STOP:
1133 case D40_DMA_SUSPEND_REQ:
1134
1135 active_status = (readl(active_reg) &
1136 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1137 D40_CHAN_POS(d40c->phy_chan->num);
1138
1139 if (active_status == D40_DMA_RUN)
1140 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1141 else
1142 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1143
1144 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1145 ret = __d40_execute_command_phy(d40c, command);
1146
1147 break;
1148
1149 case D40_DMA_RUN:
1150
1151 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1152 ret = __d40_execute_command_phy(d40c, command);
1153 break;
1154
1155 case D40_DMA_SUSPENDED:
1156 BUG();
1157 break;
1158 }
1159
1160 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1161 return ret;
1162}
1163
1164static int d40_channel_execute_command(struct d40_chan *d40c,
1165 enum d40_command command)
1166{
1167 if (chan_is_logical(d40c))
1168 return __d40_execute_command_log(d40c, command);
1169 else
1170 return __d40_execute_command_phy(d40c, command);
1171}
1172
1050static u32 d40_get_prmo(struct d40_chan *d40c) 1173static u32 d40_get_prmo(struct d40_chan *d40c)
1051{ 1174{
1052 static const unsigned int phy_map[] = { 1175 static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
1149 spin_lock_irqsave(&d40c->lock, flags); 1272 spin_lock_irqsave(&d40c->lock, flags);
1150 1273
1151 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1274 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1152 if (res == 0) { 1275
1153 if (chan_is_logical(d40c)) {
1154 d40_config_set_event(d40c, false);
1155 /* Resume the other logical channels if any */
1156 if (d40_chan_has_events(d40c))
1157 res = d40_channel_execute_command(d40c,
1158 D40_DMA_RUN);
1159 }
1160 }
1161 pm_runtime_mark_last_busy(d40c->base->dev); 1276 pm_runtime_mark_last_busy(d40c->base->dev);
1162 pm_runtime_put_autosuspend(d40c->base->dev); 1277 pm_runtime_put_autosuspend(d40c->base->dev);
1163 spin_unlock_irqrestore(&d40c->lock, flags); 1278 spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
1174 1289
1175 spin_lock_irqsave(&d40c->lock, flags); 1290 spin_lock_irqsave(&d40c->lock, flags);
1176 pm_runtime_get_sync(d40c->base->dev); 1291 pm_runtime_get_sync(d40c->base->dev);
1177 if (d40c->base->rev == 0)
1178 if (chan_is_logical(d40c)) {
1179 res = d40_channel_execute_command(d40c,
1180 D40_DMA_SUSPEND_REQ);
1181 goto no_suspend;
1182 }
1183 1292
1184 /* If bytes left to transfer or linked tx resume job */ 1293 /* If bytes left to transfer or linked tx resume job */
1185 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1294 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1186
1187 if (chan_is_logical(d40c))
1188 d40_config_set_event(d40c, true);
1189
1190 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1295 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1191 }
1192 1296
1193no_suspend:
1194 pm_runtime_mark_last_busy(d40c->base->dev); 1297 pm_runtime_mark_last_busy(d40c->base->dev);
1195 pm_runtime_put_autosuspend(d40c->base->dev); 1298 pm_runtime_put_autosuspend(d40c->base->dev);
1196 spin_unlock_irqrestore(&d40c->lock, flags); 1299 spin_unlock_irqrestore(&d40c->lock, flags);
1197 return res; 1300 return res;
1198} 1301}
1199 1302
1200static int d40_terminate_all(struct d40_chan *chan)
1201{
1202 unsigned long flags;
1203 int ret = 0;
1204
1205 ret = d40_pause(chan);
1206 if (!ret && chan_is_physical(chan))
1207 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
1208
1209 spin_lock_irqsave(&chan->lock, flags);
1210 d40_term_all(chan);
1211 spin_unlock_irqrestore(&chan->lock, flags);
1212
1213 return ret;
1214}
1215
1216static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1303static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1217{ 1304{
1218 struct d40_chan *d40c = container_of(tx->chan, 1305 struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1232 1319
1233static int d40_start(struct d40_chan *d40c) 1320static int d40_start(struct d40_chan *d40c)
1234{ 1321{
1235 if (d40c->base->rev == 0) {
1236 int err;
1237
1238 if (chan_is_logical(d40c)) {
1239 err = d40_channel_execute_command(d40c,
1240 D40_DMA_SUSPEND_REQ);
1241 if (err)
1242 return err;
1243 }
1244 }
1245
1246 if (chan_is_logical(d40c))
1247 d40_config_set_event(d40c, true);
1248
1249 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1322 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1250} 1323}
1251 1324
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1258 d40d = d40_first_queued(d40c); 1331 d40d = d40_first_queued(d40c);
1259 1332
1260 if (d40d != NULL) { 1333 if (d40d != NULL) {
1261 if (!d40c->busy) 1334 if (!d40c->busy) {
1262 d40c->busy = true; 1335 d40c->busy = true;
1263 1336 pm_runtime_get_sync(d40c->base->dev);
1264 pm_runtime_get_sync(d40c->base->dev); 1337 }
1265 1338
1266 /* Remove from queue */ 1339 /* Remove from queue */
1267 d40_desc_remove(d40d); 1340 d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
1388 1461
1389 return; 1462 return;
1390 1463
1391 err: 1464err:
1392 /* Rescue manoeuvre if receiving double interrupts */ 1465 /* Rescue manouver if receiving double interrupts */
1393 if (d40c->pending_tx > 0) 1466 if (d40c->pending_tx > 0)
1394 d40c->pending_tx--; 1467 d40c->pending_tx--;
1395 spin_unlock_irqrestore(&d40c->lock, flags); 1468 spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
1770 return 0; 1843 return 0;
1771} 1844}
1772 1845
1773
1774static int d40_free_dma(struct d40_chan *d40c) 1846static int d40_free_dma(struct d40_chan *d40c)
1775{ 1847{
1776 1848
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
1806 } 1878 }
1807 1879
1808 pm_runtime_get_sync(d40c->base->dev); 1880 pm_runtime_get_sync(d40c->base->dev);
1809 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1881 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1810 if (res) { 1882 if (res) {
1811 chan_err(d40c, "suspend failed\n"); 1883 chan_err(d40c, "stop failed\n");
1812 goto out; 1884 goto out;
1813 } 1885 }
1814 1886
1815 if (chan_is_logical(d40c)) { 1887 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
1816 /* Release logical channel, deactivate the event line */
1817 1888
1818 d40_config_set_event(d40c, false); 1889 if (chan_is_logical(d40c))
1819 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 1890 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1820 1891 else
1821 /* 1892 d40c->base->lookup_phy_chans[phy->num] = NULL;
1822 * Check if there are more logical allocation
1823 * on this phy channel.
1824 */
1825 if (!d40_alloc_mask_free(phy, is_src, event)) {
1826 /* Resume the other logical channels if any */
1827 if (d40_chan_has_events(d40c)) {
1828 res = d40_channel_execute_command(d40c,
1829 D40_DMA_RUN);
1830 if (res)
1831 chan_err(d40c,
1832 "Executing RUN command\n");
1833 }
1834 goto out;
1835 }
1836 } else {
1837 (void) d40_alloc_mask_free(phy, is_src, 0);
1838 }
1839
1840 /* Release physical channel */
1841 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1842 if (res) {
1843 chan_err(d40c, "Failed to stop channel\n");
1844 goto out;
1845 }
1846 1893
1847 if (d40c->busy) { 1894 if (d40c->busy) {
1848 pm_runtime_mark_last_busy(d40c->base->dev); 1895 pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
1852 d40c->busy = false; 1899 d40c->busy = false;
1853 d40c->phy_chan = NULL; 1900 d40c->phy_chan = NULL;
1854 d40c->configured = false; 1901 d40c->configured = false;
1855 d40c->base->lookup_phy_chans[phy->num] = NULL;
1856out: 1902out:
1857 1903
1858 pm_runtime_mark_last_busy(d40c->base->dev); 1904 pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2070 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2116 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2071 desc->cyclic = true; 2117 desc->cyclic = true;
2072 2118
2073 if (direction != DMA_NONE) { 2119 if (direction != DMA_TRANS_NONE) {
2074 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 2120 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
2075 2121
2076 if (direction == DMA_DEV_TO_MEM) 2122 if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
2371 spin_unlock_irqrestore(&d40c->lock, flags); 2417 spin_unlock_irqrestore(&d40c->lock, flags);
2372} 2418}
2373 2419
2420static void d40_terminate_all(struct dma_chan *chan)
2421{
2422 unsigned long flags;
2423 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2424 int ret;
2425
2426 spin_lock_irqsave(&d40c->lock, flags);
2427
2428 pm_runtime_get_sync(d40c->base->dev);
2429 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2430 if (ret)
2431 chan_err(d40c, "Failed to stop channel\n");
2432
2433 d40_term_all(d40c);
2434 pm_runtime_mark_last_busy(d40c->base->dev);
2435 pm_runtime_put_autosuspend(d40c->base->dev);
2436 if (d40c->busy) {
2437 pm_runtime_mark_last_busy(d40c->base->dev);
2438 pm_runtime_put_autosuspend(d40c->base->dev);
2439 }
2440 d40c->busy = false;
2441
2442 spin_unlock_irqrestore(&d40c->lock, flags);
2443}
2444
2374static int 2445static int
2375dma40_config_to_halfchannel(struct d40_chan *d40c, 2446dma40_config_to_halfchannel(struct d40_chan *d40c,
2376 struct stedma40_half_channel_info *info, 2447 struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2551 2622
2552 switch (cmd) { 2623 switch (cmd) {
2553 case DMA_TERMINATE_ALL: 2624 case DMA_TERMINATE_ALL:
2554 return d40_terminate_all(d40c); 2625 d40_terminate_all(chan);
2626 return 0;
2555 case DMA_PAUSE: 2627 case DMA_PAUSE:
2556 return d40_pause(d40c); 2628 return d40_pause(d40c);
2557 case DMA_RESUME: 2629 case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2908 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2980 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2909 rev, res->start); 2981 rev, res->start);
2910 2982
2983 if (rev < 2) {
2984 d40_err(&pdev->dev, "hardware revision: %d is not supported",
2985 rev);
2986 goto failure;
2987 }
2988
2911 plat_data = pdev->dev.platform_data; 2989 plat_data = pdev->dev.platform_data;
2912 2990
2913 /* Count the number of logical channels in use */ 2991 /* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
2998 3076
2999 if (base) { 3077 if (base) {
3000 kfree(base->lcla_pool.alloc_map); 3078 kfree(base->lcla_pool.alloc_map);
3079 kfree(base->reg_val_backup_chan);
3001 kfree(base->lookup_log_chans); 3080 kfree(base->lookup_log_chans);
3002 kfree(base->lookup_phy_chans); 3081 kfree(base->lookup_phy_chans);
3003 kfree(base->phy_res); 3082 kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490968a3..51e8e5396e9b 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
62#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) 62#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
63 63
64/* Link register */ 64/* Link register */
65#define D40_DEACTIVATE_EVENTLINE 0x0
66#define D40_ACTIVATE_EVENTLINE 0x1
67#define D40_EVENTLINE_POS(i) (2 * i) 65#define D40_EVENTLINE_POS(i) (2 * i)
68#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) 66#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
69 67
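
The dma40 hunks above treat each event line as a 2-bit field in the link register, selected with D40_EVENTLINE_POS()/D40_EVENTLINE_MASK() and written together with all-ones in every other field, as in the writel() calls shown in the patch. Here is a small stand-alone sketch of that bit arithmetic; the register layout and any hardware semantics beyond what the patch shows are assumptions for illustration only.

#include <stdio.h>

#define EVENTLINE_POS(i)	(2 * (i))
#define EVENTLINE_MASK(i)	(0x3u << EVENTLINE_POS(i))

enum { DEACTIVATE = 0, ACTIVATE = 1, SUSPEND_REQ = 2, ROUND = 3 };

/* value written to the link register: the target line's field carries
 * 'state', every other 2-bit field is written as all-ones, mirroring
 * the writel((state << POS) | ~MASK, addr) calls above */
static unsigned int event_write_word(int line, unsigned int state)
{
	return (state << EVENTLINE_POS(line)) | ~EVENTLINE_MASK(line);
}

/* read a line's 2-bit state back out of a register value */
static unsigned int event_state(unsigned int reg, int line)
{
	return (reg & EVENTLINE_MASK(line)) >> EVENTLINE_POS(line);
}

int main(void)
{
	unsigned int reg = event_write_word(2, ACTIVATE);

	printf("line 2 state = %u (reg = 0x%08x)\n", event_state(reg, 2), reg);
	return 0;
}
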
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d25599f2a3f8..47408e802ab6 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
191 } 191 }
192} 192}
193 193
194static bool
195validate_device_path(struct efi_variable *var, int match, u8 *buffer,
196 unsigned long len)
197{
198 struct efi_generic_dev_path *node;
199 int offset = 0;
200
201 node = (struct efi_generic_dev_path *)buffer;
202
203 if (len < sizeof(*node))
204 return false;
205
206 while (offset <= len - sizeof(*node) &&
207 node->length >= sizeof(*node) &&
208 node->length <= len - offset) {
209 offset += node->length;
210
211 if ((node->type == EFI_DEV_END_PATH ||
212 node->type == EFI_DEV_END_PATH2) &&
213 node->sub_type == EFI_DEV_END_ENTIRE)
214 return true;
215
216 node = (struct efi_generic_dev_path *)(buffer + offset);
217 }
218
219 /*
220 * If we're here then either node->length pointed past the end
221 * of the buffer or we reached the end of the buffer without
222 * finding a device path end node.
223 */
224 return false;
225}
226
227static bool
228validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
229 unsigned long len)
230{
231 /* An array of 16-bit integers */
232 if ((len % 2) != 0)
233 return false;
234
235 return true;
236}
237
238static bool
239validate_load_option(struct efi_variable *var, int match, u8 *buffer,
240 unsigned long len)
241{
242 u16 filepathlength;
243 int i, desclength = 0, namelen;
244
245 namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
246
247 /* Either "Boot" or "Driver" followed by four digits of hex */
248 for (i = match; i < match+4; i++) {
249 if (var->VariableName[i] > 127 ||
250 hex_to_bin(var->VariableName[i] & 0xff) < 0)
251 return true;
252 }
253
254 /* Reject it if there's 4 digits of hex and then further content */
255 if (namelen > match + 4)
256 return false;
257
258 /* A valid entry must be at least 8 bytes */
259 if (len < 8)
260 return false;
261
262 filepathlength = buffer[4] | buffer[5] << 8;
263
264 /*
265 * There's no stored length for the description, so it has to be
266 * found by hand
267 */
268 desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
269
270 /* Each boot entry must have a descriptor */
271 if (!desclength)
272 return false;
273
274 /*
275 * If the sum of the length of the description, the claimed filepath
276 * length and the original header are greater than the length of the
277 * variable, it's malformed
278 */
279 if ((desclength + filepathlength + 6) > len)
280 return false;
281
282 /*
283 * And, finally, check the filepath
284 */
285 return validate_device_path(var, match, buffer + desclength + 6,
286 filepathlength);
287}
288
289static bool
290validate_uint16(struct efi_variable *var, int match, u8 *buffer,
291 unsigned long len)
292{
293 /* A single 16-bit integer */
294 if (len != 2)
295 return false;
296
297 return true;
298}
299
300static bool
301validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
302 unsigned long len)
303{
304 int i;
305
306 for (i = 0; i < len; i++) {
307 if (buffer[i] > 127)
308 return false;
309
310 if (buffer[i] == 0)
311 return true;
312 }
313
314 return false;
315}
316
317struct variable_validate {
318 char *name;
319 bool (*validate)(struct efi_variable *var, int match, u8 *data,
320 unsigned long len);
321};
322
323static const struct variable_validate variable_validate[] = {
324 { "BootNext", validate_uint16 },
325 { "BootOrder", validate_boot_order },
326 { "DriverOrder", validate_boot_order },
327 { "Boot*", validate_load_option },
328 { "Driver*", validate_load_option },
329 { "ConIn", validate_device_path },
330 { "ConInDev", validate_device_path },
331 { "ConOut", validate_device_path },
332 { "ConOutDev", validate_device_path },
333 { "ErrOut", validate_device_path },
334 { "ErrOutDev", validate_device_path },
335 { "Timeout", validate_uint16 },
336 { "Lang", validate_ascii_string },
337 { "PlatformLang", validate_ascii_string },
338 { "", NULL },
339};
340
341static bool
342validate_var(struct efi_variable *var, u8 *data, unsigned long len)
343{
344 int i;
345 u16 *unicode_name = var->VariableName;
346
347 for (i = 0; variable_validate[i].validate != NULL; i++) {
348 const char *name = variable_validate[i].name;
349 int match;
350
351 for (match = 0; ; match++) {
352 char c = name[match];
353 u16 u = unicode_name[match];
354
355 /* All special variables are plain ascii */
356 if (u > 127)
357 return true;
358
359 /* Wildcard in the matching name means we've matched */
360 if (c == '*')
361 return variable_validate[i].validate(var,
362 match, data, len);
363
364 /* Case sensitive match */
365 if (c != u)
366 break;
367
368 /* Reached the end of the string while matching */
369 if (!c)
370 return variable_validate[i].validate(var,
371 match, data, len);
372 }
373 }
374
375 return true;
376}
377
194static efi_status_t 378static efi_status_t
195get_var_data_locked(struct efivars *efivars, struct efi_variable *var) 379get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
196{ 380{
@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
324 return -EINVAL; 508 return -EINVAL;
325 } 509 }
326 510
511 if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
512 validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
513 printk(KERN_ERR "efivars: Malformed variable content\n");
514 return -EINVAL;
515 }
516
327 spin_lock(&efivars->lock); 517 spin_lock(&efivars->lock);
328 status = efivars->ops->set_variable(new_var->VariableName, 518 status = efivars->ops->set_variable(new_var->VariableName,
329 &new_var->VendorGuid, 519 &new_var->VendorGuid,
@@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
626 if (!capable(CAP_SYS_ADMIN)) 816 if (!capable(CAP_SYS_ADMIN))
627 return -EACCES; 817 return -EACCES;
628 818
819 if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
820 validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
821 printk(KERN_ERR "efivars: Malformed variable content\n");
822 return -EINVAL;
823 }
824
629 spin_lock(&efivars->lock); 825 spin_lock(&efivars->lock);
630 826
631 /* 827 /*
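
The validate_var() logic added above matches the variable name against a table in which a trailing '*' means prefix match, and hands the wildcard position to the per-variable validator (so "Boot0001" reaches validate_load_option() with match == 4). A trimmed, stand-alone sketch of that matching follows; the table is an illustrative subset, not the full list from the patch.

#include <stdio.h>

static const char *const names[] = {
	"BootNext", "BootOrder", "Boot*", "Driver*", "Timeout", NULL
};

/* returns index into names[], and the offset where the match ended */
static int match_name(const char *var, int *wild_off)
{
	int i, m;

	for (i = 0; names[i]; i++) {
		for (m = 0; ; m++) {
			char c = names[i][m];

			if (c == '*') {		/* wildcard: prefix matched */
				*wild_off = m;
				return i;
			}
			if (c != var[m])	/* mismatch, try next entry */
				break;
			if (!c) {		/* exact match */
				*wild_off = m;
				return i;
			}
		}
	}
	return -1;
}

int main(void)
{
	int off, idx = match_name("Boot0001", &off);

	if (idx >= 0)
		printf("matched \"%s\" at offset %d\n", names[idx], off);
	return 0;
}
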
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 1adc2ec1e383..4461540653a8 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -965,18 +965,15 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
965 } 965 }
966 966
967 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); 967 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
968 _gpio_rmw(base, bank->regs->irqstatus, l, 968 _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
969 bank->regs->irqenable_inv == false);
970 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
971 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
972 if (bank->regs->debounce_en) 969 if (bank->regs->debounce_en)
973 _gpio_rmw(base, bank->regs->debounce_en, 0, 1); 970 __raw_writel(0, base + bank->regs->debounce_en);
974 971
975 /* Save OE default value (0xffffffff) in the context */ 972 /* Save OE default value (0xffffffff) in the context */
976 bank->context.oe = __raw_readl(bank->base + bank->regs->direction); 973 bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
977 /* Initialize interface clk ungated, module enabled */ 974 /* Initialize interface clk ungated, module enabled */
978 if (bank->regs->ctrl) 975 if (bank->regs->ctrl)
979 _gpio_rmw(base, bank->regs->ctrl, 0, 1); 976 __raw_writel(0, base + bank->regs->ctrl);
980} 977}
981 978
982static __devinit void 979static __devinit void
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index e8729cc2ba2b..2cd958e0b822 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -230,16 +230,12 @@ static void pch_gpio_setup(struct pch_gpio *chip)
230 230
231static int pch_irq_type(struct irq_data *d, unsigned int type) 231static int pch_irq_type(struct irq_data *d, unsigned int type)
232{ 232{
233 u32 im;
234 u32 __iomem *im_reg;
235 u32 ien;
236 u32 im_pos;
237 int ch;
238 unsigned long flags;
239 u32 val;
240 int irq = d->irq;
241 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 233 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
242 struct pch_gpio *chip = gc->private; 234 struct pch_gpio *chip = gc->private;
235 u32 im, im_pos, val;
236 u32 __iomem *im_reg;
237 unsigned long flags;
238 int ch, irq = d->irq;
243 239
244 ch = irq - chip->irq_base; 240 ch = irq - chip->irq_base;
245 if (irq <= chip->irq_base + 7) { 241 if (irq <= chip->irq_base + 7) {
@@ -270,30 +266,22 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
270 case IRQ_TYPE_LEVEL_LOW: 266 case IRQ_TYPE_LEVEL_LOW:
271 val = PCH_LEVEL_L; 267 val = PCH_LEVEL_L;
272 break; 268 break;
273 case IRQ_TYPE_PROBE:
274 goto end;
275 default: 269 default:
276 dev_warn(chip->dev, "%s: unknown type(%dd)", 270 goto unlock;
277 __func__, type);
278 goto end;
279 } 271 }
280 272
281 /* Set interrupt mode */ 273 /* Set interrupt mode */
282 im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4)); 274 im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
283 iowrite32(im | (val << (im_pos * 4)), im_reg); 275 iowrite32(im | (val << (im_pos * 4)), im_reg);
284 276
285 /* iclr */ 277 /* And the handler */
286 iowrite32(BIT(ch), &chip->reg->iclr); 278 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
279 __irq_set_handler_locked(d->irq, handle_level_irq);
280 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
281 __irq_set_handler_locked(d->irq, handle_edge_irq);
287 282
288 /* IMASKCLR */ 283unlock:
289 iowrite32(BIT(ch), &chip->reg->imaskclr);
290
291 /* Enable interrupt */
292 ien = ioread32(&chip->reg->ien);
293 iowrite32(ien | BIT(ch), &chip->reg->ien);
294end:
295 spin_unlock_irqrestore(&chip->spinlock, flags); 284 spin_unlock_irqrestore(&chip->spinlock, flags);
296
297 return 0; 285 return 0;
298} 286}
299 287
@@ -313,18 +301,24 @@ static void pch_irq_mask(struct irq_data *d)
313 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask); 301 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
314} 302}
315 303
304static void pch_irq_ack(struct irq_data *d)
305{
306 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
307 struct pch_gpio *chip = gc->private;
308
309 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
310}
311
316static irqreturn_t pch_gpio_handler(int irq, void *dev_id) 312static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
317{ 313{
318 struct pch_gpio *chip = dev_id; 314 struct pch_gpio *chip = dev_id;
319 u32 reg_val = ioread32(&chip->reg->istatus); 315 u32 reg_val = ioread32(&chip->reg->istatus);
320 int i; 316 int i, ret = IRQ_NONE;
321 int ret = IRQ_NONE;
322 317
323 for (i = 0; i < gpio_pins[chip->ioh]; i++) { 318 for (i = 0; i < gpio_pins[chip->ioh]; i++) {
324 if (reg_val & BIT(i)) { 319 if (reg_val & BIT(i)) {
325 dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n", 320 dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n",
326 __func__, i, irq, reg_val); 321 __func__, i, irq, reg_val);
327 iowrite32(BIT(i), &chip->reg->iclr);
328 generic_handle_irq(chip->irq_base + i); 322 generic_handle_irq(chip->irq_base + i);
329 ret = IRQ_HANDLED; 323 ret = IRQ_HANDLED;
330 } 324 }
@@ -343,6 +337,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
343 gc->private = chip; 337 gc->private = chip;
344 ct = gc->chip_types; 338 ct = gc->chip_types;
345 339
340 ct->chip.irq_ack = pch_irq_ack;
346 ct->chip.irq_mask = pch_irq_mask; 341 ct->chip.irq_mask = pch_irq_mask;
347 ct->chip.irq_unmask = pch_irq_unmask; 342 ct->chip.irq_unmask = pch_irq_unmask;
348 ct->chip.irq_set_type = pch_irq_type; 343 ct->chip.irq_set_type = pch_irq_type;
@@ -357,6 +352,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
357 s32 ret; 352 s32 ret;
358 struct pch_gpio *chip; 353 struct pch_gpio *chip;
359 int irq_base; 354 int irq_base;
355 u32 msk;
360 356
361 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 357 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
362 if (chip == NULL) 358 if (chip == NULL)
@@ -408,8 +404,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
408 } 404 }
409 chip->irq_base = irq_base; 405 chip->irq_base = irq_base;
410 406
407 /* Mask all interrupts, but enable them */
408 msk = (1 << gpio_pins[chip->ioh]) - 1;
409 iowrite32(msk, &chip->reg->imask);
410 iowrite32(msk, &chip->reg->ien);
411
411 ret = request_irq(pdev->irq, pch_gpio_handler, 412 ret = request_irq(pdev->irq, pch_gpio_handler,
412 IRQF_SHARED, KBUILD_MODNAME, chip); 413 IRQF_SHARED, KBUILD_MODNAME, chip);
413 if (ret != 0) { 414 if (ret != 0) {
414 dev_err(&pdev->dev, 415 dev_err(&pdev->dev,
415 "%s request_irq failed\n", __func__); 416 "%s request_irq failed\n", __func__);
@@ -418,8 +419,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
418 419
419 pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]); 420 pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
420 421
421 /* Initialize interrupt ien register */
422 iowrite32(0, &chip->reg->ien);
423end: 422end:
424 return 0; 423 return 0;
425 424
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 5689ce62fd81..fc3ace3fd4cb 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -64,6 +64,7 @@ struct pxa_gpio_chip {
64 unsigned long irq_mask; 64 unsigned long irq_mask;
65 unsigned long irq_edge_rise; 65 unsigned long irq_edge_rise;
66 unsigned long irq_edge_fall; 66 unsigned long irq_edge_fall;
67 int (*set_wake)(unsigned int gpio, unsigned int on);
67 68
68#ifdef CONFIG_PM 69#ifdef CONFIG_PM
69 unsigned long saved_gplr; 70 unsigned long saved_gplr;
@@ -269,7 +270,8 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
269 (value ? GPSR_OFFSET : GPCR_OFFSET)); 270 (value ? GPSR_OFFSET : GPCR_OFFSET));
270} 271}
271 272
272static int __devinit pxa_init_gpio_chip(int gpio_end) 273static int __devinit pxa_init_gpio_chip(int gpio_end,
274 int (*set_wake)(unsigned int, unsigned int))
273{ 275{
274 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1; 276 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
275 struct pxa_gpio_chip *chips; 277 struct pxa_gpio_chip *chips;
@@ -285,6 +287,7 @@ static int __devinit pxa_init_gpio_chip(int gpio_end)
285 287
286 sprintf(chips[i].label, "gpio-%d", i); 288 sprintf(chips[i].label, "gpio-%d", i);
287 chips[i].regbase = gpio_reg_base + BANK_OFF(i); 289 chips[i].regbase = gpio_reg_base + BANK_OFF(i);
290 chips[i].set_wake = set_wake;
288 291
289 c->base = gpio; 292 c->base = gpio;
290 c->label = chips[i].label; 293 c->label = chips[i].label;
@@ -412,6 +415,17 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
412 writel_relaxed(gfer, c->regbase + GFER_OFFSET); 415 writel_relaxed(gfer, c->regbase + GFER_OFFSET);
413} 416}
414 417
418static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
419{
420 int gpio = pxa_irq_to_gpio(d->irq);
421 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
422
423 if (c->set_wake)
424 return c->set_wake(gpio, on);
425 else
426 return 0;
427}
428
415static void pxa_unmask_muxed_gpio(struct irq_data *d) 429static void pxa_unmask_muxed_gpio(struct irq_data *d)
416{ 430{
417 int gpio = pxa_irq_to_gpio(d->irq); 431 int gpio = pxa_irq_to_gpio(d->irq);
@@ -427,6 +441,7 @@ static struct irq_chip pxa_muxed_gpio_chip = {
427 .irq_mask = pxa_mask_muxed_gpio, 441 .irq_mask = pxa_mask_muxed_gpio,
428 .irq_unmask = pxa_unmask_muxed_gpio, 442 .irq_unmask = pxa_unmask_muxed_gpio,
429 .irq_set_type = pxa_gpio_irq_type, 443 .irq_set_type = pxa_gpio_irq_type,
444 .irq_set_wake = pxa_gpio_set_wake,
430}; 445};
431 446
432static int pxa_gpio_nums(void) 447static int pxa_gpio_nums(void)
@@ -471,6 +486,7 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
471 struct pxa_gpio_chip *c; 486 struct pxa_gpio_chip *c;
472 struct resource *res; 487 struct resource *res;
473 struct clk *clk; 488 struct clk *clk;
489 struct pxa_gpio_platform_data *info;
474 int gpio, irq, ret; 490 int gpio, irq, ret;
475 int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0; 491 int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
476 492
@@ -516,7 +532,8 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
516 } 532 }
517 533
518 /* Initialize GPIO chips */ 534 /* Initialize GPIO chips */
519 pxa_init_gpio_chip(pxa_last_gpio); 535 info = dev_get_platdata(&pdev->dev);
536 pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);
520 537
521 /* clear all GPIO edge detects */ 538 /* clear all GPIO edge detects */
522 for_each_gpio_chip(gpio, c) { 539 for_each_gpio_chip(gpio, c) {
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 19d6fc0229c3..e991d9171961 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -452,12 +452,14 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
452}; 452};
453#endif 453#endif
454 454
455#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
455static struct samsung_gpio_cfg exynos_gpio_cfg = { 456static struct samsung_gpio_cfg exynos_gpio_cfg = {
456 .set_pull = exynos_gpio_setpull, 457 .set_pull = exynos_gpio_setpull,
457 .get_pull = exynos_gpio_getpull, 458 .get_pull = exynos_gpio_getpull,
458 .set_config = samsung_gpio_setcfg_4bit, 459 .set_config = samsung_gpio_setcfg_4bit,
459 .get_config = samsung_gpio_getcfg_4bit, 460 .get_config = samsung_gpio_getcfg_4bit,
460}; 461};
462#endif
461 463
462#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) 464#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
463static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = { 465static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
@@ -2123,8 +2125,8 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
2123 * uses the above macro and depends on the banks being listed in order here. 2125 * uses the above macro and depends on the banks being listed in order here.
2124 */ 2126 */
2125 2127
2126static struct samsung_gpio_chip exynos4_gpios_1[] = {
2127#ifdef CONFIG_ARCH_EXYNOS4 2128#ifdef CONFIG_ARCH_EXYNOS4
2129static struct samsung_gpio_chip exynos4_gpios_1[] = {
2128 { 2130 {
2129 .chip = { 2131 .chip = {
2130 .base = EXYNOS4_GPA0(0), 2132 .base = EXYNOS4_GPA0(0),
@@ -2222,11 +2224,11 @@ static struct samsung_gpio_chip exynos4_gpios_1[] = {
2222 .label = "GPF3", 2224 .label = "GPF3",
2223 }, 2225 },
2224 }, 2226 },
2225#endif
2226}; 2227};
2228#endif
2227 2229
2228static struct samsung_gpio_chip exynos4_gpios_2[] = {
2229#ifdef CONFIG_ARCH_EXYNOS4 2230#ifdef CONFIG_ARCH_EXYNOS4
2231static struct samsung_gpio_chip exynos4_gpios_2[] = {
2230 { 2232 {
2231 .chip = { 2233 .chip = {
2232 .base = EXYNOS4_GPJ0(0), 2234 .base = EXYNOS4_GPJ0(0),
@@ -2367,11 +2369,11 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
2367 .to_irq = samsung_gpiolib_to_irq, 2369 .to_irq = samsung_gpiolib_to_irq,
2368 }, 2370 },
2369 }, 2371 },
2370#endif
2371}; 2372};
2373#endif
2372 2374
2373static struct samsung_gpio_chip exynos4_gpios_3[] = {
2374#ifdef CONFIG_ARCH_EXYNOS4 2375#ifdef CONFIG_ARCH_EXYNOS4
2376static struct samsung_gpio_chip exynos4_gpios_3[] = {
2375 { 2377 {
2376 .chip = { 2378 .chip = {
2377 .base = EXYNOS4_GPZ(0), 2379 .base = EXYNOS4_GPZ(0),
@@ -2379,8 +2381,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
2379 .label = "GPZ", 2381 .label = "GPZ",
2380 }, 2382 },
2381 }, 2383 },
2382#endif
2383}; 2384};
2385#endif
2384 2386
2385#ifdef CONFIG_ARCH_EXYNOS5 2387#ifdef CONFIG_ARCH_EXYNOS5
2386static struct samsung_gpio_chip exynos5_gpios_1[] = { 2388static struct samsung_gpio_chip exynos5_gpios_1[] = {
@@ -2719,7 +2721,9 @@ static __init int samsung_gpiolib_init(void)
2719{ 2721{
2720 struct samsung_gpio_chip *chip; 2722 struct samsung_gpio_chip *chip;
2721 int i, nr_chips; 2723 int i, nr_chips;
2724#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
2722 void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4; 2725 void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
2726#endif
2723 int group = 0; 2727 int group = 0;
2724 2728
2725 samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs)); 2729 samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2971,6 +2975,7 @@ static __init int samsung_gpiolib_init(void)
2971 2975
2972 return 0; 2976 return 0;
2973 2977
2978#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
2974err_ioremap4: 2979err_ioremap4:
2975 iounmap(gpio_base3); 2980 iounmap(gpio_base3);
2976err_ioremap3: 2981err_ioremap3:
@@ -2979,6 +2984,7 @@ err_ioremap2:
2979 iounmap(gpio_base1); 2984 iounmap(gpio_base1);
2980err_ioremap1: 2985err_ioremap1:
2981 return -ENOMEM; 2986 return -ENOMEM;
2987#endif
2982} 2988}
2983core_initcall(samsung_gpiolib_init); 2989core_initcall(samsung_gpiolib_init);
2984 2990
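The gpio-samsung.c hunks move the CONFIG_ARCH_EXYNOS* and CONFIG_CPU/SOC guards from inside the initializers to around the whole definitions, so the arrays, the iomem pointers and the ioremap error labels all compile out together when the option is off, instead of leaving empty arrays and unused symbols behind. Reduced to a sketch:

    struct chip { unsigned long base; };

    /* Before: the array always exists, possibly empty. */
    static struct chip chips_before[] = {
    #ifdef CONFIG_FOO
            { .base = 0x100 },
    #endif
    };

    /* After: the array and everything that touches it disappear together. */
    #ifdef CONFIG_FOO
    static struct chip chips_after[] = {
            { .base = 0x100 },
    };
    #endif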
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 30372f7b2d45..348b367debeb 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
1510 * \param arg pointer to a drm_buf_map structure. 1510 * \param arg pointer to a drm_buf_map structure.
1511 * \return zero on success or a negative number on failure. 1511 * \return zero on success or a negative number on failure.
1512 * 1512 *
1513 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information 1513 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1514 * about each buffer into user space. For PCI buffers, it calls do_mmap() with 1514 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1515 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1515 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
1516 * drm_mmap_dma(). 1516 * drm_mmap_dma().
1517 */ 1517 */
@@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1553 retcode = -EINVAL; 1553 retcode = -EINVAL;
1554 goto done; 1554 goto done;
1555 } 1555 }
1556 down_write(&current->mm->mmap_sem); 1556 virtual = vm_mmap(file_priv->filp, 0, map->size,
1557 virtual = do_mmap(file_priv->filp, 0, map->size,
1558 PROT_READ | PROT_WRITE, 1557 PROT_READ | PROT_WRITE,
1559 MAP_SHARED, 1558 MAP_SHARED,
1560 token); 1559 token);
1561 up_write(&current->mm->mmap_sem);
1562 } else { 1560 } else {
1563 down_write(&current->mm->mmap_sem); 1561 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1564 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1565 PROT_READ | PROT_WRITE, 1562 PROT_READ | PROT_WRITE,
1566 MAP_SHARED, 0); 1563 MAP_SHARED, 0);
1567 up_write(&current->mm->mmap_sem);
1568 } 1564 }
1569 if (virtual > -1024UL) { 1565 if (virtual > -1024UL) {
1570 /* Real error */ 1566 /* Real error */
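drm_mapbufs() no longer takes mmap_sem around do_mmap(); vm_mmap() performs the same mapping and handles the locking internally, which is why the down_write/up_write pair disappears. A minimal sketch of the new call pattern (names are placeholders, error handling shortened to IS_ERR_VALUE):

    #include <linux/mm.h>
    #include <linux/mman.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    static unsigned long map_buffer(struct file *filp, size_t size,
                                    unsigned long token)
    {
            unsigned long addr;

            /* vm_mmap() takes and drops current->mm->mmap_sem itself. */
            addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, token);
            if (IS_ERR_VALUE(addr))
                    return 0;       /* caller treats 0 as "mapping failed" */

            return addr;
    }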
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 26d51979116b..1dffa8359f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
149 unsigned long pfn; 149 unsigned long pfn;
150 150
151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
152 unsigned long usize = buf->size;
153
154 if (!buf->pages) 152 if (!buf->pages)
155 return -EINTR; 153 return -EINTR;
156 154
157 while (usize > 0) { 155 pfn = page_to_pfn(buf->pages[page_offset++]);
158 pfn = page_to_pfn(buf->pages[page_offset++]); 156 } else
159 vm_insert_mixed(vma, f_vaddr, pfn); 157 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
160 f_vaddr += PAGE_SIZE;
161 usize -= PAGE_SIZE;
162 }
163
164 return 0;
165 }
166
167 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
168 158
169 return vm_insert_mixed(vma, f_vaddr, pfn); 159 return vm_insert_mixed(vma, f_vaddr, pfn);
170} 160}
@@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
524 if (!buffer->pages) 514 if (!buffer->pages)
525 return -EINVAL; 515 return -EINVAL;
526 516
517 vma->vm_flags |= VM_MIXEDMAP;
518
527 do { 519 do {
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 520 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
529 if (ret) { 521 if (ret) {
@@ -581,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
581 obj->filp->f_op = &exynos_drm_gem_fops; 573 obj->filp->f_op = &exynos_drm_gem_fops;
582 obj->filp->private_data = obj; 574 obj->filp->private_data = obj;
583 575
584 down_write(&current->mm->mmap_sem); 576 addr = vm_mmap(obj->filp, 0, args->size,
585 addr = do_mmap(obj->filp, 0, args->size,
586 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 577 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
587 up_write(&current->mm->mmap_sem);
588 578
589 drm_gem_object_unreference_unlocked(obj); 579 drm_gem_object_unreference_unlocked(obj);
590 580
@@ -712,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
712int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 702int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
713{ 703{
714 struct drm_gem_object *obj = vma->vm_private_data; 704 struct drm_gem_object *obj = vma->vm_private_data;
715 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
716 struct drm_device *dev = obj->dev; 705 struct drm_device *dev = obj->dev;
717 unsigned long f_vaddr; 706 unsigned long f_vaddr;
718 pgoff_t page_offset; 707 pgoff_t page_offset;
@@ -724,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
724 713
725 mutex_lock(&dev->struct_mutex); 714 mutex_lock(&dev->struct_mutex);
726 715
727 /*
728 * allocate all pages as desired size if user wants to allocate
729 * physically non-continuous memory.
730 */
731 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
732 ret = exynos_drm_gem_get_pages(obj);
733 if (ret < 0)
734 goto err;
735 }
736
737 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 716 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
738 if (ret < 0) 717 if (ret < 0)
739 DRM_ERROR("failed to map pages.\n"); 718 DRM_ERROR("failed to map pages.\n");
740 719
741err:
742 mutex_unlock(&dev->struct_mutex); 720 mutex_unlock(&dev->struct_mutex);
743 721
744 return convert_to_vm_err_msg(ret); 722 return convert_to_vm_err_msg(ret);
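The exynos fault path above now inserts exactly one page per fault with vm_insert_mixed(), relying on VM_MIXEDMAP being set at mmap time, instead of pre-populating the whole buffer inside the handler. A reduced sketch of that shape, assuming the 3.4-era vm_insert_mixed(vma, addr, pfn) signature (the pages/dma_addr fields mirror the hunk, everything else is a placeholder):

    #include <linux/mm.h>
    #include <linux/types.h>

    struct example_buf {
            struct page **pages;    /* non-contiguous allocation */
            dma_addr_t dma_addr;    /* contiguous allocation */
            bool contig;
    };

    static int example_fault(struct vm_area_struct *vma, struct example_buf *buf,
                             unsigned long vaddr, pgoff_t page_offset)
    {
            unsigned long pfn;

            if (!buf->contig)
                    pfn = page_to_pfn(buf->pages[page_offset]);
            else
                    pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

            /* Requires VM_MIXEDMAP on the VMA, set when the mapping is created. */
            return vm_insert_mixed(vma, vaddr, pfn);
    }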
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2c8a60c3b98e..f920fb5e42b6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
129 if (buf_priv->currently_mapped == I810_BUF_MAPPED) 129 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
130 return -EINVAL; 130 return -EINVAL;
131 131
132 /* This is all entirely broken */
132 down_write(&current->mm->mmap_sem); 133 down_write(&current->mm->mmap_sem);
133 old_fops = file_priv->filp->f_op; 134 old_fops = file_priv->filp->f_op;
134 file_priv->filp->f_op = &i810_buffer_fops; 135 file_priv->filp->f_op = &i810_buffer_fops;
@@ -157,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf)
157 if (buf_priv->currently_mapped != I810_BUF_MAPPED) 158 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
158 return -EINVAL; 159 return -EINVAL;
159 160
160 down_write(&current->mm->mmap_sem); 161 retcode = vm_munmap((unsigned long)buf_priv->virtual,
161 retcode = do_munmap(current->mm,
162 (unsigned long)buf_priv->virtual,
163 (size_t) buf->total); 162 (size_t) buf->total);
164 up_write(&current->mm->mmap_sem);
165 163
166 buf_priv->currently_mapped = I810_BUF_UNMAPPED; 164 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
167 buf_priv->virtual = NULL; 165 buf_priv->virtual = NULL;
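Likewise, i810_unmap_buffer() drops the open-coded down_write/do_munmap/up_write triple for vm_munmap(), which wraps the same operation with the proper mmap_sem handling. A one-call sketch:

    #include <linux/mm.h>

    static int unmap_buffer(void *virtual, size_t total)
    {
            /* vm_munmap() locks current->mm->mmap_sem internally. */
            return vm_munmap((unsigned long)virtual, total);
    }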
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b505b70dba05..e6162a1681f0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1224 unsigned long temp, chipset, gfx; 1224 unsigned long temp, chipset, gfx;
1225 int ret; 1225 int ret;
1226 1226
1227 if (!IS_GEN5(dev))
1228 return -ENODEV;
1229
1227 ret = mutex_lock_interruptible(&dev->struct_mutex); 1230 ret = mutex_lock_interruptible(&dev->struct_mutex);
1228 if (ret) 1231 if (ret)
1229 return ret; 1232 return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 785f67f963ef..ba60f3c8f911 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1701 unsigned long diffms; 1701 unsigned long diffms;
1702 u32 count; 1702 u32 count;
1703 1703
1704 if (dev_priv->info->gen != 5)
1705 return;
1706
1704 getrawmonotonic(&now); 1707 getrawmonotonic(&now);
1705 diff1 = timespec_sub(now, dev_priv->last_time2); 1708 diff1 = timespec_sub(now, dev_priv->last_time2);
1706 1709
@@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2121 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2124 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2122 (unsigned long) dev); 2125 (unsigned long) dev);
2123 2126
2124 spin_lock(&mchdev_lock); 2127 if (IS_GEN5(dev)) {
2125 i915_mch_dev = dev_priv; 2128 spin_lock(&mchdev_lock);
2126 dev_priv->mchdev_lock = &mchdev_lock; 2129 i915_mch_dev = dev_priv;
2127 spin_unlock(&mchdev_lock); 2130 dev_priv->mchdev_lock = &mchdev_lock;
2131 spin_unlock(&mchdev_lock);
2128 2132
2129 ips_ping_for_i915_load(); 2133 ips_ping_for_i915_load();
2134 }
2130 2135
2131 return 0; 2136 return 0;
2132 2137
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e3c6acde955..0d1e4b7b4b99 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1087,11 +1087,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1087 if (obj == NULL) 1087 if (obj == NULL)
1088 return -ENOENT; 1088 return -ENOENT;
1089 1089
1090 down_write(&current->mm->mmap_sem); 1090 addr = vm_mmap(obj->filp, 0, args->size,
1091 addr = do_mmap(obj->filp, 0, args->size,
1092 PROT_READ | PROT_WRITE, MAP_SHARED, 1091 PROT_READ | PROT_WRITE, MAP_SHARED,
1093 args->offset); 1092 args->offset);
1094 up_write(&current->mm->mmap_sem);
1095 drm_gem_object_unreference_unlocked(obj); 1093 drm_gem_object_unreference_unlocked(obj);
1096 if (IS_ERR((void *)addr)) 1094 if (IS_ERR((void *)addr))
1097 return addr; 1095 return addr;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f51a696486cb..de431942ded4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1133 return -EINVAL; 1133 return -EINVAL;
1134 } 1134 }
1135 1135
1136 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1137 DRM_DEBUG("execbuf with %u cliprects\n",
1138 args->num_cliprects);
1139 return -EINVAL;
1140 }
1136 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1141 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1137 GFP_KERNEL); 1142 GFP_KERNEL);
1138 if (cliprects == NULL) { 1143 if (cliprects == NULL) {
@@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1404 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1409 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1405 int ret; 1410 int ret;
1406 1411
1407 if (args->buffer_count < 1) { 1412 if (args->buffer_count < 1 ||
1413 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1408 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); 1414 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1409 return -EINVAL; 1415 return -EINVAL;
1410 } 1416 }
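Both execbuffer paths above now bound the user-supplied counts before multiplying them into a kmalloc() size, closing an integer-overflow hole where a huge num_cliprects or buffer_count could wrap to a small allocation. The guard pattern in isolation (kcalloc() is the other common way to get the same check):

    #include <linux/slab.h>
    #include <linux/kernel.h>

    struct clip { int x1, y1, x2, y2; };   /* placeholder element type */

    static struct clip *alloc_clips(unsigned int count)
    {
            /* Reject counts whose byte size would overflow. */
            if (count > UINT_MAX / sizeof(struct clip))
                    return NULL;

            return kmalloc(count * sizeof(struct clip), GFP_KERNEL);
            /* or: return kcalloc(count, sizeof(struct clip), GFP_KERNEL); */
    }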
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b4bb1ef77ddc..9d24d65f0c3e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -568,6 +568,7 @@
568#define CM0_MASK_SHIFT 16 568#define CM0_MASK_SHIFT 16
569#define CM0_IZ_OPT_DISABLE (1<<6) 569#define CM0_IZ_OPT_DISABLE (1<<6)
570#define CM0_ZR_OPT_DISABLE (1<<5) 570#define CM0_ZR_OPT_DISABLE (1<<5)
571#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
571#define CM0_DEPTH_EVICT_DISABLE (1<<4) 572#define CM0_DEPTH_EVICT_DISABLE (1<<4)
572#define CM0_COLOR_EVICT_DISABLE (1<<3) 573#define CM0_COLOR_EVICT_DISABLE (1<<3)
573#define CM0_DEPTH_WRITE_DISABLE (1<<1) 574#define CM0_DEPTH_WRITE_DISABLE (1<<1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736a4f56..90b9793fd5da 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
430{ 430{
431 struct drm_device *dev = connector->dev; 431 struct drm_device *dev = connector->dev;
432 struct intel_crt *crt = intel_attached_crt(connector); 432 struct intel_crt *crt = intel_attached_crt(connector);
433 struct drm_crtc *crtc;
434 enum drm_connector_status status; 433 enum drm_connector_status status;
434 struct intel_load_detect_pipe tmp;
435 435
436 if (I915_HAS_HOTPLUG(dev)) { 436 if (I915_HAS_HOTPLUG(dev)) {
437 if (intel_crt_detect_hotplug(connector)) { 437 if (intel_crt_detect_hotplug(connector)) {
@@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force)
450 return connector->status; 450 return connector->status;
451 451
452 /* for pre-945g platforms use load detect */ 452 /* for pre-945g platforms use load detect */
453 crtc = crt->base.base.crtc; 453 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
454 if (crtc && crtc->enabled) { 454 &tmp)) {
455 status = intel_crt_load_detect(crt); 455 if (intel_crt_detect_ddc(connector))
456 } else { 456 status = connector_status_connected;
457 struct intel_load_detect_pipe tmp; 457 else
458 458 status = intel_crt_load_detect(crt);
459 if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 459 intel_release_load_detect_pipe(&crt->base, connector,
460 &tmp)) { 460 &tmp);
461 if (intel_crt_detect_ddc(connector)) 461 } else
462 status = connector_status_connected; 462 status = connector_status_unknown;
463 else
464 status = intel_crt_load_detect(crt);
465 intel_release_load_detect_pipe(&crt->base, connector,
466 &tmp);
467 } else
468 status = connector_status_unknown;
469 }
470 463
471 return status; 464 return status;
472} 465}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5908cd563400..1b1cf3b3ff51 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7072 struct drm_device *dev = crtc->dev; 7072 struct drm_device *dev = crtc->dev;
7073 drm_i915_private_t *dev_priv = dev->dev_private; 7073 drm_i915_private_t *dev_priv = dev->dev_private;
7074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7075 int pipe = intel_crtc->pipe;
7076 int dpll_reg = DPLL(pipe);
7077 int dpll = I915_READ(dpll_reg);
7078 7075
7079 if (HAS_PCH_SPLIT(dev)) 7076 if (HAS_PCH_SPLIT(dev))
7080 return; 7077 return;
@@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7087 * the manual case. 7084 * the manual case.
7088 */ 7085 */
7089 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 7086 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7087 int pipe = intel_crtc->pipe;
7088 int dpll_reg = DPLL(pipe);
7089 u32 dpll;
7090
7090 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 7091 DRM_DEBUG_DRIVER("downclocking LVDS\n");
7091 7092
7092 assert_panel_unlocked(dev_priv, pipe); 7093 assert_panel_unlocked(dev_priv, pipe);
7093 7094
7095 dpll = I915_READ(dpll_reg);
7094 dpll |= DISPLAY_RATE_SELECT_FPA1; 7096 dpll |= DISPLAY_RATE_SELECT_FPA1;
7095 I915_WRITE(dpll_reg, dpll); 7097 I915_WRITE(dpll_reg, dpll);
7096 intel_wait_for_vblank(dev, pipe); 7098 intel_wait_for_vblank(dev, pipe);
@@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7098 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 7100 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7099 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 7101 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7100 } 7102 }
7101
7102} 7103}
7103 7104
7104/** 7105/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cae3e5f17a49..2d7f47b56b6a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
136 136
137 val &= ~VIDEO_DIP_SELECT_MASK; 137 val &= ~VIDEO_DIP_SELECT_MASK;
138 138
139 I915_WRITE(VIDEO_DIP_CTL, val | port | flags); 139 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
140 140
141 for (i = 0; i < len; i += 4) { 141 for (i = 0; i < len; i += 4) {
142 I915_WRITE(VIDEO_DIP_DATA, *data); 142 I915_WRITE(VIDEO_DIP_DATA, *data);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30e2c82101de..9c71183629c2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
750 .ident = "Hewlett-Packard t5745", 750 .ident = "Hewlett-Packard t5745",
751 .matches = { 751 .matches = {
752 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 752 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
753 DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), 753 DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
754 }, 754 },
755 }, 755 },
756 { 756 {
@@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
758 .ident = "Hewlett-Packard st5747", 758 .ident = "Hewlett-Packard st5747",
759 .matches = { 759 .matches = {
760 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 760 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
761 DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), 761 DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
762 }, 762 },
763 }, 763 },
764 { 764 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f75806e5bff5..62892a826ede 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -398,6 +398,17 @@ static int init_render_ring(struct intel_ring_buffer *ring)
398 return ret; 398 return ret;
399 } 399 }
400 400
401
402 if (IS_GEN6(dev)) {
403 /* From the Sandybridge PRM, volume 1 part 3, page 24:
404 * "If this bit is set, STCunit will have LRA as replacement
405 * policy. [...] This bit must be reset. LRA replacement
406 * policy is not supported."
407 */
408 I915_WRITE(CACHE_MODE_0,
409 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
410 }
411
401 if (INTEL_INFO(dev)->gen >= 6) { 412 if (INTEL_INFO(dev)->gen >= 6) {
402 I915_WRITE(INSTPM, 413 I915_WRITE(INSTPM,
403 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); 414 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
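The CACHE_MODE_0 and INSTPM writes above both follow the same convention: on these registers the upper 16 bits appear to act as a per-bit write-enable mask, so "bit << 16 | bit" sets a bit and "bit << 16" alone clears it without disturbing its neighbours. A pair of helper macros makes the intent explicit; treat these as a sketch of the convention, not names defined in the tree at this point:

    /* Assumed convention: upper half = write-enable mask, lower half = value. */
    #define MASKED_BIT_ENABLE(b)    (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b)   ((b) << 16)

    /*
     * Usage, following the hunk above:
     *   I915_WRITE(CACHE_MODE_0, MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
     *   I915_WRITE(INSTPM,       MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
     */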
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e36b171c1e7d..ae5e748f39bb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
731 uint16_t width, height; 731 uint16_t width, height;
732 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; 732 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
733 uint16_t h_sync_offset, v_sync_offset; 733 uint16_t h_sync_offset, v_sync_offset;
734 int mode_clock;
734 735
735 width = mode->crtc_hdisplay; 736 width = mode->crtc_hdisplay;
736 height = mode->crtc_vdisplay; 737 height = mode->crtc_vdisplay;
@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
745 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 746 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
746 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 747 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
747 748
748 dtd->part1.clock = mode->clock / 10; 749 mode_clock = mode->clock;
750 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
751 mode_clock /= 10;
752 dtd->part1.clock = mode_clock;
753
749 dtd->part1.h_active = width & 0xff; 754 dtd->part1.h_active = width & 0xff;
750 dtd->part1.h_blank = h_blank_len & 0xff; 755 dtd->part1.h_blank = h_blank_len & 0xff;
751 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | 756 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
996 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1001 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
997 u32 sdvox; 1002 u32 sdvox;
998 struct intel_sdvo_in_out_map in_out; 1003 struct intel_sdvo_in_out_map in_out;
999 struct intel_sdvo_dtd input_dtd; 1004 struct intel_sdvo_dtd input_dtd, output_dtd;
1000 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 1005 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1001 int rate; 1006 int rate;
1002 1007
@@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1021 intel_sdvo->attached_output)) 1026 intel_sdvo->attached_output))
1022 return; 1027 return;
1023 1028
1024 /* We have tried to get input timing in mode_fixup, and filled into 1029 /* lvds has a special fixed output timing. */
1025 * adjusted_mode. 1030 if (intel_sdvo->is_lvds)
1026 */ 1031 intel_sdvo_get_dtd_from_mode(&output_dtd,
1027 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1032 intel_sdvo->sdvo_lvds_fixed_mode);
1028 input_dtd = intel_sdvo->input_dtd; 1033 else
1029 } else { 1034 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1030 /* Set the output timing to the screen */ 1035 (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
1031 if (!intel_sdvo_set_target_output(intel_sdvo,
1032 intel_sdvo->attached_output))
1033 return;
1034
1035 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1036 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1037 }
1038 1036
1039 /* Set the input timing to the screen. Assume always input 0. */ 1037 /* Set the input timing to the screen. Assume always input 0. */
1040 if (!intel_sdvo_set_target_input(intel_sdvo)) 1038 if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1052 !intel_sdvo_set_tv_format(intel_sdvo)) 1050 !intel_sdvo_set_tv_format(intel_sdvo))
1053 return; 1051 return;
1054 1052
1053 /* We have tried to get input timing in mode_fixup, and filled into
1054 * adjusted_mode.
1055 */
1056 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1055 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); 1057 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1056 1058
1057 switch (pixel_multiplier) { 1059 switch (pixel_multiplier) {
@@ -1218,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1218 1220
1219static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) 1221static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1220{ 1222{
1223 struct drm_device *dev = intel_sdvo->base.base.dev;
1221 u8 response[2]; 1224 u8 response[2];
1222 1225
1226 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
1227 * on the line. */
1228 if (IS_I945G(dev) || IS_I945GM(dev))
1229 return false;
1230
1223 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1231 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1224 &response, 2) && response[0]; 1232 &response, 2) && response[0];
1225} 1233}
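The SDVO clock fix divides by "intel_mode_get_pixel_multiplier(mode) ?: 1". The GNU "x ?: y" form evaluates x once and yields it when non-zero, otherwise y, which here guards the division when no pixel multiplier is reported. In isolation:

    /*
     * GNU C extension used throughout the kernel:
     *   a ?: b  ==  a ? a : b, with a evaluated only once.
     */
    static int safe_div(int clock, int multiplier)
    {
            return clock / (multiplier ?: 1);       /* never divides by zero */
    }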
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a760c164..284bd25d5d21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
271 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
272 int has_dsm = 0; 272 int has_dsm = 0;
273 int has_optimus; 273 int has_optimus = 0;
274 int vga_count = 0; 274 int vga_count = 0;
275 bool guid_valid; 275 bool guid_valid;
276 int retval; 276 int retval;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 80963d05b54a..0be4a815e706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios)
6156 6156
6157 /* heuristic: if we ever get a non-zero connector field, assume 6157 /* heuristic: if we ever get a non-zero connector field, assume
6158 * that all the indices are valid and we don't need fake them. 6158 * that all the indices are valid and we don't need fake them.
6159 *
6160 * and, as usual, a blacklist of boards with bad bios data..
6159 */ 6161 */
6160 for (i = 0; i < dcbt->entries; i++) { 6162 if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
6161 if (dcbt->entry[i].connector) 6163 for (i = 0; i < dcbt->entries; i++) {
6162 return; 6164 if (dcbt->entry[i].connector)
6165 return;
6166 }
6163 } 6167 }
6164 6168
6165 /* no useful connector info available, we need to make it up 6169 /* no useful connector info available, we need to make it up
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 59ea1c14eca0..c3de36384522 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -32,7 +32,9 @@ static bool
32hdmi_sor(struct drm_encoder *encoder) 32hdmi_sor(struct drm_encoder *encoder)
33{ 33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
35 if (dev_priv->chipset < 0xa3) 35 if (dev_priv->chipset < 0xa3 ||
36 dev_priv->chipset == 0xaa ||
37 dev_priv->chipset == 0xac)
36 return false; 38 return false;
37 return true; 39 return true;
38} 40}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index e2be95af2e52..77e564667b5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,10 +29,6 @@
29#include "nouveau_i2c.h" 29#include "nouveau_i2c.h"
30#include "nouveau_hw.h" 30#include "nouveau_hw.h"
31 31
32#define T_TIMEOUT 2200000
33#define T_RISEFALL 1000
34#define T_HOLD 5000
35
36static void 32static void
37i2c_drive_scl(void *data, int state) 33i2c_drive_scl(void *data, int state)
38{ 34{
@@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
113 return 0; 109 return 0;
114} 110}
115 111
116static void
117i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
118{
119 udelay((nsec + 500) / 1000);
120}
121
122static bool
123i2c_raise_scl(struct nouveau_i2c_chan *port)
124{
125 u32 timeout = T_TIMEOUT / T_RISEFALL;
126
127 i2c_drive_scl(port, 1);
128 do {
129 i2c_delay(port, T_RISEFALL);
130 } while (!i2c_sense_scl(port) && --timeout);
131
132 return timeout != 0;
133}
134
135static int
136i2c_start(struct nouveau_i2c_chan *port)
137{
138 int ret = 0;
139
140 port->state = i2c_sense_scl(port);
141 port->state |= i2c_sense_sda(port) << 1;
142 if (port->state != 3) {
143 i2c_drive_scl(port, 0);
144 i2c_drive_sda(port, 1);
145 if (!i2c_raise_scl(port))
146 ret = -EBUSY;
147 }
148
149 i2c_drive_sda(port, 0);
150 i2c_delay(port, T_HOLD);
151 i2c_drive_scl(port, 0);
152 i2c_delay(port, T_HOLD);
153 return ret;
154}
155
156static void
157i2c_stop(struct nouveau_i2c_chan *port)
158{
159 i2c_drive_scl(port, 0);
160 i2c_drive_sda(port, 0);
161 i2c_delay(port, T_RISEFALL);
162
163 i2c_drive_scl(port, 1);
164 i2c_delay(port, T_HOLD);
165 i2c_drive_sda(port, 1);
166 i2c_delay(port, T_HOLD);
167}
168
169static int
170i2c_bitw(struct nouveau_i2c_chan *port, int sda)
171{
172 i2c_drive_sda(port, sda);
173 i2c_delay(port, T_RISEFALL);
174
175 if (!i2c_raise_scl(port))
176 return -ETIMEDOUT;
177 i2c_delay(port, T_HOLD);
178
179 i2c_drive_scl(port, 0);
180 i2c_delay(port, T_HOLD);
181 return 0;
182}
183
184static int
185i2c_bitr(struct nouveau_i2c_chan *port)
186{
187 int sda;
188
189 i2c_drive_sda(port, 1);
190 i2c_delay(port, T_RISEFALL);
191
192 if (!i2c_raise_scl(port))
193 return -ETIMEDOUT;
194 i2c_delay(port, T_HOLD);
195
196 sda = i2c_sense_sda(port);
197
198 i2c_drive_scl(port, 0);
199 i2c_delay(port, T_HOLD);
200 return sda;
201}
202
203static int
204i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
205{
206 int i, bit;
207
208 *byte = 0;
209 for (i = 7; i >= 0; i--) {
210 bit = i2c_bitr(port);
211 if (bit < 0)
212 return bit;
213 *byte |= bit << i;
214 }
215
216 return i2c_bitw(port, last ? 1 : 0);
217}
218
219static int
220i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
221{
222 int i, ret;
223 for (i = 7; i >= 0; i--) {
224 ret = i2c_bitw(port, !!(byte & (1 << i)));
225 if (ret < 0)
226 return ret;
227 }
228
229 ret = i2c_bitr(port);
230 if (ret == 1) /* nack */
231 ret = -EIO;
232 return ret;
233}
234
235static int
236i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
237{
238 u32 addr = msg->addr << 1;
239 if (msg->flags & I2C_M_RD)
240 addr |= 1;
241 return i2c_put_byte(port, addr);
242}
243
244static int
245i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
246{
247 struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
248 struct i2c_msg *msg = msgs;
249 int ret = 0, mcnt = num;
250
251 while (!ret && mcnt--) {
252 u8 remaining = msg->len;
253 u8 *ptr = msg->buf;
254
255 ret = i2c_start(port);
256 if (ret == 0)
257 ret = i2c_addr(port, msg);
258
259 if (msg->flags & I2C_M_RD) {
260 while (!ret && remaining--)
261 ret = i2c_get_byte(port, ptr++, !remaining);
262 } else {
263 while (!ret && remaining--)
264 ret = i2c_put_byte(port, *ptr++);
265 }
266
267 msg++;
268 }
269
270 i2c_stop(port);
271 return (ret < 0) ? ret : num;
272}
273
274static u32
275i2c_bit_func(struct i2c_adapter *adap)
276{
277 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
278}
279
280const struct i2c_algorithm nouveau_i2c_bit_algo = {
281 .master_xfer = i2c_bit_xfer,
282 .functionality = i2c_bit_func
283};
284
285static const uint32_t nv50_i2c_port[] = { 112static const uint32_t nv50_i2c_port[] = {
286 0x00e138, 0x00e150, 0x00e168, 0x00e180, 113 0x00e138, 0x00e150, 0x00e168, 0x00e180,
287 0x00e254, 0x00e274, 0x00e764, 0x00e780, 114 0x00e254, 0x00e274, 0x00e764, 0x00e780,
@@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
384 case 0: /* NV04:NV50 */ 211 case 0: /* NV04:NV50 */
385 port->drive = entry[0]; 212 port->drive = entry[0];
386 port->sense = entry[1]; 213 port->sense = entry[1];
387 port->adapter.algo = &nouveau_i2c_bit_algo;
388 break; 214 break;
389 case 4: /* NV4E */ 215 case 4: /* NV4E */
390 port->drive = 0x600800 + entry[1]; 216 port->drive = 0x600800 + entry[1];
391 port->sense = port->drive; 217 port->sense = port->drive;
392 port->adapter.algo = &nouveau_i2c_bit_algo;
393 break; 218 break;
394 case 5: /* NV50- */ 219 case 5: /* NV50- */
395 port->drive = entry[0] & 0x0f; 220 port->drive = entry[0] & 0x0f;
@@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
402 port->drive = 0x00d014 + (port->drive * 0x20); 227 port->drive = 0x00d014 + (port->drive * 0x20);
403 port->sense = port->drive; 228 port->sense = port->drive;
404 } 229 }
405 port->adapter.algo = &nouveau_i2c_bit_algo;
406 break; 230 break;
407 case 6: /* NV50- DP AUX */ 231 case 6: /* NV50- DP AUX */
408 port->drive = entry[0]; 232 port->drive = entry[0];
@@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
413 break; 237 break;
414 } 238 }
415 239
416 if (!port->adapter.algo) { 240 if (!port->adapter.algo && !port->drive) {
417 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n", 241 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
418 i, port->type, port->drive, port->sense); 242 i, port->type, port->drive, port->sense);
419 kfree(port); 243 kfree(port);
@@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
429 port->dcb = ROM32(entry[0]); 253 port->dcb = ROM32(entry[0]);
430 i2c_set_adapdata(&port->adapter, i2c); 254 i2c_set_adapdata(&port->adapter, i2c);
431 255
432 ret = i2c_add_adapter(&port->adapter); 256 if (port->adapter.algo != &nouveau_dp_i2c_algo) {
257 port->adapter.algo_data = &port->bit;
258 port->bit.udelay = 10;
259 port->bit.timeout = usecs_to_jiffies(2200);
260 port->bit.data = port;
261 port->bit.setsda = i2c_drive_sda;
262 port->bit.setscl = i2c_drive_scl;
263 port->bit.getsda = i2c_sense_sda;
264 port->bit.getscl = i2c_sense_scl;
265
266 i2c_drive_scl(port, 0);
267 i2c_drive_sda(port, 1);
268 i2c_drive_scl(port, 1);
269
270 ret = i2c_bit_add_bus(&port->adapter);
271 } else {
272 port->adapter.algo = &nouveau_dp_i2c_algo;
273 ret = i2c_add_adapter(&port->adapter);
274 }
275
433 if (ret) { 276 if (ret) {
434 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret); 277 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
435 kfree(port); 278 kfree(port);
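The nouveau change above deletes a private bit-banging state machine and plugs the existing line drivers into the generic i2c-algo-bit layer instead: fill a struct i2c_algo_bit_data with the set/get callbacks and timing, point adapter.algo_data at it, and register with i2c_bit_add_bus(). A minimal sketch of that wiring, with stub callbacks standing in for the port-specific drive/sense helpers:

    #include <linux/module.h>
    #include <linux/i2c.h>
    #include <linux/i2c-algo-bit.h>
    #include <linux/jiffies.h>

    struct example_port {
            struct i2c_adapter adapter;
            struct i2c_algo_bit_data bit;
            /* register offsets etc. would live here */
    };

    /* Line drivers; the real bodies are hardware-specific and omitted. */
    static void example_setscl(void *data, int state) { }
    static void example_setsda(void *data, int state) { }
    static int example_getscl(void *data) { return 1; }
    static int example_getsda(void *data) { return 1; }

    static int example_register(struct example_port *port)
    {
            port->adapter.owner = THIS_MODULE;
            snprintf(port->adapter.name, sizeof(port->adapter.name), "example-i2c");

            port->adapter.algo_data = &port->bit;
            port->bit.data    = port;
            port->bit.setsda  = example_setsda;
            port->bit.setscl  = example_setscl;
            port->bit.getsda  = example_getsda;
            port->bit.getscl  = example_getscl;
            port->bit.udelay  = 10;                         /* half clock period, us */
            port->bit.timeout = usecs_to_jiffies(2200);     /* clock-stretch limit */

            /* Installs the shared bit-bang algorithm on the adapter. */
            return i2c_bit_add_bus(&port->adapter);
    }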
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 4d2e4e9031be..1d083893a4d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -34,6 +34,7 @@
34struct nouveau_i2c_chan { 34struct nouveau_i2c_chan {
35 struct i2c_adapter adapter; 35 struct i2c_adapter adapter;
36 struct drm_device *dev; 36 struct drm_device *dev;
37 struct i2c_algo_bit_data bit;
37 struct list_head head; 38 struct list_head head;
38 u8 index; 39 u8 index;
39 u8 type; 40 u8 type;
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3fcf0af..9d79180069df 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
65 if (line < 10) { 65 if (line < 10) {
66 line = (line - 2) * 4; 66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT; 67 reg = NV_PCRTC_GPIO_EXT;
68 mask = 0x00000003 << ((line - 2) * 4); 68 mask = 0x00000003;
69 data = (dir << 1) | out; 69 data = (dir << 1) | out;
70 } else 70 } else
71 if (line < 14) { 71 if (line < 14) {
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf55038fd92..f704e942372e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
54 nvc0_mfb_subp_isr(dev, unit, subp); 54 nvc0_mfb_subp_isr(dev, unit, subp);
55 units &= ~(1 << unit); 55 units &= ~(1 << unit);
56 } 56 }
57
58 /* we do something horribly wrong and upset PMFB a lot, so mask off
59 * interrupts from it after the first one until it's fixed
60 */
61 nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
57} 62}
58 63
59static void 64static void
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b5ff1f7b6f7e..af1054f8202a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
575 575
576 if (rdev->family < CHIP_RV770) 576 if (rdev->family < CHIP_RV770)
577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
578 /* use frac fb div on APUs */
579 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
580 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
578 } else { 581 } else {
579 pll->flags |= RADEON_PLL_LEGACY; 582 pll->flags |= RADEON_PLL_LEGACY;
580 583
@@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
955 break; 958 break;
956 } 959 }
957 960
958 if (radeon_encoder->active_device & 961 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
959 (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 962 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
960 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 963 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
961 struct drm_connector *connector = 964 struct drm_connector *connector =
962 radeon_get_connector_for_encoder(encoder); 965 radeon_get_connector_for_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea7df16e2f84..5992502a3448 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
241 rdev->wb.use_event = true; 241 rdev->wb.use_event = true;
242 } 242 }
243 } 243 }
244 /* always use writeback/events on NI */ 244 /* always use writeback/events on NI, APUs */
245 if (ASIC_IS_DCE5(rdev)) { 245 if (rdev->family >= CHIP_PALM) {
246 rdev->wb.enabled = true; 246 rdev->wb.enabled = true;
247 rdev->wb.use_event = true; 247 rdev->wb.use_event = true;
248 } 248 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8086c96e0b06..0a1d4bd65edc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
533 radeon_legacy_init_crtc(dev, radeon_crtc); 533 radeon_legacy_init_crtc(dev, radeon_crtc);
534} 534}
535 535
536static const char *encoder_names[36] = { 536static const char *encoder_names[37] = {
537 "NONE", 537 "NONE",
538 "INTERNAL_LVDS", 538 "INTERNAL_LVDS",
539 "INTERNAL_TMDS1", 539 "INTERNAL_TMDS1",
@@ -570,6 +570,7 @@ static const char *encoder_names[36] = {
570 "INTERNAL_UNIPHY2", 570 "INTERNAL_UNIPHY2",
571 "NUTMEG", 571 "NUTMEG",
572 "TRAVIS", 572 "TRAVIS",
573 "INTERNAL_VCE"
573}; 574};
574 575
575static const char *connector_names[15] = { 576static const char *connector_names[15] = {
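The radeon fix bumps encoder_names[] from 36 to 37 entries to make room for "INTERNAL_VCE"; a hard-coded size is easy to get out of sync with the initializer. Letting the compiler size the array and bounds-checking with ARRAY_SIZE() sidesteps the problem, for example:

    #include <linux/kernel.h>       /* ARRAY_SIZE */

    static const char *encoder_names[] = {
            "NONE",
            "INTERNAL_LVDS",
            /* ... */
            "INTERNAL_VCE",
    };

    static const char *encoder_name(unsigned int id)
    {
            return id < ARRAY_SIZE(encoder_names) ? encoder_names[id] : "UNKNOWN";
    }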
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a3d033252995..ffddcba32af6 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -34,7 +34,7 @@ config HID
34config HID_BATTERY_STRENGTH 34config HID_BATTERY_STRENGTH
35 bool 35 bool
36 depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY 36 depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
37 default y 37 default n
38 38
39config HIDRAW 39config HIDRAW
40 bool "/dev/hidraw raw HID device support" 40 bool "/dev/hidraw raw HID device support"
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index de47039c708c..9f85f827607f 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -62,7 +62,7 @@ static int tivo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
62 62
63static const struct hid_device_id tivo_devices[] = { 63static const struct hid_device_id tivo_devices[] = {
64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
65 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
67 { } 67 { }
68}; 68};
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 88a050df2389..3ad91f6447d8 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -123,7 +123,7 @@ struct hsc_client_data {
123static unsigned int hsc_major; 123static unsigned int hsc_major;
124/* Maximum buffer size that hsi_char will accept from userspace */ 124/* Maximum buffer size that hsi_char will accept from userspace */
125static unsigned int max_data_size = 0x1000; 125static unsigned int max_data_size = 0x1000;
126module_param(max_data_size, uint, S_IRUSR | S_IWUSR); 126module_param(max_data_size, uint, 0);
127MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); 127MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
128 128
129static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, 129static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 4e2d79b79334..2d58f939d27f 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -21,26 +21,13 @@
21 */ 21 */
22#include <linux/hsi/hsi.h> 22#include <linux/hsi/hsi.h>
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/rwsem.h>
25#include <linux/list.h> 24#include <linux/list.h>
26#include <linux/spinlock.h>
27#include <linux/kobject.h> 25#include <linux/kobject.h>
28#include <linux/slab.h> 26#include <linux/slab.h>
29#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/notifier.h>
30#include "hsi_core.h" 29#include "hsi_core.h"
31 30
32static struct device_type hsi_ctrl = {
33 .name = "hsi_controller",
34};
35
36static struct device_type hsi_cl = {
37 .name = "hsi_client",
38};
39
40static struct device_type hsi_port = {
41 .name = "hsi_port",
42};
43
44static ssize_t modalias_show(struct device *dev, 31static ssize_t modalias_show(struct device *dev,
45 struct device_attribute *a __maybe_unused, char *buf) 32 struct device_attribute *a __maybe_unused, char *buf)
46{ 33{
@@ -54,8 +41,7 @@ static struct device_attribute hsi_bus_dev_attrs[] = {
54 41
55static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 42static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
56{ 43{
57 if (dev->type == &hsi_cl) 44 add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
58 add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
59 45
60 return 0; 46 return 0;
61} 47}
@@ -80,12 +66,10 @@ static void hsi_client_release(struct device *dev)
80static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) 66static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
81{ 67{
82 struct hsi_client *cl; 68 struct hsi_client *cl;
83 unsigned long flags;
84 69
85 cl = kzalloc(sizeof(*cl), GFP_KERNEL); 70 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
86 if (!cl) 71 if (!cl)
87 return; 72 return;
88 cl->device.type = &hsi_cl;
89 cl->tx_cfg = info->tx_cfg; 73 cl->tx_cfg = info->tx_cfg;
90 cl->rx_cfg = info->rx_cfg; 74 cl->rx_cfg = info->rx_cfg;
91 cl->device.bus = &hsi_bus_type; 75 cl->device.bus = &hsi_bus_type;
@@ -93,14 +77,11 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
93 cl->device.release = hsi_client_release; 77 cl->device.release = hsi_client_release;
94 dev_set_name(&cl->device, info->name); 78 dev_set_name(&cl->device, info->name);
95 cl->device.platform_data = info->platform_data; 79 cl->device.platform_data = info->platform_data;
96 spin_lock_irqsave(&port->clock, flags);
97 list_add_tail(&cl->link, &port->clients);
98 spin_unlock_irqrestore(&port->clock, flags);
99 if (info->archdata) 80 if (info->archdata)
100 cl->device.archdata = *info->archdata; 81 cl->device.archdata = *info->archdata;
101 if (device_register(&cl->device) < 0) { 82 if (device_register(&cl->device) < 0) {
102 pr_err("hsi: failed to register client: %s\n", info->name); 83 pr_err("hsi: failed to register client: %s\n", info->name);
103 kfree(cl); 84 put_device(&cl->device);
104 } 85 }
105} 86}
106 87
@@ -120,13 +101,6 @@ static void hsi_scan_board_info(struct hsi_controller *hsi)
120 101
121static int hsi_remove_client(struct device *dev, void *data __maybe_unused) 102static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
122{ 103{
123 struct hsi_client *cl = to_hsi_client(dev);
124 struct hsi_port *port = to_hsi_port(dev->parent);
125 unsigned long flags;
126
127 spin_lock_irqsave(&port->clock, flags);
128 list_del(&cl->link);
129 spin_unlock_irqrestore(&port->clock, flags);
130 device_unregister(dev); 104 device_unregister(dev);
131 105
132 return 0; 106 return 0;
@@ -140,12 +114,17 @@ static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
140 return 0; 114 return 0;
141} 115}
142 116
143static void hsi_controller_release(struct device *dev __maybe_unused) 117static void hsi_controller_release(struct device *dev)
144{ 118{
119 struct hsi_controller *hsi = to_hsi_controller(dev);
120
121 kfree(hsi->port);
122 kfree(hsi);
145} 123}
146 124
147static void hsi_port_release(struct device *dev __maybe_unused) 125static void hsi_port_release(struct device *dev)
148{ 126{
127 kfree(to_hsi_port(dev));
149} 128}
150 129
151/** 130/**
@@ -170,20 +149,12 @@ int hsi_register_controller(struct hsi_controller *hsi)
170 unsigned int i; 149 unsigned int i;
171 int err; 150 int err;
172 151
173 hsi->device.type = &hsi_ctrl; 152 err = device_add(&hsi->device);
174 hsi->device.bus = &hsi_bus_type;
175 hsi->device.release = hsi_controller_release;
176 err = device_register(&hsi->device);
177 if (err < 0) 153 if (err < 0)
178 return err; 154 return err;
179 for (i = 0; i < hsi->num_ports; i++) { 155 for (i = 0; i < hsi->num_ports; i++) {
180 hsi->port[i].device.parent = &hsi->device; 156 hsi->port[i]->device.parent = &hsi->device;
181 hsi->port[i].device.bus = &hsi_bus_type; 157 err = device_add(&hsi->port[i]->device);
182 hsi->port[i].device.release = hsi_port_release;
183 hsi->port[i].device.type = &hsi_port;
184 INIT_LIST_HEAD(&hsi->port[i].clients);
185 spin_lock_init(&hsi->port[i].clock);
186 err = device_register(&hsi->port[i].device);
187 if (err < 0) 158 if (err < 0)
188 goto out; 159 goto out;
189 } 160 }
@@ -192,7 +163,9 @@ int hsi_register_controller(struct hsi_controller *hsi)
192 163
193 return 0; 164 return 0;
194out: 165out:
195 hsi_unregister_controller(hsi); 166 while (i-- > 0)
167 device_del(&hsi->port[i]->device);
168 device_del(&hsi->device);
196 169
197 return err; 170 return err;
198} 171}
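The controller registration above switches from device_register() to the split device_initialize()/device_add() pattern: the structures carry a refcount from the moment they are allocated, registration only does device_add(), and every failure path unwinds with device_del()/put_device() so the release callbacks (which now do the kfree) run exactly once. Reduced to a sketch:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct example_ctrl {
            struct device dev;
    };

    static void example_release(struct device *dev)
    {
            /* Runs when the last reference is dropped, registered or not. */
            kfree(container_of(dev, struct example_ctrl, dev));
    }

    static struct example_ctrl *example_alloc(void)
    {
            struct example_ctrl *c = kzalloc(sizeof(*c), GFP_KERNEL);

            if (!c)
                    return NULL;
            c->dev.release = example_release;
            device_initialize(&c->dev);     /* refcount lives from here on */
            dev_set_name(&c->dev, "example0");
            return c;
    }

    static int example_register(struct example_ctrl *c)
    {
            return device_add(&c->dev);     /* on error the caller put_device()s */
    }

    static void example_destroy(struct example_ctrl *c)
    {
            if (c)
                    put_device(&c->dev);    /* frees via example_release() */
    }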
@@ -223,6 +196,29 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
223} 196}
224 197
225/** 198/**
199 * hsi_put_controller - Free an HSI controller
200 *
201 * @hsi: Pointer to the HSI controller to freed
202 *
203 * HSI controller drivers should only use this function if they need
204 * to free their allocated hsi_controller structures before a successful
205 * call to hsi_register_controller. Other use is not allowed.
206 */
207void hsi_put_controller(struct hsi_controller *hsi)
208{
209 unsigned int i;
210
211 if (!hsi)
212 return;
213
214 for (i = 0; i < hsi->num_ports; i++)
215 if (hsi->port && hsi->port[i])
216 put_device(&hsi->port[i]->device);
217 put_device(&hsi->device);
218}
219EXPORT_SYMBOL_GPL(hsi_put_controller);
220
221/**
226 * hsi_alloc_controller - Allocate an HSI controller and its ports 222 * hsi_alloc_controller - Allocate an HSI controller and its ports
227 * @n_ports: Number of ports on the HSI controller 223 * @n_ports: Number of ports on the HSI controller
228 * @flags: Kernel allocation flags 224 * @flags: Kernel allocation flags
@@ -232,55 +228,52 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
232struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) 228struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
233{ 229{
234 struct hsi_controller *hsi; 230 struct hsi_controller *hsi;
235 struct hsi_port *port; 231 struct hsi_port **port;
236 unsigned int i; 232 unsigned int i;
237 233
238 if (!n_ports) 234 if (!n_ports)
239 return NULL; 235 return NULL;
240 236
241 port = kzalloc(sizeof(*port)*n_ports, flags);
242 if (!port)
243 return NULL;
244 hsi = kzalloc(sizeof(*hsi), flags); 237 hsi = kzalloc(sizeof(*hsi), flags);
245 if (!hsi) 238 if (!hsi)
246 goto out; 239 return NULL;
247 for (i = 0; i < n_ports; i++) { 240 port = kzalloc(sizeof(*port)*n_ports, flags);
248 dev_set_name(&port[i].device, "port%d", i); 241 if (!port) {
249 port[i].num = i; 242 kfree(hsi);
250 port[i].async = hsi_dummy_msg; 243 return NULL;
251 port[i].setup = hsi_dummy_cl;
252 port[i].flush = hsi_dummy_cl;
253 port[i].start_tx = hsi_dummy_cl;
254 port[i].stop_tx = hsi_dummy_cl;
255 port[i].release = hsi_dummy_cl;
256 mutex_init(&port[i].lock);
257 } 244 }
258 hsi->num_ports = n_ports; 245 hsi->num_ports = n_ports;
259 hsi->port = port; 246 hsi->port = port;
247 hsi->device.release = hsi_controller_release;
248 device_initialize(&hsi->device);
249
250 for (i = 0; i < n_ports; i++) {
251 port[i] = kzalloc(sizeof(**port), flags);
252 if (port[i] == NULL)
253 goto out;
254 port[i]->num = i;
255 port[i]->async = hsi_dummy_msg;
256 port[i]->setup = hsi_dummy_cl;
257 port[i]->flush = hsi_dummy_cl;
258 port[i]->start_tx = hsi_dummy_cl;
259 port[i]->stop_tx = hsi_dummy_cl;
260 port[i]->release = hsi_dummy_cl;
261 mutex_init(&port[i]->lock);
262 ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head);
263 dev_set_name(&port[i]->device, "port%d", i);
264 hsi->port[i]->device.release = hsi_port_release;
265 device_initialize(&hsi->port[i]->device);
266 }
260 267
261 return hsi; 268 return hsi;
262out: 269out:
263 kfree(port); 270 hsi_put_controller(hsi);
264 271
265 return NULL; 272 return NULL;
266} 273}
267EXPORT_SYMBOL_GPL(hsi_alloc_controller); 274EXPORT_SYMBOL_GPL(hsi_alloc_controller);
268 275
269/** 276/**
270 * hsi_free_controller - Free an HSI controller
271 * @hsi: Pointer to HSI controller
272 */
273void hsi_free_controller(struct hsi_controller *hsi)
274{
275 if (!hsi)
276 return;
277
278 kfree(hsi->port);
279 kfree(hsi);
280}
281EXPORT_SYMBOL_GPL(hsi_free_controller);
282
283/**
284 * hsi_free_msg - Free an HSI message 277 * hsi_free_msg - Free an HSI message
285 * @msg: Pointer to the HSI message 278 * @msg: Pointer to the HSI message
286 * 279 *
@@ -414,37 +407,67 @@ void hsi_release_port(struct hsi_client *cl)
414} 407}
415EXPORT_SYMBOL_GPL(hsi_release_port); 408EXPORT_SYMBOL_GPL(hsi_release_port);
416 409
417static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused) 410static int hsi_event_notifier_call(struct notifier_block *nb,
411 unsigned long event, void *data __maybe_unused)
418{ 412{
419 if (cl->hsi_start_rx) 413 struct hsi_client *cl = container_of(nb, struct hsi_client, nb);
420 (*cl->hsi_start_rx)(cl); 414
415 (*cl->ehandler)(cl, event);
421 416
422 return 0; 417 return 0;
423} 418}
424 419
425static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused) 420/**
421 * hsi_register_port_event - Register a client to receive port events
422 * @cl: HSI client that wants to receive port events
423 * @cb: Event handler callback
424 *
425 * Clients should register a callback to be able to receive
426 * events from the ports. Registration should happen after
427 * claiming the port.
428 * The handler can be called in interrupt context.
429 *
430 * Returns -errno on error, or 0 on success.
431 */
432int hsi_register_port_event(struct hsi_client *cl,
433 void (*handler)(struct hsi_client *, unsigned long))
426{ 434{
427 if (cl->hsi_stop_rx) 435 struct hsi_port *port = hsi_get_port(cl);
428 (*cl->hsi_stop_rx)(cl);
429 436
430 return 0; 437 if (!handler || cl->ehandler)
438 return -EINVAL;
439 if (!hsi_port_claimed(cl))
440 return -EACCES;
441 cl->ehandler = handler;
442 cl->nb.notifier_call = hsi_event_notifier_call;
443
444 return atomic_notifier_chain_register(&port->n_head, &cl->nb);
431} 445}
446EXPORT_SYMBOL_GPL(hsi_register_port_event);
432 447
433static int hsi_port_for_each_client(struct hsi_port *port, void *data, 448/**
434 int (*fn)(struct hsi_client *cl, void *data)) 449 * hsi_unregister_port_event - Stop receiving port events for a client
450 * @cl: HSI client that wants to stop receiving port events
451 *
452 * Clients should call this function before releasing their associated
453 * port.
454 *
455 * Returns -errno on error, or 0 on success.
456 */
457int hsi_unregister_port_event(struct hsi_client *cl)
435{ 458{
436 struct hsi_client *cl; 459 struct hsi_port *port = hsi_get_port(cl);
460 int err;
437 461
438 spin_lock(&port->clock); 462 WARN_ON(!hsi_port_claimed(cl));
439 list_for_each_entry(cl, &port->clients, link) {
440 spin_unlock(&port->clock);
441 (*fn)(cl, data);
442 spin_lock(&port->clock);
443 }
444 spin_unlock(&port->clock);
445 463
446 return 0; 464 err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb);
465 if (!err)
466 cl->ehandler = NULL;
467
468 return err;
447} 469}
470EXPORT_SYMBOL_GPL(hsi_unregister_port_event);
448 471
449/** 472/**
450 * hsi_event -Notifies clients about port events 473 * hsi_event -Notifies clients about port events
@@ -458,22 +481,12 @@ static int hsi_port_for_each_client(struct hsi_port *port, void *data,
458 * Events: 481 * Events:
459 * HSI_EVENT_START_RX - Incoming wake line high 482 * HSI_EVENT_START_RX - Incoming wake line high
460 * HSI_EVENT_STOP_RX - Incoming wake line down 483 * HSI_EVENT_STOP_RX - Incoming wake line down
484 *
485 * Returns -errno on error, or 0 on success.
461 */ 486 */
462void hsi_event(struct hsi_port *port, unsigned int event) 487int hsi_event(struct hsi_port *port, unsigned long event)
463{ 488{
464 int (*fn)(struct hsi_client *cl, void *data); 489 return atomic_notifier_call_chain(&port->n_head, event, NULL);
465
466 switch (event) {
467 case HSI_EVENT_START_RX:
468 fn = hsi_start_rx;
469 break;
470 case HSI_EVENT_STOP_RX:
471 fn = hsi_stop_rx;
472 break;
473 default:
474 return;
475 }
476 hsi_port_for_each_client(port, NULL, fn);
477} 490}
478EXPORT_SYMBOL_GPL(hsi_event); 491EXPORT_SYMBOL_GPL(hsi_event);
479 492
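The port-event rework above replaces the spinlock-protected client list and the per-client hsi_start_rx/hsi_stop_rx hooks with a standard atomic notifier chain: each client registers a notifier_block, and hsi_event() collapses into a single atomic_notifier_call_chain() that is safe to call from interrupt context. The core of that pattern, in isolation:

    #include <linux/notifier.h>
    #include <linux/kernel.h>

    struct example_client {
            struct notifier_block nb;
            void (*handler)(struct example_client *cl, unsigned long event);
    };

    static ATOMIC_NOTIFIER_HEAD(example_events);

    static int example_notifier_call(struct notifier_block *nb,
                                     unsigned long event, void *data)
    {
            struct example_client *cl = container_of(nb, struct example_client, nb);

            cl->handler(cl, event);
            return NOTIFY_OK;
    }

    static int example_register(struct example_client *cl)
    {
            cl->nb.notifier_call = example_notifier_call;
            return atomic_notifier_chain_register(&example_events, &cl->nb);
    }

    static int example_unregister(struct example_client *cl)
    {
            return atomic_notifier_chain_unregister(&example_events, &cl->nb);
    }

    /* Producer side: safe from atomic context, fans out to all registered clients. */
    static int example_event(unsigned long event)
    {
            return atomic_notifier_call_chain(&example_events, event, NULL);
    }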
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index ce43642ef03e..f85ce70d9677 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -47,7 +47,7 @@ struct ad7314_data {
47 u16 rx ____cacheline_aligned; 47 u16 rx ____cacheline_aligned;
48}; 48};
49 49
50static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) 50static int ad7314_spi_read(struct ad7314_data *chip)
51{ 51{
52 int ret; 52 int ret;
53 53
@@ -57,9 +57,7 @@ static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
57 return ret; 57 return ret;
58 } 58 }
59 59
60 *data = be16_to_cpu(chip->rx); 60 return be16_to_cpu(chip->rx);
61
62 return ret;
63} 61}
64 62
65static ssize_t ad7314_show_temperature(struct device *dev, 63static ssize_t ad7314_show_temperature(struct device *dev,
@@ -70,12 +68,12 @@ static ssize_t ad7314_show_temperature(struct device *dev,
70 s16 data; 68 s16 data;
71 int ret; 69 int ret;
72 70
73 ret = ad7314_spi_read(chip, &data); 71 ret = ad7314_spi_read(chip);
74 if (ret < 0) 72 if (ret < 0)
75 return ret; 73 return ret;
76 switch (spi_get_device_id(chip->spi_dev)->driver_data) { 74 switch (spi_get_device_id(chip->spi_dev)->driver_data) {
77 case ad7314: 75 case ad7314:
78 data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; 76 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
79 data = (data << 6) >> 6; 77 data = (data << 6) >> 6;
80 78
81 return sprintf(buf, "%d\n", 250 * data); 79 return sprintf(buf, "%d\n", 250 * data);
@@ -86,7 +84,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
86 * with a sign bit - which is a 14 bit 2's complement 84 * with a sign bit - which is a 14 bit 2's complement
87 * register. 1lsb - 31.25 milli degrees centigrade 85 * register. 1lsb - 31.25 milli degrees centigrade
88 */ 86 */
89 data &= ADT7301_TEMP_MASK; 87 data = ret & ADT7301_TEMP_MASK;
90 data = (data << 2) >> 2; 88 data = (data << 2) >> 2;
91 89
92 return sprintf(buf, "%d\n", 90 return sprintf(buf, "%d\n",
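ad7314_spi_read() now returns the raw reading directly, or a negative errno, instead of filling an out-parameter. This is the usual kernel idiom whenever the value and the error code can share a single int; the caller keeps the full return and masks from there. A small sketch of the idiom (the masks here are placeholders, not the AD7314 register layout):

    #include <linux/types.h>
    #include <linux/errno.h>

    /*
     * One int carries either the non-negative value or a negative errno;
     * no out-parameter is needed while the value fits.
     */
    static int example_read_raw(int bus_status, u16 raw)
    {
            if (bus_status < 0)
                    return bus_status;      /* e.g. -EIO from the transfer */
            return raw;                     /* 0..65535 always fits in int */
    }

    static int example_get_temp(int bus_status, u16 raw)
    {
            int ret = example_read_raw(bus_status, raw);

            if (ret < 0)
                    return ret;
            return ret & 0x3fff;            /* mask payload bits afterwards */
    }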
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 7765e4f74ec5..1958f03efd7a 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -59,14 +59,11 @@ struct ads1015_data {
59 struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; 59 struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
60}; 60};
61 61
62static int ads1015_read_value(struct i2c_client *client, unsigned int channel, 62static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
63 int *value)
64{ 63{
65 u16 config; 64 u16 config;
66 s16 conversion;
67 struct ads1015_data *data = i2c_get_clientdata(client); 65 struct ads1015_data *data = i2c_get_clientdata(client);
68 unsigned int pga = data->channel_data[channel].pga; 66 unsigned int pga = data->channel_data[channel].pga;
69 int fullscale;
70 unsigned int data_rate = data->channel_data[channel].data_rate; 67 unsigned int data_rate = data->channel_data[channel].data_rate;
71 unsigned int conversion_time_ms; 68 unsigned int conversion_time_ms;
72 int res; 69 int res;
@@ -78,7 +75,6 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
78 if (res < 0) 75 if (res < 0)
79 goto err_unlock; 76 goto err_unlock;
80 config = res; 77 config = res;
81 fullscale = fullscale_table[pga];
82 conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]); 78 conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
83 79
84 /* setup and start single conversion */ 80 /* setup and start single conversion */
@@ -105,33 +101,36 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
105 } 101 }
106 102
107 res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION); 103 res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
108 if (res < 0)
109 goto err_unlock;
110 conversion = res;
111
112 mutex_unlock(&data->update_lock);
113
114 *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0);
115
116 return 0;
117 104
118err_unlock: 105err_unlock:
119 mutex_unlock(&data->update_lock); 106 mutex_unlock(&data->update_lock);
120 return res; 107 return res;
121} 108}
122 109
110static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
111 s16 reg)
112{
113 struct ads1015_data *data = i2c_get_clientdata(client);
114 unsigned int pga = data->channel_data[channel].pga;
115 int fullscale = fullscale_table[pga];
116
117 return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0);
118}
119
123/* sysfs callback function */ 120/* sysfs callback function */
124static ssize_t show_in(struct device *dev, struct device_attribute *da, 121static ssize_t show_in(struct device *dev, struct device_attribute *da,
125 char *buf) 122 char *buf)
126{ 123{
127 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 124 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
128 struct i2c_client *client = to_i2c_client(dev); 125 struct i2c_client *client = to_i2c_client(dev);
129 int in;
130 int res; 126 int res;
127 int index = attr->index;
131 128
132 res = ads1015_read_value(client, attr->index, &in); 129 res = ads1015_read_adc(client, index);
130 if (res < 0)
131 return res;
133 132
134 return (res < 0) ? res : sprintf(buf, "%d\n", in); 133 return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
135} 134}
136 135
137static const struct sensor_device_attribute ads1015_in[] = { 136static const struct sensor_device_attribute ads1015_in[] = {
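
The refactor above splits reading the conversion register from scaling it to millivolts. A quick worked example of that scaling step, assuming a PGA full scale of 2048 mV and taking 0x7ff0 as the full-scale positive code of the left-justified 12-bit result (both values are assumptions for illustration):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	int fullscale = 2048;		/* assumed fullscale_table[pga], in mV */
	int reg = 0x3ff8;		/* half of the 0x7ff0 full-scale code */

	/* same arithmetic as ads1015_reg_to_mv(): prints 1024 (mV) */
	printf("%d\n", DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0));
	return 0;
}
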
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0d3141fbbc20..b9d512331ed4 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); 52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
53 53
54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
55#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 55#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
56#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 56#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
709 709
710 indx = TO_ATTR_NO(cpu); 710 indx = TO_ATTR_NO(cpu);
711 711
712 /* The core id is too big, just return */
713 if (indx > MAX_CORE_DATA - 1)
714 return;
715
712 if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) 716 if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
713 coretemp_remove_core(pdata, &pdev->dev, indx); 717 coretemp_remove_core(pdata, &pdev->dev, indx);
714 718
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index b7494af1e4a9..e8e18cab1fb8 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
122 return true; 122 return true;
123} 123}
124 124
125/*
126 * Newer BKDG versions have an updated recommendation on how to properly
127 * initialize the running average range (was: 0xE, now: 0x9). This avoids
128 * counter saturations resulting in bogus power readings.
129 * We correct this value ourselves to cope with older BIOSes.
130 */
131static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
132 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
133 { 0 }
134};
135
136static void __devinit tweak_runavg_range(struct pci_dev *pdev)
137{
138 u32 val;
139
140 /*
141 * let this quirk apply only to the current version of the
142 * northbridge, since future versions may change the behavior
143 */
144 if (!pci_match_id(affected_device, pdev))
145 return;
146
147 pci_bus_read_config_dword(pdev->bus,
148 PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
149 REG_TDP_RUNNING_AVERAGE, &val);
150 if ((val & 0xf) != 0xe)
151 return;
152
153 val &= ~0xf;
154 val |= 0x9;
155 pci_bus_write_config_dword(pdev->bus,
156 PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
157 REG_TDP_RUNNING_AVERAGE, val);
158}
159
125static void __devinit fam15h_power_init_data(struct pci_dev *f4, 160static void __devinit fam15h_power_init_data(struct pci_dev *f4,
126 struct fam15h_power_data *data) 161 struct fam15h_power_data *data)
127{ 162{
@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
155 struct device *dev; 190 struct device *dev;
156 int err; 191 int err;
157 192
193 /*
194 * though we ignore every other northbridge, we still have to
195 * do the tweaking on _each_ node in MCM processors as the counters
196 * are working hand-in-hand
197 */
198 tweak_runavg_range(pdev);
199
158 if (!fam15h_power_is_internal_node0(pdev)) { 200 if (!fam15h_power_is_internal_node0(pdev)) {
159 err = -ENODEV; 201 err = -ENODEV;
160 goto exit; 202 goto exit;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index f086131cb1c7..c811289b61e2 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -324,7 +324,7 @@ static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
324{ 324{
325 long ret; 325 long ret;
326 ret = wait_event_timeout(pch_event, 326 ret = wait_event_timeout(pch_event,
327 (adap->pch_event_flag != 0), msecs_to_jiffies(50)); 327 (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
328 328
329 if (ret == 0) { 329 if (ret == 0) {
330 pch_err(adap, "timeout: %x\n", adap->pch_event_flag); 330 pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
@@ -1063,6 +1063,6 @@ module_exit(pch_pci_exit);
1063 1063
1064MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C"); 1064MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
1065MODULE_LICENSE("GPL"); 1065MODULE_LICENSE("GPL");
1066MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>"); 1066MODULE_AUTHOR("Tomoya MORINAGA. <tomoya.rohm@gmail.com>");
1067module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); 1067module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
1068module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); 1068module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 3d471d56bf15..76b8af44f634 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -227,6 +227,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
227 return -EINVAL; 227 return -EINVAL;
228 228
229 init_completion(&i2c->cmd_complete); 229 init_completion(&i2c->cmd_complete);
230 i2c->cmd_err = 0;
230 231
231 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0; 232 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
232 233
@@ -252,6 +253,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
252 253
253 if (i2c->cmd_err == -ENXIO) 254 if (i2c->cmd_err == -ENXIO)
254 mxs_i2c_reset(i2c); 255 mxs_i2c_reset(i2c);
256 else
257 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
258 i2c->regs + MXS_I2C_QUEUECTRL_CLR);
255 259
256 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err); 260 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
257 261
@@ -299,8 +303,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
299 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) 303 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
300 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ 304 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
301 i2c->cmd_err = -EIO; 305 i2c->cmd_err = -EIO;
302 else
303 i2c->cmd_err = 0;
304 306
305 is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) & 307 is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
306 MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0; 308 MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
@@ -384,8 +386,6 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
384 if (ret) 386 if (ret)
385 return -EBUSY; 387 return -EBUSY;
386 388
387 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
388 i2c->regs + MXS_I2C_QUEUECTRL_CLR);
389 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); 389 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
390 390
391 platform_set_drvdata(pdev, NULL); 391 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 04be9f82e14b..eb8ad538c79f 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
546{ 546{
547 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); 547 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
548 548
549 /* FIXME: shouldn't this be clk_disable? */ 549 clk_disable(alg_data->clk);
550 clk_enable(alg_data->clk);
551 550
552 return 0; 551 return 0;
553} 552}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e978635e60f0..55e5ea62ccee 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -516,6 +516,14 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
516 if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) 516 if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
517 return 0; 517 return 0;
518 518
519 /*
520 * NACK interrupt is generated before the I2C controller generates the
521 * STOP condition on the bus. So wait for 2 clock periods before resetting
522 * the controller so that STOP condition has been delivered properly.
523 */
524 if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
525 udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
526
519 tegra_i2c_init(i2c_dev); 527 tegra_i2c_init(i2c_dev);
520 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { 528 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
521 if (msg->flags & I2C_M_IGNORE_NAK) 529 if (msg->flags & I2C_M_IGNORE_NAK)
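
The delay added above is just two bus-clock periods rounded up to whole microseconds, so the STOP condition can reach the wire before the controller is reset. A worked example with common I2C rates (the rates are illustrative, not read from the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rates[] = { 100000, 400000 };	/* 100 kHz, 400 kHz */
	unsigned int i;

	for (i = 0; i < 2; i++)
		/* two periods in us: 20 at 100 kHz, 5 at 400 kHz */
		printf("%u Hz -> udelay(%u)\n", rates[i],
		       DIV_ROUND_UP(2 * 1000000, rates[i]));
	return 0;
}
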
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 426bb7617ec6..b0d0bc8a6fb6 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1854,6 +1854,8 @@ static bool generate_unmatched_resp(struct ib_mad_private *recv,
1854 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; 1854 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1855 response->mad.mad.mad_hdr.status = 1855 response->mad.mad.mad_hdr.status =
1856 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 1856 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1857 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1858 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
1857 1859
1858 return true; 1860 return true;
1859 } else { 1861 } else {
@@ -1869,6 +1871,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1869 struct ib_mad_list_head *mad_list; 1871 struct ib_mad_list_head *mad_list;
1870 struct ib_mad_agent_private *mad_agent; 1872 struct ib_mad_agent_private *mad_agent;
1871 int port_num; 1873 int port_num;
1874 int ret = IB_MAD_RESULT_SUCCESS;
1872 1875
1873 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 1876 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1874 qp_info = mad_list->mad_queue->qp_info; 1877 qp_info = mad_list->mad_queue->qp_info;
@@ -1952,8 +1955,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1952local: 1955local:
1953 /* Give driver "right of first refusal" on incoming MAD */ 1956 /* Give driver "right of first refusal" on incoming MAD */
1954 if (port_priv->device->process_mad) { 1957 if (port_priv->device->process_mad) {
1955 int ret;
1956
1957 ret = port_priv->device->process_mad(port_priv->device, 0, 1958 ret = port_priv->device->process_mad(port_priv->device, 0,
1958 port_priv->port_num, 1959 port_priv->port_num,
1959 wc, &recv->grh, 1960 wc, &recv->grh,
@@ -1981,7 +1982,8 @@ local:
1981 * or via recv_handler in ib_mad_complete_recv() 1982 * or via recv_handler in ib_mad_complete_recv()
1982 */ 1983 */
1983 recv = NULL; 1984 recv = NULL;
1984 } else if (generate_unmatched_resp(recv, response)) { 1985 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
1986 generate_unmatched_resp(recv, response)) {
1985 agent_send_response(&response->mad.mad, &recv->grh, wc, 1987 agent_send_response(&response->mad.mad, &recv->grh, wc,
1986 port_priv->device, port_num, qp_info->qp->qp_num); 1988 port_priv->device, port_num, qp_info->qp->qp_num);
1987 } 1989 }
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 669673e81439..b948b6dd5d55 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -247,7 +247,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
247 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, 247 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
248 NULL, NULL, in_mad, out_mad); 248 NULL, NULL, in_mad, out_mad);
249 if (err) 249 if (err)
250 return err; 250 goto out;
251 251
252 /* Checking LinkSpeedActive for FDR-10 */ 252 /* Checking LinkSpeedActive for FDR-10 */
253 if (out_mad->data[15] & 0x1) 253 if (out_mad->data[15] & 0x1)
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 2d787796bf50..7faf4a7fcaa9 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -380,8 +380,7 @@ config INPUT_TWL4030_VIBRA
380 380
381config INPUT_TWL6040_VIBRA 381config INPUT_TWL6040_VIBRA
382 tristate "Support for TWL6040 Vibrator" 382 tristate "Support for TWL6040 Vibrator"
383 depends on TWL4030_CORE 383 depends on TWL6040_CORE
384 select TWL6040_CORE
385 select INPUT_FF_MEMLESS 384 select INPUT_FF_MEMLESS
386 help 385 help
387 This option enables support for TWL6040 Vibrator Driver. 386 This option enables support for TWL6040 Vibrator Driver.
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 45874fed523a..14e94f56cb7d 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -28,7 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/i2c/twl.h> 31#include <linux/input.h>
32#include <linux/mfd/twl6040.h> 32#include <linux/mfd/twl6040.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
@@ -257,7 +257,7 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
257 257
258static int __devinit twl6040_vibra_probe(struct platform_device *pdev) 258static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
259{ 259{
260 struct twl4030_vibra_data *pdata = pdev->dev.platform_data; 260 struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
261 struct vibra_info *info; 261 struct vibra_info *info;
262 int ret; 262 int ret;
263 263
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8081a0a5d602..a4b14a41cbf4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
274 static unsigned char param = 0xc8; 274 static unsigned char param = 0xc8;
275 struct synaptics_data *priv = psmouse->private; 275 struct synaptics_data *priv = psmouse->private;
276 276
277 if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) 277 if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
278 SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
278 return 0; 279 return 0;
279 280
280 if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL)) 281 if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 800243b6037e..64ad702a2ecc 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -35,7 +35,7 @@ static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
35 * NOTE: we reuse the platform_data structure of GPIO leds, 35 * NOTE: we reuse the platform_data structure of GPIO leds,
36 * but repurpose its "gpio" number as a PWM channel number. 36 * but repurpose its "gpio" number as a PWM channel number.
37 */ 37 */
38static int __init pwmled_probe(struct platform_device *pdev) 38static int __devinit pwmled_probe(struct platform_device *pdev)
39{ 39{
40 const struct gpio_led_platform_data *pdata; 40 const struct gpio_led_platform_data *pdata;
41 struct pwmled *leds; 41 struct pwmled *leds;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index d8433f2d53bc..73973fdbd8be 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -112,7 +112,7 @@ err_free_addr:
112 return err; 112 return err;
113} 113}
114 114
115static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) 115static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
116{ 116{
117 int i; 117 int i;
118 118
@@ -294,7 +294,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
294 294
295static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); 295static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
296 296
297static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat) 297static void delete_netxbig_led(struct netxbig_led_data *led_dat)
298{ 298{
299 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 299 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
300 device_remove_file(led_dat->cdev.dev, &dev_attr_sata); 300 device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 2f0a14421a73..01cf89ec6944 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -255,7 +255,7 @@ err_free_cmd:
255 return ret; 255 return ret;
256} 256}
257 257
258static void __devexit delete_ns2_led(struct ns2_led_data *led_dat) 258static void delete_ns2_led(struct ns2_led_data *led_dat)
259{ 259{
260 device_remove_file(led_dat->cdev.dev, &dev_attr_sata); 260 device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
261 led_classdev_unregister(&led_dat->cdev); 261 led_classdev_unregister(&led_dat->cdev);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 97e73e555d11..17e2b472e16d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1727,8 +1727,7 @@ int bitmap_create(struct mddev *mddev)
1727 bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize) 1727 bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
1728 - BITMAP_BLOCK_SHIFT); 1728 - BITMAP_BLOCK_SHIFT);
1729 1729
1730 /* now that chunksize and chunkshift are set, we can use these macros */ 1730 chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
1731 chunks = (blocks + bitmap->chunkshift - 1) >>
1732 bitmap->chunkshift; 1731 bitmap->chunkshift;
1733 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; 1732 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
1734 1733
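
The corrected expression is the usual ceiling-division-by-shift idiom; the old code added chunkshift - 1 instead of (1 << chunkshift) - 1 and could undercount chunks. A small standalone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long blocks = 1000001;
	unsigned int chunkshift = 10;		/* 1024 blocks per chunk */

	/* rounds up: 1000001 blocks -> 977 chunks */
	unsigned long long chunks =
		(blocks + (1ULL << chunkshift) - 1) >> chunkshift;

	printf("%llu\n", chunks);
	return 0;
}
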
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5aec84e4..b44b0aba2d47 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t;
101 101
102#define BITMAP_BLOCK_SHIFT 9 102#define BITMAP_BLOCK_SHIFT 9
103 103
104/* how many blocks per chunk? (this is variable) */
105#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
106
107#endif 104#endif
108 105
109/* 106/*
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 1f23e048f077..08d9a207259a 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
134{ 134{
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 136
137 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 137 if (!capable(CAP_SYS_ADMIN))
138 return; 138 return;
139 139
140 spin_lock(&receiving_list_lock); 140 spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 922a3385eead..754f38f8a692 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
718 return 0; 718 return 0;
719 719
720 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); 720 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
721 request_module("scsi_dh_%s", m->hw_handler_name); 721 if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
722 if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { 722 "scsi_dh_%s", m->hw_handler_name)) {
723 ti->error = "unknown hardware handler type"; 723 ti->error = "unknown hardware handler type";
724 ret = -EINVAL; 724 ret = -EINVAL;
725 goto fail; 725 goto fail;
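
try_then_request_module() probes for the handler first, asks for a module load only on a miss, and then probes again, so the unconditional request_module() call goes away. A simplified, self-contained sketch of that check/load/re-check shape (probe() and load() are stand-ins, not kernel symbols):

#include <stdbool.h>
#include <stdio.h>

static bool handler_present;			/* pretend module state */

static bool probe(void) { return handler_present; }
static void load(void)  { handler_present = true; }

/* the shape of try_then_request_module(): check, load on miss, re-check */
static bool try_then_load(void)
{
	return probe() ? true : (load(), probe());
}

int main(void)
{
	printf("%d\n", try_then_load());	/* prints 1 */
	return 0;
}
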
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba52459ed7..68965e663248 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
859 int ret; 859 int ret;
860 unsigned redundancy = 0; 860 unsigned redundancy = 0;
861 struct raid_dev *dev; 861 struct raid_dev *dev;
862 struct md_rdev *rdev, *freshest; 862 struct md_rdev *rdev, *tmp, *freshest;
863 struct mddev *mddev = &rs->md; 863 struct mddev *mddev = &rs->md;
864 864
865 switch (rs->raid_type->level) { 865 switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
877 } 877 }
878 878
879 freshest = NULL; 879 freshest = NULL;
880 rdev_for_each(rdev, mddev) { 880 rdev_for_each_safe(rdev, tmp, mddev) {
881 if (!rdev->meta_bdev) 881 if (!rdev->meta_bdev)
882 continue; 882 continue;
883 883
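
rdev_for_each_safe() matters here because the loop body may drop a device from the list it is walking; the _safe iterators cache the next element before running the body. The same idiom expressed with the plain list API (struct item is illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static void prune(struct list_head *head)
{
	struct item *it, *tmp;

	/* 'tmp' already points at the next entry, so freeing 'it' is safe */
	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}
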
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32a0fc4..2fd87b544a93 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
279 279
280 hlist_del(&cell->list); 280 hlist_del(&cell->list);
281 281
282 bio_list_add(inmates, cell->holder); 282 if (inmates) {
283 bio_list_merge(inmates, &cell->bios); 283 bio_list_add(inmates, cell->holder);
284 bio_list_merge(inmates, &cell->bios);
285 }
284 286
285 mempool_free(cell, prison->cell_pool); 287 mempool_free(cell, prison->cell_pool);
286} 288}
@@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
303 */ 305 */
304static void __cell_release_singleton(struct cell *cell, struct bio *bio) 306static void __cell_release_singleton(struct cell *cell, struct bio *bio)
305{ 307{
306 hlist_del(&cell->list);
307 BUG_ON(cell->holder != bio); 308 BUG_ON(cell->holder != bio);
308 BUG_ON(!bio_list_empty(&cell->bios)); 309 BUG_ON(!bio_list_empty(&cell->bios));
310
311 __cell_release(cell, NULL);
309} 312}
310 313
311static void cell_release_singleton(struct cell *cell, struct bio *bio) 314static void cell_release_singleton(struct cell *cell, struct bio *bio)
@@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell)
1177static void process_discard(struct thin_c *tc, struct bio *bio) 1180static void process_discard(struct thin_c *tc, struct bio *bio)
1178{ 1181{
1179 int r; 1182 int r;
1183 unsigned long flags;
1180 struct pool *pool = tc->pool; 1184 struct pool *pool = tc->pool;
1181 struct cell *cell, *cell2; 1185 struct cell *cell, *cell2;
1182 struct cell_key key, key2; 1186 struct cell_key key, key2;
@@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1218 m->bio = bio; 1222 m->bio = bio;
1219 1223
1220 if (!ds_add_work(&pool->all_io_ds, &m->list)) { 1224 if (!ds_add_work(&pool->all_io_ds, &m->list)) {
1225 spin_lock_irqsave(&pool->lock, flags);
1221 list_add(&m->list, &pool->prepared_discards); 1226 list_add(&m->list, &pool->prepared_discards);
1227 spin_unlock_irqrestore(&pool->lock, flags);
1222 wake_worker(pool); 1228 wake_worker(pool);
1223 } 1229 }
1224 } else { 1230 } else {
@@ -2626,8 +2632,10 @@ static int thin_endio(struct dm_target *ti,
2626 if (h->all_io_entry) { 2632 if (h->all_io_entry) {
2627 INIT_LIST_HEAD(&work); 2633 INIT_LIST_HEAD(&work);
2628 ds_dec(h->all_io_entry, &work); 2634 ds_dec(h->all_io_entry, &work);
2635 spin_lock_irqsave(&pool->lock, flags);
2629 list_for_each_entry_safe(m, tmp, &work, list) 2636 list_for_each_entry_safe(m, tmp, &work, list)
2630 list_add(&m->list, &pool->prepared_discards); 2637 list_add(&m->list, &pool->prepared_discards);
2638 spin_unlock_irqrestore(&pool->lock, flags);
2631 } 2639 }
2632 2640
2633 mempool_free(h, pool->endio_hook_pool); 2641 mempool_free(h, pool->endio_hook_pool);
@@ -2759,6 +2767,6 @@ static void dm_thin_exit(void)
2759module_init(dm_thin_init); 2767module_init(dm_thin_init);
2760module_exit(dm_thin_exit); 2768module_exit(dm_thin_exit);
2761 2769
2762MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target"); 2770MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2763MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2771MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2764MODULE_LICENSE("GPL"); 2772MODULE_LICENSE("GPL");
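
The spin_lock_irqsave() pairs added above protect the shared prepared_discards list; because pool->lock may also be taken in contexts where interrupts must stay disabled, the irqsave variant is the conservative choice. The bare idiom, with illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pool_like {
	spinlock_t lock;
	struct list_head prepared;
};

static void queue_item(struct pool_like *pool, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add(item, &pool->prepared);
	spin_unlock_irqrestore(&pool->lock, flags);
}
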
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e386ce..477eb2e180c0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7560,14 +7560,14 @@ void md_check_recovery(struct mddev *mddev)
7560 * any transients in the value of "sync_action". 7560 * any transients in the value of "sync_action".
7561 */ 7561 */
7562 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7562 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7563 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7564 /* Clear some bits that don't mean anything, but 7563 /* Clear some bits that don't mean anything, but
7565 * might be left set 7564 * might be left set
7566 */ 7565 */
7567 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7566 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7568 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7567 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7569 7568
7570 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7569 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7570 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7571 goto unlock; 7571 goto unlock;
7572 /* no recovery is running. 7572 /* no recovery is running.
7573 * remove any failed drives, then 7573 * remove any failed drives, then
@@ -8140,7 +8140,8 @@ static int md_notify_reboot(struct notifier_block *this,
8140 8140
8141 for_each_mddev(mddev, tmp) { 8141 for_each_mddev(mddev, tmp) {
8142 if (mddev_trylock(mddev)) { 8142 if (mddev_trylock(mddev)) {
8143 __md_stop_writes(mddev); 8143 if (mddev->pers)
8144 __md_stop_writes(mddev);
8144 mddev->safemode = 2; 8145 mddev->safemode = 2;
8145 mddev_unlock(mddev); 8146 mddev_unlock(mddev);
8146 } 8147 }
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 7f98984e4fad..eab2ea424200 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -54,6 +54,7 @@ struct xc5000_priv {
54 struct list_head hybrid_tuner_instance_list; 54 struct list_head hybrid_tuner_instance_list;
55 55
56 u32 if_khz; 56 u32 if_khz;
57 u32 xtal_khz;
57 u32 freq_hz; 58 u32 freq_hz;
58 u32 bandwidth; 59 u32 bandwidth;
59 u8 video_standard; 60 u8 video_standard;
@@ -214,9 +215,9 @@ static const struct xc5000_fw_cfg xc5000a_1_6_114 = {
214 .size = 12401, 215 .size = 12401,
215}; 216};
216 217
217static const struct xc5000_fw_cfg xc5000c_41_024_5_31875 = { 218static const struct xc5000_fw_cfg xc5000c_41_024_5 = {
218 .name = "dvb-fe-xc5000c-41.024.5-31875.fw", 219 .name = "dvb-fe-xc5000c-41.024.5.fw",
219 .size = 16503, 220 .size = 16497,
220}; 221};
221 222
222static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id) 223static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id)
@@ -226,7 +227,7 @@ static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id)
226 case XC5000A: 227 case XC5000A:
227 return &xc5000a_1_6_114; 228 return &xc5000a_1_6_114;
228 case XC5000C: 229 case XC5000C:
229 return &xc5000c_41_024_5_31875; 230 return &xc5000c_41_024_5;
230 } 231 }
231} 232}
232 233
@@ -572,6 +573,31 @@ static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode)
572 return found; 573 return found;
573} 574}
574 575
576static int xc_set_xtal(struct dvb_frontend *fe)
577{
578 struct xc5000_priv *priv = fe->tuner_priv;
579 int ret = XC_RESULT_SUCCESS;
580
581 switch (priv->chip_id) {
582 default:
583 case XC5000A:
584 /* 32.000 MHz xtal is default */
585 break;
586 case XC5000C:
587 switch (priv->xtal_khz) {
588 default:
589 case 32000:
590 /* 32.000 MHz xtal is default */
591 break;
592 case 31875:
593 /* 31.875 MHz xtal configuration */
594 ret = xc_write_reg(priv, 0x000f, 0x8081);
595 break;
596 }
597 break;
598 }
599 return ret;
600}
575 601
576static int xc5000_fwupload(struct dvb_frontend *fe) 602static int xc5000_fwupload(struct dvb_frontend *fe)
577{ 603{
@@ -603,6 +629,8 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
603 } else { 629 } else {
604 printk(KERN_INFO "xc5000: firmware uploading...\n"); 630 printk(KERN_INFO "xc5000: firmware uploading...\n");
605 ret = xc_load_i2c_sequence(fe, fw->data); 631 ret = xc_load_i2c_sequence(fe, fw->data);
632 if (XC_RESULT_SUCCESS == ret)
633 ret = xc_set_xtal(fe);
606 printk(KERN_INFO "xc5000: firmware upload complete...\n"); 634 printk(KERN_INFO "xc5000: firmware upload complete...\n");
607 } 635 }
608 636
@@ -1164,6 +1192,9 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
1164 priv->if_khz = cfg->if_khz; 1192 priv->if_khz = cfg->if_khz;
1165 } 1193 }
1166 1194
1195 if (priv->xtal_khz == 0)
1196 priv->xtal_khz = cfg->xtal_khz;
1197
1167 if (priv->radio_input == 0) 1198 if (priv->radio_input == 0)
1168 priv->radio_input = cfg->radio_input; 1199 priv->radio_input = cfg->radio_input;
1169 1200
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 3396f8e02b40..39a73bf01406 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -34,6 +34,7 @@ struct xc5000_config {
34 u8 i2c_address; 34 u8 i2c_address;
35 u32 if_khz; 35 u32 if_khz;
36 u8 radio_input; 36 u8 radio_input;
37 u32 xtal_khz;
37 38
38 int chip_id; 39 int chip_id;
39}; 40};
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 39696c6a4ed7..0f64d7182657 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1446,6 +1446,28 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
1446 __func__); 1446 __func__);
1447 return -EINVAL; 1447 return -EINVAL;
1448 } 1448 }
1449 /*
1450 * Get a delivery system that is compatible with DVBv3
 1451 * NOTE: in order for this to work with software like Kaffeine that
1452 * uses a DVBv5 call for DVB-S2 and a DVBv3 call to go back to
1453 * DVB-S, drivers that support both should put the SYS_DVBS entry
1454 * before the SYS_DVBS2, otherwise it won't switch back to DVB-S.
1455 * The real fix is that userspace applications should not use DVBv3
 1456 * and should not rely on FE_SET_FRONTEND to switch the delivery
1457 * system.
1458 */
1459 ncaps = 0;
1460 while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
1461 if (fe->ops.delsys[ncaps] == desired_system) {
1462 delsys = desired_system;
1463 break;
1464 }
1465 ncaps++;
1466 }
1467 if (delsys == SYS_UNDEFINED) {
1468 dprintk("%s() Couldn't find a delivery system that matches %d\n",
1469 __func__, desired_system);
1470 }
1449 } else { 1471 } else {
1450 /* 1472 /*
1451 * This is a DVBv5 call. So, it likely knows the supported 1473 * This is a DVBv5 call. So, it likely knows the supported
@@ -1494,9 +1516,10 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
1494 __func__); 1516 __func__);
1495 return -EINVAL; 1517 return -EINVAL;
1496 } 1518 }
1497 c->delivery_system = delsys;
1498 } 1519 }
1499 1520
1521 c->delivery_system = delsys;
1522
1500 /* 1523 /*
1501 * The DVBv3 or DVBv5 call is requesting a different system. So, 1524 * The DVBv3 or DVBv5 call is requesting a different system. So,
1502 * emulation is needed. 1525 * emulation is needed.
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index 36d11756492f..a414b1f2b6a5 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -1520,8 +1520,10 @@ static int scu_command(struct drxk_state *state,
1520 dprintk(1, "\n"); 1520 dprintk(1, "\n");
1521 1521
1522 if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) || 1522 if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
1523 ((resultLen > 0) && (result == NULL))) 1523 ((resultLen > 0) && (result == NULL))) {
1524 goto error; 1524 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1525 return status;
1526 }
1525 1527
1526 mutex_lock(&state->mutex); 1528 mutex_lock(&state->mutex);
1527 1529
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index b09c5fae489b..af526586fa26 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1046,6 +1046,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
1046 goto exit_unregister_led; 1046 goto exit_unregister_led;
1047 } 1047 }
1048 1048
1049 data->dev->driver_type = RC_DRIVER_IR_RAW;
1049 data->dev->driver_name = WBCIR_NAME; 1050 data->dev->driver_name = WBCIR_NAME;
1050 data->dev->input_name = WBCIR_NAME; 1051 data->dev->input_name = WBCIR_NAME;
1051 data->dev->input_phys = "wbcir/cir0"; 1052 data->dev->input_phys = "wbcir/cir0";
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f2479c5c0eb2..ce1e7ba940f6 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -492,7 +492,7 @@ config VIDEO_VS6624
492 492
493config VIDEO_MT9M032 493config VIDEO_MT9M032
494 tristate "MT9M032 camera sensor support" 494 tristate "MT9M032 camera sensor support"
495 depends on I2C && VIDEO_V4L2 495 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
496 select VIDEO_APTINA_PLL 496 select VIDEO_APTINA_PLL
497 ---help--- 497 ---help---
498 This driver supports MT9M032 camera sensors from Aptina, monochrome 498 This driver supports MT9M032 camera sensors from Aptina, monochrome
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 7636672c3548..645973c5feb0 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -392,10 +392,11 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
392 } 392 }
393 393
394 /* Scaling is not supported, the format is thus fixed. */ 394 /* Scaling is not supported, the format is thus fixed. */
395 ret = mt9m032_get_pad_format(subdev, fh, fmt); 395 fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which);
396 ret = 0;
396 397
397done: 398done:
398 mutex_lock(&sensor->lock); 399 mutex_unlock(&sensor->lock);
399 return ret; 400 return ret;
400} 401}
401 402
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 29f463cc09cb..11e44386fa9b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -268,10 +268,17 @@ config TWL6030_PWM
268 This is used to control charging LED brightness. 268 This is used to control charging LED brightness.
269 269
270config TWL6040_CORE 270config TWL6040_CORE
271 bool 271 bool "Support for TWL6040 audio codec"
272 depends on TWL4030_CORE && GENERIC_HARDIRQS 272 depends on I2C=y && GENERIC_HARDIRQS
273 select MFD_CORE 273 select MFD_CORE
274 select REGMAP_I2C
274 default n 275 default n
276 help
277 Say yes here if you want support for Texas Instruments TWL6040 audio
278 codec.
279 This driver provides common support for accessing the device,
280 additional drivers must be enabled in order to use the
281 functionality of the device (audio, vibra).
275 282
276config MFD_STMPE 283config MFD_STMPE
277 bool "Support STMicroelectronics STMPE" 284 bool "Support STMicroelectronics STMPE"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1895cf9fab8c..1582c3d95257 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -527,7 +527,9 @@ static void asic3_gpio_set(struct gpio_chip *chip,
527 527
528static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 528static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
529{ 529{
530 return (offset < ASIC3_NUM_GPIOS) ? IRQ_BOARD_START + offset : -ENXIO; 530 struct asic3 *asic = container_of(chip, struct asic3, gpio);
531
532 return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
531} 533}
532 534
533static __init int asic3_gpio_probe(struct platform_device *pdev, 535static __init int asic3_gpio_probe(struct platform_device *pdev,
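
The fix recovers the driver's private state from the embedded gpio_chip with container_of() instead of assuming a global IRQ_BOARD_START base. The pattern in isolation, with trimmed-down illustrative types:

#include <linux/kernel.h>

struct fake_chip { int dummy; };

struct fake_asic {
	int irq_base;
	struct fake_chip gpio;		/* embedded, like asic3's gpio_chip */
};

static int fake_gpio_to_irq(struct fake_chip *chip, unsigned int offset)
{
	struct fake_asic *asic = container_of(chip, struct fake_asic, gpio);

	return asic->irq_base + offset;
}
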
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 95a2e546a489..7e96bb229724 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,7 +25,7 @@
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/gpio.h> 28#include <plat/cpu.h>
29#include <plat/usb.h> 29#include <plat/usb.h>
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31 31
@@ -502,19 +502,6 @@ static void omap_usbhs_init(struct device *dev)
502 pm_runtime_get_sync(dev); 502 pm_runtime_get_sync(dev);
503 spin_lock_irqsave(&omap->lock, flags); 503 spin_lock_irqsave(&omap->lock, flags);
504 504
505 if (pdata->ehci_data->phy_reset) {
506 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
507 gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
508 GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
509
510 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
511 gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
512 GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
513
514 /* Hold the PHY in RESET for enough time till DIR is high */
515 udelay(10);
516 }
517
518 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); 505 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
519 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); 506 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
520 507
@@ -593,39 +580,10 @@ static void omap_usbhs_init(struct device *dev)
593 usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT); 580 usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
594 } 581 }
595 582
596 if (pdata->ehci_data->phy_reset) {
597 /* Hold the PHY in RESET for enough time till
598 * PHY is settled and ready
599 */
600 udelay(10);
601
602 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
603 gpio_set_value
604 (pdata->ehci_data->reset_gpio_port[0], 1);
605
606 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
607 gpio_set_value
608 (pdata->ehci_data->reset_gpio_port[1], 1);
609 }
610
611 spin_unlock_irqrestore(&omap->lock, flags); 583 spin_unlock_irqrestore(&omap->lock, flags);
612 pm_runtime_put_sync(dev); 584 pm_runtime_put_sync(dev);
613} 585}
614 586
615static void omap_usbhs_deinit(struct device *dev)
616{
617 struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
618 struct usbhs_omap_platform_data *pdata = &omap->platdata;
619
620 if (pdata->ehci_data->phy_reset) {
621 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
622 gpio_free(pdata->ehci_data->reset_gpio_port[0]);
623
624 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
625 gpio_free(pdata->ehci_data->reset_gpio_port[1]);
626 }
627}
628
629 587
630/** 588/**
631 * usbhs_omap_probe - initialize TI-based HCDs 589 * usbhs_omap_probe - initialize TI-based HCDs
@@ -860,7 +818,6 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
860{ 818{
861 struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev); 819 struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
862 820
863 omap_usbhs_deinit(&pdev->dev);
864 iounmap(omap->tll_base); 821 iounmap(omap->tll_base);
865 iounmap(omap->uhh_base); 822 iounmap(omap->uhh_base);
866 clk_put(omap->init_60m_fclk); 823 clk_put(omap->init_60m_fclk);
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 99ef944c621d..44afae0a69ce 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -80,44 +80,6 @@ static struct mfd_cell rc5t583_subdevs[] = {
80 {.name = "rc5t583-key", } 80 {.name = "rc5t583-key", }
81}; 81};
82 82
83int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
84{
85 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
86 return regmap_write(rc5t583->regmap, reg, val);
87}
88
89int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
90{
91 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
92 unsigned int ival;
93 int ret;
94 ret = regmap_read(rc5t583->regmap, reg, &ival);
95 if (!ret)
96 *val = (uint8_t)ival;
97 return ret;
98}
99
100int rc5t583_set_bits(struct device *dev, unsigned int reg,
101 unsigned int bit_mask)
102{
103 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
104 return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
105}
106
107int rc5t583_clear_bits(struct device *dev, unsigned int reg,
108 unsigned int bit_mask)
109{
110 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
111 return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
112}
113
114int rc5t583_update(struct device *dev, unsigned int reg,
115 unsigned int val, unsigned int mask)
116{
117 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
118 return regmap_update_bits(rc5t583->regmap, reg, mask, val);
119}
120
121static int __rc5t583_set_ext_pwrreq1_control(struct device *dev, 83static int __rc5t583_set_ext_pwrreq1_control(struct device *dev,
122 int id, int ext_pwr, int slots) 84 int id, int ext_pwr, int slots)
123{ 85{
@@ -197,6 +159,7 @@ int rc5t583_ext_power_req_config(struct device *dev, int ds_id,
197 ds_id, ext_pwr_req); 159 ds_id, ext_pwr_req);
198 return 0; 160 return 0;
199} 161}
162EXPORT_SYMBOL(rc5t583_ext_power_req_config);
200 163
201static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583, 164static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583,
202 struct rc5t583_platform_data *pdata) 165 struct rc5t583_platform_data *pdata)
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index b2d8e512d3cb..2d6bedadca09 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -30,7 +30,9 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/i2c/twl.h> 33#include <linux/i2c.h>
34#include <linux/regmap.h>
35#include <linux/err.h>
34#include <linux/mfd/core.h> 36#include <linux/mfd/core.h>
35#include <linux/mfd/twl6040.h> 37#include <linux/mfd/twl6040.h>
36 38
@@ -39,7 +41,7 @@
39int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) 41int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
40{ 42{
41 int ret; 43 int ret;
42 u8 val = 0; 44 unsigned int val;
43 45
44 mutex_lock(&twl6040->io_mutex); 46 mutex_lock(&twl6040->io_mutex);
45 /* Vibra control registers from cache */ 47 /* Vibra control registers from cache */
@@ -47,7 +49,7 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
47 reg == TWL6040_REG_VIBCTLR)) { 49 reg == TWL6040_REG_VIBCTLR)) {
48 val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)]; 50 val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
49 } else { 51 } else {
50 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 52 ret = regmap_read(twl6040->regmap, reg, &val);
51 if (ret < 0) { 53 if (ret < 0) {
52 mutex_unlock(&twl6040->io_mutex); 54 mutex_unlock(&twl6040->io_mutex);
53 return ret; 55 return ret;
@@ -64,7 +66,7 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
64 int ret; 66 int ret;
65 67
66 mutex_lock(&twl6040->io_mutex); 68 mutex_lock(&twl6040->io_mutex);
67 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); 69 ret = regmap_write(twl6040->regmap, reg, val);
68 /* Cache the vibra control registers */ 70 /* Cache the vibra control registers */
69 if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR) 71 if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
70 twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val; 72 twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
@@ -77,16 +79,9 @@ EXPORT_SYMBOL(twl6040_reg_write);
77int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) 79int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
78{ 80{
79 int ret; 81 int ret;
80 u8 val;
81 82
82 mutex_lock(&twl6040->io_mutex); 83 mutex_lock(&twl6040->io_mutex);
83 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 84 ret = regmap_update_bits(twl6040->regmap, reg, mask, mask);
84 if (ret)
85 goto out;
86
87 val |= mask;
88 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
89out:
90 mutex_unlock(&twl6040->io_mutex); 85 mutex_unlock(&twl6040->io_mutex);
91 return ret; 86 return ret;
92} 87}
@@ -95,16 +90,9 @@ EXPORT_SYMBOL(twl6040_set_bits);
95int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) 90int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
96{ 91{
97 int ret; 92 int ret;
98 u8 val;
99 93
100 mutex_lock(&twl6040->io_mutex); 94 mutex_lock(&twl6040->io_mutex);
101 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 95 ret = regmap_update_bits(twl6040->regmap, reg, mask, 0);
102 if (ret)
103 goto out;
104
105 val &= ~mask;
106 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
107out:
108 mutex_unlock(&twl6040->io_mutex); 96 mutex_unlock(&twl6040->io_mutex);
109 return ret; 97 return ret;
110} 98}
@@ -494,32 +482,58 @@ static struct resource twl6040_codec_rsrc[] = {
494 }, 482 },
495}; 483};
496 484
497static int __devinit twl6040_probe(struct platform_device *pdev) 485static bool twl6040_readable_reg(struct device *dev, unsigned int reg)
498{ 486{
499 struct twl4030_audio_data *pdata = pdev->dev.platform_data; 487 /* Register 0 is not readable */
488 if (!reg)
489 return false;
490 return true;
491}
492
493static struct regmap_config twl6040_regmap_config = {
494 .reg_bits = 8,
495 .val_bits = 8,
496 .max_register = TWL6040_REG_STATUS, /* 0x2e */
497
498 .readable_reg = twl6040_readable_reg,
499};
500
501static int __devinit twl6040_probe(struct i2c_client *client,
502 const struct i2c_device_id *id)
503{
504 struct twl6040_platform_data *pdata = client->dev.platform_data;
500 struct twl6040 *twl6040; 505 struct twl6040 *twl6040;
501 struct mfd_cell *cell = NULL; 506 struct mfd_cell *cell = NULL;
502 int ret, children = 0; 507 int ret, children = 0;
503 508
504 if (!pdata) { 509 if (!pdata) {
505 dev_err(&pdev->dev, "Platform data is missing\n"); 510 dev_err(&client->dev, "Platform data is missing\n");
506 return -EINVAL; 511 return -EINVAL;
507 } 512 }
508 513
509 /* In order to operate correctly we need valid interrupt config */ 514 /* In order to operate correctly we need valid interrupt config */
510 if (!pdata->naudint_irq || !pdata->irq_base) { 515 if (!client->irq || !pdata->irq_base) {
511 dev_err(&pdev->dev, "Invalid IRQ configuration\n"); 516 dev_err(&client->dev, "Invalid IRQ configuration\n");
512 return -EINVAL; 517 return -EINVAL;
513 } 518 }
514 519
515 twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL); 520 twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040),
516 if (!twl6040) 521 GFP_KERNEL);
517 return -ENOMEM; 522 if (!twl6040) {
523 ret = -ENOMEM;
524 goto err;
525 }
526
527 twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
528 if (IS_ERR(twl6040->regmap)) {
529 ret = PTR_ERR(twl6040->regmap);
530 goto err;
531 }
518 532
519 platform_set_drvdata(pdev, twl6040); 533 i2c_set_clientdata(client, twl6040);
520 534
521 twl6040->dev = &pdev->dev; 535 twl6040->dev = &client->dev;
522 twl6040->irq = pdata->naudint_irq; 536 twl6040->irq = client->irq;
523 twl6040->irq_base = pdata->irq_base; 537 twl6040->irq_base = pdata->irq_base;
524 538
525 mutex_init(&twl6040->mutex); 539 mutex_init(&twl6040->mutex);
@@ -588,12 +602,12 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
588 } 602 }
589 603
590 if (children) { 604 if (children) {
591 ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells, 605 ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
592 children, NULL, 0); 606 children, NULL, 0);
593 if (ret) 607 if (ret)
594 goto mfd_err; 608 goto mfd_err;
595 } else { 609 } else {
596 dev_err(&pdev->dev, "No platform data found for children\n"); 610 dev_err(&client->dev, "No platform data found for children\n");
597 ret = -ENODEV; 611 ret = -ENODEV;
598 goto mfd_err; 612 goto mfd_err;
599 } 613 }
@@ -608,14 +622,15 @@ gpio2_err:
608 if (gpio_is_valid(twl6040->audpwron)) 622 if (gpio_is_valid(twl6040->audpwron))
609 gpio_free(twl6040->audpwron); 623 gpio_free(twl6040->audpwron);
610gpio1_err: 624gpio1_err:
611 platform_set_drvdata(pdev, NULL); 625 i2c_set_clientdata(client, NULL);
612 kfree(twl6040); 626 regmap_exit(twl6040->regmap);
627err:
613 return ret; 628 return ret;
614} 629}
615 630
616static int __devexit twl6040_remove(struct platform_device *pdev) 631static int __devexit twl6040_remove(struct i2c_client *client)
617{ 632{
618 struct twl6040 *twl6040 = platform_get_drvdata(pdev); 633 struct twl6040 *twl6040 = i2c_get_clientdata(client);
619 634
620 if (twl6040->power_count) 635 if (twl6040->power_count)
621 twl6040_power(twl6040, 0); 636 twl6040_power(twl6040, 0);
@@ -626,23 +641,30 @@ static int __devexit twl6040_remove(struct platform_device *pdev)
626 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); 641 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
627 twl6040_irq_exit(twl6040); 642 twl6040_irq_exit(twl6040);
628 643
629 mfd_remove_devices(&pdev->dev); 644 mfd_remove_devices(&client->dev);
630 platform_set_drvdata(pdev, NULL); 645 i2c_set_clientdata(client, NULL);
631 kfree(twl6040); 646 regmap_exit(twl6040->regmap);
632 647
633 return 0; 648 return 0;
634} 649}
635 650
636static struct platform_driver twl6040_driver = { 651static const struct i2c_device_id twl6040_i2c_id[] = {
652 { "twl6040", 0, },
653 { },
654};
655MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
656
657static struct i2c_driver twl6040_driver = {
658 .driver = {
659 .name = "twl6040",
660 .owner = THIS_MODULE,
661 },
637 .probe = twl6040_probe, 662 .probe = twl6040_probe,
638 .remove = __devexit_p(twl6040_remove), 663 .remove = __devexit_p(twl6040_remove),
639 .driver = { 664 .id_table = twl6040_i2c_id,
640 .owner = THIS_MODULE,
641 .name = "twl6040",
642 },
643}; 665};
644 666
645module_platform_driver(twl6040_driver); 667module_i2c_driver(twl6040_driver);
646 668
647MODULE_DESCRIPTION("TWL6040 MFD"); 669MODULE_DESCRIPTION("TWL6040 MFD");
648MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>"); 670MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b1809650b7aa..dabec556ebb8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -873,7 +873,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
873{ 873{
874 struct mmc_blk_data *md = mq->data; 874 struct mmc_blk_data *md = mq->data;
875 struct mmc_card *card = md->queue.card; 875 struct mmc_card *card = md->queue.card;
876 unsigned int from, nr, arg; 876 unsigned int from, nr, arg, trim_arg, erase_arg;
877 int err = 0, type = MMC_BLK_SECDISCARD; 877 int err = 0, type = MMC_BLK_SECDISCARD;
878 878
879 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { 879 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
@@ -881,20 +881,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
881 goto out; 881 goto out;
882 } 882 }
883 883
884 from = blk_rq_pos(req);
885 nr = blk_rq_sectors(req);
886
884 /* The sanitize operation is supported at v4.5 only */ 887 /* The sanitize operation is supported at v4.5 only */
885 if (mmc_can_sanitize(card)) { 888 if (mmc_can_sanitize(card)) {
886 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 889 erase_arg = MMC_ERASE_ARG;
887 EXT_CSD_SANITIZE_START, 1, 0); 890 trim_arg = MMC_TRIM_ARG;
888 goto out; 891 } else {
892 erase_arg = MMC_SECURE_ERASE_ARG;
893 trim_arg = MMC_SECURE_TRIM1_ARG;
889 } 894 }
890 895
891 from = blk_rq_pos(req); 896 if (mmc_erase_group_aligned(card, from, nr))
892 nr = blk_rq_sectors(req); 897 arg = erase_arg;
893 898 else if (mmc_can_trim(card))
894 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) 899 arg = trim_arg;
895 arg = MMC_SECURE_TRIM1_ARG; 900 else {
896 else 901 err = -EINVAL;
897 arg = MMC_SECURE_ERASE_ARG; 902 goto out;
903 }
898retry: 904retry:
899 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 905 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
900 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 906 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -904,25 +910,41 @@ retry:
904 INAND_CMD38_ARG_SECERASE, 910 INAND_CMD38_ARG_SECERASE,
905 0); 911 0);
906 if (err) 912 if (err)
907 goto out; 913 goto out_retry;
908 } 914 }
915
909 err = mmc_erase(card, from, nr, arg); 916 err = mmc_erase(card, from, nr, arg);
910 if (!err && arg == MMC_SECURE_TRIM1_ARG) { 917 if (err == -EIO)
918 goto out_retry;
919 if (err)
920 goto out;
921
922 if (arg == MMC_SECURE_TRIM1_ARG) {
911 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 923 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
912 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 924 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
913 INAND_CMD38_ARG_EXT_CSD, 925 INAND_CMD38_ARG_EXT_CSD,
914 INAND_CMD38_ARG_SECTRIM2, 926 INAND_CMD38_ARG_SECTRIM2,
915 0); 927 0);
916 if (err) 928 if (err)
917 goto out; 929 goto out_retry;
918 } 930 }
931
919 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 932 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
933 if (err == -EIO)
934 goto out_retry;
935 if (err)
936 goto out;
920 } 937 }
921out: 938
922 if (err == -EIO && !mmc_blk_reset(md, card->host, type)) 939 if (mmc_can_sanitize(card))
940 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
941 EXT_CSD_SANITIZE_START, 1, 0);
942out_retry:
943 if (err && !mmc_blk_reset(md, card->host, type))
923 goto retry; 944 goto retry;
924 if (!err) 945 if (!err)
925 mmc_blk_reset_success(md, type); 946 mmc_blk_reset_success(md, type);
947out:
926 spin_lock_irq(&md->lock); 948 spin_lock_irq(&md->lock);
927 __blk_end_request(req, err, blk_rq_bytes(req)); 949 __blk_end_request(req, err, blk_rq_bytes(req));
928 spin_unlock_irq(&md->lock); 950 spin_unlock_irq(&md->lock);
@@ -1802,7 +1824,7 @@ static void mmc_blk_remove(struct mmc_card *card)
1802} 1824}
1803 1825
1804#ifdef CONFIG_PM 1826#ifdef CONFIG_PM
1805static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) 1827static int mmc_blk_suspend(struct mmc_card *card)
1806{ 1828{
1807 struct mmc_blk_data *part_md; 1829 struct mmc_blk_data *part_md;
1808 struct mmc_blk_data *md = mmc_get_drvdata(card); 1830 struct mmc_blk_data *md = mmc_get_drvdata(card);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2517547b4366..996f8e36e23d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -139,7 +139,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
139 139
140 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 140 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
141 q->limits.max_discard_sectors = max_discard; 141 q->limits.max_discard_sectors = max_discard;
142 if (card->erased_byte == 0) 142 if (card->erased_byte == 0 && !mmc_can_discard(card))
143 q->limits.discard_zeroes_data = 1; 143 q->limits.discard_zeroes_data = 1;
144 q->limits.discard_granularity = card->pref_erase << 9; 144 q->limits.discard_granularity = card->pref_erase << 9;
145 /* granularity must not be greater than max. discard */ 145 /* granularity must not be greater than max. discard */
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 3f606068d552..c60cee92a2b2 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,14 +122,14 @@ static int mmc_bus_remove(struct device *dev)
122 return 0; 122 return 0;
123} 123}
124 124
125static int mmc_bus_suspend(struct device *dev, pm_message_t state) 125static int mmc_bus_suspend(struct device *dev)
126{ 126{
127 struct mmc_driver *drv = to_mmc_driver(dev->driver); 127 struct mmc_driver *drv = to_mmc_driver(dev->driver);
128 struct mmc_card *card = mmc_dev_to_card(dev); 128 struct mmc_card *card = mmc_dev_to_card(dev);
129 int ret = 0; 129 int ret = 0;
130 130
131 if (dev->driver && drv->suspend) 131 if (dev->driver && drv->suspend)
132 ret = drv->suspend(card, state); 132 ret = drv->suspend(card);
133 return ret; 133 return ret;
134} 134}
135 135
@@ -165,20 +165,14 @@ static int mmc_runtime_idle(struct device *dev)
165 return pm_runtime_suspend(dev); 165 return pm_runtime_suspend(dev);
166} 166}
167 167
168#endif /* !CONFIG_PM_RUNTIME */
169
168static const struct dev_pm_ops mmc_bus_pm_ops = { 170static const struct dev_pm_ops mmc_bus_pm_ops = {
169 .runtime_suspend = mmc_runtime_suspend, 171 SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume,
170 .runtime_resume = mmc_runtime_resume, 172 mmc_runtime_idle)
171 .runtime_idle = mmc_runtime_idle, 173 SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
172}; 174};
173 175
174#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
175
176#else /* !CONFIG_PM_RUNTIME */
177
178#define MMC_PM_OPS_PTR NULL
179
180#endif /* !CONFIG_PM_RUNTIME */
181
182static struct bus_type mmc_bus_type = { 176static struct bus_type mmc_bus_type = {
183 .name = "mmc", 177 .name = "mmc",
184 .dev_attrs = mmc_dev_attrs, 178 .dev_attrs = mmc_dev_attrs,
@@ -186,9 +180,7 @@ static struct bus_type mmc_bus_type = {
186 .uevent = mmc_bus_uevent, 180 .uevent = mmc_bus_uevent,
187 .probe = mmc_bus_probe, 181 .probe = mmc_bus_probe,
188 .remove = mmc_bus_remove, 182 .remove = mmc_bus_remove,
189 .suspend = mmc_bus_suspend, 183 .pm = &mmc_bus_pm_ops,
190 .resume = mmc_bus_resume,
191 .pm = MMC_PM_OPS_PTR,
192}; 184};
193 185
194int mmc_register_bus(void) 186int mmc_register_bus(void)
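
The mmc_bus hunk above drops the legacy bus_type .suspend/.resume methods and the CONFIG_PM_RUNTIME #ifdef plumbing in favour of one dev_pm_ops table built from SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS(); those macros expand to nothing when the matching CONFIG_PM options are off, which is what makes the old MMC_PM_OPS_PTR indirection unnecessary. A minimal sketch of the same wiring for a hypothetical "foo" bus, not the MMC code itself:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_bus_suspend(struct device *dev)      { return 0; }
static int foo_bus_resume(struct device *dev)       { return 0; }
static int foo_runtime_suspend(struct device *dev)  { return 0; }
static int foo_runtime_resume(struct device *dev)   { return 0; }
static int foo_runtime_idle(struct device *dev)     { return pm_runtime_suspend(dev); }

static const struct dev_pm_ops foo_bus_pm_ops = {
        /* Both macros compile away cleanly when CONFIG_PM_SLEEP /
         * CONFIG_PM_RUNTIME are disabled, so no #ifdef is needed here. */
        SET_SYSTEM_SLEEP_PM_OPS(foo_bus_suspend, foo_bus_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
                           foo_runtime_idle)
};

static struct bus_type foo_bus_type = {
        .name = "foo",
        .pm   = &foo_bus_pm_ops,  /* replaces .suspend/.resume and the #ifdef'd .pm pointer */
        /* registered with bus_register(&foo_bus_type) from the module init */
};
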
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 29de31e260dd..2c14be73254c 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -12,6 +12,7 @@
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/jiffies.h> 14#include <linux/jiffies.h>
15#include <linux/mmc/cd-gpio.h>
15#include <linux/mmc/host.h> 16#include <linux/mmc/host.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7474c47b9c08..ba821fe70bca 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1409,7 +1409,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1409{ 1409{
1410 unsigned int erase_timeout; 1410 unsigned int erase_timeout;
1411 1411
1412 if (card->ext_csd.erase_group_def & 1) { 1412 if (arg == MMC_DISCARD_ARG ||
1413 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1414 erase_timeout = card->ext_csd.trim_timeout;
1415 } else if (card->ext_csd.erase_group_def & 1) {
1413 /* High Capacity Erase Group Size uses HC timeouts */ 1416 /* High Capacity Erase Group Size uses HC timeouts */
1414 if (arg == MMC_TRIM_ARG) 1417 if (arg == MMC_TRIM_ARG)
1415 erase_timeout = card->ext_csd.trim_timeout; 1418 erase_timeout = card->ext_csd.trim_timeout;
@@ -1681,8 +1684,6 @@ int mmc_can_trim(struct mmc_card *card)
1681{ 1684{
1682 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1685 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1683 return 1; 1686 return 1;
1684 if (mmc_can_discard(card))
1685 return 1;
1686 return 0; 1687 return 0;
1687} 1688}
1688EXPORT_SYMBOL(mmc_can_trim); 1689EXPORT_SYMBOL(mmc_can_trim);
@@ -1701,6 +1702,8 @@ EXPORT_SYMBOL(mmc_can_discard);
1701 1702
1702int mmc_can_sanitize(struct mmc_card *card) 1703int mmc_can_sanitize(struct mmc_card *card)
1703{ 1704{
1705 if (!mmc_can_trim(card) && !mmc_can_erase(card))
1706 return 0;
1704 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) 1707 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1705 return 1; 1708 return 1;
1706 return 0; 1709 return 0;
@@ -2235,6 +2238,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2235 mmc_card_is_removable(host)) 2238 mmc_card_is_removable(host))
2236 return err; 2239 return err;
2237 2240
2241 mmc_claim_host(host);
2238 if (card && mmc_card_mmc(card) && 2242 if (card && mmc_card_mmc(card) &&
2239 (card->ext_csd.cache_size > 0)) { 2243 (card->ext_csd.cache_size > 0)) {
2240 enable = !!enable; 2244 enable = !!enable;
@@ -2252,6 +2256,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2252 card->ext_csd.cache_ctrl = enable; 2256 card->ext_csd.cache_ctrl = enable;
2253 } 2257 }
2254 } 2258 }
2259 mmc_release_host(host);
2255 2260
2256 return err; 2261 return err;
2257} 2262}
@@ -2269,49 +2274,32 @@ int mmc_suspend_host(struct mmc_host *host)
2269 2274
2270 cancel_delayed_work(&host->detect); 2275 cancel_delayed_work(&host->detect);
2271 mmc_flush_scheduled_work(); 2276 mmc_flush_scheduled_work();
2272 if (mmc_try_claim_host(host)) {
2273 err = mmc_cache_ctrl(host, 0);
2274 mmc_release_host(host);
2275 } else {
2276 err = -EBUSY;
2277 }
2278 2277
2278 err = mmc_cache_ctrl(host, 0);
2279 if (err) 2279 if (err)
2280 goto out; 2280 goto out;
2281 2281
2282 mmc_bus_get(host); 2282 mmc_bus_get(host);
2283 if (host->bus_ops && !host->bus_dead) { 2283 if (host->bus_ops && !host->bus_dead) {
2284 2284
2285 /* 2285 if (host->bus_ops->suspend)
2286 * A long response time is not acceptable for device drivers 2286 err = host->bus_ops->suspend(host);
2287 * when doing suspend. Prevent mmc_claim_host in the suspend
2288 * sequence, to potentially wait "forever" by trying to
2289 * pre-claim the host.
2290 */
2291 if (mmc_try_claim_host(host)) {
2292 if (host->bus_ops->suspend) {
2293 err = host->bus_ops->suspend(host);
2294 }
2295 mmc_release_host(host);
2296 2287
2297 if (err == -ENOSYS || !host->bus_ops->resume) { 2288 if (err == -ENOSYS || !host->bus_ops->resume) {
2298 /* 2289 /*
2299 * We simply "remove" the card in this case. 2290 * We simply "remove" the card in this case.
2300 * It will be redetected on resume. (Calling 2291 * It will be redetected on resume. (Calling
2301 * bus_ops->remove() with a claimed host can 2292 * bus_ops->remove() with a claimed host can
2302 * deadlock.) 2293 * deadlock.)
2303 */ 2294 */
2304 if (host->bus_ops->remove) 2295 if (host->bus_ops->remove)
2305 host->bus_ops->remove(host); 2296 host->bus_ops->remove(host);
2306 mmc_claim_host(host); 2297 mmc_claim_host(host);
2307 mmc_detach_bus(host); 2298 mmc_detach_bus(host);
2308 mmc_power_off(host); 2299 mmc_power_off(host);
2309 mmc_release_host(host); 2300 mmc_release_host(host);
2310 host->pm_flags = 0; 2301 host->pm_flags = 0;
2311 err = 0; 2302 err = 0;
2312 }
2313 } else {
2314 err = -EBUSY;
2315 } 2303 }
2316 } 2304 }
2317 mmc_bus_put(host); 2305 mmc_bus_put(host);
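
Two simplifications in the core.c hunks above are related: mmc_cache_ctrl() now claims and releases the host itself, which lets mmc_suspend_host() drop the mmc_try_claim_host()/-EBUSY contortions, and mmc_can_sanitize() stops advertising sanitize on cards that can neither trim nor erase. A minimal sketch of the "claim inside the helper" shape; foo_cache_ctrl() is hypothetical, only mmc_claim_host()/mmc_release_host() are taken from the hunk:

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Bracket the register access inside the helper so every caller,
 * including the suspend path, gets the host claim for free. */
static int foo_cache_ctrl(struct mmc_host *host, bool enable)
{
        int err = 0;

        mmc_claim_host(host);
        if (enable) {
                /* ... issue the EXT_CSD cache-on switch here ... */
        } else {
                /* ... and the cache-off / flush path here ... */
        }
        mmc_release_host(host);

        return err;
}
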
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index bf3c9b456aaf..ab3fc4617107 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -526,8 +526,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
526 return -ENODEV; 526 return -ENODEV;
527 527
528 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 528 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
529 if (sg_len < 0) 529 if (sg_len < 0) {
530 host->dma_ops->stop(host);
530 return sg_len; 531 return sg_len;
532 }
531 533
532 host->using_dma = 1; 534 host->using_dma = 1;
533 535
@@ -1879,7 +1881,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
1879 if (!host->dma_ops) 1881 if (!host->dma_ops)
1880 goto no_dma; 1882 goto no_dma;
1881 1883
1882 if (host->dma_ops->init) { 1884 if (host->dma_ops->init && host->dma_ops->start &&
1885 host->dma_ops->stop && host->dma_ops->cleanup) {
1883 if (host->dma_ops->init(host)) { 1886 if (host->dma_ops->init(host)) {
1884 dev_err(&host->dev, "%s: Unable to initialize " 1887 dev_err(&host->dev, "%s: Unable to initialize "
1885 "DMA Controller.\n", __func__); 1888 "DMA Controller.\n", __func__);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b0f2ef988188..e3f5af96ab87 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
363 goto out; 363 goto out;
364 364
365 dmaengine_submit(desc); 365 dmaengine_submit(desc);
366 dma_async_issue_pending(host->dmach);
366 return; 367 return;
367 368
368out: 369out:
@@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
403 goto out; 404 goto out;
404 405
405 dmaengine_submit(desc); 406 dmaengine_submit(desc);
407 dma_async_issue_pending(host->dmach);
406 return; 408 return;
407 409
408out: 410out:
@@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
531 goto out; 533 goto out;
532 534
533 dmaengine_submit(desc); 535 dmaengine_submit(desc);
536 dma_async_issue_pending(host->dmach);
534 return; 537 return;
535out: 538out:
536 dev_warn(mmc_dev(host->mmc), 539 dev_warn(mmc_dev(host->mmc),
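
The mxs-mmc hunks above add the dma_async_issue_pending() call that the dmaengine API requires after dmaengine_submit(): submitting a descriptor only queues it, and nothing is transferred until the pending queue is kicked. A minimal sketch of the full prepare/submit/issue sequence; channel and scatterlist setup are omitted and the surrounding names are hypothetical:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static void foo_dma_done(void *param)
{
        /* completion callback, runs when the transfer finishes */
}

static int foo_start_dma(struct dma_chan *chan, struct scatterlist *sgl,
                         unsigned int sg_len)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        desc->callback = foo_dma_done;
        desc->callback_param = NULL;

        dmaengine_submit(desc);                 /* queue the descriptor ...          */
        dma_async_issue_pending(chan);          /* ... and actually start the engine */

        return 0;
}
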
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5c2b1c10af9c..56d4499d4388 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -249,7 +249,7 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
249 * the pbias cell programming support is still missing when 249 * the pbias cell programming support is still missing when
250 * booting with Device tree 250 * booting with Device tree
251 */ 251 */
252 if (of_have_populated_dt() && !vdd) 252 if (dev->of_node && !vdd)
253 return 0; 253 return 0;
254 254
255 if (mmc_slot(host).before_set_reg) 255 if (mmc_slot(host).before_set_reg)
@@ -1549,7 +1549,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1549 * can't be allowed when booting with device 1549 * can't be allowed when booting with device
1550 * tree. 1550 * tree.
1551 */ 1551 */
1552 (!of_have_populated_dt())) { 1552 !host->dev->of_node) {
1553 /* 1553 /*
1554 * The mmc_select_voltage fn of the core does 1554 * The mmc_select_voltage fn of the core does
1555 * not seem to set the power_mode to 1555 * not seem to set the power_mode to
@@ -1741,7 +1741,7 @@ static const struct of_device_id omap_mmc_of_match[] = {
1741 .data = &omap4_reg_offset, 1741 .data = &omap4_reg_offset,
1742 }, 1742 },
1743 {}, 1743 {},
1744} 1744};
1745MODULE_DEVICE_TABLE(of, omap_mmc_of_match); 1745MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
1746 1746
1747static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) 1747static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 6193a0d7bde5..8abdaf6697a8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -467,8 +467,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
467 clk_prepare_enable(clk); 467 clk_prepare_enable(clk);
468 pltfm_host->clk = clk; 468 pltfm_host->clk = clk;
469 469
470 if (!is_imx25_esdhc(imx_data)) 470 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
471 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
472 471
473 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) 472 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
474 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ 473 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9aa77f3f04a8..ccefdebeff14 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -147,7 +147,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
147 u32 present, irqs; 147 u32 present, irqs;
148 148
149 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 149 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
150 !mmc_card_is_removable(host->mmc)) 150 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
151 return; 151 return;
152 152
153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 75b1dde16358..9ec51cec2e14 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
266 desc->callback = dma_irq_callback; 266 desc->callback = dma_irq_callback;
267 desc->callback_param = this; 267 desc->callback_param = this;
268 dmaengine_submit(desc); 268 dmaengine_submit(desc);
269 dma_async_issue_pending(get_dma_chan(this));
269 270
270 /* Wait for the interrupt from the DMA block. */ 271 /* Wait for the interrupt from the DMA block. */
271 err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); 272 err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 25197b698dd6..b8b4c7ba884f 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev)
89 BUGLVL(D_NORMAL) printk(VERSION); 89 BUGLVL(D_NORMAL) printk(VERSION);
90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); 90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
91 91
92 BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", 92 BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
93 dev->dev_addr[0], dev->mem_start, dev->irq); 93 dev->dev_addr[0], dev->mem_start, dev->irq);
94 94
95 if (dev->mem_start <= 0 || dev->irq <= 0) { 95 if (dev->mem_start <= 0 || dev->irq <= 0) {
96 BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " 96 BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
97 "must specify the shmem and irq!\n"); 97 "must specify the shmem and irq!\n");
98 return -ENODEV; 98 return -ENODEV;
99 } 99 }
100 if (dev->dev_addr[0] == 0) { 100 if (dev->dev_addr[0] == 0) {
101 BUGMSG(D_NORMAL, "You need to specify your card's station " 101 BUGLVL(D_NORMAL) printk("You need to specify your card's station "
102 "ID!\n"); 102 "ID!\n");
103 return -ENODEV; 103 return -ENODEV;
104 } 104 }
@@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev)
109 * will be taken. 109 * will be taken.
110 */ 110 */
111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { 111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
112 BUGMSG(D_NORMAL, "Card memory already allocated\n"); 112 BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
113 return -ENODEV; 113 return -ENODEV;
114 } 114 }
115 return arcrimi_found(dev); 115 return arcrimi_found(dev);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 793b00138275..3463b469e657 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2173,9 +2173,10 @@ re_arm:
2173 * received frames (loopback). Since only the payload is given to this 2173 * received frames (loopback). Since only the payload is given to this
2174 * function, it check for loopback. 2174 * function, it check for loopback.
2175 */ 2175 */
2176static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) 2176static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
2177{ 2177{
2178 struct port *port; 2178 struct port *port;
2179 int ret = RX_HANDLER_ANOTHER;
2179 2180
2180 if (length >= sizeof(struct lacpdu)) { 2181 if (length >= sizeof(struct lacpdu)) {
2181 2182
@@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2184 if (!port->slave) { 2185 if (!port->slave) {
2185 pr_warning("%s: Warning: port of slave %s is uninitialized\n", 2186 pr_warning("%s: Warning: port of slave %s is uninitialized\n",
2186 slave->dev->name, slave->dev->master->name); 2187 slave->dev->name, slave->dev->master->name);
2187 return; 2188 return ret;
2188 } 2189 }
2189 2190
2190 switch (lacpdu->subtype) { 2191 switch (lacpdu->subtype) {
2191 case AD_TYPE_LACPDU: 2192 case AD_TYPE_LACPDU:
2193 ret = RX_HANDLER_CONSUMED;
2192 pr_debug("Received LACPDU on port %d\n", 2194 pr_debug("Received LACPDU on port %d\n",
2193 port->actor_port_number); 2195 port->actor_port_number);
2194 /* Protect against concurrent state machines */ 2196 /* Protect against concurrent state machines */
@@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2198 break; 2200 break;
2199 2201
2200 case AD_TYPE_MARKER: 2202 case AD_TYPE_MARKER:
2203 ret = RX_HANDLER_CONSUMED;
2201 // No need to convert fields to Little Endian since we don't use the marker's fields. 2204 // No need to convert fields to Little Endian since we don't use the marker's fields.
2202 2205
2203 switch (((struct bond_marker *)lacpdu)->tlv_type) { 2206 switch (((struct bond_marker *)lacpdu)->tlv_type) {
@@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2219 } 2222 }
2220 } 2223 }
2221 } 2224 }
2225 return ret;
2222} 2226}
2223 2227
2224/** 2228/**
@@ -2456,18 +2460,20 @@ out:
2456 return NETDEV_TX_OK; 2460 return NETDEV_TX_OK;
2457} 2461}
2458 2462
2459void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2463int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
2460 struct slave *slave) 2464 struct slave *slave)
2461{ 2465{
2466 int ret = RX_HANDLER_ANOTHER;
2462 if (skb->protocol != PKT_TYPE_LACPDU) 2467 if (skb->protocol != PKT_TYPE_LACPDU)
2463 return; 2468 return ret;
2464 2469
2465 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2470 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2466 return; 2471 return ret;
2467 2472
2468 read_lock(&bond->lock); 2473 read_lock(&bond->lock);
2469 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2474 ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
2470 read_unlock(&bond->lock); 2475 read_unlock(&bond->lock);
2476 return ret;
2471} 2477}
2472 2478
2473/* 2479/*
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 235b2cc58b28..5ee7e3c45db7 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
274void bond_3ad_handle_link_change(struct slave *slave, char link); 274void bond_3ad_handle_link_change(struct slave *slave, char link);
275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
277void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 277int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
278 struct slave *slave); 278 struct slave *slave);
279int bond_3ad_set_carrier(struct bonding *bond); 279int bond_3ad_set_carrier(struct bonding *bond);
280void bond_3ad_update_lacp_rate(struct bonding *bond); 280void bond_3ad_update_lacp_rate(struct bonding *bond);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409bb293..bc13b3d77432 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1444 struct sk_buff *skb = *pskb; 1444 struct sk_buff *skb = *pskb;
1445 struct slave *slave; 1445 struct slave *slave;
1446 struct bonding *bond; 1446 struct bonding *bond;
1447 void (*recv_probe)(struct sk_buff *, struct bonding *, 1447 int (*recv_probe)(struct sk_buff *, struct bonding *,
1448 struct slave *); 1448 struct slave *);
1449 int ret = RX_HANDLER_ANOTHER;
1449 1450
1450 skb = skb_share_check(skb, GFP_ATOMIC); 1451 skb = skb_share_check(skb, GFP_ATOMIC);
1451 if (unlikely(!skb)) 1452 if (unlikely(!skb))
@@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1464 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1465 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1465 1466
1466 if (likely(nskb)) { 1467 if (likely(nskb)) {
1467 recv_probe(nskb, bond, slave); 1468 ret = recv_probe(nskb, bond, slave);
1468 dev_kfree_skb(nskb); 1469 dev_kfree_skb(nskb);
1470 if (ret == RX_HANDLER_CONSUMED) {
1471 consume_skb(skb);
1472 return ret;
1473 }
1469 } 1474 }
1470 } 1475 }
1471 1476
@@ -1487,7 +1492,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1487 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); 1492 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1488 } 1493 }
1489 1494
1490 return RX_HANDLER_ANOTHER; 1495 return ret;
1491} 1496}
1492 1497
1493/* enslave device <slave> to bond device <master> */ 1498/* enslave device <slave> to bond device <master> */
@@ -2723,7 +2728,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2723 } 2728 }
2724} 2729}
2725 2730
2726static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2731static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2727 struct slave *slave) 2732 struct slave *slave)
2728{ 2733{
2729 struct arphdr *arp; 2734 struct arphdr *arp;
@@ -2731,7 +2736,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2731 __be32 sip, tip; 2736 __be32 sip, tip;
2732 2737
2733 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2738 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2734 return; 2739 return RX_HANDLER_ANOTHER;
2735 2740
2736 read_lock(&bond->lock); 2741 read_lock(&bond->lock);
2737 2742
@@ -2776,6 +2781,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2776 2781
2777out_unlock: 2782out_unlock:
2778 read_unlock(&bond->lock); 2783 read_unlock(&bond->lock);
2784 return RX_HANDLER_ANOTHER;
2779} 2785}
2780 2786
2781/* 2787/*
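
The bonding hunks above change the recv_probe hooks (bond_3ad_lacpdu_recv(), bond_arp_rcv()) from void to int so bond_handle_frame() can learn whether the frame was consumed: LACPDUs and marker frames now return RX_HANDLER_CONSUMED and the caller drops the original skb with consume_skb() instead of letting it travel further up the stack. A minimal sketch of that contract with hypothetical names; only the RX_HANDLER_* values and the skb helpers are taken from the hunks:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Probe hook: RX_HANDLER_CONSUMED means "fully handled, stop here",
 * RX_HANDLER_ANOTHER means "carry on with normal processing". */
static int foo_recv_probe(struct sk_buff *skb)
{
        if (skb->protocol == cpu_to_be16(ETH_P_SLOW))   /* e.g. LACP */
                return RX_HANDLER_CONSUMED;
        return RX_HANDLER_ANOTHER;
}

static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
        int ret = RX_HANDLER_ANOTHER;

        if (nskb) {
                ret = foo_recv_probe(nskb);
                dev_kfree_skb(nskb);
                if (ret == RX_HANDLER_CONSUMED) {
                        consume_skb(skb);       /* the frame ends here */
                        return ret;
                }
        }
        return ret;                             /* let the stack keep the skb */
}
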
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9a66e2a910ae..9c1c8cd5223f 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -744,14 +744,14 @@ static void cfhsi_wake_up(struct work_struct *work)
744 size_t fifo_occupancy = 0; 744 size_t fifo_occupancy = 0;
745 745
746 /* Wakeup timeout */ 746 /* Wakeup timeout */
747 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", 747 dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
748 __func__); 748 __func__);
749 749
750 /* Check FIFO to check if modem has sent something. */ 750 /* Check FIFO to check if modem has sent something. */
751 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 751 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
752 &fifo_occupancy)); 752 &fifo_occupancy));
753 753
754 dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 754 dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
755 __func__, (unsigned) fifo_occupancy); 755 __func__, (unsigned) fifo_occupancy);
756 756
757 /* Check if we misssed the interrupt. */ 757 /* Check if we misssed the interrupt. */
@@ -1210,7 +1210,7 @@ int cfhsi_probe(struct platform_device *pdev)
1210 1210
1211static void cfhsi_shutdown(struct cfhsi *cfhsi) 1211static void cfhsi_shutdown(struct cfhsi *cfhsi)
1212{ 1212{
1213 u8 *tx_buf, *rx_buf; 1213 u8 *tx_buf, *rx_buf, *flip_buf;
1214 1214
1215 /* Stop TXing */ 1215 /* Stop TXing */
1216 netif_tx_stop_all_queues(cfhsi->ndev); 1216 netif_tx_stop_all_queues(cfhsi->ndev);
@@ -1234,7 +1234,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1234 /* Store bufferes: will be freed later. */ 1234 /* Store bufferes: will be freed later. */
1235 tx_buf = cfhsi->tx_buf; 1235 tx_buf = cfhsi->tx_buf;
1236 rx_buf = cfhsi->rx_buf; 1236 rx_buf = cfhsi->rx_buf;
1237 1237 flip_buf = cfhsi->rx_flip_buf;
1238 /* Flush transmit queues. */ 1238 /* Flush transmit queues. */
1239 cfhsi_abort_tx(cfhsi); 1239 cfhsi_abort_tx(cfhsi);
1240 1240
@@ -1247,6 +1247,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1247 /* Free buffers. */ 1247 /* Free buffers. */
1248 kfree(tx_buf); 1248 kfree(tx_buf);
1249 kfree(rx_buf); 1249 kfree(rx_buf);
1250 kfree(flip_buf);
1250} 1251}
1251 1252
1252int cfhsi_remove(struct platform_device *pdev) 1253int cfhsi_remove(struct platform_device *pdev)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5234586dff15..629c4ba5d49d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
875 PCAN_USBPRO_INFO_FW, 875 PCAN_USBPRO_INFO_FW,
876 &fi, sizeof(fi)); 876 &fi, sizeof(fi));
877 if (err) { 877 if (err) {
878 kfree(usb_if);
878 dev_err(dev->netdev->dev.parent, 879 dev_err(dev->netdev->dev.parent,
879 "unable to read %s firmware info (err %d)\n", 880 "unable to read %s firmware info (err %d)\n",
880 pcan_usb_pro.name, err); 881 pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
885 PCAN_USBPRO_INFO_BL, 886 PCAN_USBPRO_INFO_BL,
886 &bi, sizeof(bi)); 887 &bi, sizeof(bi));
887 if (err) { 888 if (err) {
889 kfree(usb_if);
888 dev_err(dev->netdev->dev.parent, 890 dev_err(dev->netdev->dev.parent,
889 "unable to read %s bootloader info (err %d)\n", 891 "unable to read %s bootloader info (err %d)\n",
890 pcan_usb_pro.name, err); 892 pcan_usb_pro.name, err);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d5c6d92f1ee7..442d91a2747b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev)
107 return 0; 107 return 0;
108} 108}
109 109
110static void dummy_dev_free(struct net_device *dev) 110static void dummy_dev_uninit(struct net_device *dev)
111{ 111{
112 free_percpu(dev->dstats); 112 free_percpu(dev->dstats);
113 free_netdev(dev);
114} 113}
115 114
116static const struct net_device_ops dummy_netdev_ops = { 115static const struct net_device_ops dummy_netdev_ops = {
117 .ndo_init = dummy_dev_init, 116 .ndo_init = dummy_dev_init,
117 .ndo_uninit = dummy_dev_uninit,
118 .ndo_start_xmit = dummy_xmit, 118 .ndo_start_xmit = dummy_xmit,
119 .ndo_validate_addr = eth_validate_addr, 119 .ndo_validate_addr = eth_validate_addr,
120 .ndo_set_rx_mode = set_multicast_list, 120 .ndo_set_rx_mode = set_multicast_list,
@@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev)
128 128
129 /* Initialize the device structure. */ 129 /* Initialize the device structure. */
130 dev->netdev_ops = &dummy_netdev_ops; 130 dev->netdev_ops = &dummy_netdev_ops;
131 dev->destructor = dummy_dev_free; 131 dev->destructor = free_netdev;
132 132
133 /* Fill in device structure with ethernet-generic values. */ 133 /* Fill in device structure with ethernet-generic values. */
134 dev->tx_queue_len = 0; 134 dev->tx_queue_len = 0;
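
The dummy.c hunk above splits teardown between the two hooks that exist for it: per-cpu stats are released from .ndo_uninit, and the net_device itself is freed by pointing dev->destructor straight at free_netdev() (the destructor hook as of the kernel version in this diff). A minimal sketch of that pairing for a hypothetical driver; struct foo_stats and foo_priv stand in for the driver's real per-cpu counters:

#include <linux/netdevice.h>
#include <linux/percpu.h>

struct foo_stats {
        u64 packets;
        u64 bytes;
};

struct foo_priv {
        struct foo_stats __percpu *stats;
};

static int foo_dev_init(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        priv->stats = alloc_percpu(struct foo_stats);
        return priv->stats ? 0 : -ENOMEM;
}

static void foo_dev_uninit(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        free_percpu(priv->stats);       /* undo only what ndo_init allocated */
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_init   = foo_dev_init,
        .ndo_uninit = foo_dev_uninit,
};

static void foo_setup(struct net_device *dev)
{
        dev->netdev_ops = &foo_netdev_ops;
        dev->destructor = free_netdev;  /* the core frees the netdev itself */
}
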
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac41436549..c926857e8205 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2476 "pcie phy link down %x\n", status); 2476 "pcie phy link down %x\n", status);
2477 if (netif_running(adapter->netdev)) { /* reset MAC */ 2477 if (netif_running(adapter->netdev)) { /* reset MAC */
2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2479 schedule_work(&adapter->pcie_dma_to_rst_task); 2479 schedule_work(&adapter->reset_dev_task);
2480 return IRQ_HANDLED; 2480 return IRQ_HANDLED;
2481 } 2481 }
2482 } 2482 }
@@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2488 "pcie DMA r/w error (status = 0x%x)\n", 2488 "pcie DMA r/w error (status = 0x%x)\n",
2489 status); 2489 status);
2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2491 schedule_work(&adapter->pcie_dma_to_rst_task); 2491 schedule_work(&adapter->reset_dev_task);
2492 return IRQ_HANDLED; 2492 return IRQ_HANDLED;
2493 } 2493 }
2494 2494
@@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter)
2633 atl1_clean_rx_ring(adapter); 2633 atl1_clean_rx_ring(adapter);
2634} 2634}
2635 2635
2636static void atl1_tx_timeout_task(struct work_struct *work) 2636static void atl1_reset_dev_task(struct work_struct *work)
2637{ 2637{
2638 struct atl1_adapter *adapter = 2638 struct atl1_adapter *adapter =
2639 container_of(work, struct atl1_adapter, tx_timeout_task); 2639 container_of(work, struct atl1_adapter, reset_dev_task);
2640 struct net_device *netdev = adapter->netdev; 2640 struct net_device *netdev = adapter->netdev;
2641 2641
2642 netif_device_detach(netdev); 2642 netif_device_detach(netdev);
@@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3038 (unsigned long)adapter); 3038 (unsigned long)adapter);
3039 adapter->phy_timer_pending = false; 3039 adapter->phy_timer_pending = false;
3040 3040
3041 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3041 INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
3042 3042
3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); 3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
3044 3044
3045 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
3046
3047 err = register_netdev(netdev); 3045 err = register_netdev(netdev);
3048 if (err) 3046 if (err)
3049 goto err_common; 3047 goto err_common;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da8be97..e04bf4d71e46 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -758,9 +758,8 @@ struct atl1_adapter {
758 u16 link_speed; 758 u16 link_speed;
759 u16 link_duplex; 759 u16 link_duplex;
760 spinlock_t lock; 760 spinlock_t lock;
761 struct work_struct tx_timeout_task; 761 struct work_struct reset_dev_task;
762 struct work_struct link_chg_task; 762 struct work_struct link_chg_task;
763 struct work_struct pcie_dma_to_rst_task;
764 763
765 struct timer_list phy_config_timer; 764 struct timer_list phy_config_timer;
766 bool phy_timer_pending; 765 bool phy_timer_pending;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837236dc..c9e9dc57986c 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
194{ 194{
195 struct atlx_adapter *adapter = netdev_priv(netdev); 195 struct atlx_adapter *adapter = netdev_priv(netdev);
196 /* Do the reset outside of interrupt context */ 196 /* Do the reset outside of interrupt context */
197 schedule_work(&adapter->tx_timeout_task); 197 schedule_work(&adapter->reset_dev_task);
198} 198}
199 199
200/* 200/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ad95324dc042..64392ec410a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -942,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : 942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
943 DCBX_E3B0_MAX_NUM_COS_PORT0; 943 DCBX_E3B0_MAX_NUM_COS_PORT0;
944 944
945 if (pri >= max_num_of_cos) {
946 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
947 "parameter Illegal strict priority\n");
948 return -EINVAL;
949 }
950
945 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { 951 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
946 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 952 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
947 "parameter There can't be two COS's with " 953 "parameter There can't be two COS's with "
@@ -949,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
949 return -EINVAL; 955 return -EINVAL;
950 } 956 }
951 957
952 if (pri > max_num_of_cos) {
953 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
954 "parameter Illegal strict priority\n");
955 return -EINVAL;
956 }
957
958 sp_pri_to_cos[pri] = cos_entry; 958 sp_pri_to_cos[pri] = cos_entry;
959 return 0; 959 return 0;
960 960
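
The bnx2x_link.c hunk above moves the strict-priority range check ahead of the sp_pri_to_cos[pri] lookup and tightens it from "pri > max" to "pri >= max", so an out-of-range priority can no longer index past the end of the array before being rejected. A tiny stand-alone illustration of that validate-then-index ordering:

#include <errno.h>
#include <stdio.h>

#define MAX_COS 4

static int sp_pri_to_cos[MAX_COS];

static int set_pri_to_cos(unsigned int pri, int cos_entry)
{
        /* Reject before touching the array: >= also catches pri == MAX_COS,
         * which a plain '>' would let through. */
        if (pri >= MAX_COS)
                return -EINVAL;

        if (sp_pri_to_cos[pri] != 0)    /* slot already claimed */
                return -EINVAL;

        sp_pri_to_cos[pri] = cos_entry;
        return 0;
}

int main(void)
{
        printf("%d %d\n", set_pri_to_cos(1, 3), set_pri_to_cos(4, 3));
        return 0;
}
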
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d2508727..6af310195bae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9122,13 +9122,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9122 return bnx2x_prev_mcp_done(bp); 9122 return bnx2x_prev_mcp_done(bp);
9123} 9123}
9124 9124
9125/* previous driver DMAE transaction may have occurred when pre-boot stage ended
9126 * and boot began, or when kdump kernel was loaded. Either case would invalidate
9127 * the addresses of the transaction, resulting in was-error bit set in the pci
9128 * causing all hw-to-host pcie transactions to timeout. If this happened we want
9129 * to clear the interrupt which detected this from the pglueb and the was done
9130 * bit
9131 */
9132static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9133{
9134 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9135 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9136 BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
9137 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
9138 }
9139}
9140
9125static int __devinit bnx2x_prev_unload(struct bnx2x *bp) 9141static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9126{ 9142{
9127 int time_counter = 10; 9143 int time_counter = 10;
9128 u32 rc, fw, hw_lock_reg, hw_lock_val; 9144 u32 rc, fw, hw_lock_reg, hw_lock_val;
9129 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9145 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9130 9146
9131 /* Release previously held locks */ 9147 /* clear hw from errors which may have resulted from an interrupted
9148 * dmae transaction.
9149 */
9150 bnx2x_prev_interrupted_dmae(bp);
9151
9152 /* Release previously held locks */
9132 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 9153 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9133 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 9154 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9134 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 9155 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac333fde6..ceeab8e852ef 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
879 if (sblk->status & SD_STATUS_LINK_CHG) 879 if (sblk->status & SD_STATUS_LINK_CHG)
880 work_exists = 1; 880 work_exists = 1;
881 } 881 }
882 /* check for RX/TX work to do */ 882
883 if (sblk->idx[0].tx_consumer != tnapi->tx_cons || 883 /* check for TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
885 work_exists = 1;
886
887 /* check for RX work to do */
888 if (tnapi->rx_rcb_prod_idx &&
884 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 889 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885 work_exists = 1; 890 work_exists = 1;
886 891
@@ -6124,6 +6129,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6124 return work_done; 6129 return work_done;
6125 } 6130 }
6126 6131
6132 if (!tnapi->rx_rcb_prod_idx)
6133 return work_done;
6134
6127 /* run RX thread, within the bounds set by NAPI. 6135 /* run RX thread, within the bounds set by NAPI.
6128 * All RX "locking" is done by ensuring outside 6136 * All RX "locking" is done by ensuring outside
6129 * code synchronizes with tg3->napi.poll() 6137 * code synchronizes with tg3->napi.poll()
@@ -7567,6 +7575,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
7567 */ 7575 */
7568 switch (i) { 7576 switch (i) {
7569 default: 7577 default:
7578 if (tg3_flag(tp, ENABLE_RSS)) {
7579 tnapi->rx_rcb_prod_idx = NULL;
7580 break;
7581 }
7582 /* Fall through */
7583 case 1:
7570 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 7584 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7571 break; 7585 break;
7572 case 2: 7586 case 2:
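
The tg3 hunks above leave tnapi->rx_rcb_prod_idx NULL on interrupt vectors that own no RX return ring when RSS is enabled, and then make both tg3_has_work() and tg3_poll_work() check the pointer before dereferencing it. A tiny sketch of that optional-pointer guard with hypothetical names:

/* Per-vector state: rx_prod_idx is only set for vectors that own an RX ring. */
struct foo_napi {
        unsigned int *rx_prod_idx;      /* NULL => this vector has no RX work */
        unsigned int  rx_ptr;
};

static int foo_has_rx_work(const struct foo_napi *n)
{
        /* Test the pointer before the value it points at. */
        return n->rx_prod_idx && *n->rx_prod_idx != n->rx_ptr;
}
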
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63bfdd10bd6d..abb6ce7c1b7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1150,6 +1150,48 @@ release_tpsram:
1150} 1150}
1151 1151
1152/** 1152/**
1153 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1154 * @adap: the adapter
1155 * @p: the port
1156 *
1157 * Ensures that current Rx processing on any of the queues associated with
1158 * the given port completes before returning. We do this by acquiring and
1159 * releasing the locks of the response queues associated with the port.
1160 */
1161static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1162{
1163 int i;
1164
1165 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1166 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1167
1168 spin_lock_irq(&q->lock);
1169 spin_unlock_irq(&q->lock);
1170 }
1171}
1172
1173static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1174{
1175 struct port_info *pi = netdev_priv(dev);
1176 struct adapter *adapter = pi->adapter;
1177
1178 if (adapter->params.rev > 0) {
1179 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1180 features & NETIF_F_HW_VLAN_RX);
1181 } else {
1182 /* single control for all ports */
1183 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
1184
1185 for_each_port(adapter, i)
1186 have_vlans |=
1187 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
1188
1189 t3_set_vlan_accel(adapter, 1, have_vlans);
1190 }
1191 t3_synchronize_rx(adapter, pi);
1192}
1193
1194/**
1153 * cxgb_up - enable the adapter 1195 * cxgb_up - enable the adapter
1154 * @adapter: adapter being enabled 1196 * @adapter: adapter being enabled
1155 * 1197 *
@@ -1161,7 +1203,7 @@ release_tpsram:
1161 */ 1203 */
1162static int cxgb_up(struct adapter *adap) 1204static int cxgb_up(struct adapter *adap)
1163{ 1205{
1164 int err; 1206 int i, err;
1165 1207
1166 if (!(adap->flags & FULL_INIT_DONE)) { 1208 if (!(adap->flags & FULL_INIT_DONE)) {
1167 err = t3_check_fw_version(adap); 1209 err = t3_check_fw_version(adap);
@@ -1198,6 +1240,9 @@ static int cxgb_up(struct adapter *adap)
1198 if (err) 1240 if (err)
1199 goto out; 1241 goto out;
1200 1242
1243 for_each_port(adap, i)
1244 cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1245
1201 setup_rss(adap); 1246 setup_rss(adap);
1202 if (!(adap->flags & NAPI_INIT)) 1247 if (!(adap->flags & NAPI_INIT))
1203 init_napi(adap); 1248 init_napi(adap);
@@ -2508,48 +2553,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2508 return 0; 2553 return 0;
2509} 2554}
2510 2555
2511/**
2512 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2513 * @adap: the adapter
2514 * @p: the port
2515 *
2516 * Ensures that current Rx processing on any of the queues associated with
2517 * the given port completes before returning. We do this by acquiring and
2518 * releasing the locks of the response queues associated with the port.
2519 */
2520static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2521{
2522 int i;
2523
2524 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2525 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2526
2527 spin_lock_irq(&q->lock);
2528 spin_unlock_irq(&q->lock);
2529 }
2530}
2531
2532static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
2533{
2534 struct port_info *pi = netdev_priv(dev);
2535 struct adapter *adapter = pi->adapter;
2536
2537 if (adapter->params.rev > 0) {
2538 t3_set_vlan_accel(adapter, 1 << pi->port_id,
2539 features & NETIF_F_HW_VLAN_RX);
2540 } else {
2541 /* single control for all ports */
2542 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
2543
2544 for_each_port(adapter, i)
2545 have_vlans |=
2546 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
2547
2548 t3_set_vlan_accel(adapter, 1, have_vlans);
2549 }
2550 t3_synchronize_rx(adapter, pi);
2551}
2552
2553static netdev_features_t cxgb_fix_features(struct net_device *dev, 2556static netdev_features_t cxgb_fix_features(struct net_device *dev,
2554 netdev_features_t features) 2557 netdev_features_t features)
2555{ 2558{
@@ -3353,9 +3356,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3353 err = sysfs_create_group(&adapter->port[0]->dev.kobj, 3356 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3354 &cxgb3_attr_group); 3357 &cxgb3_attr_group);
3355 3358
3356 for_each_port(adapter, i)
3357 cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
3358
3359 print_port_info(adapter, ai); 3359 print_port_info(adapter, ai);
3360 return 0; 3360 return 0;
3361 3361
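
The cxgb3 hunk above moves cxgb_vlan_mode() and its helper ahead of cxgb_up() so the VLAN acceleration setting is reapplied every time the adapter is brought up rather than only once at probe. The helper t3_synchronize_rx() is worth noting on its own: acquiring and immediately releasing each response-queue lock is enough to guarantee that any RX processing already running under that lock has finished. A minimal sketch of that lock-cycling barrier; the queue layout is hypothetical:

#include <linux/spinlock.h>

struct foo_rxq {
        spinlock_t lock;        /* held by the RX handler while it runs */
        /* ... descriptors, counters ... */
};

/* After we have taken and dropped a queue's lock, whoever held it before
 * us is guaranteed to have left the critical section. Cycling every
 * queue therefore waits out all in-flight RX processing for the port. */
static void foo_synchronize_rx(struct foo_rxq *qs, int nqueues)
{
        int i;

        for (i = 0; i < nqueues; i++) {
                spin_lock_irq(&qs[i].lock);
                spin_unlock_irq(&qs[i].lock);
        }
}
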
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c81a147..2e09edb9cdf8 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1259{ 1259{
1260 int phy_addr; 1260 int phy_addr;
1261 struct netdev_private *np = netdev_priv(dev); 1261 struct netdev_private *np = netdev_priv(dev);
1262 struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru; 1262 struct mii_ioctl_data *miidata = if_mii(rq);
1263
1264 struct netdev_desc *desc;
1265 int i;
1266 1263
1267 phy_addr = np->phy_addr; 1264 phy_addr = np->phy_addr;
1268 switch (cmd) { 1265 switch (cmd) {
1269 case SIOCDEVPRIVATE: 1266 case SIOCGMIIPHY:
1270 break; 1267 miidata->phy_id = phy_addr;
1271
1272 case SIOCDEVPRIVATE + 1:
1273 miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
1274 break; 1268 break;
1275 case SIOCDEVPRIVATE + 2: 1269 case SIOCGMIIREG:
1276 mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value); 1270 miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
1277 break; 1271 break;
1278 case SIOCDEVPRIVATE + 3: 1272 case SIOCSMIIREG:
1279 break; 1273 if (!capable(CAP_NET_ADMIN))
1280 case SIOCDEVPRIVATE + 4: 1274 return -EPERM;
1281 break; 1275 mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
1282 case SIOCDEVPRIVATE + 5:
1283 netif_stop_queue (dev);
1284 break; 1276 break;
1285 case SIOCDEVPRIVATE + 6:
1286 netif_wake_queue (dev);
1287 break;
1288 case SIOCDEVPRIVATE + 7:
1289 printk
1290 ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
1291 netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
1292 np->old_rx);
1293 break;
1294 case SIOCDEVPRIVATE + 8:
1295 printk("TX ring:\n");
1296 for (i = 0; i < TX_RING_SIZE; i++) {
1297 desc = &np->tx_ring[i];
1298 printk
1299 ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
1300 i,
1301 (u32) (np->tx_ring_dma + i * sizeof (*desc)),
1302 (u32)le64_to_cpu(desc->next_desc),
1303 (u32)le64_to_cpu(desc->status),
1304 (u32)(le64_to_cpu(desc->fraginfo) >> 32),
1305 (u32)le64_to_cpu(desc->fraginfo));
1306 printk ("\n");
1307 }
1308 printk ("\n");
1309 break;
1310
1311 default: 1277 default:
1312 return -EOPNOTSUPP; 1278 return -EOPNOTSUPP;
1313 } 1279 }
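
The dl2k hunk above retires a pile of SIOCDEVPRIVATE debugging sub-commands and the driver-private struct mii_data in favour of the standard MII ioctls, reading the request through if_mii() and enforcing CAP_NET_ADMIN on register writes. A minimal sketch of that standard handler shape; foo_mii_read()/foo_mii_write() are stand-ins for the driver's own PHY accessors:

#include <linux/capability.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/sockios.h>

static int foo_mii_read(struct net_device *dev, int phy, int reg)          { return 0; }
static void foo_mii_write(struct net_device *dev, int phy, int reg, u16 v) { }

static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct mii_ioctl_data *mii = if_mii(rq);
        int phy_addr = 1;                       /* driver-specific PHY address */

        switch (cmd) {
        case SIOCGMIIPHY:                       /* report which PHY we talk to */
                mii->phy_id = phy_addr;
                break;
        case SIOCGMIIREG:                       /* read a PHY register */
                mii->val_out = foo_mii_read(dev, phy_addr, mii->reg_num);
                break;
        case SIOCSMIIREG:                       /* write a PHY register (privileged) */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                foo_mii_write(dev, phy_addr, mii->reg_num, mii->val_in);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}
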
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adcafa55a..30c2da3de548 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -365,13 +365,6 @@ struct ioctl_data {
365 char *data; 365 char *data;
366}; 366};
367 367
368struct mii_data {
369 __u16 reserved;
370 __u16 reg_num;
371 __u16 in_value;
372 __u16 out_value;
373};
374
375/* The Rx and Tx buffer descriptors. */ 368/* The Rx and Tx buffer descriptors. */
376struct netdev_desc { 369struct netdev_desc {
377 __le64 next_desc; 370 __le64 next_desc;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 17a46e76123f..9ac14f804851 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -116,10 +116,10 @@ static struct ucc_geth_info ugeth_primary_info = {
116 .maxGroupAddrInHash = 4, 116 .maxGroupAddrInHash = 4,
117 .maxIndAddrInHash = 4, 117 .maxIndAddrInHash = 4,
118 .prel = 7, 118 .prel = 7,
119 .maxFrameLength = 1518, 119 .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
120 .minFrameLength = 64, 120 .minFrameLength = 64,
121 .maxD1Length = 1520, 121 .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
122 .maxD2Length = 1520, 122 .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
123 .vlantype = 0x8100, 123 .vlantype = 0x8100,
124 .ecamptr = ((uint32_t) NULL), 124 .ecamptr = ((uint32_t) NULL),
125 .eventRegMask = UCCE_OTHER, 125 .eventRegMask = UCCE_OTHER,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 2e395a2566b8..f71b3e7b12de 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -877,7 +877,7 @@ struct ucc_geth_hardware_statistics {
877 877
878/* Driver definitions */ 878/* Driver definitions */
879#define TX_BD_RING_LEN 0x10 879#define TX_BD_RING_LEN 0x10
880#define RX_BD_RING_LEN 0x10 880#define RX_BD_RING_LEN 0x20
881 881
882#define TX_RING_MOD_MASK(size) (size-1) 882#define TX_RING_MOD_MASK(size) (size-1)
883#define RX_RING_MOD_MASK(size) (size-1) 883#define RX_RING_MOD_MASK(size) (size-1)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3516e17a399d..f4d2da0db1b1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -290,16 +290,18 @@ static void ehea_update_bcmc_registrations(void)
290 290
291 arr[i].adh = adapter->handle; 291 arr[i].adh = adapter->handle;
292 arr[i].port_id = port->logical_port_id; 292 arr[i].port_id = port->logical_port_id;
293 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 293 arr[i].reg_type = EHEA_BCMC_MULTICAST |
294 EHEA_BCMC_MULTICAST |
295 EHEA_BCMC_UNTAGGED; 294 EHEA_BCMC_UNTAGGED;
295 if (mc_entry->macaddr == 0)
296 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
296 arr[i++].macaddr = mc_entry->macaddr; 297 arr[i++].macaddr = mc_entry->macaddr;
297 298
298 arr[i].adh = adapter->handle; 299 arr[i].adh = adapter->handle;
299 arr[i].port_id = port->logical_port_id; 300 arr[i].port_id = port->logical_port_id;
300 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 301 arr[i].reg_type = EHEA_BCMC_MULTICAST |
301 EHEA_BCMC_MULTICAST |
302 EHEA_BCMC_VLANID_ALL; 302 EHEA_BCMC_VLANID_ALL;
303 if (mc_entry->macaddr == 0)
304 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
303 arr[i++].macaddr = mc_entry->macaddr; 305 arr[i++].macaddr = mc_entry->macaddr;
304 num_registrations -= 2; 306 num_registrations -= 2;
305 } 307 }
@@ -1838,8 +1840,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1838 u64 hret; 1840 u64 hret;
1839 u8 reg_type; 1841 u8 reg_type;
1840 1842
1841 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1843 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1842 | EHEA_BCMC_UNTAGGED; 1844 if (mc_mac_addr == 0)
1845 reg_type |= EHEA_BCMC_SCOPE_ALL;
1843 1846
1844 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1847 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1845 port->logical_port_id, 1848 port->logical_port_id,
@@ -1847,8 +1850,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1847 if (hret) 1850 if (hret)
1848 goto out; 1851 goto out;
1849 1852
1850 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1853 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1851 | EHEA_BCMC_VLANID_ALL; 1854 if (mc_mac_addr == 0)
1855 reg_type |= EHEA_BCMC_SCOPE_ALL;
1852 1856
1853 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1857 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1854 port->logical_port_id, 1858 port->logical_port_id,
@@ -1898,7 +1902,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
1898 netdev_err(dev, 1902 netdev_err(dev,
1899 "failed enabling IFF_ALLMULTI\n"); 1903 "failed enabling IFF_ALLMULTI\n");
1900 } 1904 }
1901 } else 1905 } else {
1902 if (!enable) { 1906 if (!enable) {
1903 /* Disable ALLMULTI */ 1907 /* Disable ALLMULTI */
1904 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); 1908 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -1908,6 +1912,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
1908 netdev_err(dev, 1912 netdev_err(dev,
1909 "failed disabling IFF_ALLMULTI\n"); 1913 "failed disabling IFF_ALLMULTI\n");
1910 } 1914 }
1915 }
1911} 1916}
1912 1917
1913static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) 1918static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -1941,11 +1946,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1941 struct netdev_hw_addr *ha; 1946 struct netdev_hw_addr *ha;
1942 int ret; 1947 int ret;
1943 1948
1944 if (port->promisc) { 1949 ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1945 ehea_promiscuous(dev, 1);
1946 return;
1947 }
1948 ehea_promiscuous(dev, 0);
1949 1950
1950 if (dev->flags & IFF_ALLMULTI) { 1951 if (dev->flags & IFF_ALLMULTI) {
1951 ehea_allmulti(dev, 1); 1952 ehea_allmulti(dev, 1);
@@ -2463,6 +2464,7 @@ static int ehea_down(struct net_device *dev)
2463 return 0; 2464 return 0;
2464 2465
2465 ehea_drop_multicast_list(dev); 2466 ehea_drop_multicast_list(dev);
2467 ehea_allmulti(dev, 0);
2466 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2468 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2467 2469
2468 ehea_free_interrupts(dev); 2470 ehea_free_interrupts(dev);
@@ -3261,6 +3263,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3261 struct ehea_adapter *adapter; 3263 struct ehea_adapter *adapter;
3262 const u64 *adapter_handle; 3264 const u64 *adapter_handle;
3263 int ret; 3265 int ret;
3266 int i;
3264 3267
3265 if (!dev || !dev->dev.of_node) { 3268 if (!dev || !dev->dev.of_node) {
3266 pr_err("Invalid ibmebus device probed\n"); 3269 pr_err("Invalid ibmebus device probed\n");
@@ -3314,17 +3317,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3314 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, 3317 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3315 (unsigned long)adapter); 3318 (unsigned long)adapter);
3316 3319
3317 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3318 ehea_interrupt_neq, IRQF_DISABLED,
3319 "ehea_neq", adapter);
3320 if (ret) {
3321 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3322 goto out_kill_eq;
3323 }
3324
3325 ret = ehea_create_device_sysfs(dev); 3320 ret = ehea_create_device_sysfs(dev);
3326 if (ret) 3321 if (ret)
3327 goto out_free_irq; 3322 goto out_kill_eq;
3328 3323
3329 ret = ehea_setup_ports(adapter); 3324 ret = ehea_setup_ports(adapter);
3330 if (ret) { 3325 if (ret) {
@@ -3332,15 +3327,30 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3332 goto out_rem_dev_sysfs; 3327 goto out_rem_dev_sysfs;
3333 } 3328 }
3334 3329
3330 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3331 ehea_interrupt_neq, IRQF_DISABLED,
3332 "ehea_neq", adapter);
3333 if (ret) {
3334 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3335 goto out_shutdown_ports;
3336 }
3337
3338 /* Handle any events that might be pending. */
3339 tasklet_hi_schedule(&adapter->neq_tasklet);
3340
3335 ret = 0; 3341 ret = 0;
3336 goto out; 3342 goto out;
3337 3343
3344out_shutdown_ports:
3345 for (i = 0; i < EHEA_MAX_PORTS; i++)
3346 if (adapter->port[i]) {
3347 ehea_shutdown_single_port(adapter->port[i]);
3348 adapter->port[i] = NULL;
3349 }
3350
3338out_rem_dev_sysfs: 3351out_rem_dev_sysfs:
3339 ehea_remove_device_sysfs(dev); 3352 ehea_remove_device_sysfs(dev);
3340 3353
3341out_free_irq:
3342 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3343
3344out_kill_eq: 3354out_kill_eq:
3345 ehea_destroy_eq(adapter->neq); 3355 ehea_destroy_eq(adapter->neq);
3346 3356
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 52c456ec4d6c..8364815c32ff 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -450,7 +450,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
450 void *cb_addr); 450 void *cb_addr);
451 451
452#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63) 452#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
453#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63) 453#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(60, 63)
454#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63) 454#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
455#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63) 455#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
456 456
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6fd44fa..37caa8885c2a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3380,7 +3380,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
3380 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3380 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3381 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3381 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3382 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; 3382 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3383 struct my_u { u64 a; u64 b; }; 3383 struct my_u { __le64 a; __le64 b; };
3384 struct my_u *u = (struct my_u *)tx_desc; 3384 struct my_u *u = (struct my_u *)tx_desc;
3385 const char *type; 3385 const char *type;
3386 3386
@@ -3424,7 +3424,7 @@ rx_ring_summary:
3424 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3424 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3425 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3425 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3426 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; 3426 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3427 struct my_u { u64 a; u64 b; }; 3427 struct my_u { __le64 a; __le64 b; };
3428 struct my_u *u = (struct my_u *)rx_desc; 3428 struct my_u *u = (struct my_u *)rx_desc;
3429 const char *type; 3429 const char *type;
3430 3430
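
The e1000 hunk above changes the throwaway dump structure from plain u64 to __le64 so that sparse's endianness checking matches the little-endian layout the hardware actually writes into those descriptors. A tiny sketch of the annotation plus its accessor, with a hypothetical descriptor layout:

#include <linux/kernel.h>
#include <linux/types.h>

/* The hardware writes this descriptor in little-endian byte order. */
struct foo_desc {
        __le64 addr;
        __le64 status;
};

static u64 foo_desc_addr(const struct foo_desc *d)
{
        /* le64_to_cpu() is a no-op on LE hosts and a byte swap on BE ones;
         * sparse warns whenever a __le64 is read without it. */
        return le64_to_cpu(d->addr);
}
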
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c76443a7aa..b461c24945e3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1310 1310
1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1312 oem_reg |= HV_OEM_BITS_LPLU; 1312 oem_reg |= HV_OEM_BITS_LPLU;
1313
1314 /* Set Restart auto-neg to activate the bits */
1315 if (!hw->phy.ops.check_reset_block(hw))
1316 oem_reg |= HV_OEM_BITS_RESTART_AN;
1317 } else { 1313 } else {
1318 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 1314 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1319 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 1315 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1324 oem_reg |= HV_OEM_BITS_LPLU; 1320 oem_reg |= HV_OEM_BITS_LPLU;
1325 } 1321 }
1326 1322
1323 /* Set Restart auto-neg to activate the bits */
1324 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1325 !hw->phy.ops.check_reset_block(hw))
1326 oem_reg |= HV_OEM_BITS_RESTART_AN;
1327
1327 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1328 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1328 1329
1329release: 1330release:
@@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3682 3683
3683 if (hw->mac.type >= e1000_pchlan) { 3684 if (hw->mac.type >= e1000_pchlan) {
3684 e1000_oem_bits_config_ich8lan(hw, false); 3685 e1000_oem_bits_config_ich8lan(hw, false);
3685 e1000_phy_hw_reset_ich8lan(hw); 3686
3687 /* Reset PHY to activate OEM bits on 82577/8 */
3688 if (hw->mac.type == e1000_pchlan)
3689 e1000e_phy_hw_reset_generic(hw);
3690
3686 ret_val = hw->phy.ops.acquire(hw); 3691 ret_val = hw->phy.ops.acquire(hw);
3687 if (ret_val) 3692 if (ret_val)
3688 return; 3693 return;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab2154802c..9520a6ac1f30 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3799,7 +3799,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3799 /* fire an unusual interrupt on the test handler */ 3799 /* fire an unusual interrupt on the test handler */
3800 ew32(ICS, E1000_ICS_RXSEQ); 3800 ew32(ICS, E1000_ICS_RXSEQ);
3801 e1e_flush(); 3801 e1e_flush();
3802 msleep(50); 3802 msleep(100);
3803 3803
3804 e1000_irq_disable(adapter); 3804 e1000_irq_disable(adapter);
3805 3805
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e42c3eb..16adeb9418a8 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -106,7 +106,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
106/* 106/*
107 * Interrupt Throttle Rate (interrupts/sec) 107 * Interrupt Throttle Rate (interrupts/sec)
108 * 108 *
109 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 109 * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
110 */ 110 */
111E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 111E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
112#define DEFAULT_ITR 3 112#define DEFAULT_ITR 3
@@ -344,53 +344,60 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
344 344
345 if (num_InterruptThrottleRate > bd) { 345 if (num_InterruptThrottleRate > bd) {
346 adapter->itr = InterruptThrottleRate[bd]; 346 adapter->itr = InterruptThrottleRate[bd];
347 switch (adapter->itr) { 347
348 case 0: 348 /*
349 e_info("%s turned off\n", opt.name); 349 * Make sure a message is printed for non-special
350 break; 350 * values. And in case of an invalid option, display
351 case 1: 351 * warning, use default and go through itr/itr_setting
352 e_info("%s set to dynamic mode\n", opt.name); 352 * adjustment logic below
353 adapter->itr_setting = adapter->itr; 353 */
354 adapter->itr = 20000; 354 if ((adapter->itr > 4) &&
355 break; 355 e1000_validate_option(&adapter->itr, &opt, adapter))
356 case 3: 356 adapter->itr = opt.def;
357 e_info("%s set to dynamic conservative mode\n",
358 opt.name);
359 adapter->itr_setting = adapter->itr;
360 adapter->itr = 20000;
361 break;
362 case 4:
363 e_info("%s set to simplified (2000-8000 ints) "
364 "mode\n", opt.name);
365 adapter->itr_setting = 4;
366 break;
367 default:
368 /*
369 * Save the setting, because the dynamic bits
370 * change itr.
371 */
372 if (e1000_validate_option(&adapter->itr, &opt,
373 adapter) &&
374 (adapter->itr == 3)) {
375 /*
376 * In case of invalid user value,
377 * default to conservative mode.
378 */
379 adapter->itr_setting = adapter->itr;
380 adapter->itr = 20000;
381 } else {
382 /*
383 * Clear the lower two bits because
384 * they are used as control.
385 */
386 adapter->itr_setting =
387 adapter->itr & ~3;
388 }
389 break;
390 }
391 } else { 357 } else {
392 adapter->itr_setting = opt.def; 358 /*
359 * If no option specified, use default value and go
360 * through the logic below to adjust itr/itr_setting
361 */
362 adapter->itr = opt.def;
363
364 /*
365 * Make sure a message is printed for non-special
366 * default values
367 */
368 if (adapter->itr > 40)
369 e_info("%s set to default %d\n", opt.name,
370 adapter->itr);
371 }
372
373 adapter->itr_setting = adapter->itr;
374 switch (adapter->itr) {
375 case 0:
376 e_info("%s turned off\n", opt.name);
377 break;
378 case 1:
379 e_info("%s set to dynamic mode\n", opt.name);
380 adapter->itr = 20000;
381 break;
382 case 3:
383 e_info("%s set to dynamic conservative mode\n",
384 opt.name);
393 adapter->itr = 20000; 385 adapter->itr = 20000;
386 break;
387 case 4:
388 e_info("%s set to simplified (2000-8000 ints) mode\n",
389 opt.name);
390 break;
391 default:
392 /*
393 * Save the setting, because the dynamic bits
394 * change itr.
395 *
396 * Clear the lower two bits because
397 * they are used as control.
398 */
399 adapter->itr_setting &= ~3;
400 break;
394 } 401 }
395 } 402 }
396 { /* Interrupt Mode */ 403 { /* Interrupt Mode */
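The InterruptThrottleRate rework above folds the two old paths (parameter supplied vs. default) into one: validate the raw value once, remember it in itr_setting, then translate the special values in a single switch. A condensed sketch of the resulting flow, using the adapter's itr/itr_setting fields but an invented function name:

        /* requested: 0=off, 1=dynamic, 3=dynamic conservative, 4=simplified,
         * 100..100000 = fixed interrupts/sec */
        static void apply_itr(struct e1000_adapter *adapter, u32 requested)
        {
                adapter->itr = requested;
                adapter->itr_setting = adapter->itr;

                switch (adapter->itr) {
                case 0:                         /* throttling off */
                case 4:                         /* simplified mode */
                        break;
                case 1:                         /* dynamic */
                case 3:                         /* dynamic conservative */
                        adapter->itr = 20000;   /* starting rate for the dynamic algorithm */
                        break;
                default:
                        adapter->itr_setting &= ~3;     /* low two bits are control flags */
                        break;
                }
        }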
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec31598ee47..8683ca4748c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1111,9 +1111,12 @@ msi_only:
1111 adapter->flags |= IGB_FLAG_HAS_MSI; 1111 adapter->flags |= IGB_FLAG_HAS_MSI;
1112out: 1112out:
1113 /* Notify the stack of the (possibly) reduced queue counts. */ 1113 /* Notify the stack of the (possibly) reduced queue counts. */
1114 rtnl_lock();
1114 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 1115 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
1115 return netif_set_real_num_rx_queues(adapter->netdev, 1116 err = netif_set_real_num_rx_queues(adapter->netdev,
1116 adapter->num_rx_queues); 1117 adapter->num_rx_queues);
1118 rtnl_unlock();
1119 return err;
1117} 1120}
1118 1121
1119/** 1122/**
@@ -2771,8 +2774,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2771 2774
2772 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2775 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2773 wr32(E1000_TXDCTL(reg_idx), txdctl); 2776 wr32(E1000_TXDCTL(reg_idx), txdctl);
2774
2775 netdev_tx_reset_queue(txring_txq(ring));
2776} 2777}
2777 2778
2778/** 2779/**
@@ -3282,6 +3283,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3282 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 3283 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3283 } 3284 }
3284 3285
3286 netdev_tx_reset_queue(txring_txq(tx_ring));
3287
3285 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 3288 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3286 memset(tx_ring->tx_buffer_info, 0, size); 3289 memset(tx_ring->tx_buffer_info, 0, size);
3287 3290
@@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev)
6796 pci_enable_wake(pdev, PCI_D3hot, 0); 6799 pci_enable_wake(pdev, PCI_D3hot, 0);
6797 pci_enable_wake(pdev, PCI_D3cold, 0); 6800 pci_enable_wake(pdev, PCI_D3cold, 0);
6798 6801
6799 if (!rtnl_is_locked()) { 6802 if (igb_init_interrupt_scheme(adapter)) {
6800 /*
6801 * shut up ASSERT_RTNL() warning in
6802 * netif_set_real_num_tx/rx_queues.
6803 */
6804 rtnl_lock();
6805 err = igb_init_interrupt_scheme(adapter);
6806 rtnl_unlock();
6807 } else {
6808 err = igb_init_interrupt_scheme(adapter);
6809 }
6810 if (err) {
6811 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 6803 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6812 return -ENOMEM; 6804 return -ENOMEM;
6813 } 6805 }
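Two related cleanups run through the igb hunks above. First, netif_set_real_num_tx/rx_queues() assert RTNL on a registered device (the deleted comment in igb_resume() says as much), so the driver now takes the lock unconditionally around the calls instead of guessing with rtnl_is_locked(). Second, netdev_tx_reset_queue() moves from ring configuration into igb_clean_tx_ring(), so the byte-queue-limit state is cleared exactly when the ring's pending buffers are dropped; the ixgbe hunks further down make the same move. The locking pattern, as a sketch with an invented wrapper name:

        static int notify_stack_of_queue_counts(struct net_device *netdev,
                                                unsigned int num_tx, unsigned int num_rx)
        {
                int err;

                rtnl_lock();    /* the setters assert RTNL once the device is registered */
                netif_set_real_num_tx_queues(netdev, num_tx);
                err = netif_set_real_num_rx_queues(netdev, num_rx);
                rtnl_unlock();

                return err;
        }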
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d61ca2a732f0..8ec74b07f940 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2731,14 +2731,14 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2731 netdev->addr_len); 2731 netdev->addr_len);
2732 } 2732 }
2733 2733
2734 if (!is_valid_ether_addr(netdev->perm_addr)) { 2734 if (!is_valid_ether_addr(netdev->dev_addr)) {
2735 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", 2735 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2736 netdev->dev_addr); 2736 netdev->dev_addr);
2737 err = -EIO; 2737 err = -EIO;
2738 goto err_hw_init; 2738 goto err_hw_init;
2739 } 2739 }
2740 2740
2741 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2741 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2742 2742
2743 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, 2743 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2744 (unsigned long) adapter); 2744 (unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 74e192107f9a..81b155589532 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -574,9 +574,6 @@ extern struct ixgbe_info ixgbe_82599_info;
574extern struct ixgbe_info ixgbe_X540_info; 574extern struct ixgbe_info ixgbe_X540_info;
575#ifdef CONFIG_IXGBE_DCB 575#ifdef CONFIG_IXGBE_DCB
576extern const struct dcbnl_rtnl_ops dcbnl_ops; 576extern const struct dcbnl_rtnl_ops dcbnl_ops;
577extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
578 struct ixgbe_dcb_config *dst_dcb_cfg,
579 int tc_max);
580#endif 577#endif
581 578
582extern char ixgbe_driver_name[]; 579extern char ixgbe_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 652e4b09546d..32e5c02ff6d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,18 +44,26 @@
44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
46 46
47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg, 47static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
48 struct ixgbe_dcb_config *dcfg, int tc_max)
49{ 48{
49 struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
50 struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
50 struct tc_configuration *src = NULL; 51 struct tc_configuration *src = NULL;
51 struct tc_configuration *dst = NULL; 52 struct tc_configuration *dst = NULL;
52 int i, j; 53 int i, j;
53 int tx = DCB_TX_CONFIG; 54 int tx = DCB_TX_CONFIG;
54 int rx = DCB_RX_CONFIG; 55 int rx = DCB_RX_CONFIG;
55 int changes = 0; 56 int changes = 0;
57#ifdef IXGBE_FCOE
58 struct dcb_app app = {
59 .selector = DCB_APP_IDTYPE_ETHTYPE,
60 .protocol = ETH_P_FCOE,
61 };
62 u8 up = dcb_getapp(adapter->netdev, &app);
56 63
57 if (!scfg || !dcfg) 64 if (up && !(up & (1 << adapter->fcoe.up)))
58 return changes; 65 changes |= BIT_APP_UPCHG;
66#endif
59 67
60 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { 68 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
61 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; 69 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@@ -332,28 +340,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
332 struct ixgbe_adapter *adapter = netdev_priv(netdev); 340 struct ixgbe_adapter *adapter = netdev_priv(netdev);
333 int ret = DCB_NO_HW_CHG; 341 int ret = DCB_NO_HW_CHG;
334 int i; 342 int i;
335#ifdef IXGBE_FCOE
336 struct dcb_app app = {
337 .selector = DCB_APP_IDTYPE_ETHTYPE,
338 .protocol = ETH_P_FCOE,
339 };
340 u8 up;
341
342 /* In IEEE mode, use the IEEE Ethertype selector value */
343 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
344 app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
345 up = dcb_ieee_getapp_mask(netdev, &app);
346 } else {
347 up = dcb_getapp(netdev, &app);
348 }
349#endif
350 343
351 /* Fail command if not in CEE mode */ 344 /* Fail command if not in CEE mode */
352 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 345 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
353 return ret; 346 return ret;
354 347
355 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, 348 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
356 &adapter->dcb_cfg,
357 MAX_TRAFFIC_CLASS); 349 MAX_TRAFFIC_CLASS);
358 if (!adapter->dcb_set_bitmap) 350 if (!adapter->dcb_set_bitmap)
359 return ret; 351 return ret;
@@ -440,8 +432,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
440 * FCoE is using changes. This happens if the APP info 432 * FCoE is using changes. This happens if the APP info
441 * changes or the up2tc mapping is updated. 433 * changes or the up2tc mapping is updated.
442 */ 434 */
443 if ((up && !(up & (1 << adapter->fcoe.up))) || 435 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
444 (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { 436 struct dcb_app app = {
437 .selector = DCB_APP_IDTYPE_ETHTYPE,
438 .protocol = ETH_P_FCOE,
439 };
440 u8 up = dcb_getapp(netdev, &app);
441
445 adapter->fcoe.up = ffs(up) - 1; 442 adapter->fcoe.up = ffs(up) - 1;
446 ixgbe_dcbnl_devreset(netdev); 443 ixgbe_dcbnl_devreset(netdev);
447 ret = DCB_HW_CHG_RST; 444 ret = DCB_HW_CHG_RST;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 31a2bf76a346..cfe7d269590c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1780,6 +1780,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1780 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1780 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1781 } 1781 }
1782 1782
1783 netdev_tx_reset_queue(txring_txq(tx_ring));
1784
1783 /* re-map buffers to ring, store next to clean values */ 1785 /* re-map buffers to ring, store next to clean values */
1784 ixgbe_alloc_rx_buffers(rx_ring, count); 1786 ixgbe_alloc_rx_buffers(rx_ring, count);
1785 rx_ring->next_to_clean = rx_ntc; 1787 rx_ring->next_to_clean = rx_ntc;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 77ea4b716535..bc07933d67da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -437,6 +437,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
437 */ 437 */
438 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 438 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
439 (fctl & FC_FC_END_SEQ)) { 439 (fctl & FC_FC_END_SEQ)) {
440 skb_linearize(skb);
440 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); 441 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
441 crc->fcoe_eof = FC_EOF_T; 442 crc->fcoe_eof = FC_EOF_T;
442 } 443 }
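In the ixgbe_fcoe.c hunk, the CRC/EOF trailer is appended with skb_put(), which only ever extends the linear data area; if the received frame still carries page fragments, the trailer would land ahead of the fragment data instead of at the true end of the frame, so the skb is linearized first. The shape of the pattern (the error handling here is added for illustration; the patch itself ignores the return value):

        /* pull all paged data into the linear area before appending a trailer */
        if (skb_linearize(skb))
                return -ENOMEM;         /* hypothetical failure path */

        crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
        crc->fcoe_eof = FC_EOF_T;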
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a75be39..ed1b47dc0834 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
622 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 622 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
624 624
625#ifdef IXGBE_FCOE
626 if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
627 struct ixgbe_ring_feature *f;
628 f = &adapter->ring_feature[RING_F_FCOE];
629 if ((rxr_idx >= f->mask) &&
630 (rxr_idx < f->mask + f->indices))
631 set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
632 }
633
634#endif /* IXGBE_FCOE */
625 /* apply Rx specific ring traits */ 635 /* apply Rx specific ring traits */
626 ring->count = adapter->rx_ring_count; 636 ring->count = adapter->rx_ring_count;
627 ring->queue_index = rxr_idx; 637 ring->queue_index = rxr_idx;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e26b1f9ac75..467948e9ecd9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2671,8 +2671,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2671 /* enable queue */ 2671 /* enable queue */
2672 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); 2672 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2673 2673
2674 netdev_tx_reset_queue(txring_txq(ring));
2675
2676 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 2674 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2677 if (hw->mac.type == ixgbe_mac_82598EB && 2675 if (hw->mac.type == ixgbe_mac_82598EB &&
2678 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 2676 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3154,14 +3152,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3154 set_ring_rsc_enabled(rx_ring); 3152 set_ring_rsc_enabled(rx_ring);
3155 else 3153 else
3156 clear_ring_rsc_enabled(rx_ring); 3154 clear_ring_rsc_enabled(rx_ring);
3157#ifdef IXGBE_FCOE
3158 if (netdev->features & NETIF_F_FCOE_MTU) {
3159 struct ixgbe_ring_feature *f;
3160 f = &adapter->ring_feature[RING_F_FCOE];
3161 if ((i >= f->mask) && (i < f->mask + f->indices))
3162 set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
3163 }
3164#endif /* IXGBE_FCOE */
3165 } 3155 }
3166} 3156}
3167 3157
@@ -4175,6 +4165,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4175 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 4165 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4176 } 4166 }
4177 4167
4168 netdev_tx_reset_queue(txring_txq(tx_ring));
4169
4178 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4170 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4179 memset(tx_ring->tx_buffer_info, 0, size); 4171 memset(tx_ring->tx_buffer_info, 0, size);
4180 4172
@@ -4426,8 +4418,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4426 adapter->dcb_cfg.pfc_mode_enable = false; 4418 adapter->dcb_cfg.pfc_mode_enable = false;
4427 adapter->dcb_set_bitmap = 0x00; 4419 adapter->dcb_set_bitmap = 0x00;
4428 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; 4420 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4429 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4421 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4430 MAX_TRAFFIC_CLASS); 4422 sizeof(adapter->temp_dcb_cfg));
4431 4423
4432#endif 4424#endif
4433 4425
@@ -4836,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
4836 4828
4837 pci_wake_from_d3(pdev, false); 4829 pci_wake_from_d3(pdev, false);
4838 4830
4831 rtnl_lock();
4839 err = ixgbe_init_interrupt_scheme(adapter); 4832 err = ixgbe_init_interrupt_scheme(adapter);
4833 rtnl_unlock();
4840 if (err) { 4834 if (err) {
4841 e_dev_err("Cannot initialize interrupts for device\n"); 4835 e_dev_err("Cannot initialize interrupts for device\n");
4842 return err; 4836 return err;
@@ -4872,17 +4866,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4872 netif_device_detach(netdev); 4866 netif_device_detach(netdev);
4873 4867
4874 if (netif_running(netdev)) { 4868 if (netif_running(netdev)) {
4869 rtnl_lock();
4875 ixgbe_down(adapter); 4870 ixgbe_down(adapter);
4876 ixgbe_free_irq(adapter); 4871 ixgbe_free_irq(adapter);
4877 ixgbe_free_all_tx_resources(adapter); 4872 ixgbe_free_all_tx_resources(adapter);
4878 ixgbe_free_all_rx_resources(adapter); 4873 ixgbe_free_all_rx_resources(adapter);
4874 rtnl_unlock();
4879 } 4875 }
4880 4876
4881 ixgbe_clear_interrupt_scheme(adapter); 4877 ixgbe_clear_interrupt_scheme(adapter);
4882#ifdef CONFIG_DCB
4883 kfree(adapter->ixgbe_ieee_pfc);
4884 kfree(adapter->ixgbe_ieee_ets);
4885#endif
4886 4878
4887#ifdef CONFIG_PM 4879#ifdef CONFIG_PM
4888 retval = pci_save_state(pdev); 4880 retval = pci_save_state(pdev);
@@ -4893,6 +4885,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4893 if (wufc) { 4885 if (wufc) {
4894 ixgbe_set_rx_mode(netdev); 4886 ixgbe_set_rx_mode(netdev);
4895 4887
4888 /*
 4889 * enable the optics for both multi-speed fiber and
4890 * 82599 SFP+ fiber as we can WoL.
4891 */
4892 if (hw->mac.ops.enable_tx_laser &&
4893 (hw->phy.multispeed_fiber ||
4894 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
4895 hw->mac.type == ixgbe_mac_82599EB)))
4896 hw->mac.ops.enable_tx_laser(hw);
4897
4896 /* turn on all-multi mode if wake on multicast is enabled */ 4898 /* turn on all-multi mode if wake on multicast is enabled */
4897 if (wufc & IXGBE_WUFC_MC) { 4899 if (wufc & IXGBE_WUFC_MC) {
4898 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4900 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -7220,6 +7222,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7220 7222
7221 ixgbe_release_hw_control(adapter); 7223 ixgbe_release_hw_control(adapter);
7222 7224
7225#ifdef CONFIG_DCB
7226 kfree(adapter->ixgbe_ieee_pfc);
7227 kfree(adapter->ixgbe_ieee_ets);
7228
7229#endif
7223 iounmap(adapter->hw.hw_addr); 7230 iounmap(adapter->hw.hw_addr);
7224 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7231 pci_release_selected_regions(pdev, pci_select_bars(pdev,
7225 IORESOURCE_MEM)); 7232 IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e2dfc3..487a6c8bd4ec 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2494,8 +2494,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
2494 skb_copy_from_linear_data(re->skb, skb->data, length); 2494 skb_copy_from_linear_data(re->skb, skb->data, length);
2495 skb->ip_summed = re->skb->ip_summed; 2495 skb->ip_summed = re->skb->ip_summed;
2496 skb->csum = re->skb->csum; 2496 skb->csum = re->skb->csum;
2497 skb->rxhash = re->skb->rxhash;
2498 skb->vlan_tci = re->skb->vlan_tci;
2499
2497 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, 2500 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2498 length, PCI_DMA_FROMDEVICE); 2501 length, PCI_DMA_FROMDEVICE);
2502 re->skb->vlan_tci = 0;
2503 re->skb->rxhash = 0;
2499 re->skb->ip_summed = CHECKSUM_NONE; 2504 re->skb->ip_summed = CHECKSUM_NONE;
2500 skb_put(skb, length); 2505 skb_put(skb, length);
2501 } 2506 }
@@ -2580,9 +2585,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2580 struct sk_buff *skb = NULL; 2585 struct sk_buff *skb = NULL;
2581 u16 count = (status & GMR_FS_LEN) >> 16; 2586 u16 count = (status & GMR_FS_LEN) >> 16;
2582 2587
2583 if (status & GMR_FS_VLAN)
2584 count -= VLAN_HLEN; /* Account for vlan tag */
2585
2586 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2588 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2587 "rx slot %u status 0x%x len %d\n", 2589 "rx slot %u status 0x%x len %d\n",
2588 sky2->rx_next, status, length); 2590 sky2->rx_next, status, length);
@@ -2590,6 +2592,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2590 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2592 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2591 prefetch(sky2->rx_ring + sky2->rx_next); 2593 prefetch(sky2->rx_ring + sky2->rx_next);
2592 2594
2595 if (vlan_tx_tag_present(re->skb))
2596 count -= VLAN_HLEN; /* Account for vlan tag */
2597
2593 /* This chip has hardware problems that generates bogus status. 2598 /* This chip has hardware problems that generates bogus status.
2594 * So do only marginal checking and expect higher level protocols 2599 * So do only marginal checking and expect higher level protocols
2595 * to handle crap frames. 2600 * to handle crap frames.
@@ -2647,11 +2652,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2647} 2652}
2648 2653
2649static inline void sky2_skb_rx(const struct sky2_port *sky2, 2654static inline void sky2_skb_rx(const struct sky2_port *sky2,
2650 u32 status, struct sk_buff *skb) 2655 struct sk_buff *skb)
2651{ 2656{
2652 if (status & GMR_FS_VLAN)
2653 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2654
2655 if (skb->ip_summed == CHECKSUM_NONE) 2657 if (skb->ip_summed == CHECKSUM_NONE)
2656 netif_receive_skb(skb); 2658 netif_receive_skb(skb);
2657 else 2659 else
@@ -2705,6 +2707,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2705 } 2707 }
2706} 2708}
2707 2709
2710static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
2711{
2712 struct sk_buff *skb;
2713
2714 skb = sky2->rx_ring[sky2->rx_next].skb;
2715 __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
2716}
2717
2708static void sky2_rx_hash(struct sky2_port *sky2, u32 status) 2718static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2709{ 2719{
2710 struct sk_buff *skb; 2720 struct sk_buff *skb;
@@ -2763,8 +2773,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2763 } 2773 }
2764 2774
2765 skb->protocol = eth_type_trans(skb, dev); 2775 skb->protocol = eth_type_trans(skb, dev);
2766 2776 sky2_skb_rx(sky2, skb);
2767 sky2_skb_rx(sky2, status, skb);
2768 2777
2769 /* Stop after net poll weight */ 2778 /* Stop after net poll weight */
2770 if (++work_done >= to_do) 2779 if (++work_done >= to_do)
@@ -2772,11 +2781,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2772 break; 2781 break;
2773 2782
2774 case OP_RXVLAN: 2783 case OP_RXVLAN:
2775 sky2->rx_tag = length; 2784 sky2_rx_tag(sky2, length);
2776 break; 2785 break;
2777 2786
2778 case OP_RXCHKSVLAN: 2787 case OP_RXCHKSVLAN:
2779 sky2->rx_tag = length; 2788 sky2_rx_tag(sky2, length);
2780 /* fall through */ 2789 /* fall through */
2781 case OP_RXCHKS: 2790 case OP_RXCHKS:
2782 if (likely(dev->features & NETIF_F_RXCSUM)) 2791 if (likely(dev->features & NETIF_F_RXCSUM))
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ff6f58bf822a..3c896ce80b71 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2241,7 +2241,6 @@ struct sky2_port {
2241 u16 rx_pending; 2241 u16 rx_pending;
2242 u16 rx_data_size; 2242 u16 rx_data_size;
2243 u16 rx_nfrags; 2243 u16 rx_nfrags;
2244 u16 rx_tag;
2245 2244
2246 struct { 2245 struct {
2247 unsigned long last; 2246 unsigned long last;
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa607d07..5e313e9a252f 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
618 netif_dbg(ks, intr, ks->netdev, 618 netif_dbg(ks, intr, ks->netdev,
619 "%s: status 0x%04x\n", __func__, status); 619 "%s: status 0x%04x\n", __func__, status);
620 620
621 if (status & IRQ_LCI) { 621 if (status & IRQ_LCI)
622 /* should do something about checking link status */
623 handled |= IRQ_LCI; 622 handled |= IRQ_LCI;
624 }
625 623
626 if (status & IRQ_LDI) { 624 if (status & IRQ_LDI) {
627 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); 625 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
684 682
685 mutex_unlock(&ks->lock); 683 mutex_unlock(&ks->lock);
686 684
685 if (status & IRQ_LCI)
686 mii_check_link(&ks->mii);
687
687 if (status & IRQ_TXI) 688 if (status & IRQ_TXI)
688 netif_wake_queue(ks->netdev); 689 netif_wake_queue(ks->netdev);
689 690
@@ -889,16 +890,17 @@ static int ks8851_net_stop(struct net_device *dev)
889 netif_stop_queue(dev); 890 netif_stop_queue(dev);
890 891
891 mutex_lock(&ks->lock); 892 mutex_lock(&ks->lock);
893 /* turn off the IRQs and ack any outstanding */
894 ks8851_wrreg16(ks, KS_IER, 0x0000);
895 ks8851_wrreg16(ks, KS_ISR, 0xffff);
896 mutex_unlock(&ks->lock);
892 897
893 /* stop any outstanding work */ 898 /* stop any outstanding work */
894 flush_work(&ks->irq_work); 899 flush_work(&ks->irq_work);
895 flush_work(&ks->tx_work); 900 flush_work(&ks->tx_work);
896 flush_work(&ks->rxctrl_work); 901 flush_work(&ks->rxctrl_work);
897 902
898 /* turn off the IRQs and ack any outstanding */ 903 mutex_lock(&ks->lock);
899 ks8851_wrreg16(ks, KS_IER, 0x0000);
900 ks8851_wrreg16(ks, KS_ISR, 0xffff);
901
902 /* shutdown RX process */ 904 /* shutdown RX process */
903 ks8851_wrreg16(ks, KS_RXCR1, 0x0000); 905 ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
904 906
@@ -907,6 +909,7 @@ static int ks8851_net_stop(struct net_device *dev)
907 909
908 /* set powermode to soft power down to save power */ 910 /* set powermode to soft power down to save power */
909 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); 911 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
912 mutex_unlock(&ks->lock);
910 913
911 /* ensure any queued tx buffers are dumped */ 914 /* ensure any queued tx buffers are dumped */
912 while (!skb_queue_empty(&ks->txq)) { 915 while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +921,6 @@ static int ks8851_net_stop(struct net_device *dev)
918 dev_kfree_skb(txb); 921 dev_kfree_skb(txb);
919 } 922 }
920 923
921 mutex_unlock(&ks->lock);
922 return 0; 924 return 0;
923} 925}
924 926
@@ -1418,6 +1420,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1418 struct net_device *ndev; 1420 struct net_device *ndev;
1419 struct ks8851_net *ks; 1421 struct ks8851_net *ks;
1420 int ret; 1422 int ret;
1423 unsigned cider;
1421 1424
1422 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1425 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1423 if (!ndev) 1426 if (!ndev)
@@ -1484,8 +1487,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1484 ks8851_soft_reset(ks, GRR_GSR); 1487 ks8851_soft_reset(ks, GRR_GSR);
1485 1488
1486 /* simple check for a valid chip being connected to the bus */ 1489 /* simple check for a valid chip being connected to the bus */
1487 1490 cider = ks8851_rdreg16(ks, KS_CIDER);
1488 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1491 if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
1489 dev_err(&spi->dev, "failed to read device ID\n"); 1492 dev_err(&spi->dev, "failed to read device ID\n");
1490 ret = -ENODEV; 1493 ret = -ENODEV;
1491 goto err_id; 1494 goto err_id;
@@ -1516,15 +1519,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1516 } 1519 }
1517 1520
1518 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", 1521 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
1519 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1522 CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
1520 ndev->dev_addr, ndev->irq,
1521 ks->rc_ccr & CCR_EEPROM ? "has" : "no"); 1523 ks->rc_ccr & CCR_EEPROM ? "has" : "no");
1522 1524
1523 return 0; 1525 return 0;
1524 1526
1525 1527
1526err_netdev: 1528err_netdev:
1527 free_irq(ndev->irq, ndev); 1529 free_irq(ndev->irq, ks);
1528 1530
1529err_id: 1531err_id:
1530err_irq: 1532err_irq:
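The reordering in ks8851_net_stop() above matters because the irq/tx/rxctrl work handlers take ks->lock themselves: flushing them while holding the lock can deadlock. The stop path therefore masks and acks the chip interrupts under the lock, drops it, flushes the work items, and only then re-takes the lock for the RX/TX shutdown and power-down. In outline (a sketch of the ordering, not the driver's exact code):

        mutex_lock(&ks->lock);
        ks8851_wrreg16(ks, KS_IER, 0x0000);     /* mask chip interrupts */
        ks8851_wrreg16(ks, KS_ISR, 0xffff);     /* ack anything outstanding */
        mutex_unlock(&ks->lock);

        flush_work(&ks->irq_work);              /* safe: lock no longer held */
        flush_work(&ks->tx_work);
        flush_work(&ks->rxctrl_work);

        mutex_lock(&ks->lock);
        /* shut down RX/TX, then enter soft power-down */
        mutex_unlock(&ks->lock);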
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9f4081..5ffde23ac8fb 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
40#define DRV_NAME "ks8851_mll" 40#define DRV_NAME "ks8851_mll"
41 41
42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
43#define MAX_RECV_FRAMES 32 43#define MAX_RECV_FRAMES 255
44#define MAX_BUF_SIZE 2048 44#define MAX_BUF_SIZE 2048
45#define TX_BUF_SIZE 2000 45#define TX_BUF_SIZE 2000
46#define RX_BUF_SIZE 2000 46#define RX_BUF_SIZE 2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b185d85..eaf9ff0262a9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); 5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
5676 } 5676 }
5677 5677
5678 memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); 5678 memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
5679 5679
5680 interrupt = hw_block_intr(hw); 5680 interrupt = hw_block_intr(hw);
5681 5681
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f867..b3287c0fe279 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp)
958 cpw8(Cmd, RxOn | TxOn); 958 cpw8(Cmd, RxOn | TxOn);
959} 959}
960 960
961static void cp_enable_irq(struct cp_private *cp)
962{
963 cpw16_f(IntrMask, cp_intr_mask);
964}
965
961static void cp_init_hw (struct cp_private *cp) 966static void cp_init_hw (struct cp_private *cp)
962{ 967{
963 struct net_device *dev = cp->dev; 968 struct net_device *dev = cp->dev;
@@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp)
997 1002
998 cpw16(MultiIntr, 0); 1003 cpw16(MultiIntr, 0);
999 1004
1000 cpw16_f(IntrMask, cp_intr_mask);
1001
1002 cpw8_f(Cfg9346, Cfg9346_Lock); 1005 cpw8_f(Cfg9346, Cfg9346_Lock);
1003} 1006}
1004 1007
@@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev)
1130 if (rc) 1133 if (rc)
1131 goto err_out_hw; 1134 goto err_out_hw;
1132 1135
1136 cp_enable_irq(cp);
1137
1133 netif_carrier_off(dev); 1138 netif_carrier_off(dev);
1134 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); 1139 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 netif_start_queue(dev); 1140 netif_start_queue(dev);
@@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev)
2031 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ 2036 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2032 cp_init_rings_index (cp); 2037 cp_init_rings_index (cp);
2033 cp_init_hw (cp); 2038 cp_init_hw (cp);
2039 cp_enable_irq(cp);
2034 netif_start_queue (dev); 2040 netif_start_queue (dev);
2035 2041
2036 spin_lock_irqsave (&cp->lock, flags); 2042 spin_lock_irqsave (&cp->lock, flags);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..ce6b44d1f252 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -61,8 +61,12 @@
61#define R8169_MSG_DEFAULT \ 61#define R8169_MSG_DEFAULT \
62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) 62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
63 63
64#define TX_BUFFS_AVAIL(tp) \ 64#define TX_SLOTS_AVAIL(tp) \
65 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 65 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
66
67/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
68#define TX_FRAGS_READY_FOR(tp,nr_frags) \
69 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
66 70
67/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 71/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
68 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 72 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5115 u32 opts[2]; 5119 u32 opts[2];
5116 int frags; 5120 int frags;
5117 5121
5118 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 5122 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5119 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 5123 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5120 goto err_stop_0; 5124 goto err_stop_0;
5121 } 5125 }
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5169 5173
5170 mmiowb(); 5174 mmiowb();
5171 5175
5172 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 5176 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5173 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 5177 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5174 * not miss a ring update when it notices a stopped queue. 5178 * not miss a ring update when it notices a stopped queue.
5175 */ 5179 */
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5183 * can't. 5187 * can't.
5184 */ 5188 */
5185 smp_mb(); 5189 smp_mb();
5186 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) 5190 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5187 netif_wake_queue(dev); 5191 netif_wake_queue(dev);
5188 } 5192 }
5189 5193
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5306 */ 5310 */
5307 smp_mb(); 5311 smp_mb();
5308 if (netif_queue_stopped(dev) && 5312 if (netif_queue_stopped(dev) &&
5309 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 5313 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5310 netif_wake_queue(dev); 5314 netif_wake_queue(dev);
5311 } 5315 }
5312 /* 5316 /*
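The r8169 change replaces TX_BUFFS_AVAIL, which folded a "- 1" reservation into the availability count, with TX_SLOTS_AVAIL plus TX_FRAGS_READY_FOR, which spell out that a packet with nr_frags page fragments occupies nr_frags + 1 descriptors (one extra for the linear head). A worked example with the new helpers, using made-up ring indices:

        #define TX_SLOTS_AVAIL(tp) \
                (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)

        /* A skbuff with nr_frags needs nr_frags + 1 entries in the tx queue */
        #define TX_FRAGS_READY_FOR(tp, nr_frags) \
                (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))

        /* Example: NUM_TX_DESC = 64, dirty_tx = 10, cur_tx = 70
         *   TX_SLOTS_AVAIL        = 10 + 64 - 70 = 4 free descriptors
         *   TX_FRAGS_READY_FOR(3) = (4 >= 4)     = true   -- 3 frags + head fit
         *   TX_FRAGS_READY_FOR(4) = (4 >= 5)     = false  -- queue must stop
         */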
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cbfbffe3f00..4a0005342e65 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1349 } 1349 }
1350 1350
1351 /* RSS might be usable on VFs even if it is disabled on the PF */ 1351 /* RSS might be usable on VFs even if it is disabled on the PF */
1352 efx->rss_spread = (efx->n_rx_channels > 1 ? 1352 efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
1353 efx->n_rx_channels : efx_vf_size(efx)); 1353 efx->n_rx_channels : efx_vf_size(efx));
1354 1354
1355 return 0; 1355 return 0;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a6971027076..cd3defb11ffb 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
1166 1166
1167/* Quickly dumps bad packets */ 1167/* Quickly dumps bad packets */
1168static void 1168static void
1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) 1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
1170{ 1170{
1171 unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
1172
1173 if (likely(pktwords >= 4)) { 1171 if (likely(pktwords >= 4)) {
1174 unsigned int timeout = 500; 1172 unsigned int timeout = 500;
1175 unsigned int val; 1173 unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1233 continue; 1231 continue;
1234 } 1232 }
1235 1233
1236 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1234 skb = netdev_alloc_skb(dev, pktwords << 2);
1237 if (unlikely(!skb)) { 1235 if (unlikely(!skb)) {
1238 SMSC_WARN(pdata, rx_err, 1236 SMSC_WARN(pdata, rx_err,
1239 "Unable to allocate skb for rx packet"); 1237 "Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1243 break; 1241 break;
1244 } 1242 }
1245 1243
1246 skb->data = skb->head; 1244 pdata->ops->rx_readfifo(pdata,
1247 skb_reset_tail_pointer(skb); 1245 (unsigned int *)skb->data, pktwords);
1248 1246
1249 /* Align IP on 16B boundary */ 1247 /* Align IP on 16B boundary */
1250 skb_reserve(skb, NET_IP_ALIGN); 1248 skb_reserve(skb, NET_IP_ALIGN);
1251 skb_put(skb, pktlength - 4); 1249 skb_put(skb, pktlength - 4);
1252 pdata->ops->rx_readfifo(pdata,
1253 (unsigned int *)skb->head, pktwords);
1254 skb->protocol = eth_type_trans(skb, dev); 1250 skb->protocol = eth_type_trans(skb, dev);
1255 skb_checksum_none_assert(skb); 1251 skb_checksum_none_assert(skb);
1256 netif_receive_skb(skb); 1252 netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
1565 smsc911x_reg_write(pdata, FIFO_INT, temp); 1561 smsc911x_reg_write(pdata, FIFO_INT, temp);
1566 1562
1567 /* set RX Data offset to 2 bytes for alignment */ 1563 /* set RX Data offset to 2 bytes for alignment */
1568 smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); 1564 smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
1569 1565
1570 /* enable NAPI polling before enabling RX interrupts */ 1566 /* enable NAPI polling before enabling RX interrupts */
1571 napi_enable(&pdata->napi); 1567 napi_enable(&pdata->napi);
@@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2382 SET_NETDEV_DEV(dev, &pdev->dev); 2378 SET_NETDEV_DEV(dev, &pdev->dev);
2383 2379
2384 pdata = netdev_priv(dev); 2380 pdata = netdev_priv(dev);
2385
2386 dev->irq = irq_res->start; 2381 dev->irq = irq_res->start;
2387 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2382 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
2388 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2383 pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2446 if (retval) { 2441 if (retval) {
2447 SMSC_WARN(pdata, probe, 2442 SMSC_WARN(pdata, probe,
2448 "Unable to claim requested irq: %d", dev->irq); 2443 "Unable to claim requested irq: %d", dev->irq);
2449 goto out_free_irq; 2444 goto out_disable_resources;
2450 } 2445 }
2451 2446
2452 retval = register_netdev(dev); 2447 retval = register_netdev(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409ff4058..4ba969096717 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2339,7 +2339,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2339 netif_device_detach(dev); 2339 netif_device_detach(dev);
2340 2340
2341 /* Switch off chip, remember WOL setting */ 2341 /* Switch off chip, remember WOL setting */
2342 gp->asleep_wol = gp->wake_on_lan; 2342 gp->asleep_wol = !!gp->wake_on_lan;
2343 gem_do_stop(dev, gp->asleep_wol); 2343 gem_do_stop(dev, gp->asleep_wol);
2344 2344
2345 /* Unlock the network stack */ 2345 /* Unlock the network stack */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a3348f676..08aff1a2087c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1511,7 +1511,7 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
1511 1511
1512static int match_first_device(struct device *dev, void *data) 1512static int match_first_device(struct device *dev, void *data)
1513{ 1513{
1514 return 1; 1514 return !strncmp(dev_name(dev), "davinci_mdio", 12);
1515} 1515}
1516 1516
1517/** 1517/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d6e633..e4e47088e26b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
181 __davinci_mdio_reset(data); 181 __davinci_mdio_reset(data);
182 return -EAGAIN; 182 return -EAGAIN;
183 } 183 }
184
185 reg = __raw_readl(&regs->user[0].access);
186 if ((reg & USERACCESS_GO) == 0)
187 return 0;
188
184 dev_err(data->dev, "timed out waiting for user access\n"); 189 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT; 190 return -ETIMEDOUT;
186} 191}
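The extra read added to wait_for_user_access() above is the usual "check once more after the timeout" pattern: the polling loop can run out of time even though the GO bit cleared between its last sample and the deadline test, so a final read avoids returning a spurious -ETIMEDOUT. Generic shape of the pattern (BUSY_BIT, reg and timeout_ms are illustrative names, not the driver's):

        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        while (time_before(jiffies, deadline)) {
                if (!(readl(reg) & BUSY_BIT))
                        return 0;
                cpu_relax();
        }

        /* the operation may have finished right at the deadline */
        if (!(readl(reg) & BUSY_BIT))
                return 0;

        return -ETIMEDOUT;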
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3bc4957..efd36691ce54 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -228,7 +228,7 @@ tlan_get_skb(const struct tlan_list *tag)
228 unsigned long addr; 228 unsigned long addr;
229 229
230 addr = tag->buffer[9].address; 230 addr = tag->buffer[9].address;
231 addr |= (tag->buffer[8].address << 16) << 16; 231 addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
232 return (struct sk_buff *) addr; 232 return (struct sk_buff *) addr;
233} 233}
234 234
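tlan_get_skb() rebuilds a pointer stored as two 32-bit halves in the descriptor. buffer[8].address is a 32-bit value, so "(x << 16) << 16" is still evaluated in 32 bits and the high half wraps to zero before it ever reaches the 64-bit addr; the cast widens it first. A worked example with invented values:

        u32 hi = 0x00000001, lo = 0x23456780;
        unsigned long addr;

        addr  = lo;
        addr |= (hi << 16) << 16;                   /* 32-bit shift wraps: contributes 0 */
        /* addr == 0x23456780 -- high half lost */

        addr  = lo;
        addr |= ((unsigned long)hi << 16) << 16;    /* widened first: contributes 1UL << 32 */
        /* addr == 0x123456780 on a 64-bit kernel */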
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af083fd7..44b8d2bad8c3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
2 * Definitions for Xilinx Axi Ethernet device driver. 2 * Definitions for Xilinx Axi Ethernet device driver.
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */ 6 */
9 7
10#ifndef XILINX_AXIENET_H 8#ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba6814b..9c365e192a31 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 8 * Copyright (c) 2010 - 2011 PetaLogix
9 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
10 * 10 *
11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
12 * and Spartan6. 12 * and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e79f6c0..e90e1f46121e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
2 * MDIO bus driver for the Xilinx Axi Ethernet device 2 * MDIO bus driver for the Xilinx Axi Ethernet device
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 6 * Copyright (c) 2010 - 2011 PetaLogix
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 7 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
8 */ 8 */
9 9
10#include <linux/of_address.h> 10#include <linux/of_address.h>
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd294783b5c5..2d59138db7f3 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -44,6 +44,7 @@ struct net_device_context {
44 /* point back to our device context */ 44 /* point back to our device context */
45 struct hv_device *device_ctx; 45 struct hv_device *device_ctx;
46 struct delayed_work dwork; 46 struct delayed_work dwork;
47 struct work_struct work;
47}; 48};
48 49
49 50
@@ -51,30 +52,22 @@ static int ring_size = 128;
51module_param(ring_size, int, S_IRUGO); 52module_param(ring_size, int, S_IRUGO);
52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 53MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
53 54
54struct set_multicast_work {
55 struct work_struct work;
56 struct net_device *net;
57};
58
59static void do_set_multicast(struct work_struct *w) 55static void do_set_multicast(struct work_struct *w)
60{ 56{
61 struct set_multicast_work *swk = 57 struct net_device_context *ndevctx =
62 container_of(w, struct set_multicast_work, work); 58 container_of(w, struct net_device_context, work);
63 struct net_device *net = swk->net;
64
65 struct net_device_context *ndevctx = netdev_priv(net);
66 struct netvsc_device *nvdev; 59 struct netvsc_device *nvdev;
67 struct rndis_device *rdev; 60 struct rndis_device *rdev;
68 61
69 nvdev = hv_get_drvdata(ndevctx->device_ctx); 62 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL) 63 if (nvdev == NULL || nvdev->ndev == NULL)
71 goto out; 64 return;
72 65
73 rdev = nvdev->extension; 66 rdev = nvdev->extension;
74 if (rdev == NULL) 67 if (rdev == NULL)
75 goto out; 68 return;
76 69
77 if (net->flags & IFF_PROMISC) 70 if (nvdev->ndev->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev, 71 rndis_filter_set_packet_filter(rdev,
79 NDIS_PACKET_TYPE_PROMISCUOUS); 72 NDIS_PACKET_TYPE_PROMISCUOUS);
80 else 73 else
@@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w)
82 NDIS_PACKET_TYPE_BROADCAST | 75 NDIS_PACKET_TYPE_BROADCAST |
83 NDIS_PACKET_TYPE_ALL_MULTICAST | 76 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED); 77 NDIS_PACKET_TYPE_DIRECTED);
85
86out:
87 kfree(w);
88} 78}
89 79
90static void netvsc_set_multicast_list(struct net_device *net) 80static void netvsc_set_multicast_list(struct net_device *net)
91{ 81{
92 struct set_multicast_work *swk = 82 struct net_device_context *net_device_ctx = netdev_priv(net);
93 kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
94 if (swk == NULL)
95 return;
96 83
97 swk->net = net; 84 schedule_work(&net_device_ctx->work);
98 INIT_WORK(&swk->work, do_set_multicast);
99 schedule_work(&swk->work);
100} 85}
101 86
102static int netvsc_open(struct net_device *net) 87static int netvsc_open(struct net_device *net)
@@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net)
125 110
126 netif_tx_disable(net); 111 netif_tx_disable(net);
127 112
113 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
114 cancel_work_sync(&net_device_ctx->work);
128 ret = rndis_filter_close(device_obj); 115 ret = rndis_filter_close(device_obj);
129 if (ret != 0) 116 if (ret != 0)
130 netdev_err(net, "unable to close device (ret %d).\n", ret); 117 netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -335,6 +322,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
335 322
336 nvdev->start_remove = true; 323 nvdev->start_remove = true;
337 cancel_delayed_work_sync(&ndevctx->dwork); 324 cancel_delayed_work_sync(&ndevctx->dwork);
325 cancel_work_sync(&ndevctx->work);
338 netif_tx_disable(ndev); 326 netif_tx_disable(ndev);
339 rndis_filter_device_remove(hdev); 327 rndis_filter_device_remove(hdev);
340 328
@@ -403,6 +391,7 @@ static int netvsc_probe(struct hv_device *dev,
403 net_device_ctx->device_ctx = dev; 391 net_device_ctx->device_ctx = dev;
404 hv_set_drvdata(dev, net); 392 hv_set_drvdata(dev, net);
405 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 393 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
394 INIT_WORK(&net_device_ctx->work, do_set_multicast);
406 395
407 net->netdev_ops = &device_ops; 396 net->netdev_ops = &device_ops;
408 397
@@ -456,6 +445,7 @@ static int netvsc_remove(struct hv_device *dev)
456 445
457 ndev_ctx = netdev_priv(net); 446 ndev_ctx = netdev_priv(net);
458 cancel_delayed_work_sync(&ndev_ctx->dwork); 447 cancel_delayed_work_sync(&ndev_ctx->dwork);
448 cancel_work_sync(&ndev_ctx->work);
459 449
460 /* Stop outbound asap */ 450 /* Stop outbound asap */
461 netif_tx_disable(net); 451 netif_tx_disable(net);
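The netvsc change drops the per-request kmalloc(GFP_ATOMIC) of a set_multicast_work wrapper and embeds a single work_struct in the per-device context. Repeated filter updates then coalesce onto the one pending work item, nothing is allocated from the atomic ndo_set_rx_mode path, and teardown (close, MTU change, remove) can cancel_work_sync() it so the handler never runs against a vanishing device. The general pattern, with illustrative names:

        struct dev_ctx {
                struct work_struct work;
                /* ... other per-device state ... */
        };

        static void do_deferred_update(struct work_struct *w)
        {
                struct dev_ctx *ctx = container_of(w, struct dev_ctx, work);
                /* act on ctx; back-to-back schedule_work() calls coalesce */
        }

        /* setup:    INIT_WORK(&ctx->work, do_deferred_update);
         * request:  schedule_work(&ctx->work);         -- safe from atomic context
         * teardown: cancel_work_sync(&ctx->work);      -- before freeing ctx
         */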
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afdc315c..025367a94add 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -259,7 +259,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
259 259
260xmit_world: 260xmit_world:
261 skb->ip_summed = ip_summed; 261 skb->ip_summed = ip_summed;
262 skb_set_dev(skb, vlan->lowerdev); 262 skb->dev = vlan->lowerdev;
263 return dev_queue_xmit(skb); 263 return dev_queue_xmit(skb);
264} 264}
265 265
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0427c6561c84..cb8fd5069dbe 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <linux/if_macvlan.h> 2#include <linux/if_macvlan.h>
3#include <linux/if_vlan.h>
3#include <linux/interrupt.h> 4#include <linux/interrupt.h>
4#include <linux/nsproxy.h> 5#include <linux/nsproxy.h>
5#include <linux/compat.h> 6#include <linux/compat.h>
@@ -759,6 +760,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
759 struct macvlan_dev *vlan; 760 struct macvlan_dev *vlan;
760 int ret; 761 int ret;
761 int vnet_hdr_len = 0; 762 int vnet_hdr_len = 0;
763 int vlan_offset = 0;
764 int copied;
762 765
763 if (q->flags & IFF_VNET_HDR) { 766 if (q->flags & IFF_VNET_HDR) {
764 struct virtio_net_hdr vnet_hdr; 767 struct virtio_net_hdr vnet_hdr;
@@ -773,18 +776,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
773 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 776 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
774 return -EFAULT; 777 return -EFAULT;
775 } 778 }
779 copied = vnet_hdr_len;
780
781 if (!vlan_tx_tag_present(skb))
782 len = min_t(int, skb->len, len);
783 else {
784 int copy;
785 struct {
786 __be16 h_vlan_proto;
787 __be16 h_vlan_TCI;
788 } veth;
789 veth.h_vlan_proto = htons(ETH_P_8021Q);
790 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
791
792 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
793 len = min_t(int, skb->len + VLAN_HLEN, len);
794
795 copy = min_t(int, vlan_offset, len);
796 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
797 len -= copy;
798 copied += copy;
799 if (ret || !len)
800 goto done;
801
802 copy = min_t(int, sizeof(veth), len);
803 ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
804 len -= copy;
805 copied += copy;
806 if (ret || !len)
807 goto done;
808 }
776 809
777 len = min_t(int, skb->len, len); 810 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
778 811 copied += len;
779 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
780 812
813done:
781 rcu_read_lock_bh(); 814 rcu_read_lock_bh();
782 vlan = rcu_dereference_bh(q->vlan); 815 vlan = rcu_dereference_bh(q->vlan);
783 if (vlan) 816 if (vlan)
784 macvlan_count_rx(vlan, len, ret == 0, 0); 817 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
785 rcu_read_unlock_bh(); 818 rcu_read_unlock_bh();
786 819
787 return ret ? ret : (len + vnet_hdr_len); 820 return ret ? ret : copied;
788} 821}
789 822
790static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, 823static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
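The macvtap_put_user() change deals with hardware-accelerated VLANs: when the tag has been stripped into skb->vlan_tci, a tap reader still expects a plain 802.1Q frame, so the 4-byte tag is spliced back into the byte stream between the MAC addresses and the encapsulated EtherType. The copy is done in three pieces -- the first 12 bytes of the frame, a locally built tag, then the rest of the frame from offset 12 -- and the byte count returned to user space grows by VLAN_HLEN. The layout being reconstructed (vlan_offset is offsetof(struct vlan_ethhdr, h_vlan_proto), i.e. 12):

        /*  bytes  0..5    dst MAC
         *  bytes  6..11   src MAC
         *  bytes 12..15   inserted tag: h_vlan_proto = htons(ETH_P_8021Q)
         *                               h_vlan_TCI   = htons(vlan_tx_tag_get(skb))
         *  bytes 16..     original frame from offset 12 (EtherType + payload)
         */
        struct inserted_tag {
                __be16 h_vlan_proto;
                __be16 h_vlan_TCI;
        };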
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index f08c85acf761..5ac46f5226f3 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ 40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
43 44
44static int ip175c_config_init(struct phy_device *phydev) 45static int ip175c_config_init(struct phy_device *phydev)
45{ 46{
@@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev)
185 return 0; 186 return 0;
186} 187}
187 188
189static int ip101a_g_ack_interrupt(struct phy_device *phydev)
190{
191 int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
192 if (err < 0)
193 return err;
194
195 return 0;
196}
197
188static struct phy_driver ip175c_driver = { 198static struct phy_driver ip175c_driver = {
189 .phy_id = 0x02430d80, 199 .phy_id = 0x02430d80,
190 .name = "ICPlus IP175C", 200 .name = "ICPlus IP175C",
@@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = {
204 .phy_id_mask = 0x0ffffff0, 214 .phy_id_mask = 0x0ffffff0,
205 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 215 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
206 SUPPORTED_Asym_Pause, 216 SUPPORTED_Asym_Pause,
207 .flags = PHY_HAS_INTERRUPT,
208 .config_init = &ip1001_config_init, 217 .config_init = &ip1001_config_init,
209 .config_aneg = &genphy_config_aneg, 218 .config_aneg = &genphy_config_aneg,
210 .read_status = &genphy_read_status, 219 .read_status = &genphy_read_status,
@@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = {
220 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | 229 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
221 SUPPORTED_Asym_Pause, 230 SUPPORTED_Asym_Pause,
222 .flags = PHY_HAS_INTERRUPT, 231 .flags = PHY_HAS_INTERRUPT,
232 .ack_interrupt = ip101a_g_ack_interrupt,
223 .config_init = &ip101a_g_config_init, 233 .config_init = &ip101a_g_config_init,
224 .config_aneg = &genphy_config_aneg, 234 .config_aneg = &genphy_config_aneg,
225 .read_status = &genphy_read_status, 235 .read_status = &genphy_read_status,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 33f8c51968b6..21d7151fb0ab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
235/* Prototypes. */ 235/* Prototypes. */
236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
237 struct file *file, unsigned int cmd, unsigned long arg); 237 struct file *file, unsigned int cmd, unsigned long arg);
238static int ppp_xmit_process(struct ppp *ppp); 238static void ppp_xmit_process(struct ppp *ppp);
239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
240static void ppp_push(struct ppp *ppp); 240static void ppp_push(struct ppp *ppp);
241static void ppp_channel_push(struct channel *pch); 241static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
969 put_unaligned_be16(proto, pp); 969 put_unaligned_be16(proto, pp);
970 970
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
972 if (!ppp_xmit_process(ppp)) 972 ppp_xmit_process(ppp);
973 netif_stop_queue(dev);
974 return NETDEV_TX_OK; 973 return NETDEV_TX_OK;
975 974
976 outf: 975 outf:
@@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
1048 * Called to do any work queued up on the transmit side 1047 * Called to do any work queued up on the transmit side
1049 * that can now be done. 1048 * that can now be done.
1050 */ 1049 */
1051static int 1050static void
1052ppp_xmit_process(struct ppp *ppp) 1051ppp_xmit_process(struct ppp *ppp)
1053{ 1052{
1054 struct sk_buff *skb; 1053 struct sk_buff *skb;
1055 int ret = 0;
1056 1054
1057 ppp_xmit_lock(ppp); 1055 ppp_xmit_lock(ppp);
1058 if (!ppp->closing) { 1056 if (!ppp->closing) {
@@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
1062 ppp_send_frame(ppp, skb); 1060 ppp_send_frame(ppp, skb);
1063 /* If there's no work left to do, tell the core net 1061 /* If there's no work left to do, tell the core net
1064 code that we can accept some more. */ 1062 code that we can accept some more. */
1065 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { 1063 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1066 netif_wake_queue(ppp->dev); 1064 netif_wake_queue(ppp->dev);
1067 ret = 1; 1065 else
1068 } 1066 netif_stop_queue(ppp->dev);
1069 } 1067 }
1070 ppp_xmit_unlock(ppp); 1068 ppp_xmit_unlock(ppp);
1071 return ret;
1072} 1069}
1073 1070
1074static inline struct sk_buff * 1071static inline struct sk_buff *
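
The ppp_generic change above makes ppp_xmit_process() decide queue flow control itself (wake when the backlog is drained, stop otherwise) rather than returning a value for ppp_start_xmit() to act on. A minimal sketch of that pattern, with invented names and a pthread mutex standing in for the xmit lock; it is an illustration only, not kernel code.

#include <pthread.h>
#include <stdbool.h>

struct example_xmit_state {
        pthread_mutex_t lock;    /* stands in for ppp_xmit_lock()          */
        int backlog;             /* frames queued but not yet transmitted  */
        bool queue_stopped;      /* mirrors netif_stop/wake_queue()        */
};

static void example_xmit_process(struct example_xmit_state *st)
{
        int budget = 8;          /* pretend the channel accepts 8 frames   */

        pthread_mutex_lock(&st->lock);
        while (st->backlog > 0 && budget-- > 0)
                st->backlog--;   /* "send" one frame                       */
        /* The wake/stop decision now lives here, under the same lock that
         * guards the queue, so callers no longer need a return value. */
        st->queue_stopped = (st->backlog > 0);
        pthread_mutex_unlock(&st->lock);
}
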
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 5ee032cafade..42b5151aa78a 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -355,7 +355,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
355 u32 packet_len; 355 u32 packet_len;
356 u32 padbytes = 0xffff0000; 356 u32 padbytes = 0xffff0000;
357 357
358 padlen = ((skb->len + 4) % 512) ? 0 : 4; 358 padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
359 359
360 if ((!skb_cloned(skb)) && 360 if ((!skb_cloned(skb)) &&
361 ((headroom + tailroom) >= (4 + padlen))) { 361 ((headroom + tailroom) >= (4 + padlen))) {
@@ -377,7 +377,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
377 cpu_to_le32s(&packet_len); 377 cpu_to_le32s(&packet_len);
378 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); 378 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
379 379
380 if ((skb->len % 512) == 0) { 380 if (padlen) {
381 cpu_to_le32s(&padbytes); 381 cpu_to_le32s(&padbytes);
382 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); 382 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
383 skb_put(skb, sizeof(padbytes)); 383 skb_put(skb, sizeof(padbytes));
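
The asix change replaces the hard-coded 512 with the endpoint's actual max packet size when deciding whether a transfer needs a 4-byte pad to avoid an exact multiple of the packet size (which would otherwise require a zero-length packet). A minimal illustration of that rule, assuming maxpacket is a power of two as USB bulk endpoints use (e.g. 64 for full speed, 512 for high speed); the function name is invented.

#include <stddef.h>

/* A bulk transfer whose total length is an exact multiple of the endpoint's
 * max packet size would need a zero-length packet to terminate it, so the
 * driver appends 4 pad bytes instead.  The +4 accounts for the 32-bit ASIX
 * length header prepended to the frame. */
static size_t asix_style_padlen(size_t frame_len, size_t maxpacket)
{
        /* maxpacket must be a power of two for the mask trick to work */
        return ((frame_len + 4) & (maxpacket - 1)) ? 0 : 4;
}

For example, asix_style_padlen(508, 512) and asix_style_padlen(508, 64) both return 4 (508 + 4 lands exactly on a packet boundary), while asix_style_padlen(500, 512) returns 0.
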
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 90a30026a931..00880edba048 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
83 struct cdc_state *info = (void *) &dev->data; 83 struct cdc_state *info = (void *) &dev->data;
84 int status; 84 int status;
85 int rndis; 85 int rndis;
86 bool android_rndis_quirk = false;
86 struct usb_driver *driver = driver_of(intf); 87 struct usb_driver *driver = driver_of(intf);
87 struct usb_cdc_mdlm_desc *desc = NULL; 88 struct usb_cdc_mdlm_desc *desc = NULL;
88 struct usb_cdc_mdlm_detail_desc *detail = NULL; 89 struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
195 info->control, 196 info->control,
196 info->u->bSlaveInterface0, 197 info->u->bSlaveInterface0,
197 info->data); 198 info->data);
199 /* fall back to hard-wiring for RNDIS */
200 if (rndis) {
201 android_rndis_quirk = true;
202 goto next_desc;
203 }
198 goto bad_desc; 204 goto bad_desc;
199 } 205 }
200 if (info->control != intf) { 206 if (info->control != intf) {
@@ -271,11 +277,15 @@ next_desc:
271 /* Microsoft ActiveSync based and some regular RNDIS devices lack the 277 /* Microsoft ActiveSync based and some regular RNDIS devices lack the
272 * CDC descriptors, so we'll hard-wire the interfaces and not check 278 * CDC descriptors, so we'll hard-wire the interfaces and not check
273 * for descriptors. 279 * for descriptors.
280 *
281 * Some Android RNDIS devices have a CDC Union descriptor pointing
282 * to non-existing interfaces. Ignore that and attempt the same
283 * hard-wired 0 and 1 interfaces.
274 */ 284 */
275 if (rndis && !info->u) { 285 if (rndis && (!info->u || android_rndis_quirk)) {
276 info->control = usb_ifnum_to_if(dev->udev, 0); 286 info->control = usb_ifnum_to_if(dev->udev, 0);
277 info->data = usb_ifnum_to_if(dev->udev, 1); 287 info->data = usb_ifnum_to_if(dev->udev, 1);
278 if (!info->control || !info->data) { 288 if (!info->control || !info->data || info->control != intf) {
279 dev_dbg(&intf->dev, 289 dev_dbg(&intf->dev,
280 "rndis: master #0/%p slave #1/%p\n", 290 "rndis: master #0/%p slave #1/%p\n",
281 info->control, 291 info->control,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 552d24bf862e..d316503b35d4 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = {
365 .data = BIT(4), /* interface whitelist bitmap */ 365 .data = BIT(4), /* interface whitelist bitmap */
366}; 366};
367 367
368/* Sierra Wireless provide equally useless interface descriptors
369 * Devices in QMI mode can be switched between two different
370 * configurations:
371 * a) USB interface #8 is QMI/wwan
372 * b) USB interfaces #8, #19 and #20 are QMI/wwan
373 *
374 * Both configurations provide a number of other interfaces (serial++),
375 * some of which have the same endpoint configuration as we expect, so
376 * a whitelist or blacklist is necessary.
377 *
378 * FIXME: The below whitelist should include BIT(20). It does not
379 * because I cannot get it to work...
380 */
381static const struct driver_info qmi_wwan_sierra = {
382 .description = "Sierra Wireless wwan/QMI device",
383 .flags = FLAG_WWAN,
384 .bind = qmi_wwan_bind_gobi,
385 .unbind = qmi_wwan_unbind_shared,
386 .manage_power = qmi_wwan_manage_power,
387 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
388};
368 389
369#define HUAWEI_VENDOR_ID 0x12D1 390#define HUAWEI_VENDOR_ID 0x12D1
370#define QMI_GOBI_DEVICE(vend, prod) \ 391#define QMI_GOBI_DEVICE(vend, prod) \
@@ -445,6 +466,15 @@ static const struct usb_device_id products[] = {
445 .bInterfaceProtocol = 0xff, 466 .bInterfaceProtocol = 0xff,
446 .driver_info = (unsigned long)&qmi_wwan_force_int4, 467 .driver_info = (unsigned long)&qmi_wwan_force_int4,
447 }, 468 },
469 { /* Sierra Wireless MC77xx in QMI mode */
470 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
471 .idVendor = 0x1199,
472 .idProduct = 0x68a2,
473 .bInterfaceClass = 0xff,
474 .bInterfaceSubClass = 0xff,
475 .bInterfaceProtocol = 0xff,
476 .driver_info = (unsigned long)&qmi_wwan_sierra,
477 },
448 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 478 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
449 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 479 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
450 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ 480 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
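
The new qmi_wwan_sierra entry reuses the driver's convention of treating driver_info.data as an interface-number whitelist bitmap. A minimal illustration of how such a bitmap is consumed; the names are invented and EX_BIT stands in for the kernel's BIT() macro.

#include <stdbool.h>

#define EX_BIT(n)  (1UL << (n))

static bool interface_whitelisted(unsigned long whitelist, unsigned int ifnum)
{
        if (ifnum >= 8 * sizeof(whitelist))
                return false;
        return (whitelist & EX_BIT(ifnum)) != 0;
}

/* interface_whitelisted(EX_BIT(8) | EX_BIT(19), 19) -> true
 * interface_whitelisted(EX_BIT(8) | EX_BIT(19), 20) -> false, matching the
 * FIXME above about BIT(20) being left out of the whitelist. */
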
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 187d01ccb973..00103a8c5e04 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -98,7 +98,7 @@ static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
98 98
99 if (unlikely(ret < 0)) 99 if (unlikely(ret < 0))
100 netdev_warn(dev->net, 100 netdev_warn(dev->net,
101 "Failed to read register index 0x%08x", index); 101 "Failed to read reg index 0x%08x: %d", index, ret);
102 102
103 le32_to_cpus(buf); 103 le32_to_cpus(buf);
104 *data = *buf; 104 *data = *buf;
@@ -128,7 +128,7 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
128 128
129 if (unlikely(ret < 0)) 129 if (unlikely(ret < 0))
130 netdev_warn(dev->net, 130 netdev_warn(dev->net,
131 "Failed to write register index 0x%08x", index); 131 "Failed to write reg index 0x%08x: %d", index, ret);
132 132
133 kfree(buf); 133 kfree(buf);
134 134
@@ -171,7 +171,7 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
171 idx &= dev->mii.reg_num_mask; 171 idx &= dev->mii.reg_num_mask;
172 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 172 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
173 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 173 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
174 | MII_ACCESS_READ; 174 | MII_ACCESS_READ | MII_ACCESS_BUSY;
175 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 175 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
176 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 176 check_warn_goto_done(ret, "Error writing MII_ACCESS");
177 177
@@ -210,7 +210,7 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
210 idx &= dev->mii.reg_num_mask; 210 idx &= dev->mii.reg_num_mask;
211 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 211 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
212 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 212 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
213 | MII_ACCESS_WRITE; 213 | MII_ACCESS_WRITE | MII_ACCESS_BUSY;
214 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 214 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
215 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 215 check_warn_goto_done(ret, "Error writing MII_ACCESS");
216 216
@@ -508,9 +508,10 @@ static int smsc75xx_link_reset(struct usbnet *dev)
508 u16 lcladv, rmtadv; 508 u16 lcladv, rmtadv;
509 int ret; 509 int ret;
510 510
511 /* clear interrupt status */ 511 /* read and write to clear phy interrupt status */
512 ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 512 ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
513 check_warn_return(ret, "Error reading PHY_INT_SRC"); 513 check_warn_return(ret, "Error reading PHY_INT_SRC");
514 smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, 0xffff);
514 515
515 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 516 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
516 check_warn_return(ret, "Error writing INT_STS"); 517 check_warn_return(ret, "Error writing INT_STS");
@@ -643,7 +644,7 @@ static int smsc75xx_set_mac_address(struct usbnet *dev)
643 644
644static int smsc75xx_phy_initialize(struct usbnet *dev) 645static int smsc75xx_phy_initialize(struct usbnet *dev)
645{ 646{
646 int bmcr, timeout = 0; 647 int bmcr, ret, timeout = 0;
647 648
648 /* Initialize MII structure */ 649 /* Initialize MII structure */
649 dev->mii.dev = dev->net; 650 dev->mii.dev = dev->net;
@@ -651,6 +652,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
651 dev->mii.mdio_write = smsc75xx_mdio_write; 652 dev->mii.mdio_write = smsc75xx_mdio_write;
652 dev->mii.phy_id_mask = 0x1f; 653 dev->mii.phy_id_mask = 0x1f;
653 dev->mii.reg_num_mask = 0x1f; 654 dev->mii.reg_num_mask = 0x1f;
655 dev->mii.supports_gmii = 1;
654 dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; 656 dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
655 657
656 /* reset phy and wait for reset to complete */ 658 /* reset phy and wait for reset to complete */
@@ -661,7 +663,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
661 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 663 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
662 check_warn_return(bmcr, "Error reading MII_BMCR"); 664 check_warn_return(bmcr, "Error reading MII_BMCR");
663 timeout++; 665 timeout++;
664 } while ((bmcr & MII_BMCR) && (timeout < 100)); 666 } while ((bmcr & BMCR_RESET) && (timeout < 100));
665 667
666 if (timeout >= 100) { 668 if (timeout >= 100) {
667 netdev_warn(dev->net, "timeout on PHY Reset"); 669 netdev_warn(dev->net, "timeout on PHY Reset");
@@ -671,10 +673,13 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
671 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 673 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
672 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 674 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
673 ADVERTISE_PAUSE_ASYM); 675 ADVERTISE_PAUSE_ASYM);
676 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
677 ADVERTISE_1000FULL);
674 678
675 /* read to clear */ 679 /* read and write to clear phy interrupt status */
676 smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 680 ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
677 check_warn_return(bmcr, "Error reading PHY_INT_SRC"); 681 check_warn_return(ret, "Error reading PHY_INT_SRC");
682 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
678 683
679 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 684 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
680 PHY_INT_MASK_DEFAULT); 685 PHY_INT_MASK_DEFAULT);
@@ -946,6 +951,14 @@ static int smsc75xx_reset(struct usbnet *dev)
946 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); 951 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
947 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); 952 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
948 953
954 /* allow mac to detect speed and duplex from phy */
955 ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
956 check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
957
958 buf |= (MAC_CR_ADD | MAC_CR_ASD);
959 ret = smsc75xx_write_reg(dev, MAC_CR, buf);
960 check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
961
949 ret = smsc75xx_read_reg(dev, MAC_TX, &buf); 962 ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
950 check_warn_return(ret, "Failed to read MAC_TX: %d", ret); 963 check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
951 964
@@ -1051,6 +1064,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1051 dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1064 dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
1052 dev->net->flags |= IFF_MULTICAST; 1065 dev->net->flags |= IFF_MULTICAST;
1053 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; 1066 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
1067 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1054 return 0; 1068 return 0;
1055} 1069}
1056 1070
@@ -1211,7 +1225,7 @@ static const struct driver_info smsc75xx_info = {
1211 .rx_fixup = smsc75xx_rx_fixup, 1225 .rx_fixup = smsc75xx_rx_fixup,
1212 .tx_fixup = smsc75xx_tx_fixup, 1226 .tx_fixup = smsc75xx_tx_fixup,
1213 .status = smsc75xx_status, 1227 .status = smsc75xx_status,
1214 .flags = FLAG_ETHER | FLAG_SEND_ZLP, 1228 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
1215}; 1229};
1216 1230
1217static const struct usb_device_id products[] = { 1231static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5f19f84d3494..94ae66999f59 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1017,6 +1017,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1017 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1017 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1018 dev->net->flags |= IFF_MULTICAST; 1018 dev->net->flags |= IFF_MULTICAST;
1019 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; 1019 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1020 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1020 return 0; 1021 return 0;
1021} 1022}
1022 1023
@@ -1191,7 +1192,7 @@ static const struct driver_info smsc95xx_info = {
1191 .rx_fixup = smsc95xx_rx_fixup, 1192 .rx_fixup = smsc95xx_rx_fixup,
1192 .tx_fixup = smsc95xx_tx_fixup, 1193 .tx_fixup = smsc95xx_tx_fixup,
1193 .status = smsc95xx_status, 1194 .status = smsc95xx_status,
1194 .flags = FLAG_ETHER | FLAG_SEND_ZLP, 1195 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
1195}; 1196};
1196 1197
1197static const struct usb_device_id products[] = { 1198static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b0d406..2d927fb4adf4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -210,6 +210,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
210 } else { 210 } else {
211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe, 211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
212 buf, maxp, intr_complete, dev, period); 212 buf, maxp, intr_complete, dev, period);
213 dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
213 dev_dbg(&intf->dev, 214 dev_dbg(&intf->dev,
214 "status ep%din, %d bytes period %d\n", 215 "status ep%din, %d bytes period %d\n",
215 usb_pipeendpoint(pipe), maxp, period); 216 usb_pipeendpoint(pipe), maxp, period);
@@ -1443,7 +1444,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1443 1444
1444 status = register_netdev (net); 1445 status = register_netdev (net);
1445 if (status) 1446 if (status)
1446 goto out3; 1447 goto out4;
1447 netif_info(dev, probe, dev->net, 1448 netif_info(dev, probe, dev->net,
1448 "register '%s' at usb-%s-%s, %s, %pM\n", 1449 "register '%s' at usb-%s-%s, %s, %pM\n",
1449 udev->dev.driver->name, 1450 udev->dev.driver->name,
@@ -1461,6 +1462,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1461 1462
1462 return 0; 1463 return 0;
1463 1464
1465out4:
1466 usb_free_urb(dev->interrupt);
1464out3: 1467out3:
1465 if (info->unbind) 1468 if (info->unbind)
1466 info->unbind (dev, udev); 1469 info->unbind (dev, udev);
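
The usbnet fix adds an out4 label so a register_netdev() failure also frees the interrupt URB allocated earlier in probe. This is the usual goto-unwind pattern: each acquisition gets a label, and a failure jumps to the label that releases everything acquired so far. A standalone sketch of the ordering, with malloc/free standing in for the real resources and all names invented.

#include <stdlib.h>

static int example_probe(void)
{
        void *bound = NULL, *urb = NULL;
        int status = -1;                 /* stand-in for -ENOMEM            */

        bound = malloc(16);              /* stands in for info->bind()      */
        if (!bound)
                goto out;
        urb = malloc(16);                /* stands in for the status URB    */
        if (!urb)
                goto out3;

        status = -1;                     /* pretend register_netdev() fails */
        if (status)
                goto out4;               /* must release the URB as well    */
        return 0;

out4:
        free(urb);                       /* usb_free_urb(dev->interrupt)    */
out3:
        free(bound);                     /* info->unbind()                  */
out:
        return status;
}
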
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4de2760c5937..af8acc85f4bb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -626,16 +626,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
626 /* This can happen with OOM and indirect buffers. */ 626 /* This can happen with OOM and indirect buffers. */
627 if (unlikely(capacity < 0)) { 627 if (unlikely(capacity < 0)) {
628 if (likely(capacity == -ENOMEM)) { 628 if (likely(capacity == -ENOMEM)) {
629 if (net_ratelimit()) { 629 if (net_ratelimit())
630 dev_warn(&dev->dev, 630 dev_warn(&dev->dev,
631 "TX queue failure: out of memory\n"); 631 "TX queue failure: out of memory\n");
632 } else { 632 } else {
633 dev->stats.tx_fifo_errors++; 633 dev->stats.tx_fifo_errors++;
634 if (net_ratelimit()) 634 if (net_ratelimit())
635 dev_warn(&dev->dev, 635 dev_warn(&dev->dev,
636 "Unexpected TX queue failure: %d\n", 636 "Unexpected TX queue failure: %d\n",
637 capacity); 637 capacity);
638 }
639 } 638 }
640 dev->stats.tx_dropped++; 639 dev->stats.tx_dropped++;
641 kfree_skb(skb); 640 kfree_skb(skb);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ebb9f24eefb5..1a623183cbe5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2483 pr_err("Control memory remap failed\n"); 2483 pr_err("Control memory remap failed\n");
2484 pci_release_regions(pdev); 2484 pci_release_regions(pdev);
2485 pci_disable_device(pdev); 2485 pci_disable_device(pdev);
2486 iounmap(card->mem);
2486 kfree(card); 2487 kfree(card);
2487 return -ENODEV; 2488 return -ENODEV;
2488 } 2489 }
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8faa129da5a0..aec33cc207fd 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -19,6 +19,7 @@
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <linux/export.h>
22#include <ar231x_platform.h> 23#include <ar231x_platform.h>
23#include "ath5k.h" 24#include "ath5k.h"
24#include "debug.h" 25#include "debug.h"
@@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
119 if (res == NULL) { 120 if (res == NULL) {
120 dev_err(&pdev->dev, "no IRQ resource found\n"); 121 dev_err(&pdev->dev, "no IRQ resource found\n");
121 ret = -ENXIO; 122 ret = -ENXIO;
122 goto err_out; 123 goto err_iounmap;
123 } 124 }
124 125
125 irq = res->start; 126 irq = res->start;
@@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
128 if (hw == NULL) { 129 if (hw == NULL) {
129 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 130 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
130 ret = -ENOMEM; 131 ret = -ENOMEM;
131 goto err_out; 132 goto err_iounmap;
132 } 133 }
133 134
134 ah = hw->priv; 135 ah = hw->priv;
@@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
185 err_free_hw: 186 err_free_hw:
186 ieee80211_free_hw(hw); 187 ieee80211_free_hw(hw);
187 platform_set_drvdata(pdev, NULL); 188 platform_set_drvdata(pdev, NULL);
189 err_iounmap:
190 iounmap(mem);
188 err_out: 191 err_out:
189 return ret; 192 return ret;
190} 193}
@@ -217,6 +220,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
217 } 220 }
218 221
219 ath5k_deinit_ah(ah); 222 ath5k_deinit_ah(ah);
223 iounmap(ah->iobase);
220 platform_set_drvdata(pdev, NULL); 224 platform_set_drvdata(pdev, NULL);
221 ieee80211_free_hw(hw); 225 ieee80211_free_hw(hw);
222 226
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d7d8e9199140..aba088005b22 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -869,7 +869,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
869 ar5008_hw_set_channel_regs(ah, chan); 869 ar5008_hw_set_channel_regs(ah, chan);
870 ar5008_hw_init_chain_masks(ah); 870 ar5008_hw_init_chain_masks(ah);
871 ath9k_olc_init(ah); 871 ath9k_olc_init(ah);
872 ath9k_hw_apply_txpower(ah, chan); 872 ath9k_hw_apply_txpower(ah, chan, false);
873 873
874 /* Write analog registers */ 874 /* Write analog registers */
875 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 875 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 59647a3ceb7f..3d400e8d6535 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -54,7 +54,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
54 54
55 if (val) { 55 if (val) {
56 ah->paprd_table_write_done = true; 56 ah->paprd_table_write_done = true;
57 ath9k_hw_apply_txpower(ah, chan); 57 ath9k_hw_apply_txpower(ah, chan, false);
58 } 58 }
59 59
60 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0, 60 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index bc992b237ae5..600aca9fe6b1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
373 else 373 else
374 spur_subchannel_sd = 0; 374 spur_subchannel_sd = 0;
375 375
376 spur_freq_sd = (freq_offset << 9) / 11; 376 spur_freq_sd = ((freq_offset + 10) << 9) / 11;
377 377
378 } else { 378 } else {
379 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, 379 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
382 else 382 else
383 spur_subchannel_sd = 1; 383 spur_subchannel_sd = 1;
384 384
385 spur_freq_sd = (freq_offset << 9) / 11; 385 spur_freq_sd = ((freq_offset - 10) << 9) / 11;
386 386
387 } 387 }
388 388
@@ -694,7 +694,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
694 ar9003_hw_override_ini(ah); 694 ar9003_hw_override_ini(ah);
695 ar9003_hw_set_channel_regs(ah, chan); 695 ar9003_hw_set_channel_regs(ah, chan);
696 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 696 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
697 ath9k_hw_apply_txpower(ah, chan); 697 ath9k_hw_apply_txpower(ah, chan, false);
698 698
699 if (AR_SREV_9462(ah)) { 699 if (AR_SREV_9462(ah)) {
700 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, 700 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index f272236d8053..b34e8b2990b1 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -824,6 +824,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
824 regulatory->max_power_level = ratesArray[i]; 824 regulatory->max_power_level = ratesArray[i];
825 } 825 }
826 826
827 ath9k_hw_update_regulatory_maxpower(ah);
828
827 if (test) 829 if (test)
828 return; 830 return;
829 831
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6c69e4e8b1cb..fa84e37bf091 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1454,7 +1454,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1454 return false; 1454 return false;
1455 } 1455 }
1456 ath9k_hw_set_clockrate(ah); 1456 ath9k_hw_set_clockrate(ah);
1457 ath9k_hw_apply_txpower(ah, chan); 1457 ath9k_hw_apply_txpower(ah, chan, false);
1458 ath9k_hw_rfbus_done(ah); 1458 ath9k_hw_rfbus_done(ah);
1459 1459
1460 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1460 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
@@ -2652,7 +2652,8 @@ static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
2652 return ah->eep_ops->get_eeprom(ah, gain_param); 2652 return ah->eep_ops->get_eeprom(ah, gain_param);
2653} 2653}
2654 2654
2655void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan) 2655void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2656 bool test)
2656{ 2657{
2657 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 2658 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2658 struct ieee80211_channel *channel; 2659 struct ieee80211_channel *channel;
@@ -2673,7 +2674,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
2673 2674
2674 ah->eep_ops->set_txpower(ah, chan, 2675 ah->eep_ops->set_txpower(ah, chan,
2675 ath9k_regd_get_ctl(reg, chan), 2676 ath9k_regd_get_ctl(reg, chan),
2676 ant_reduction, new_pwr, false); 2677 ant_reduction, new_pwr, test);
2677} 2678}
2678 2679
2679void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) 2680void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
@@ -2686,7 +2687,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
2686 if (test) 2687 if (test)
2687 channel->max_power = MAX_RATE_POWER / 2; 2688 channel->max_power = MAX_RATE_POWER / 2;
2688 2689
2689 ath9k_hw_apply_txpower(ah, chan); 2690 ath9k_hw_apply_txpower(ah, chan, test);
2690 2691
2691 if (test) 2692 if (test)
2692 channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2); 2693 channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index aa1680a0c7fd..e88f182ff45c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -985,7 +985,8 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
985/* PHY */ 985/* PHY */
986void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, 986void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
987 u32 *coef_mantissa, u32 *coef_exponent); 987 u32 *coef_mantissa, u32 *coef_exponent);
988void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan); 988void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
989 bool test);
989 990
990/* 991/*
991 * Code Specific to AR5008, AR9001 or AR9002, 992 * Code Specific to AR5008, AR9001 or AR9002,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2504ab005589..798ea57252b4 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1548,6 +1548,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1548 struct ath_hw *ah = sc->sc_ah; 1548 struct ath_hw *ah = sc->sc_ah;
1549 struct ath_common *common = ath9k_hw_common(ah); 1549 struct ath_common *common = ath9k_hw_common(ah);
1550 struct ieee80211_conf *conf = &hw->conf; 1550 struct ieee80211_conf *conf = &hw->conf;
1551 bool reset_channel = false;
1551 1552
1552 ath9k_ps_wakeup(sc); 1553 ath9k_ps_wakeup(sc);
1553 mutex_lock(&sc->mutex); 1554 mutex_lock(&sc->mutex);
@@ -1556,6 +1557,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1556 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1557 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1557 if (sc->ps_idle) 1558 if (sc->ps_idle)
1558 ath_cancel_work(sc); 1559 ath_cancel_work(sc);
1560 else
1561 /*
1562 * The chip needs a reset to properly wake up from
1563 * full sleep
1564 */
1565 reset_channel = ah->chip_fullsleep;
1559 } 1566 }
1560 1567
1561 /* 1568 /*
@@ -1584,7 +1591,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1584 } 1591 }
1585 } 1592 }
1586 1593
1587 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1594 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1588 struct ieee80211_channel *curchan = hw->conf.channel; 1595 struct ieee80211_channel *curchan = hw->conf.channel;
1589 int pos = curchan->hw_value; 1596 int pos = curchan->hw_value;
1590 int old_pos = -1; 1597 int old_pos = -1;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 834e6bc45e8b..23eaa1b26ebe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1820 struct ath_frame_info *fi = get_frame_info(skb); 1820 struct ath_frame_info *fi = get_frame_info(skb);
1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1822 struct ath_buf *bf; 1822 struct ath_buf *bf;
1823 int fragno;
1823 u16 seqno; 1824 u16 seqno;
1824 1825
1825 bf = ath_tx_get_buffer(sc); 1826 bf = ath_tx_get_buffer(sc);
@@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1831 ATH_TXBUF_RESET(bf); 1832 ATH_TXBUF_RESET(bf);
1832 1833
1833 if (tid) { 1834 if (tid) {
1835 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1834 seqno = tid->seq_next; 1836 seqno = tid->seq_next;
1835 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1837 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1836 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1838
1839 if (fragno)
1840 hdr->seq_ctrl |= cpu_to_le16(fragno);
1841
1842 if (!ieee80211_has_morefrags(hdr->frame_control))
1843 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1844
1837 bf->bf_state.seqno = seqno; 1845 bf->bf_state.seqno = seqno;
1838 } 1846 }
1839 1847
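
The ath9k xmit change stops advancing the per-TID sequence counter on every frame: fragments of one MSDU share the sequence number (the upper 12 bits of seq_ctrl) and differ only in the 4-bit fragment number, so the counter moves on only after the final fragment. A minimal illustration of that assembly; the constants mirror IEEE80211_SEQ_SEQ_SHIFT and IEEE80211_SCTL_FRAG, and the names are invented.

#include <stdbool.h>
#include <stdint.h>

#define EX_SEQ_SHIFT  4                  /* fragment number uses bits 0-3   */
#define EX_SEQ_MAX    4096               /* 12-bit sequence number wraps    */
#define EX_FRAG_MASK  0x000F

struct ex_tid { uint16_t seq_next; };    /* per-TID sequence counter        */

static uint16_t ex_build_seq_ctrl(struct ex_tid *tid, uint8_t fragno,
                                  bool more_frags)
{
        uint16_t seq_ctrl = (uint16_t)(tid->seq_next << EX_SEQ_SHIFT) |
                            (fragno & EX_FRAG_MASK);

        if (!more_frags)                 /* last (or only) fragment         */
                tid->seq_next = (tid->seq_next + 1) % EX_SEQ_MAX;
        return seq_ctrl;
}
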
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c79e6638c88d..e4d6dc2e37d1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4827,8 +4827,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
4827 out_mutex_unlock: 4827 out_mutex_unlock:
4828 mutex_unlock(&wl->mutex); 4828 mutex_unlock(&wl->mutex);
4829 4829
4830 /* reload configuration */ 4830 /*
4831 b43_op_config(hw, ~0); 4831 * Configuration may have been overwritten during initialization.
4832 * Reload the configuration, but only if initialization was
4833 * successful. Reloading the configuration after a failed init
4834 * may hang the system.
4835 */
4836 if (!err)
4837 b43_op_config(hw, ~0);
4832 4838
4833 return err; 4839 return err;
4834} 4840}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 4688904908ec..758c115b556e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -108,9 +108,15 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
108 sdio_release_host(sdfunc); 108 sdio_release_host(sdfunc);
109 } 109 }
110 } else if (regaddr == SDIO_CCCR_ABORT) { 110 } else if (regaddr == SDIO_CCCR_ABORT) {
111 sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
112 GFP_KERNEL);
113 if (!sdfunc)
114 return -ENOMEM;
115 sdfunc->num = 0;
111 sdio_claim_host(sdfunc); 116 sdio_claim_host(sdfunc);
112 sdio_writeb(sdfunc, *byte, regaddr, &err_ret); 117 sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
113 sdio_release_host(sdfunc); 118 sdio_release_host(sdfunc);
119 kfree(sdfunc);
114 } else if (regaddr < 0xF0) { 120 } else if (regaddr < 0xF0) {
115 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); 121 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
116 err_ret = -EPERM; 122 err_ret = -EPERM;
@@ -486,7 +492,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
486 kfree(bus_if); 492 kfree(bus_if);
487 return -ENOMEM; 493 return -ENOMEM;
488 } 494 }
489 sdiodev->func[0] = func->card->sdio_func[0]; 495 sdiodev->func[0] = func;
490 sdiodev->func[1] = func; 496 sdiodev->func[1] = func;
491 sdiodev->bus_if = bus_if; 497 sdiodev->bus_if = bus_if;
492 bus_if->bus_priv.sdio = sdiodev; 498 bus_if->bus_priv.sdio = sdiodev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 2bf5dda29291..e2b34e1563f4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -574,6 +574,8 @@ struct brcmf_sdio {
574 574
575 struct task_struct *dpc_tsk; 575 struct task_struct *dpc_tsk;
576 struct completion dpc_wait; 576 struct completion dpc_wait;
577 struct list_head dpc_tsklst;
578 spinlock_t dpc_tl_lock;
577 579
578 struct semaphore sdsem; 580 struct semaphore sdsem;
579 581
@@ -2594,29 +2596,59 @@ clkwait:
2594 return resched; 2596 return resched;
2595} 2597}
2596 2598
2599static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2600{
2601 struct list_head *new_hd;
2602 unsigned long flags;
2603
2604 if (in_interrupt())
2605 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2606 else
2607 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2608 if (new_hd == NULL)
2609 return;
2610
2611 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2612 list_add_tail(new_hd, &bus->dpc_tsklst);
2613 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2614}
2615
2597static int brcmf_sdbrcm_dpc_thread(void *data) 2616static int brcmf_sdbrcm_dpc_thread(void *data)
2598{ 2617{
2599 struct brcmf_sdio *bus = (struct brcmf_sdio *) data; 2618 struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
2619 struct list_head *cur_hd, *tmp_hd;
2620 unsigned long flags;
2600 2621
2601 allow_signal(SIGTERM); 2622 allow_signal(SIGTERM);
2602 /* Run until signal received */ 2623 /* Run until signal received */
2603 while (1) { 2624 while (1) {
2604 if (kthread_should_stop()) 2625 if (kthread_should_stop())
2605 break; 2626 break;
2606 if (!wait_for_completion_interruptible(&bus->dpc_wait)) { 2627
2607 /* Call bus dpc unless it indicated down 2628 if (list_empty(&bus->dpc_tsklst))
2608 (then clean stop) */ 2629 if (wait_for_completion_interruptible(&bus->dpc_wait))
2609 if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) { 2630 break;
2610 if (brcmf_sdbrcm_dpc(bus)) 2631
2611 complete(&bus->dpc_wait); 2632 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2612 } else { 2633 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
2634 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2635
2636 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
2613 /* after stopping the bus, exit thread */ 2637 /* after stopping the bus, exit thread */
2614 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); 2638 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
2615 bus->dpc_tsk = NULL; 2639 bus->dpc_tsk = NULL;
2640 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2616 break; 2641 break;
2617 } 2642 }
2618 } else 2643
2619 break; 2644 if (brcmf_sdbrcm_dpc(bus))
2645 brcmf_sdbrcm_adddpctsk(bus);
2646
2647 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2648 list_del(cur_hd);
2649 kfree(cur_hd);
2650 }
2651 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2620 } 2652 }
2621 return 0; 2653 return 0;
2622} 2654}
@@ -2669,8 +2701,10 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2669 /* Schedule DPC if needed to send queued packet(s) */ 2701 /* Schedule DPC if needed to send queued packet(s) */
2670 if (!bus->dpc_sched) { 2702 if (!bus->dpc_sched) {
2671 bus->dpc_sched = true; 2703 bus->dpc_sched = true;
2672 if (bus->dpc_tsk) 2704 if (bus->dpc_tsk) {
2705 brcmf_sdbrcm_adddpctsk(bus);
2673 complete(&bus->dpc_wait); 2706 complete(&bus->dpc_wait);
2707 }
2674 } 2708 }
2675 2709
2676 return ret; 2710 return ret;
@@ -3514,8 +3548,10 @@ void brcmf_sdbrcm_isr(void *arg)
3514 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); 3548 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
3515 3549
3516 bus->dpc_sched = true; 3550 bus->dpc_sched = true;
3517 if (bus->dpc_tsk) 3551 if (bus->dpc_tsk) {
3552 brcmf_sdbrcm_adddpctsk(bus);
3518 complete(&bus->dpc_wait); 3553 complete(&bus->dpc_wait);
3554 }
3519} 3555}
3520 3556
3521static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) 3557static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3559,8 +3595,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3559 bus->ipend = true; 3595 bus->ipend = true;
3560 3596
3561 bus->dpc_sched = true; 3597 bus->dpc_sched = true;
3562 if (bus->dpc_tsk) 3598 if (bus->dpc_tsk) {
3599 brcmf_sdbrcm_adddpctsk(bus);
3563 complete(&bus->dpc_wait); 3600 complete(&bus->dpc_wait);
3601 }
3564 } 3602 }
3565 } 3603 }
3566 3604
@@ -3897,6 +3935,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3897 } 3935 }
3898 /* Initialize DPC thread */ 3936 /* Initialize DPC thread */
3899 init_completion(&bus->dpc_wait); 3937 init_completion(&bus->dpc_wait);
3938 INIT_LIST_HEAD(&bus->dpc_tsklst);
3939 spin_lock_init(&bus->dpc_tl_lock);
3900 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread, 3940 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
3901 bus, "brcmf_dpc"); 3941 bus, "brcmf_dpc");
3902 if (IS_ERR(bus->dpc_tsk)) { 3942 if (IS_ERR(bus->dpc_tsk)) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 231ddf4a674f..b4d92792c502 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -847,8 +847,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
847 */ 847 */
848 if (!(txs->status & TX_STATUS_AMPDU) 848 if (!(txs->status & TX_STATUS_AMPDU)
849 && (txs->status & TX_STATUS_INTERMEDIATE)) { 849 && (txs->status & TX_STATUS_INTERMEDIATE)) {
850 wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n", 850 BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
851 __func__);
852 return false; 851 return false;
853 } 852 }
854 853
@@ -7614,6 +7613,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7614{ 7613{
7615 int len_mpdu; 7614 int len_mpdu;
7616 struct ieee80211_rx_status rx_status; 7615 struct ieee80211_rx_status rx_status;
7616 struct ieee80211_hdr *hdr;
7617 7617
7618 memset(&rx_status, 0, sizeof(rx_status)); 7618 memset(&rx_status, 0, sizeof(rx_status));
7619 prep_mac80211_status(wlc, rxh, p, &rx_status); 7619 prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -7623,6 +7623,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7623 skb_pull(p, D11_PHY_HDR_LEN); 7623 skb_pull(p, D11_PHY_HDR_LEN);
7624 __skb_trim(p, len_mpdu); 7624 __skb_trim(p, len_mpdu);
7625 7625
7626 /* unmute transmit */
7627 if (wlc->hw->suspended_fifos) {
7628 hdr = (struct ieee80211_hdr *)p->data;
7629 if (ieee80211_is_beacon(hdr->frame_control))
7630 brcms_b_mute(wlc->hw, false);
7631 }
7632
7626 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); 7633 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
7627 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); 7634 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
7628} 7635}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 2b022571a859..1779db3aa2b0 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2191,6 +2191,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2191{ 2191{
2192 int rc = 0; 2192 int rc = 0;
2193 unsigned long flags; 2193 unsigned long flags;
2194 unsigned long now, end;
2194 2195
2195 spin_lock_irqsave(&priv->lock, flags); 2196 spin_lock_irqsave(&priv->lock, flags);
2196 if (priv->status & STATUS_HCMD_ACTIVE) { 2197 if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -2232,10 +2233,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2232 } 2233 }
2233 spin_unlock_irqrestore(&priv->lock, flags); 2234 spin_unlock_irqrestore(&priv->lock, flags);
2234 2235
2236 now = jiffies;
2237 end = now + HOST_COMPLETE_TIMEOUT;
2238again:
2235 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2239 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2236 !(priv-> 2240 !(priv->
2237 status & STATUS_HCMD_ACTIVE), 2241 status & STATUS_HCMD_ACTIVE),
2238 HOST_COMPLETE_TIMEOUT); 2242 end - now);
2243 if (rc < 0) {
2244 now = jiffies;
2245 if (time_before(now, end))
2246 goto again;
2247 rc = 0;
2248 }
2249
2239 if (rc == 0) { 2250 if (rc == 0) {
2240 spin_lock_irqsave(&priv->lock, flags); 2251 spin_lock_irqsave(&priv->lock, flags);
2241 if (priv->status & STATUS_HCMD_ACTIVE) { 2252 if (priv->status & STATUS_HCMD_ACTIVE) {
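
The ipw2200 change retries the wait when it is interrupted by a signal (negative return) while keeping the original deadline, so a signal no longer makes the command appear to time out early. In userspace the same effect falls out naturally from APIs that take an absolute deadline; a small sketch follows, with an invented function name.

#include <errno.h>
#include <semaphore.h>
#include <time.h>

/* Wait up to 'timeout_s' seconds for 'sem', retrying after signal
 * interruptions but always measuring against one fixed deadline -- the
 * same end/now bookkeeping the patch adds around
 * wait_event_interruptible_timeout(). */
static int wait_with_fixed_deadline(sem_t *sem, int timeout_s)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_s;

        for (;;) {
                if (sem_timedwait(sem, &deadline) == 0)
                        return 0;        /* condition met                   */
                if (errno == EINTR)
                        continue;        /* signal: retry, same deadline    */
                return -1;               /* ETIMEDOUT or other error        */
        }
}
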
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 5b0d888f746b..8d80e233bc7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -46,8 +46,8 @@
46#include "iwl-prph.h" 46#include "iwl-prph.h"
47 47
48/* Highest firmware API version supported */ 48/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 6 49#define IWL1000_UCODE_API_MAX 5
50#define IWL100_UCODE_API_MAX 6 50#define IWL100_UCODE_API_MAX 5
51 51
52/* Oldest version we won't warn about */ 52/* Oldest version we won't warn about */
53#define IWL1000_UCODE_API_OK 5 53#define IWL1000_UCODE_API_OK 5
@@ -226,5 +226,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
226 IWL_DEVICE_100, 226 IWL_DEVICE_100,
227}; 227};
228 228
229MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 229MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
230MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX)); 230MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 5635b9e2c69e..ea108622e0bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -51,10 +51,10 @@
51#define IWL135_UCODE_API_MAX 6 51#define IWL135_UCODE_API_MAX 6
52 52
53/* Oldest version we won't warn about */ 53/* Oldest version we won't warn about */
54#define IWL2030_UCODE_API_OK 5 54#define IWL2030_UCODE_API_OK 6
55#define IWL2000_UCODE_API_OK 5 55#define IWL2000_UCODE_API_OK 6
56#define IWL105_UCODE_API_OK 5 56#define IWL105_UCODE_API_OK 6
57#define IWL135_UCODE_API_OK 5 57#define IWL135_UCODE_API_OK 6
58 58
59/* Lowest firmware API version supported */ 59/* Lowest firmware API version supported */
60#define IWL2030_UCODE_API_MIN 5 60#define IWL2030_UCODE_API_MIN 5
@@ -328,7 +328,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
328 .ht_params = &iwl2000_ht_params, 328 .ht_params = &iwl2000_ht_params,
329}; 329};
330 330
331MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); 331MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
332MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); 332MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
333MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); 333MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
334MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX)); 334MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a805e97b89af..de0920c74cdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,6 +51,10 @@
51#define IWL5000_UCODE_API_MAX 5 51#define IWL5000_UCODE_API_MAX 5
52#define IWL5150_UCODE_API_MAX 2 52#define IWL5150_UCODE_API_MAX 2
53 53
54/* Oldest version we won't warn about */
55#define IWL5000_UCODE_API_OK 5
56#define IWL5150_UCODE_API_OK 2
57
54/* Lowest firmware API version supported */ 58/* Lowest firmware API version supported */
55#define IWL5000_UCODE_API_MIN 1 59#define IWL5000_UCODE_API_MIN 1
56#define IWL5150_UCODE_API_MIN 1 60#define IWL5150_UCODE_API_MIN 1
@@ -326,6 +330,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
326#define IWL_DEVICE_5000 \ 330#define IWL_DEVICE_5000 \
327 .fw_name_pre = IWL5000_FW_PRE, \ 331 .fw_name_pre = IWL5000_FW_PRE, \
328 .ucode_api_max = IWL5000_UCODE_API_MAX, \ 332 .ucode_api_max = IWL5000_UCODE_API_MAX, \
333 .ucode_api_ok = IWL5000_UCODE_API_OK, \
329 .ucode_api_min = IWL5000_UCODE_API_MIN, \ 334 .ucode_api_min = IWL5000_UCODE_API_MIN, \
330 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 335 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
331 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 336 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
@@ -371,6 +376,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
371 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", 376 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
372 .fw_name_pre = IWL5000_FW_PRE, 377 .fw_name_pre = IWL5000_FW_PRE,
373 .ucode_api_max = IWL5000_UCODE_API_MAX, 378 .ucode_api_max = IWL5000_UCODE_API_MAX,
379 .ucode_api_ok = IWL5000_UCODE_API_OK,
374 .ucode_api_min = IWL5000_UCODE_API_MIN, 380 .ucode_api_min = IWL5000_UCODE_API_MIN,
375 .max_inst_size = IWLAGN_RTC_INST_SIZE, 381 .max_inst_size = IWLAGN_RTC_INST_SIZE,
376 .max_data_size = IWLAGN_RTC_DATA_SIZE, 382 .max_data_size = IWLAGN_RTC_DATA_SIZE,
@@ -386,6 +392,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
386#define IWL_DEVICE_5150 \ 392#define IWL_DEVICE_5150 \
387 .fw_name_pre = IWL5150_FW_PRE, \ 393 .fw_name_pre = IWL5150_FW_PRE, \
388 .ucode_api_max = IWL5150_UCODE_API_MAX, \ 394 .ucode_api_max = IWL5150_UCODE_API_MAX, \
395 .ucode_api_ok = IWL5150_UCODE_API_OK, \
389 .ucode_api_min = IWL5150_UCODE_API_MIN, \ 396 .ucode_api_min = IWL5150_UCODE_API_MIN, \
390 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 397 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
391 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 398 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
@@ -409,5 +416,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
409 IWL_DEVICE_5150, 416 IWL_DEVICE_5150,
410}; 417};
411 418
412MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 419MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
413MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 420MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 64060cd738b5..f0c91505a7f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -53,6 +53,8 @@
53/* Oldest version we won't warn about */ 53/* Oldest version we won't warn about */
54#define IWL6000_UCODE_API_OK 4 54#define IWL6000_UCODE_API_OK 4
55#define IWL6000G2_UCODE_API_OK 5 55#define IWL6000G2_UCODE_API_OK 5
56#define IWL6050_UCODE_API_OK 5
57#define IWL6000G2B_UCODE_API_OK 6
56 58
57/* Lowest firmware API version supported */ 59/* Lowest firmware API version supported */
58#define IWL6000_UCODE_API_MIN 4 60#define IWL6000_UCODE_API_MIN 4
@@ -388,7 +390,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
388#define IWL_DEVICE_6030 \ 390#define IWL_DEVICE_6030 \
389 .fw_name_pre = IWL6030_FW_PRE, \ 391 .fw_name_pre = IWL6030_FW_PRE, \
390 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 392 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
391 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ 393 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
392 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 394 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
393 .max_inst_size = IWL60_RTC_INST_SIZE, \ 395 .max_inst_size = IWL60_RTC_INST_SIZE, \
394 .max_data_size = IWL60_RTC_DATA_SIZE, \ 396 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -557,6 +559,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
557}; 559};
558 560
559MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); 561MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
560MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 562MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
561MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 563MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
562MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 564MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index f4b84d1596e3..22474608a70b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -773,8 +773,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
773 struct sk_buff *skb; 773 struct sk_buff *skb;
774 __le16 fc = hdr->frame_control; 774 __le16 fc = hdr->frame_control;
775 struct iwl_rxon_context *ctx; 775 struct iwl_rxon_context *ctx;
776 struct page *p; 776 unsigned int hdrlen, fraglen;
777 int offset;
778 777
779 /* We only process data packets if the interface is open */ 778 /* We only process data packets if the interface is open */
780 if (unlikely(!priv->is_open)) { 779 if (unlikely(!priv->is_open)) {
@@ -788,16 +787,24 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
788 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 787 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
789 return; 788 return;
790 789
791 skb = dev_alloc_skb(128); 790 /* Dont use dev_alloc_skb(), we'll have enough headroom once
791 * ieee80211_hdr pulled.
792 */
793 skb = alloc_skb(128, GFP_ATOMIC);
792 if (!skb) { 794 if (!skb) {
793 IWL_ERR(priv, "dev_alloc_skb failed\n"); 795 IWL_ERR(priv, "alloc_skb failed\n");
794 return; 796 return;
795 } 797 }
798 hdrlen = min_t(unsigned int, len, skb_tailroom(skb));
799 memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
800 fraglen = len - hdrlen;
796 801
797 offset = (void *)hdr - rxb_addr(rxb); 802 if (fraglen) {
798 p = rxb_steal_page(rxb); 803 int offset = (void *)hdr + hdrlen - rxb_addr(rxb);
799 skb_add_rx_frag(skb, 0, p, offset, len, len);
800 804
805 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
806 fraglen, rxb->truesize);
807 }
801 iwl_update_stats(priv, false, fc, len); 808 iwl_update_stats(priv, false, fc, len);
802 809
803 /* 810 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f1226dbf789d..2a9a16f901c3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -863,7 +863,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
863 863
864void iwlagn_prepare_restart(struct iwl_priv *priv) 864void iwlagn_prepare_restart(struct iwl_priv *priv)
865{ 865{
866 struct iwl_rxon_context *ctx;
867 bool bt_full_concurrent; 866 bool bt_full_concurrent;
868 u8 bt_ci_compliance; 867 u8 bt_ci_compliance;
869 u8 bt_load; 868 u8 bt_load;
@@ -872,8 +871,6 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
872 871
873 lockdep_assert_held(&priv->mutex); 872 lockdep_assert_held(&priv->mutex);
874 873
875 for_each_context(priv, ctx)
876 ctx->vif = NULL;
877 priv->is_open = 0; 874 priv->is_open = 0;
878 875
879 /* 876 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 90208094b8eb..74bce97a8600 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -104,15 +104,29 @@
104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte 105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
106 * aligned (address bits 0-7 must be 0). 106 * aligned (address bits 0-7 must be 0).
107 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
108 * for them are in different places.
107 * 109 *
108 * Bit fields in each pointer register: 110 * Bit fields in each pointer register:
109 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned 111 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
110 */ 112 */
111#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) 113#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
112#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) 114#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
113 115#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
114/* Find TFD CB base pointer for given queue (range 0-15). */ 116#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
115#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) 117#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
118#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
119
120/* Find TFD CB base pointer for given queue */
121static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
122{
123 if (chnl < 16)
124 return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
125 if (chnl < 20)
126 return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
127 WARN_ON_ONCE(chnl >= 32);
128 return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
129}
116 130
117 131
118/** 132/**
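
The new FH_MEM_CBBC_QUEUE() helper exists because queues 0-15, 16-19 and 20-31 now live in three separate register banks, so the old single "base + 4 * queue" macro cannot simply be extended; the banks are not even monotonic in the queue number. Below is a quick userspace check of the mapping, with the offsets copied from the hunk above, 'base' standing in for FH_MEM_LOWER_BOUND, and an invented function name.

#include <assert.h>
#include <stdio.h>

static unsigned int cbbc_queue_reg(unsigned int base, unsigned int chnl)
{
        if (chnl < 16)
                return base + 0x9D0 + 4 * chnl;
        if (chnl < 20)
                return base + 0xBF0 + 4 * (chnl - 16);
        return base + 0xB20 + 4 * (chnl - 20);   /* queues 20..31 */
}

int main(void)
{
        /* The 16-19 bank sits above the 20-31 bank, so addresses are not
         * monotonic in the queue number -- a plain linear macro cannot
         * express this. */
        assert(cbbc_queue_reg(0, 16) == 0xBF0);
        assert(cbbc_queue_reg(0, 20) == 0xB20);
        printf("queue 19 -> 0x%X, queue 20 -> 0x%X\n",
               cbbc_queue_reg(0, 19), cbbc_queue_reg(0, 20));
        return 0;
}
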
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index b6805f8e9a01..c24a7134a6f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -1244,6 +1244,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
 	struct iwl_rxon_context *tmp, *ctx = NULL;
 	int err;
 	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+	bool reset = false;
 
 	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
 			   viftype, vif->addr);
@@ -1265,6 +1266,13 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
 			tmp->interface_modes | tmp->exclusive_interface_modes;
 
 		if (tmp->vif) {
+			/* On reset we need to add the same interface again */
+			if (tmp->vif == vif) {
+				reset = true;
+				ctx = tmp;
+				break;
+			}
+
 			/* check if this busy context is exclusive */
 			if (tmp->exclusive_interface_modes &
 			    BIT(tmp->vif->type)) {
@@ -1291,7 +1299,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
 	ctx->vif = vif;
 
 	err = iwl_setup_interface(priv, ctx);
-	if (!err)
+	if (!err || reset)
 		goto out;
 
 	ctx->vif = NULL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 75dc20bd965b..3b1069290fa9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -223,12 +223,33 @@
 #define SCD_AIT			(SCD_BASE + 0x0c)
 #define SCD_TXFACT		(SCD_BASE + 0x10)
 #define SCD_ACTIVE		(SCD_BASE + 0x14)
-#define SCD_QUEUE_WRPTR(x)	(SCD_BASE + 0x18 + (x) * 4)
-#define SCD_QUEUE_RDPTR(x)	(SCD_BASE + 0x68 + (x) * 4)
 #define SCD_QUEUECHAIN_SEL	(SCD_BASE + 0xe8)
 #define SCD_AGGR_SEL		(SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK	(SCD_BASE + 0x108)
-#define SCD_QUEUE_STATUS_BITS(x)	(SCD_BASE + 0x10c + (x) * 4)
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x18 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x68 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x10c + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x384 + (chnl - 20) * 4;
+}
 
 /*********************** END TX SCHEDULER *************************************/
 
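The SCD_QUEUE_* and FH_MEM_CBBC_* changes above replace linear "base + 4 * queue"
macros with piecewise lookups because queues 20 and up live in a separate,
non-contiguous register block. A minimal standalone C sketch of the same mapping
(the 0xa02c00 scheduler base is only a placeholder, not taken from the driver;
the 0x18/0x284 offsets are the ones visible in the hunk above):

	#include <assert.h>
	#include <stdio.h>

	#define SCD_BASE 0xa02c00u	/* placeholder periphery base */

	/* Same shape as the new SCD_QUEUE_WRPTR(): queues 0-19 use the
	 * original block at SCD_BASE + 0x18, queues 20-31 a second block
	 * at SCD_BASE + 0x284, so one linear macro can no longer work. */
	static unsigned int scd_queue_wrptr(unsigned int chnl)
	{
		if (chnl < 20)
			return SCD_BASE + 0x18 + chnl * 4;
		assert(chnl < 32);
		return SCD_BASE + 0x284 + (chnl - 20) * 4;
	}

	int main(void)
	{
		printf("queue 19 -> 0x%x\n", scd_queue_wrptr(19));	/* 0xa02c64 */
		printf("queue 20 -> 0x%x\n", scd_queue_wrptr(20));	/* 0xa02e84 */
		return 0;
	}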
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 8b1a7988e176..aa7aea168138 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -374,8 +374,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	if (WARN_ON(!rxb))
 		return;
 
+	rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order;
 	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
+		       rxcb.truesize,
 		       DMA_FROM_DEVICE);
 
 	rxcb._page = rxb->page;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0c81cbaa8088..fdf97886a5e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -260,6 +260,7 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
 
 struct iwl_rx_cmd_buffer {
 	struct page *_page;
+	unsigned int truesize;
 };
 
 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3fa1ecebadfd..2fa879b015b6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -103,7 +103,7 @@ static const u32 cipher_suites[] = {
  * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
  * in the firmware spec
  */
-static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
+static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
 {
 	int ret = -ENOTSUPP;
 
@@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
 		goto done;
 	}
 
-	lbs_set_authtype(priv, sme);
+	ret = lbs_set_authtype(priv, sme);
+	if (ret == -ENOTSUPP) {
+		wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
+		goto done;
+	}
+
 	lbs_set_radio(priv, preamble, 1);
 
 	/* Do the actual association */
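The return-type change above matters because a negative errno does not survive a
round trip through a u8: -ENOTSUPP gets truncated to a small positive value, so
the new "ret == -ENOTSUPP" check in lbs_cfg_connect() could never fire with the
old signature. A standalone illustration (plain C, not the driver code; 524 is
the kernel's internal ENOTSUPP value):

	#include <stdint.h>
	#include <stdio.h>

	#define ENOTSUPP 524

	/* Old shape: returning a negative errno through a u8 truncates it. */
	static uint8_t old_style(void)
	{
		int ret = -ENOTSUPP;
		return ret;		/* -524 becomes (uint8_t)244 */
	}

	/* New shape: an int return preserves the sign and the errno value. */
	static int new_style(void)
	{
		return -ENOTSUPP;
	}

	int main(void)
	{
		printf("u8 return:  %d, detected=%d\n",
		       old_style(), old_style() == -ENOTSUPP);
		printf("int return: %d, detected=%d\n",
		       new_style(), new_style() == -ENOTSUPP);
		return 0;
	}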
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 445ff21772e2..2f218f9a3fd3 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -48,15 +48,15 @@
 #define PCIE_HOST_INT_STATUS_MASK	0xC3C
 #define PCIE_SCRATCH_2_REG		0xC40
 #define PCIE_SCRATCH_3_REG		0xC44
-#define PCIE_SCRATCH_4_REG		0xCC0
-#define PCIE_SCRATCH_5_REG		0xCC4
-#define PCIE_SCRATCH_6_REG		0xCC8
-#define PCIE_SCRATCH_7_REG		0xCCC
-#define PCIE_SCRATCH_8_REG		0xCD0
-#define PCIE_SCRATCH_9_REG		0xCD4
-#define PCIE_SCRATCH_10_REG		0xCD8
-#define PCIE_SCRATCH_11_REG		0xCDC
-#define PCIE_SCRATCH_12_REG		0xCE0
+#define PCIE_SCRATCH_4_REG		0xCD0
+#define PCIE_SCRATCH_5_REG		0xCD4
+#define PCIE_SCRATCH_6_REG		0xCD8
+#define PCIE_SCRATCH_7_REG		0xCDC
+#define PCIE_SCRATCH_8_REG		0xCE0
+#define PCIE_SCRATCH_9_REG		0xCE4
+#define PCIE_SCRATCH_10_REG		0xCE8
+#define PCIE_SCRATCH_11_REG		0xCEC
+#define PCIE_SCRATCH_12_REG		0xCF0
 
 #define CPU_INTR_DNLD_RDY		BIT(0)
 #define CPU_INTR_DOOR_BELL		BIT(1)
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 288b035a3579..cc15fdb36060 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1941,6 +1941,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
 		rtl_deinit_deferred_work(hw);
 		rtlpriv->intf_ops->adapter_stop(hw);
 	}
+	rtlpriv->cfg->ops->disable_interrupt(hw);
 
 	/*deinit rfkill */
 	rtl_deinit_rfkill(hw);
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 41302c7b1ad0..d1afb8e3b2ef 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
 	cancel_work_sync(&wl->irq_work);
 	cancel_work_sync(&wl->tx_work);
 	cancel_work_sync(&wl->filter_work);
+	cancel_delayed_work_sync(&wl->elp_work);
 
 	mutex_lock(&wl->mutex);
 
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index f78694295c39..1b851f650e07 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
 
 	if (wl->irq)
 		free_irq(wl->irq, wl);
-	kfree(wl_sdio);
 	wl1251_free_hw(wl);
+	kfree(wl_sdio);
 
 	sdio_claim_host(func);
 	sdio_release_irq(func);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8644d5372e7f..42cfcd9eb9aa 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -44,6 +44,7 @@
 #include <asm/ropes.h>
 #include <asm/mckinley.h>	/* for proc_mckinley_root */
 #include <asm/runway.h>		/* for proc_runway_root */
+#include <asm/page.h>		/* for PAGE0 */
 #include <asm/pdc.h>		/* for PDC_MODEL_* */
 #include <asm/pdcpat.h>		/* for is_pdc_pat() */
 #include <asm/parisc-device.h>
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 083a49fee56a..165274c064bc 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
 obj-$(CONFIG_PARISC) += setup-bus.o
 obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
 obj-$(CONFIG_PPC) += setup-bus.o
+obj-$(CONFIG_FRV) += setup-bus.o
 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
 obj-$(CONFIG_X86_VISWS) += setup-irq.o
 obj-$(CONFIG_MN10300) += setup-bus.o
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0f150f271c2a..1929c0c63b75 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -200,7 +200,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
 		return PCI_D1;
 	case ACPI_STATE_D2:
 		return PCI_D2;
-	case ACPI_STATE_D3:
+	case ACPI_STATE_D3_HOT:
 		return PCI_D3hot;
 	case ACPI_STATE_D3_COLD:
 		return PCI_D3cold;
@@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		[PCI_D0] = ACPI_STATE_D0,
 		[PCI_D1] = ACPI_STATE_D1,
 		[PCI_D2] = ACPI_STATE_D2,
-		[PCI_D3hot] = ACPI_STATE_D3,
+		[PCI_D3hot] = ACPI_STATE_D3_HOT,
 		[PCI_D3cold] = ACPI_STATE_D3
 	};
 	int error = -EINVAL;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ec3b8cc188af..df6296c5f47b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -908,10 +908,6 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
 	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
 	unsigned selector = 0;
 
-	/* No grouping */
-	if (!ops)
-		return 0;
-
 	mutex_lock(&pinctrl_mutex);
 
 	seq_puts(s, "registered pin groups:\n");
@@ -1225,6 +1221,19 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
 
 #endif
 
+static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
+{
+	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+
+	if (!ops ||
+	    !ops->list_groups ||
+	    !ops->get_group_name ||
+	    !ops->get_group_pins)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * pinctrl_register() - register a pin controller device
  * @pctldesc: descriptor for this pin controller
@@ -1256,6 +1265,14 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 	INIT_LIST_HEAD(&pctldev->gpio_ranges);
 	pctldev->dev = dev;
 
+	/* check core ops for sanity */
+	ret = pinctrl_check_ops(pctldev);
+	if (ret) {
+		pr_err("%s pinctrl ops lacks necessary functions\n",
+			pctldesc->name);
+		goto out_err;
+	}
+
 	/* If we're implementing pinmuxing, check the ops for sanity */
 	if (pctldesc->pmxops) {
 		ret = pinmux_check_ops(pctldev);
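The pinctrl change above moves the NULL checks out of the individual show paths
and into a single validation at registration time: a descriptor without the
mandatory group callbacks is rejected up front, so every later user of pctlops
can call through it unconditionally. A minimal standalone sketch of that
"validate the ops table once at registration" pattern (illustrative names, not
the pinctrl API):

	#include <errno.h>
	#include <stdio.h>

	struct ctrl_ops {
		int (*list_groups)(void);
		const char *(*get_group_name)(unsigned int selector);
		int (*get_group_pins)(unsigned int selector);
	};

	/* Reject registration unless every mandatory callback is wired up,
	 * so later users of the ops table never re-check for NULL. */
	static int check_ops(const struct ctrl_ops *ops)
	{
		if (!ops || !ops->list_groups || !ops->get_group_name ||
		    !ops->get_group_pins)
			return -EINVAL;
		return 0;
	}

	static int register_ctrl(const struct ctrl_ops *ops, const char *name)
	{
		int ret = check_ops(ops);

		if (ret) {
			fprintf(stderr, "%s: ops lacks necessary functions\n", name);
			return ret;
		}
		printf("%s: registered\n", name);
		return 0;
	}

	int main(void)
	{
		struct ctrl_ops incomplete = { 0 };	/* no callbacks at all */

		return register_ctrl(&incomplete, "demo") == -EINVAL ? 0 : 1;
	}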
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index bc8384c6f3eb..639db4d0aa76 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
  */
 #undef START_IN_KERNEL_MODE
 
-#define DRV_VER "0.5.24"
+#define DRV_VER "0.5.26"
 
 /*
  * According to the Atom N270 datasheet,
@@ -83,8 +83,8 @@ static int kernelmode;
 #endif
 
 static unsigned int interval = 10;
-static unsigned int fanon = 63000;
-static unsigned int fanoff = 58000;
+static unsigned int fanon = 60000;
+static unsigned int fanoff = 53000;
 static unsigned int verbose;
 static unsigned int fanstate = ACERHDF_FAN_AUTO;
 static char force_bios[16];
@@ -150,6 +150,8 @@ static const struct bios_settings_t bios_tbl[] = {
 	{"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+	/* LT1005u */
+	{"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
 	/* Acer 1410 */
 	{"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
@@ -161,6 +163,7 @@ static const struct bios_settings_t bios_tbl[] = {
 	{"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
 	/* Acer 1810xx */
 	{"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
@@ -183,29 +186,44 @@ static const struct bios_settings_t bios_tbl[] = {
 	{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
 	/* Acer 531 */
+	{"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
+	{"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+	/* Acer 751 */
+	{"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
+	/* Acer 1825 */
+	{"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+	/* Acer TravelMate 7730 */
+	{"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
 	/* Gateway */
 	{"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
 	{"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
 	{"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
 	{"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
 	{"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+	{"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
 	/* Packard Bell */
 	{"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
 	{"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
 	{"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
 	{"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+	{"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
 	{"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
 	/* pewpew-terminator */
 	{"", "", "", 0, 0, {0, 0} }
 };
@@ -701,15 +719,20 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Peter Feuerer");
 MODULE_DESCRIPTION("Aspire One temperature and fan driver");
 MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
 MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
 MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
 
 module_init(acerhdf_init);
 module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a05fc9c955d8..e6c08ee8d46c 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -212,6 +212,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
+	{ }
 };
 
 static struct calling_interface_buffer *buffer;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f7ba316e0ed6..0ffdb3cde2bb 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1565,7 +1565,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		ips->poll_turbo_status = true;
 
 	if (!ips_get_i915_syms(ips)) {
-		dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n");
+		dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
 		ips->gpu_turbo_enabled = false;
 	} else {
 		dev_dbg(&dev->dev, "graphics turbo enabled\n");
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 0a3594c7e912..bcbad8452a6f 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -78,7 +78,7 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
 
 	input_set_capability(input, EV_KEY, KEY_POWER);
 
-	error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
 			DRIVER_NAME, input);
 	if (error) {
 		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e70dd382a009..046fb1bd8619 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1431,7 +1431,10 @@ void devm_regulator_put(struct regulator *regulator)
 
 	rc = devres_destroy(regulator->dev, devm_regulator_release,
 			    devm_regulator_match, regulator);
-	WARN_ON(rc);
+	if (rc == 0)
+		regulator_put(regulator);
+	else
+		WARN_ON(rc);
 }
 EXPORT_SYMBOL_GPL(devm_regulator_put);
 
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 96579296f04d..17a58c56eebf 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -684,7 +684,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
 		}
 
 		new_val++;
-	} while (desc->min + desc->step + new_val <= desc->max);
+	} while (desc->min + desc->step * new_val <= desc->max);
 
 	new_idx = tmp_idx;
 	new_val = tmp_val;
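The one-character max8997 fix above changes what the loop bound actually means:
"min + step * new_val" is the voltage encoded by selector new_val, while the old
"min + step + new_val" is a constant plus the raw selector, so the loop kept
iterating far past the end of the valid selector range. A worked comparison with
illustrative numbers (650/25/2225 mV are made up for the demo, not taken from the
max8997 tables):

	#include <stdio.h>

	int main(void)
	{
		const int min = 650, step = 25, max = 2225;	/* mV, illustrative */
		int new_val;

		/* Buggy bound: 650 + 25 + new_val <= 2225 stays true until
		 * new_val reaches 1550, i.e. about 1551 iterations. */
		for (new_val = 0; min + step + new_val <= max; new_val++)
			;
		printf("iterations with 'step + new_val': %d\n", new_val);

		/* Fixed bound: 650 + 25 * new_val <= 2225 stops once new_val
		 * reaches 64, the number of selectors the range can express. */
		for (new_val = 0; min + step * new_val <= max; new_val++)
			;
		printf("iterations with 'step * new_val': %d\n", new_val);
		return 0;
	}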
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd188ab72f79..c293d0cdb104 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -902,6 +902,7 @@ read_rtc:
 	}
 	ds1307->nvram->attr.name = "nvram";
 	ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
+	sysfs_bin_attr_init(ds1307->nvram);
 	ds1307->nvram->read = ds1307_nvram_read,
 	ds1307->nvram->write = ds1307_nvram_write,
 	ds1307->nvram->size = chip->nvram_size;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 42f5f829b3ee..029e421baaed 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,12 +360,11 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
 					&mpc5200_rtc_ops, THIS_MODULE);
 	}
 
-	rtc->rtc->uie_unsupported = 1;
-
 	if (IS_ERR(rtc->rtc)) {
 		err = PTR_ERR(rtc->rtc);
 		goto out_free_irq;
 	}
+	rtc->rtc->uie_unsupported = 1;
 
 	return 0;
 
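The reordering above matters because rtc_device_register() reports failure with
an ERR_PTR()-encoded errno rather than NULL, so writing rtc->rtc->uie_unsupported
before the IS_ERR() check dereferences a bogus pointer on the error path. A
standalone sketch of the ERR_PTR/IS_ERR convention (simplified user-space
stand-ins, not the kernel headers):

	#include <errno.h>
	#include <stdio.h>

	/* Simplified versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
	static void *ERR_PTR(long error) { return (void *)error; }
	static long PTR_ERR(const void *ptr) { return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-4095;
	}

	struct rtc_device { int uie_unsupported; };

	static struct rtc_device *rtc_register(int fail)
	{
		static struct rtc_device dev;
		return fail ? ERR_PTR(-ENODEV) : &dev;
	}

	int main(void)
	{
		struct rtc_device *rtc = rtc_register(1);	/* simulate failure */

		/* The returned "pointer" is not NULL here, it is an encoded
		 * errno, so check IS_ERR() before touching the device. */
		if (IS_ERR(rtc)) {
			printf("register failed: %ld\n", PTR_ERR(rtc));
			return 1;
		}
		rtc->uie_unsupported = 1;
		return 0;
	}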
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 120955c66410..8334dadc681d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1672,7 +1672,8 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
 {
 	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
 
-	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
 		card->info.blkt.time_total = 250;
 		card->info.blkt.inter_packet = 5;
 		card->info.blkt.inter_packet_jumbo = 15;
@@ -4540,7 +4541,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 		goto out_offline;
 	}
 	qeth_configure_unitaddr(card, prcd);
-	qeth_configure_blkt_default(card, prcd);
+	if (ddev_offline)
+		qeth_configure_blkt_default(card, prcd);
 	kfree(prcd);
 
 	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b86fab..a3a056a9db67 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
+	if (!dma_dev)
+		dma_dev = shost->shost_gendev.parent;
+
 	shost->dma_dev = dma_dev;
 
 	error = device_add(&shost->shost_gendev);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e002cd466e9a..467dc38246f9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev)
 	ENTER;
 	if (sdev->sdev_target)
 		sata_port = sdev->sdev_target->hostdata;
-	if (sata_port)
+	if (sata_port) {
 		rc = ata_sas_port_init(sata_port->ap);
+		if (rc == 0)
+			rc = ata_sas_sync_probe(sata_port->ap);
+	}
+
 	if (rc)
 		ipr_slave_destroy(sdev);
 
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c12ecd4..45385f531649 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
 
 static bool sci_controller_isr(struct isci_host *ihost)
 {
-	if (sci_controller_completion_queue_has_entries(ihost)) {
+	if (sci_controller_completion_queue_has_entries(ihost))
 		return true;
-	} else {
-		/*
-		 * we have a spurious interrupt it could be that we have already
-		 * emptied the completion queue from a previous interrupt */
-		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
 
-		/*
-		 * There is a race in the hardware that could cause us not to be notified
-		 * of an interrupt completion if we do not take this step. We will mask
-		 * then unmask the interrupts so if there is another interrupt pending
-		 * the clearing of the interrupt source we get the next interrupt message. */
+	/* we have a spurious interrupt it could be that we have already
+	 * emptied the completion queue from a previous interrupt
+	 * FIXME: really!?
+	 */
+	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+	/* There is a race in the hardware that could cause us not to be
+	 * notified of an interrupt completion if we do not take this
+	 * step. We will mask then unmask the interrupts so if there is
+	 * another interrupt pending the clearing of the interrupt
+	 * source we get the next interrupt message.
+	 */
+	spin_lock(&ihost->scic_lock);
+	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
 		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
 		writel(0, &ihost->smu_registers->interrupt_mask);
 	}
+	spin_unlock(&ihost->scic_lock);
 
 	return false;
 }
@@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
 	if (completion_status != SCI_SUCCESS)
 		dev_info(&ihost->pdev->dev,
 			 "controller start timed out, continuing...\n");
-	isci_host_change_state(ihost, isci_ready);
 	clear_bit(IHOST_START_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
 }
@@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
 
 	sas_drain_work(ha);
 
-	dev_dbg(&ihost->pdev->dev,
-		"%s: ihost->status = %d, time = %ld\n",
-		 __func__, isci_host_get_state(ihost), time);
-
 	return 1;
-
 }
 
 /**
@@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
 
 static void sci_controller_enable_interrupts(struct isci_host *ihost)
 {
-	BUG_ON(ihost->smu_registers == NULL);
+	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
 	writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
 void sci_controller_disable_interrupts(struct isci_host *ihost)
 {
-	BUG_ON(ihost->smu_registers == NULL);
+	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
 	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+	readl(&ihost->smu_registers->interrupt_mask); /* flush */
 }
 
 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *
 		&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
 }
 
-static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
 {
 	if (ihost->sm.current_state_id == SCIC_STARTING) {
 		/*
@@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy)
 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
 	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_OSSP_EN:
 	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
 	case SCI_PHY_SUB_FINAL:
 		return true;
@@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy)
 	}
 }
 
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct isci_phy *iphy = &ihost->phys[i];
+		u32 state = iphy->sm.current_state_id;
+
+		/* in apc mode we need to check every phy, in
+		 * mpc mode we only need to check phys that have
+		 * been configured into a port
+		 */
+		if (is_port_config_apc(ihost))
+			/* pass */;
+		else if (!phy_get_non_dummy_port(iphy))
+			continue;
+
+		/* The controller start operation is complete iff:
+		 * - all links have been given an opportunity to start
+		 * - have no indication of a connected device
+		 * - have an indication of a connected device and it has
+		 *   finished the link training process.
+		 */
+		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+			return false;
+	}
+
+	return true;
+}
+
 /**
  * sci_controller_start_next_phy - start phy
  * @scic: controller
@@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
 		return status;
 
 	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
-		bool is_controller_start_complete = true;
-		u32 state;
-		u8 index;
-
-		for (index = 0; index < SCI_MAX_PHYS; index++) {
-			iphy = &ihost->phys[index];
-			state = iphy->sm.current_state_id;
-
-			if (!phy_get_non_dummy_port(iphy))
-				continue;
-
-			/* The controller start operation is complete iff:
-			 * - all links have been given an opportunity to start
-			 * - have no indication of a connected device
-			 * - have an indication of a connected device and it has
-			 *   finished the link training process.
-			 */
-			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
-			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-			    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
-			    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
-				is_controller_start_complete = false;
-				break;
-			}
-		}
-
-		/*
-		 * The controller has successfully finished the start process.
-		 * Inform the SCI Core user and transition to the READY state. */
-		if (is_controller_start_complete == true) {
+		if (is_controller_start_complete(ihost)) {
 			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
 			sci_del_timer(&ihost->phy_timer);
 			ihost->phy_startup_timer_pending = false;
@@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
 	u16 index;
 
 	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller start operation requested in "
-			 "invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost)
 	spin_unlock_irq(&ihost->scic_lock);
 }
 
-static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+static void isci_host_stop_complete(struct isci_host *ihost)
 {
-	isci_host_change_state(ihost, isci_stopped);
 	sci_controller_disable_interrupts(ihost);
 	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
@@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
 		writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+	task->lldd_task = NULL;
+	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+			/* Normal notification (task_done) */
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Normal - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+
+			task->task_done(task);
+		} else {
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Error - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+
+			sas_task_abort(task);
+		}
+	}
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
+
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
+}
 /**
  * isci_host_completion_routine() - This function is the delayed service
  *    routine that calls the sci core library's completion handler. It's
@@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
1082 * @data: This parameter specifies the ISCI host object 1111 * @data: This parameter specifies the ISCI host object
1083 * 1112 *
1084 */ 1113 */
1085static void isci_host_completion_routine(unsigned long data) 1114void isci_host_completion_routine(unsigned long data)
1086{ 1115{
1087 struct isci_host *ihost = (struct isci_host *)data; 1116 struct isci_host *ihost = (struct isci_host *)data;
1088 struct list_head completed_request_list;
1089 struct list_head errored_request_list;
1090 struct list_head *current_position;
1091 struct list_head *next_position;
1092 struct isci_request *request;
1093 struct isci_request *next_request;
1094 struct sas_task *task;
1095 u16 active; 1117 u16 active;
1096 1118
1097 INIT_LIST_HEAD(&completed_request_list);
1098 INIT_LIST_HEAD(&errored_request_list);
1099
1100 spin_lock_irq(&ihost->scic_lock); 1119 spin_lock_irq(&ihost->scic_lock);
1101
1102 sci_controller_completion_handler(ihost); 1120 sci_controller_completion_handler(ihost);
1103
1104 /* Take the lists of completed I/Os from the host. */
1105
1106 list_splice_init(&ihost->requests_to_complete,
1107 &completed_request_list);
1108
1109 /* Take the list of errored I/Os from the host. */
1110 list_splice_init(&ihost->requests_to_errorback,
1111 &errored_request_list);
1112
1113 spin_unlock_irq(&ihost->scic_lock); 1121 spin_unlock_irq(&ihost->scic_lock);
1114 1122
1115 /* Process any completions in the lists. */
1116 list_for_each_safe(current_position, next_position,
1117 &completed_request_list) {
1118
1119 request = list_entry(current_position, struct isci_request,
1120 completed_node);
1121 task = isci_request_access_task(request);
1122
1123 /* Normal notification (task_done) */
1124 dev_dbg(&ihost->pdev->dev,
1125 "%s: Normal - request/task = %p/%p\n",
1126 __func__,
1127 request,
1128 task);
1129
1130 /* Return the task to libsas */
1131 if (task != NULL) {
1132
1133 task->lldd_task = NULL;
1134 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1135
1136 /* If the task is already in the abort path,
1137 * the task_done callback cannot be called.
1138 */
1139 task->task_done(task);
1140 }
1141 }
1142
1143 spin_lock_irq(&ihost->scic_lock);
1144 isci_free_tag(ihost, request->io_tag);
1145 spin_unlock_irq(&ihost->scic_lock);
1146 }
1147 list_for_each_entry_safe(request, next_request, &errored_request_list,
1148 completed_node) {
1149
1150 task = isci_request_access_task(request);
1151
1152 /* Use sas_task_abort */
1153 dev_warn(&ihost->pdev->dev,
1154 "%s: Error - request/task = %p/%p\n",
1155 __func__,
1156 request,
1157 task);
1158
1159 if (task != NULL) {
1160
1161 /* Put the task into the abort path if it's not there
1162 * already.
1163 */
1164 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1165 sas_task_abort(task);
1166
1167 } else {
1168 /* This is a case where the request has completed with a
1169 * status such that it needed further target servicing,
1170 * but the sas_task reference has already been removed
1171 * from the request. Since it was errored, it was not
1172 * being aborted, so there is nothing to do except free
1173 * it.
1174 */
1175
1176 spin_lock_irq(&ihost->scic_lock);
1177 /* Remove the request from the remote device's list
1178 * of pending requests.
1179 */
1180 list_del_init(&request->dev_node);
1181 isci_free_tag(ihost, request->io_tag);
1182 spin_unlock_irq(&ihost->scic_lock);
1183 }
1184 }
1185
1186 /* the coalesence timeout doubles at each encoding step, so 1123 /* the coalesence timeout doubles at each encoding step, so
1187 * update it based on the ilog2 value of the outstanding requests 1124 * update it based on the ilog2 value of the outstanding requests
1188 */ 1125 */
@@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data)
1213static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) 1150static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1214{ 1151{
1215 if (ihost->sm.current_state_id != SCIC_READY) { 1152 if (ihost->sm.current_state_id != SCIC_READY) {
1216 dev_warn(&ihost->pdev->dev, 1153 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1217 "SCIC Controller stop operation requested in " 1154 __func__, ihost->sm.current_state_id);
1218 "invalid state\n");
1219 return SCI_FAILURE_INVALID_STATE; 1155 return SCI_FAILURE_INVALID_STATE;
1220 } 1156 }
1221 1157
@@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
1241 switch (ihost->sm.current_state_id) { 1177 switch (ihost->sm.current_state_id) {
1242 case SCIC_RESET: 1178 case SCIC_RESET:
1243 case SCIC_READY: 1179 case SCIC_READY:
1244 case SCIC_STOPPED: 1180 case SCIC_STOPPING:
1245 case SCIC_FAILED: 1181 case SCIC_FAILED:
1246 /* 1182 /*
1247 * The reset operation is not a graceful cleanup, just 1183 * The reset operation is not a graceful cleanup, just
@@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
1250 sci_change_state(&ihost->sm, SCIC_RESETTING); 1186 sci_change_state(&ihost->sm, SCIC_RESETTING);
1251 return SCI_SUCCESS; 1187 return SCI_SUCCESS;
1252 default: 1188 default:
1253 dev_warn(&ihost->pdev->dev, 1189 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1254 "SCIC Controller reset operation requested in " 1190 __func__, ihost->sm.current_state_id);
1255 "invalid state\n");
1256 return SCI_FAILURE_INVALID_STATE; 1191 return SCI_FAILURE_INVALID_STATE;
1257 } 1192 }
1258} 1193}
1259 1194
1195static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1196{
1197 u32 index;
1198 enum sci_status status;
1199 enum sci_status phy_status;
1200
1201 status = SCI_SUCCESS;
1202
1203 for (index = 0; index < SCI_MAX_PHYS; index++) {
1204 phy_status = sci_phy_stop(&ihost->phys[index]);
1205
1206 if (phy_status != SCI_SUCCESS &&
1207 phy_status != SCI_FAILURE_INVALID_STATE) {
1208 status = SCI_FAILURE;
1209
1210 dev_warn(&ihost->pdev->dev,
1211 "%s: Controller stop operation failed to stop "
1212 "phy %d because of status %d.\n",
1213 __func__,
1214 ihost->phys[index].phy_index, phy_status);
1215 }
1216 }
1217
1218 return status;
1219}
1220
1221
1222/**
1223 * isci_host_deinit - shutdown frame reception and dma
1224 * @ihost: host to take down
1225 *
1226 * This is called in either the driver shutdown or the suspend path. In
1227 * the shutdown case libsas went through port teardown and normal device
1228 * removal (i.e. physical links stayed up to service scsi_device removal
1229 * commands). In the suspend case we disable the hardware without
1230 * notifying libsas of the link down events since we want libsas to
1231 * remember the domain across the suspend/resume cycle
1232 */
1260void isci_host_deinit(struct isci_host *ihost) 1233void isci_host_deinit(struct isci_host *ihost)
1261{ 1234{
1262 int i; 1235 int i;
@@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost)
1265 for (i = 0; i < isci_gpio_count(ihost); i++) 1238 for (i = 0; i < isci_gpio_count(ihost); i++)
1266 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); 1239 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1267 1240
1268 isci_host_change_state(ihost, isci_stopping);
1269 for (i = 0; i < SCI_MAX_PORTS; i++) {
1270 struct isci_port *iport = &ihost->ports[i];
1271 struct isci_remote_device *idev, *d;
1272
1273 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
1274 if (test_bit(IDEV_ALLOCATED, &idev->flags))
1275 isci_remote_device_stop(ihost, idev);
1276 }
1277 }
1278
1279 set_bit(IHOST_STOP_PENDING, &ihost->flags); 1241 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1280 1242
1281 spin_lock_irq(&ihost->scic_lock); 1243 spin_lock_irq(&ihost->scic_lock);
@@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost)
1284 1246
1285 wait_for_stop(ihost); 1247 wait_for_stop(ihost);
1286 1248
1249 /* phy stop is after controller stop to allow port and device to
1250 * go idle before shutting down the phys, but the expectation is
1251 * that i/o has been shut off well before we reach this
1252 * function.
1253 */
1254 sci_controller_stop_phys(ihost);
1255
1287 /* disable sgpio: where the above wait should give time for the 1256 /* disable sgpio: where the above wait should give time for the
1288 * enclosure to sample the gpios going inactive 1257 * enclosure to sample the gpios going inactive
1289 */ 1258 */
1290 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); 1259 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1291 1260
1261 spin_lock_irq(&ihost->scic_lock);
1292 sci_controller_reset(ihost); 1262 sci_controller_reset(ihost);
1263 spin_unlock_irq(&ihost->scic_lock);
1293 1264
1294 /* Cancel any/all outstanding port timers */ 1265 /* Cancel any/all outstanding port timers */
1295 for (i = 0; i < ihost->logical_port_entries; i++) { 1266 for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
1328 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; 1299 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1329} 1300}
1330 1301
1331static void isci_user_parameters_get(struct sci_user_parameters *u)
1332{
1333 int i;
1334
1335 for (i = 0; i < SCI_MAX_PHYS; i++) {
1336 struct sci_phy_user_params *u_phy = &u->phys[i];
1337
1338 u_phy->max_speed_generation = phy_gen;
1339
1340 /* we are not exporting these for now */
1341 u_phy->align_insertion_frequency = 0x7f;
1342 u_phy->in_connection_align_insertion_frequency = 0xff;
1343 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1344 }
1345
1346 u->stp_inactivity_timeout = stp_inactive_to;
1347 u->ssp_inactivity_timeout = ssp_inactive_to;
1348 u->stp_max_occupancy_timeout = stp_max_occ_to;
1349 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1350 u->no_outbound_task_timeout = no_outbound_task_to;
1351 u->max_concurr_spinup = max_concurr_spinup;
1352}
1353
1354static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) 1302static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1355{ 1303{
1356 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1304 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1510 sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1458 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1511} 1459}
1512 1460
1513static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1514{
1515 u32 index;
1516 enum sci_status status;
1517 enum sci_status phy_status;
1518
1519 status = SCI_SUCCESS;
1520
1521 for (index = 0; index < SCI_MAX_PHYS; index++) {
1522 phy_status = sci_phy_stop(&ihost->phys[index]);
1523
1524 if (phy_status != SCI_SUCCESS &&
1525 phy_status != SCI_FAILURE_INVALID_STATE) {
1526 status = SCI_FAILURE;
1527
1528 dev_warn(&ihost->pdev->dev,
1529 "%s: Controller stop operation failed to stop "
1530 "phy %d because of status %d.\n",
1531 __func__,
1532 ihost->phys[index].phy_index, phy_status);
1533 }
1534 }
1535
1536 return status;
1537}
1538
1539static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) 1461static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1540{ 1462{
1541 u32 index; 1463 u32 index;
@@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s
1595{ 1517{
1596 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1518 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1597 1519
1598 /* Stop all of the components for this controller */
1599 sci_controller_stop_phys(ihost);
1600 sci_controller_stop_ports(ihost);
1601 sci_controller_stop_devices(ihost); 1520 sci_controller_stop_devices(ihost);
1521 sci_controller_stop_ports(ihost);
1522
1523 if (!sci_controller_has_remote_devices_stopping(ihost))
1524 isci_host_stop_complete(ihost);
1602} 1525}
1603 1526
1604static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) 1527static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost)
1624 1547
1625 /* The write to the UFQGP clears the UFQPR */ 1548 /* The write to the UFQGP clears the UFQPR */
1626 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 1549 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1550
1551 /* clear all interrupts */
1552 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1627} 1553}
1628 1554
1629static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) 1555static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = {
1655 .enter_state = sci_controller_stopping_state_enter, 1581 .enter_state = sci_controller_stopping_state_enter,
1656 .exit_state = sci_controller_stopping_state_exit, 1582 .exit_state = sci_controller_stopping_state_exit,
1657 }, 1583 },
1658 [SCIC_STOPPED] = {},
1659 [SCIC_FAILED] = {} 1584 [SCIC_FAILED] = {}
1660}; 1585};
1661 1586
1662static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1663{
1664 /* these defaults are overridden by the platform / firmware */
1665 u16 index;
1666
1667 /* Default to APC mode. */
1668 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1669
1670 /* Default to APC mode. */
1671 ihost->oem_parameters.controller.max_concurr_spin_up = 1;
1672
1673 /* Default to no SSC operation. */
1674 ihost->oem_parameters.controller.do_enable_ssc = false;
1675
1676 /* Default to short cables on all phys. */
1677 ihost->oem_parameters.controller.cable_selection_mask = 0;
1678
1679 /* Initialize all of the port parameter information to narrow ports. */
1680 for (index = 0; index < SCI_MAX_PORTS; index++) {
1681 ihost->oem_parameters.ports[index].phy_mask = 0;
1682 }
1683
1684 /* Initialize all of the phy parameter information. */
1685 for (index = 0; index < SCI_MAX_PHYS; index++) {
1686 /* Default to 3G (i.e. Gen 2). */
1687 ihost->user_parameters.phys[index].max_speed_generation =
1688 SCIC_SDS_PARM_GEN2_SPEED;
1689
1690 /* the frequencies cannot be 0 */
1691 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1692 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1693 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1694
1695 /*
1696 * Previous Vitesse based expanders had a arbitration issue that
1697 * is worked around by having the upper 32-bits of SAS address
1698 * with a value greater then the Vitesse company identifier.
1699 * Hence, usage of 0x5FCFFFFF. */
1700 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1701 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1702 }
1703
1704 ihost->user_parameters.stp_inactivity_timeout = 5;
1705 ihost->user_parameters.ssp_inactivity_timeout = 5;
1706 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1707 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1708 ihost->user_parameters.no_outbound_task_timeout = 2;
1709}
1710
1711static void controller_timeout(unsigned long data) 1587static void controller_timeout(unsigned long data)
1712{ 1588{
1713 struct sci_timer *tmr = (struct sci_timer *)data; 1589 struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data)
1724 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); 1600 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1725 else if (sm->current_state_id == SCIC_STOPPING) { 1601 else if (sm->current_state_id == SCIC_STOPPING) {
1726 sci_change_state(sm, SCIC_FAILED); 1602 sci_change_state(sm, SCIC_FAILED);
1727 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); 1603 isci_host_stop_complete(ihost);
1728 } else /* / @todo Now what do we want to do in this case? */ 1604 } else /* / @todo Now what do we want to do in this case? */
1729 dev_err(&ihost->pdev->dev, 1605 dev_err(&ihost->pdev->dev,
1730 "%s: Controller timer fired when controller was not " 1606 "%s: Controller timer fired when controller was not "
@@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1764 1640
1765 sci_init_timer(&ihost->timer, controller_timeout); 1641 sci_init_timer(&ihost->timer, controller_timeout);
1766 1642
1767 /* Initialize the User and OEM parameters to default values. */
1768 sci_controller_set_default_config_parameters(ihost);
1769
1770 return sci_controller_reset(ihost); 1643 return sci_controller_reset(ihost);
1771} 1644}
1772 1645
@@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1846 return 0; 1719 return 0;
1847} 1720}
1848 1721
1849static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1850{
1851 u32 state = ihost->sm.current_state_id;
1852 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1853
1854 if (state == SCIC_RESET ||
1855 state == SCIC_INITIALIZING ||
1856 state == SCIC_INITIALIZED) {
1857 u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
1858 ISCI_ROM_VER_1_0;
1859
1860 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1861 oem_version))
1862 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1863
1864 return SCI_SUCCESS;
1865 }
1866
1867 return SCI_FAILURE_INVALID_STATE;
1868}
1869
1870static u8 max_spin_up(struct isci_host *ihost) 1722static u8 max_spin_up(struct isci_host *ihost)
1871{ 1723{
1872 if (ihost->user_parameters.max_concurr_spinup) 1724 if (ihost->user_parameters.max_concurr_spinup)
@@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data)
1914 ihost->power_control.phys_granted_power++; 1766 ihost->power_control.phys_granted_power++;
1915 sci_phy_consume_power_handler(iphy); 1767 sci_phy_consume_power_handler(iphy);
1916 1768
1917 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { 1769 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1918 u8 j; 1770 u8 j;
1919 1771
1920 for (j = 0; j < SCI_MAX_PHYS; j++) { 1772 for (j = 0; j < SCI_MAX_PHYS; j++) {
@@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1988 sizeof(current_phy->frame_rcvd.iaf.sas_addr)); 1840 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1989 1841
1990 if (current_phy->sm.current_state_id == SCI_PHY_READY && 1842 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1991 current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS && 1843 current_phy->protocol == SAS_PROTOCOL_SSP &&
1992 other == 0) { 1844 other == 0) {
1993 sci_phy_consume_power_handler(iphy); 1845 sci_phy_consume_power_handler(iphy);
1994 break; 1846 break;
@@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2279 unsigned long i, state, val; 2131 unsigned long i, state, val;
2280 2132
2281 if (ihost->sm.current_state_id != SCIC_RESET) { 2133 if (ihost->sm.current_state_id != SCIC_RESET) {
2282 dev_warn(&ihost->pdev->dev, 2134 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2283 "SCIC Controller initialize operation requested " 2135 __func__, ihost->sm.current_state_id);
2284 "in invalid state\n");
2285 return SCI_FAILURE_INVALID_STATE; 2136 return SCI_FAILURE_INVALID_STATE;
2286 } 2137 }
2287 2138
@@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2384 return result; 2235 return result;
2385} 2236}
2386 2237
2387static enum sci_status sci_user_parameters_set(struct isci_host *ihost, 2238static int sci_controller_dma_alloc(struct isci_host *ihost)
2388 struct sci_user_parameters *sci_parms)
2389{
2390 u32 state = ihost->sm.current_state_id;
2391
2392 if (state == SCIC_RESET ||
2393 state == SCIC_INITIALIZING ||
2394 state == SCIC_INITIALIZED) {
2395 u16 index;
2396
2397 /*
2398 * Validate the user parameters. If they are not legal, then
2399 * return a failure.
2400 */
2401 for (index = 0; index < SCI_MAX_PHYS; index++) {
2402 struct sci_phy_user_params *user_phy;
2403
2404 user_phy = &sci_parms->phys[index];
2405
2406 if (!((user_phy->max_speed_generation <=
2407 SCIC_SDS_PARM_MAX_SPEED) &&
2408 (user_phy->max_speed_generation >
2409 SCIC_SDS_PARM_NO_SPEED)))
2410 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2411
2412 if (user_phy->in_connection_align_insertion_frequency <
2413 3)
2414 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2415
2416 if ((user_phy->in_connection_align_insertion_frequency <
2417 3) ||
2418 (user_phy->align_insertion_frequency == 0) ||
2419 (user_phy->
2420 notify_enable_spin_up_insertion_frequency ==
2421 0))
2422 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2423 }
2424
2425 if ((sci_parms->stp_inactivity_timeout == 0) ||
2426 (sci_parms->ssp_inactivity_timeout == 0) ||
2427 (sci_parms->stp_max_occupancy_timeout == 0) ||
2428 (sci_parms->ssp_max_occupancy_timeout == 0) ||
2429 (sci_parms->no_outbound_task_timeout == 0))
2430 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2431
2432 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
2433
2434 return SCI_SUCCESS;
2435 }
2436
2437 return SCI_FAILURE_INVALID_STATE;
2438}
2439
2440static int sci_controller_mem_init(struct isci_host *ihost)
2441{ 2239{
2442 struct device *dev = &ihost->pdev->dev; 2240 struct device *dev = &ihost->pdev->dev;
2443 dma_addr_t dma;
2444 size_t size; 2241 size_t size;
2445 int err; 2242 int i;
2243
2244 /* detect re-initialization */
2245 if (ihost->completion_queue)
2246 return 0;
2446 2247
2447 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); 2248 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2448 ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2249 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2250 GFP_KERNEL);
2449 if (!ihost->completion_queue) 2251 if (!ihost->completion_queue)
2450 return -ENOMEM; 2252 return -ENOMEM;
2451 2253
2452 writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
2453 writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
2454
2455 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); 2254 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2456 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, 2255 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2457 GFP_KERNEL); 2256 GFP_KERNEL);
2257
2458 if (!ihost->remote_node_context_table) 2258 if (!ihost->remote_node_context_table)
2459 return -ENOMEM; 2259 return -ENOMEM;
2460 2260
2461 writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
2462 writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
2463
2464 size = ihost->task_context_entries * sizeof(struct scu_task_context), 2261 size = ihost->task_context_entries * sizeof(struct scu_task_context),
2465 ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2262 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2263 GFP_KERNEL);
2466 if (!ihost->task_context_table) 2264 if (!ihost->task_context_table)
2467 return -ENOMEM; 2265 return -ENOMEM;
2468 2266
2469 ihost->task_context_dma = dma; 2267 size = SCI_UFI_TOTAL_SIZE;
2470 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); 2268 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2471 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); 2269 if (!ihost->ufi_buf)
2270 return -ENOMEM;
2271
2272 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2273 struct isci_request *ireq;
2274 dma_addr_t dma;
2275
2276 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2277 if (!ireq)
2278 return -ENOMEM;
2279
2280 ireq->tc = &ihost->task_context_table[i];
2281 ireq->owning_controller = ihost;
2282 ireq->request_daddr = dma;
2283 ireq->isci_host = ihost;
2284 ihost->reqs[i] = ireq;
2285 }
2286
2287 return 0;
2288}
2289
2290static int sci_controller_mem_init(struct isci_host *ihost)
2291{
2292 int err = sci_controller_dma_alloc(ihost);
2472 2293
2473 err = sci_unsolicited_frame_control_construct(ihost);
2474 if (err) 2294 if (err)
2475 return err; 2295 return err;
2476 2296
2297 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2298 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2299
2300 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2301 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2302
2303 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2304 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2305
2306 sci_unsolicited_frame_control_construct(ihost);
2307
2477 /* 2308 /*
2478 * Inform the silicon as to the location of the UF headers and 2309 * Inform the silicon as to the location of the UF headers and
2479 * address table. 2310 * address table.
@@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost)
2491 return 0; 2322 return 0;
2492} 2323}
2493 2324
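
The split above is what makes re-initialization safe: sci_controller_dma_alloc() returns early once ihost->completion_queue is populated, while sci_controller_mem_init() re-programs the completion-queue, remote-node-context and task-context base registers from the cached cq_dma/rnc_dma/tc_dma handles on every call, so a later re-initialization (for example on resume) can reuse the buffers allocated the first time through. A stand-alone sketch of that alloc-once / program-on-every-init shape (all names below are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct toy_ctrl {
        unsigned *queue;        /* stands in for the coherent completion queue */
        unsigned long dma;      /* stands in for the cached dma_addr_t handle  */
        unsigned long base_reg; /* stands in for the hardware base register    */
};

static int toy_dma_alloc(struct toy_ctrl *c)
{
        if (c->queue)           /* re-initialization: keep the earlier buffer */
                return 0;
        c->queue = calloc(1024, sizeof(*c->queue));
        if (!c->queue)
                return -1;
        c->dma = (unsigned long)c->queue;       /* pretend bus address */
        return 0;
}

static int toy_mem_init(struct toy_ctrl *c)
{
        int err = toy_dma_alloc(c);

        if (err)
                return err;
        c->base_reg = c->dma;   /* (re)program hardware from the cached handle */
        return 0;
}

int main(void)
{
        struct toy_ctrl c = { 0 };

        toy_mem_init(&c);       /* first init: allocates, then programs */
        toy_mem_init(&c);       /* later re-init: programs only         */
        printf("base register = %#lx\n", c.base_reg);
        free(c.queue);
        return 0;
}

Running toy_mem_init() a second time leaves the buffer alone and only rewrites the stand-in base register, which is the behaviour the driver needs when the same hardware is brought up again.
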
2325/**
2326 * isci_host_init - (re-)initialize hardware and internal (private) state
2327 * @ihost: host to init
2328 *
2329 * Any public-facing objects (like asd_sas_port and asd_sas_phys), or
2330 * one-time initialization objects like locks and waitqueues, are
2331 * not touched (they are initialized in isci_host_alloc).
2332 */
2494int isci_host_init(struct isci_host *ihost) 2333int isci_host_init(struct isci_host *ihost)
2495{ 2334{
2496 int err = 0, i; 2335 int i, err;
2497 enum sci_status status; 2336 enum sci_status status;
2498 struct sci_user_parameters sci_user_params;
2499 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
2500
2501 spin_lock_init(&ihost->state_lock);
2502 spin_lock_init(&ihost->scic_lock);
2503 init_waitqueue_head(&ihost->eventq);
2504
2505 isci_host_change_state(ihost, isci_starting);
2506
2507 status = sci_controller_construct(ihost, scu_base(ihost),
2508 smu_base(ihost));
2509 2337
2338 spin_lock_irq(&ihost->scic_lock);
2339 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2340 spin_unlock_irq(&ihost->scic_lock);
2510 if (status != SCI_SUCCESS) { 2341 if (status != SCI_SUCCESS) {
2511 dev_err(&ihost->pdev->dev, 2342 dev_err(&ihost->pdev->dev,
2512 "%s: sci_controller_construct failed - status = %x\n", 2343 "%s: sci_controller_construct failed - status = %x\n",
@@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost)
2515 return -ENODEV; 2346 return -ENODEV;
2516 } 2347 }
2517 2348
2518 ihost->sas_ha.dev = &ihost->pdev->dev;
2519 ihost->sas_ha.lldd_ha = ihost;
2520
2521 /*
2522 * grab initial values stored in the controller object for OEM and USER
2523 * parameters
2524 */
2525 isci_user_parameters_get(&sci_user_params);
2526 status = sci_user_parameters_set(ihost, &sci_user_params);
2527 if (status != SCI_SUCCESS) {
2528 dev_warn(&ihost->pdev->dev,
2529 "%s: sci_user_parameters_set failed\n",
2530 __func__);
2531 return -ENODEV;
2532 }
2533
2534 /* grab any OEM parameters specified in orom */
2535 if (pci_info->orom) {
2536 status = isci_parse_oem_parameters(&ihost->oem_parameters,
2537 pci_info->orom,
2538 ihost->id);
2539 if (status != SCI_SUCCESS) {
2540 dev_warn(&ihost->pdev->dev,
2541 "parsing firmware oem parameters failed\n");
2542 return -EINVAL;
2543 }
2544 }
2545
2546 status = sci_oem_parameters_set(ihost);
2547 if (status != SCI_SUCCESS) {
2548 dev_warn(&ihost->pdev->dev,
2549 "%s: sci_oem_parameters_set failed\n",
2550 __func__);
2551 return -ENODEV;
2552 }
2553
2554 tasklet_init(&ihost->completion_tasklet,
2555 isci_host_completion_routine, (unsigned long)ihost);
2556
2557 INIT_LIST_HEAD(&ihost->requests_to_complete);
2558 INIT_LIST_HEAD(&ihost->requests_to_errorback);
2559
2560 spin_lock_irq(&ihost->scic_lock); 2349 spin_lock_irq(&ihost->scic_lock);
2561 status = sci_controller_initialize(ihost); 2350 status = sci_controller_initialize(ihost);
2562 spin_unlock_irq(&ihost->scic_lock); 2351 spin_unlock_irq(&ihost->scic_lock);
@@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost)
2572 if (err) 2361 if (err)
2573 return err; 2362 return err;
2574 2363
2575 for (i = 0; i < SCI_MAX_PORTS; i++)
2576 isci_port_init(&ihost->ports[i], ihost, i);
2577
2578 for (i = 0; i < SCI_MAX_PHYS; i++)
2579 isci_phy_init(&ihost->phys[i], ihost, i);
2580
2581 /* enable sgpio */ 2364 /* enable sgpio */
2582 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); 2365 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2583 for (i = 0; i < isci_gpio_count(ihost); i++) 2366 for (i = 0; i < isci_gpio_count(ihost); i++)
2584 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); 2367 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2585 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); 2368 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2586 2369
2587 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2588 struct isci_remote_device *idev = &ihost->devices[i];
2589
2590 INIT_LIST_HEAD(&idev->reqs_in_process);
2591 INIT_LIST_HEAD(&idev->node);
2592 }
2593
2594 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2595 struct isci_request *ireq;
2596 dma_addr_t dma;
2597
2598 ireq = dmam_alloc_coherent(&ihost->pdev->dev,
2599 sizeof(struct isci_request), &dma,
2600 GFP_KERNEL);
2601 if (!ireq)
2602 return -ENOMEM;
2603
2604 ireq->tc = &ihost->task_context_table[i];
2605 ireq->owning_controller = ihost;
2606 spin_lock_init(&ireq->state_lock);
2607 ireq->request_daddr = dma;
2608 ireq->isci_host = ihost;
2609 ihost->reqs[i] = ireq;
2610 }
2611
2612 return 0; 2370 return 0;
2613} 2371}
2614 2372
@@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2654 } 2412 }
2655} 2413}
2656 2414
2657static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) 2415bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2658{ 2416{
2659 u32 index; 2417 u32 index;
2660 2418
@@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
2680 } 2438 }
2681 2439
2682 if (!sci_controller_has_remote_devices_stopping(ihost)) 2440 if (!sci_controller_has_remote_devices_stopping(ihost))
2683 sci_change_state(&ihost->sm, SCIC_STOPPED); 2441 isci_host_stop_complete(ihost);
2684} 2442}
2685 2443
2686void sci_controller_post_request(struct isci_host *ihost, u32 request) 2444void sci_controller_post_request(struct isci_host *ihost, u32 request)
@@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
2842 enum sci_status status; 2600 enum sci_status status;
2843 2601
2844 if (ihost->sm.current_state_id != SCIC_READY) { 2602 if (ihost->sm.current_state_id != SCIC_READY) {
2845 dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); 2603 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2604 __func__, ihost->sm.current_state_id);
2846 return SCI_FAILURE_INVALID_STATE; 2605 return SCI_FAILURE_INVALID_STATE;
2847 } 2606 }
2848 2607
@@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2866 enum sci_status status; 2625 enum sci_status status;
2867 2626
2868 if (ihost->sm.current_state_id != SCIC_READY) { 2627 if (ihost->sm.current_state_id != SCIC_READY) {
2869 dev_warn(&ihost->pdev->dev, 2628 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2870 "invalid state to terminate request\n"); 2629 __func__, ihost->sm.current_state_id);
2871 return SCI_FAILURE_INVALID_STATE; 2630 return SCI_FAILURE_INVALID_STATE;
2872 } 2631 }
2873
2874 status = sci_io_request_terminate(ireq); 2632 status = sci_io_request_terminate(ireq);
2875 if (status != SCI_SUCCESS)
2876 return status;
2877 2633
2878 /* 2634 dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2879 * Utilize the original post context command and or in the POST_TC_ABORT 2635 __func__, status, ireq, ireq->flags);
2880 * request sub-type. 2636
2881 */ 2637 if ((status == SCI_SUCCESS) &&
2882 sci_controller_post_request(ihost, 2638 !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2883 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); 2639 !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
2884 return SCI_SUCCESS; 2640 /* Utilize the original post context command and or in the
2641 * POST_TC_ABORT request sub-type.
2642 */
2643 sci_controller_post_request(
2644 ihost, ireq->post_context |
2645 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2646 }
2647 return status;
2885} 2648}
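
The reworked termination path above posts the TC_ABORT at most once per request: sci_io_request_terminate() must return SCI_SUCCESS, the post is skipped while IREQ_PENDING_ABORT is set, and test_and_set_bit(IREQ_TC_ABORT_POSTED, ...) keeps racing callers from posting the abort twice. A reduced, single-threaded model of that guard (names invented for the example; the real helpers are the kernel's atomic bitops):

#include <stdbool.h>
#include <stdio.h>

#define PENDING_ABORT   0       /* stands in for IREQ_PENDING_ABORT   */
#define ABORT_POSTED    1       /* stands in for IREQ_TC_ABORT_POSTED */

/* Single-threaded stand-ins for the kernel's atomic bitops. */
static bool test_bit_(int nr, const unsigned long *flags)
{
        return (*flags >> nr) & 1;
}

static bool test_and_set_bit_(int nr, unsigned long *flags)
{
        bool was_set = test_bit_(nr, flags);

        *flags |= 1UL << nr;
        return was_set;
}

static void maybe_post_abort(unsigned long *flags)
{
        if (!test_bit_(PENDING_ABORT, flags) &&
            !test_and_set_bit_(ABORT_POSTED, flags))
                printf("TC_ABORT posted\n");    /* taken exactly once */
        else
                printf("abort already in flight, nothing posted\n");
}

int main(void)
{
        unsigned long flags = 0;

        maybe_post_abort(&flags);       /* posts the abort              */
        maybe_post_abort(&flags);       /* skipped: ABORT_POSTED is set */
        return 0;
}
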
2886 2649
2887/** 2650/**
@@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2915 clear_bit(IREQ_ACTIVE, &ireq->flags); 2678 clear_bit(IREQ_ACTIVE, &ireq->flags);
2916 return SCI_SUCCESS; 2679 return SCI_SUCCESS;
2917 default: 2680 default:
2918 dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); 2681 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2682 __func__, ihost->sm.current_state_id);
2919 return SCI_FAILURE_INVALID_STATE; 2683 return SCI_FAILURE_INVALID_STATE;
2920 } 2684 }
2921 2685
@@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2926 struct isci_host *ihost = ireq->owning_controller; 2690 struct isci_host *ihost = ireq->owning_controller;
2927 2691
2928 if (ihost->sm.current_state_id != SCIC_READY) { 2692 if (ihost->sm.current_state_id != SCIC_READY) {
2929 dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); 2693 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2694 __func__, ihost->sm.current_state_id);
2930 return SCI_FAILURE_INVALID_STATE; 2695 return SCI_FAILURE_INVALID_STATE;
2931 } 2696 }
2932 2697
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69d1069..9ab58e0540e7 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -55,6 +55,7 @@
55#ifndef _SCI_HOST_H_ 55#ifndef _SCI_HOST_H_
56#define _SCI_HOST_H_ 56#define _SCI_HOST_H_
57 57
58#include <scsi/sas_ata.h>
58#include "remote_device.h" 59#include "remote_device.h"
59#include "phy.h" 60#include "phy.h"
60#include "isci.h" 61#include "isci.h"
@@ -108,6 +109,8 @@ struct sci_port_configuration_agent;
108typedef void (*port_config_fn)(struct isci_host *, 109typedef void (*port_config_fn)(struct isci_host *,
109 struct sci_port_configuration_agent *, 110 struct sci_port_configuration_agent *,
110 struct isci_port *, struct isci_phy *); 111 struct isci_port *, struct isci_phy *);
112bool is_port_config_apc(struct isci_host *ihost);
113bool is_controller_start_complete(struct isci_host *ihost);
111 114
112struct sci_port_configuration_agent { 115struct sci_port_configuration_agent {
113 u16 phy_configured_mask; 116 u16 phy_configured_mask;
@@ -157,13 +160,17 @@ struct isci_host {
157 struct sci_power_control power_control; 160 struct sci_power_control power_control;
158 u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; 161 u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
159 struct scu_task_context *task_context_table; 162 struct scu_task_context *task_context_table;
160 dma_addr_t task_context_dma; 163 dma_addr_t tc_dma;
161 union scu_remote_node_context *remote_node_context_table; 164 union scu_remote_node_context *remote_node_context_table;
165 dma_addr_t rnc_dma;
162 u32 *completion_queue; 166 u32 *completion_queue;
167 dma_addr_t cq_dma;
163 u32 completion_queue_get; 168 u32 completion_queue_get;
164 u32 logical_port_entries; 169 u32 logical_port_entries;
165 u32 remote_node_entries; 170 u32 remote_node_entries;
166 u32 task_context_entries; 171 u32 task_context_entries;
172 void *ufi_buf;
173 dma_addr_t ufi_dma;
167 struct sci_unsolicited_frame_control uf_control; 174 struct sci_unsolicited_frame_control uf_control;
168 175
169 /* phy startup */ 176 /* phy startup */
@@ -190,17 +197,13 @@ struct isci_host {
190 struct asd_sas_port sas_ports[SCI_MAX_PORTS]; 197 struct asd_sas_port sas_ports[SCI_MAX_PORTS];
191 struct sas_ha_struct sas_ha; 198 struct sas_ha_struct sas_ha;
192 199
193 spinlock_t state_lock;
194 struct pci_dev *pdev; 200 struct pci_dev *pdev;
195 enum isci_status status;
196 #define IHOST_START_PENDING 0 201 #define IHOST_START_PENDING 0
197 #define IHOST_STOP_PENDING 1 202 #define IHOST_STOP_PENDING 1
203 #define IHOST_IRQ_ENABLED 2
198 unsigned long flags; 204 unsigned long flags;
199 wait_queue_head_t eventq; 205 wait_queue_head_t eventq;
200 struct Scsi_Host *shost;
201 struct tasklet_struct completion_tasklet; 206 struct tasklet_struct completion_tasklet;
202 struct list_head requests_to_complete;
203 struct list_head requests_to_errorback;
204 spinlock_t scic_lock; 207 spinlock_t scic_lock;
205 struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; 208 struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
206 struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; 209 struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
@@ -274,13 +277,6 @@ enum sci_controller_states {
274 SCIC_STOPPING, 277 SCIC_STOPPING,
275 278
276 /** 279 /**
277 * This state indicates that the controller has successfully been stopped.
278 * In this state no new IO operations are permitted.
279 * This state is entered from the STOPPING state.
280 */
281 SCIC_STOPPED,
282
283 /**
284 * This state indicates that the controller could not successfully be 280 * This state indicates that the controller could not successfully be
285 * initialized. In this state no new IO operations are permitted. 281 * initialized. In this state no new IO operations are permitted.
286 * This state is entered from the INITIALIZING state. 282 * This state is entered from the INITIALIZING state.
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
309 return pci_get_drvdata(pdev); 305 return pci_get_drvdata(pdev);
310} 306}
311 307
308static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
309{
310 return ihost->sas_ha.core.shost;
311}
312
312#define for_each_isci_host(id, ihost, pdev) \ 313#define for_each_isci_host(id, ihost, pdev) \
313 for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ 314 for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
314 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ 315 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
315 ihost = to_pci_info(pdev)->hosts[++id]) 316 ihost = to_pci_info(pdev)->hosts[++id])
316 317
317static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
318{
319 return isci_host->status;
320}
321
322static inline void isci_host_change_state(struct isci_host *isci_host,
323 enum isci_status status)
324{
325 unsigned long flags;
326
327 dev_dbg(&isci_host->pdev->dev,
328 "%s: isci_host = %p, state = 0x%x",
329 __func__,
330 isci_host,
331 status);
332 spin_lock_irqsave(&isci_host->state_lock, flags);
333 isci_host->status = status;
334 spin_unlock_irqrestore(&isci_host->state_lock, flags);
335
336}
337
338static inline void wait_for_start(struct isci_host *ihost) 318static inline void wait_for_start(struct isci_host *ihost)
339{ 319{
340 wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); 320 wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
@@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
360 return dev->port->ha->lldd_ha; 340 return dev->port->ha->lldd_ha;
361} 341}
362 342
343static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
344{
345 return dev_to_ihost(idev->domain_dev);
346}
347
363/* we always use protocol engine group zero */ 348/* we always use protocol engine group zero */
364#define ISCI_PEG 0 349#define ISCI_PEG 0
365 350
@@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
378{ 363{
379 struct domain_device *dev = idev->domain_dev; 364 struct domain_device *dev = idev->domain_dev;
380 365
381 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && 366 if (dev_is_sata(dev) && dev->parent)
382 !idev->is_direct_attached)
383 return SCU_STP_REMOTE_NODE_COUNT; 367 return SCU_STP_REMOTE_NODE_COUNT;
384 return SCU_SSP_REMOTE_NODE_COUNT; 368 return SCU_SSP_REMOTE_NODE_COUNT;
385} 369}
@@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context(
475 struct isci_remote_device *idev, 459 struct isci_remote_device *idev,
476 u16 node_id); 460 u16 node_id);
477 461
478struct isci_request *sci_request_by_tag(struct isci_host *ihost, 462struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
479 u16 io_tag); 463void sci_controller_power_control_queue_insert(struct isci_host *ihost,
480 464 struct isci_phy *iphy);
481void sci_controller_power_control_queue_insert( 465void sci_controller_power_control_queue_remove(struct isci_host *ihost,
482 struct isci_host *ihost, 466 struct isci_phy *iphy);
483 struct isci_phy *iphy); 467void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
484 468 struct isci_phy *iphy);
485void sci_controller_power_control_queue_remove( 469void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
486 struct isci_host *ihost, 470 struct isci_phy *iphy);
487 struct isci_phy *iphy); 471void sci_controller_remote_device_stopped(struct isci_host *ihost,
488 472 struct isci_remote_device *idev);
489void sci_controller_link_up(
490 struct isci_host *ihost,
491 struct isci_port *iport,
492 struct isci_phy *iphy);
493
494void sci_controller_link_down(
495 struct isci_host *ihost,
496 struct isci_port *iport,
497 struct isci_phy *iphy);
498
499void sci_controller_remote_device_stopped(
500 struct isci_host *ihost,
501 struct isci_remote_device *idev);
502
503void sci_controller_copy_task_context(
504 struct isci_host *ihost,
505 struct isci_request *ireq);
506
507void sci_controller_register_setup(struct isci_host *ihost);
508 473
509enum sci_status sci_controller_continue_io(struct isci_request *ireq); 474enum sci_status sci_controller_continue_io(struct isci_request *ireq);
510int isci_host_scan_finished(struct Scsi_Host *, unsigned long); 475int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
@@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *);
512u16 isci_alloc_tag(struct isci_host *ihost); 477u16 isci_alloc_tag(struct isci_host *ihost);
513enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); 478enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
514void isci_tci_free(struct isci_host *ihost, u16 tci); 479void isci_tci_free(struct isci_host *ihost, u16 tci);
480void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
515 481
516int isci_host_init(struct isci_host *); 482int isci_host_init(struct isci_host *);
517 483void isci_host_completion_routine(unsigned long data);
518void isci_host_init_controller_names( 484void isci_host_deinit(struct isci_host *);
519 struct isci_host *isci_host, 485void sci_controller_disable_interrupts(struct isci_host *ihost);
520 unsigned int controller_idx); 486bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
521 487void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
522void isci_host_deinit(
523 struct isci_host *);
524
525void isci_host_port_link_up(
526 struct isci_host *,
527 struct isci_port *,
528 struct isci_phy *);
529int isci_host_dev_found(struct domain_device *);
530
531void isci_host_remote_device_start_complete(
532 struct isci_host *,
533 struct isci_remote_device *,
534 enum sci_status);
535
536void sci_controller_disable_interrupts(
537 struct isci_host *ihost);
538 488
539enum sci_status sci_controller_start_io( 489enum sci_status sci_controller_start_io(
540 struct isci_host *ihost, 490 struct isci_host *ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 5137db5a5d85..47e28b555029 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host)
271 if (!isci_host) 271 if (!isci_host)
272 return; 272 return;
273 273
274 shost = isci_host->shost;
275
276 sas_unregister_ha(&isci_host->sas_ha); 274 sas_unregister_ha(&isci_host->sas_ha);
277 275
278 sas_remove_host(isci_host->shost); 276 shost = to_shost(isci_host);
279 scsi_remove_host(isci_host->shost); 277 sas_remove_host(shost);
280 scsi_host_put(isci_host->shost); 278 scsi_remove_host(shost);
279 scsi_host_put(shost);
281} 280}
282 281
283static int __devinit isci_pci_init(struct pci_dev *pdev) 282static int __devinit isci_pci_init(struct pci_dev *pdev)
@@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
397 return err; 396 return err;
398} 397}
399 398
399static void isci_user_parameters_get(struct sci_user_parameters *u)
400{
401 int i;
402
403 for (i = 0; i < SCI_MAX_PHYS; i++) {
404 struct sci_phy_user_params *u_phy = &u->phys[i];
405
406 u_phy->max_speed_generation = phy_gen;
407
408 /* we are not exporting these for now */
409 u_phy->align_insertion_frequency = 0x7f;
410 u_phy->in_connection_align_insertion_frequency = 0xff;
411 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
412 }
413
414 u->stp_inactivity_timeout = stp_inactive_to;
415 u->ssp_inactivity_timeout = ssp_inactive_to;
416 u->stp_max_occupancy_timeout = stp_max_occ_to;
417 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
418 u->no_outbound_task_timeout = no_outbound_task_to;
419 u->max_concurr_spinup = max_concurr_spinup;
420}
421
422static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
423 struct sci_user_parameters *sci_parms)
424{
425 u16 index;
426
427 /*
428 * Validate the user parameters. If they are not legal, then
429 * return a failure.
430 */
431 for (index = 0; index < SCI_MAX_PHYS; index++) {
432 struct sci_phy_user_params *u;
433
434 u = &sci_parms->phys[index];
435
436 if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
437 (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
438 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
439
440 if (u->in_connection_align_insertion_frequency < 3)
441 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
442
443 if ((u->in_connection_align_insertion_frequency < 3) ||
444 (u->align_insertion_frequency == 0) ||
445 (u->notify_enable_spin_up_insertion_frequency == 0))
446 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
447 }
448
449 if ((sci_parms->stp_inactivity_timeout == 0) ||
450 (sci_parms->ssp_inactivity_timeout == 0) ||
451 (sci_parms->stp_max_occupancy_timeout == 0) ||
452 (sci_parms->ssp_max_occupancy_timeout == 0) ||
453 (sci_parms->no_outbound_task_timeout == 0))
454 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
455
456 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
457
458 return SCI_SUCCESS;
459}
460
461static void sci_oem_defaults(struct isci_host *ihost)
462{
463 /* these defaults are overridden by the platform / firmware */
464 struct sci_user_parameters *user = &ihost->user_parameters;
465 struct sci_oem_params *oem = &ihost->oem_parameters;
466 int i;
467
468 /* Default to APC mode. */
469 oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
470
471 /* Default to at most one concurrent phy spin-up. */
472 oem->controller.max_concurr_spin_up = 1;
473
474 /* Default to no SSC operation. */
475 oem->controller.do_enable_ssc = false;
476
477 /* Default to short cables on all phys. */
478 oem->controller.cable_selection_mask = 0;
479
480 /* Initialize all of the port parameter information to narrow ports. */
481 for (i = 0; i < SCI_MAX_PORTS; i++)
482 oem->ports[i].phy_mask = 0;
483
484 /* Initialize all of the phy parameter information. */
485 for (i = 0; i < SCI_MAX_PHYS; i++) {
486 /* Default to 3G (i.e. Gen 2). */
487 user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
488
489 /* the frequencies cannot be 0 */
490 user->phys[i].align_insertion_frequency = 0x7f;
491 user->phys[i].in_connection_align_insertion_frequency = 0xff;
492 user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
493
494 /* Previous Vitesse-based expanders had an arbitration issue that
495 * is worked around by setting the upper 32 bits of the SAS address
496 * to a value greater than the Vitesse company identifier.
497 * Hence the use of 0x5FCFFFFF.
498 */
499 oem->phys[i].sas_address.low = 0x1 + ihost->id;
500 oem->phys[i].sas_address.high = 0x5FCFFFFF;
501 }
502
503 user->stp_inactivity_timeout = 5;
504 user->ssp_inactivity_timeout = 5;
505 user->stp_max_occupancy_timeout = 5;
506 user->ssp_max_occupancy_timeout = 20;
507 user->no_outbound_task_timeout = 2;
508}
509
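
sci_oem_defaults() above gives every phy a fallback SAS address: the high 32 bits are fixed at 0x5FCFFFFF so the address sorts above the Vitesse company identifier, and the low 32 bits carry the controller index so controllers behind the same PCI device do not collide. A small illustration of how the two words combine into the 64-bit default address (the loop bound is illustrative; the driver is bounded by SCI_MAX_CONTROLLERS):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t high = 0x5FCFFFFF;     /* above the Vitesse company identifier */
        int id;

        for (id = 0; id < 2; id++) {            /* bound illustrative only       */
                uint32_t low = 0x1 + id;        /* controller index keeps it unique */
                uint64_t addr = ((uint64_t)high << 32) | low;

                printf("controller %d default SAS address: %016llx\n",
                       id, (unsigned long long)addr);
        }
        return 0;
}
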
400static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) 510static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
401{ 511{
402 struct isci_host *isci_host; 512 struct isci_orom *orom = to_pci_info(pdev)->orom;
513 struct sci_user_parameters sci_user_params;
514 u8 oem_version = ISCI_ROM_VER_1_0;
515 struct isci_host *ihost;
403 struct Scsi_Host *shost; 516 struct Scsi_Host *shost;
404 int err; 517 int err, i;
405 518
406 isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL); 519 ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
407 if (!isci_host) 520 if (!ihost)
408 return NULL; 521 return NULL;
409 522
410 isci_host->pdev = pdev; 523 ihost->pdev = pdev;
411 isci_host->id = id; 524 ihost->id = id;
525 spin_lock_init(&ihost->scic_lock);
526 init_waitqueue_head(&ihost->eventq);
527 ihost->sas_ha.dev = &ihost->pdev->dev;
528 ihost->sas_ha.lldd_ha = ihost;
529 tasklet_init(&ihost->completion_tasklet,
530 isci_host_completion_routine, (unsigned long)ihost);
531
532 /* validate module parameters */
533 /* TODO: kill struct sci_user_parameters and reference directly */
534 sci_oem_defaults(ihost);
535 isci_user_parameters_get(&sci_user_params);
536 if (sci_user_parameters_set(ihost, &sci_user_params)) {
537 dev_warn(&pdev->dev,
538 "%s: sci_user_parameters_set failed\n", __func__);
539 return NULL;
540 }
541
542 /* sanity check platform (or 'firmware') oem parameters */
543 if (orom) {
544 if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
545 dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
546 return NULL;
547 }
548 ihost->oem_parameters = orom->ctrl[id];
549 oem_version = orom->hdr.version;
550 }
551
552 /* validate oem parameters (platform, firmware, or built-in defaults) */
553 if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
554 dev_warn(&pdev->dev, "oem parameter validation failed\n");
555 return NULL;
556 }
557
558 for (i = 0; i < SCI_MAX_PORTS; i++) {
559 struct isci_port *iport = &ihost->ports[i];
560
561 INIT_LIST_HEAD(&iport->remote_dev_list);
562 iport->isci_host = ihost;
563 }
564
565 for (i = 0; i < SCI_MAX_PHYS; i++)
566 isci_phy_init(&ihost->phys[i], ihost, i);
567
568 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
569 struct isci_remote_device *idev = &ihost->devices[i];
570
571 INIT_LIST_HEAD(&idev->node);
572 }
412 573
413 shost = scsi_host_alloc(&isci_sht, sizeof(void *)); 574 shost = scsi_host_alloc(&isci_sht, sizeof(void *));
414 if (!shost) 575 if (!shost)
415 return NULL; 576 return NULL;
416 isci_host->shost = shost;
417 577
418 dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " 578 dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
419 "{%s, %s, %s, %s}\n", 579 "{%s, %s, %s, %s}\n",
420 (is_cable_select_overridden() ? "* " : ""), isci_host->id, 580 (is_cable_select_overridden() ? "* " : ""), ihost->id,
421 lookup_cable_names(decode_cable_selection(isci_host, 3)), 581 lookup_cable_names(decode_cable_selection(ihost, 3)),
422 lookup_cable_names(decode_cable_selection(isci_host, 2)), 582 lookup_cable_names(decode_cable_selection(ihost, 2)),
423 lookup_cable_names(decode_cable_selection(isci_host, 1)), 583 lookup_cable_names(decode_cable_selection(ihost, 1)),
424 lookup_cable_names(decode_cable_selection(isci_host, 0))); 584 lookup_cable_names(decode_cable_selection(ihost, 0)));
425 585
426 err = isci_host_init(isci_host); 586 err = isci_host_init(ihost);
427 if (err) 587 if (err)
428 goto err_shost; 588 goto err_shost;
429 589
430 SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha; 590 SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
431 isci_host->sas_ha.core.shost = shost; 591 ihost->sas_ha.core.shost = shost;
432 shost->transportt = isci_transport_template; 592 shost->transportt = isci_transport_template;
433 593
434 shost->max_id = ~0; 594 shost->max_id = ~0;
@@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
439 if (err) 599 if (err)
440 goto err_shost; 600 goto err_shost;
441 601
442 err = isci_register_sas_ha(isci_host); 602 err = isci_register_sas_ha(ihost);
443 if (err) 603 if (err)
444 goto err_shost_remove; 604 goto err_shost_remove;
445 605
446 return isci_host; 606 return ihost;
447 607
448 err_shost_remove: 608 err_shost_remove:
449 scsi_remove_host(shost); 609 scsi_remove_host(shost);
@@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
476 if (!orom) 636 if (!orom)
477 orom = isci_request_oprom(pdev); 637 orom = isci_request_oprom(pdev);
478 638
479 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { 639 for (i = 0; orom && i < num_controllers(pdev); i++) {
480 if (sci_oem_parameters_validate(&orom->ctrl[i], 640 if (sci_oem_parameters_validate(&orom->ctrl[i],
481 orom->hdr.version)) { 641 orom->hdr.version)) {
482 dev_warn(&pdev->dev, 642 dev_warn(&pdev->dev,
@@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
525 pci_info->hosts[i] = h; 685 pci_info->hosts[i] = h;
526 686
527 /* turn on DIF support */ 687 /* turn on DIF support */
528 scsi_host_set_prot(h->shost, 688 scsi_host_set_prot(to_shost(h),
529 SHOST_DIF_TYPE1_PROTECTION | 689 SHOST_DIF_TYPE1_PROTECTION |
530 SHOST_DIF_TYPE2_PROTECTION | 690 SHOST_DIF_TYPE2_PROTECTION |
531 SHOST_DIF_TYPE3_PROTECTION); 691 SHOST_DIF_TYPE3_PROTECTION);
532 scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC); 692 scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
533 } 693 }
534 694
535 err = isci_setup_interrupts(pdev); 695 err = isci_setup_interrupts(pdev);
@@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
537 goto err_host_alloc; 697 goto err_host_alloc;
538 698
539 for_each_isci_host(i, isci_host, pdev) 699 for_each_isci_host(i, isci_host, pdev)
540 scsi_scan_host(isci_host->shost); 700 scsi_scan_host(to_shost(isci_host));
541 701
542 return 0; 702 return 0;
543 703
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fab3586840b5..18f43d4c30ba 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
580 580
581 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); 581 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
582 582
583 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; 583 iphy->protocol = SAS_PROTOCOL_SSP;
584} 584}
585 585
586static void sci_phy_start_sata_link_training(struct isci_phy *iphy) 586static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
@@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
591 */ 591 */
592 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); 592 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
593 593
594 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; 594 iphy->protocol = SAS_PROTOCOL_SATA;
595} 595}
596 596
597/** 597/**
@@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code)
668 phy_to_host(iphy)->id, iphy->phy_index, \ 668 phy_to_host(iphy)->id, iphy->phy_index, \
669 phy_state_name(state), phy_event_name(code), code) 669 phy_state_name(state), phy_event_name(code), code)
670 670
671
672void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
673{
674 u32 val;
675
676 /* Extend timeout */
677 val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
678 val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
679 val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
680
681 writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
682}
683
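
scu_link_layer_set_txcomsas_timeout() above is a plain read-modify-write of the NEGTIME field in the transmit_comsas_signal register; the hunks that follow use it as a simple policy, extending the timeout to SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED after an identify-timeout event and restoring SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT on link failure before OOB restarts. The field update itself reduces to the usual mask-and-insert pattern (the mask and shift below are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define NEGTIME_SHIFT   0                         /* made-up field position */
#define NEGTIME_MASK    (0xffu << NEGTIME_SHIFT)  /* made-up field width    */

static uint32_t set_negtime(uint32_t reg, uint32_t timeout)
{
        reg &= ~NEGTIME_MASK;                              /* clear the field      */
        reg |= (timeout << NEGTIME_SHIFT) & NEGTIME_MASK;  /* insert the new value */
        return reg;
}

int main(void)
{
        /* read ... modify ... write back, as the helper does with readl/writel */
        printf("%08x\n", set_negtime(0xabcd0036, 0x54));   /* prints abcd0054 */
        return 0;
}
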
671enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) 684enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
672{ 685{
673 enum sci_phy_states state = iphy->sm.current_state_id; 686 enum sci_phy_states state = iphy->sm.current_state_id;
@@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
683 sci_phy_start_sata_link_training(iphy); 696 sci_phy_start_sata_link_training(iphy);
684 iphy->is_in_link_training = true; 697 iphy->is_in_link_training = true;
685 break; 698 break;
699 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
700 /* Extend timeout value */
701 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
702
703 /* Start the oob/sn state machine over again */
704 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
705 break;
686 default: 706 default:
687 phy_event_dbg(iphy, state, event_code); 707 phy_event_dbg(iphy, state, event_code);
688 return SCI_FAILURE; 708 return SCI_FAILURE;
@@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
717 sci_phy_start_sata_link_training(iphy); 737 sci_phy_start_sata_link_training(iphy);
718 break; 738 break;
719 case SCU_EVENT_LINK_FAILURE: 739 case SCU_EVENT_LINK_FAILURE:
740 /* Change the timeout value to default */
741 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
742
720 /* Link failure change state back to the starting state */ 743 /* Link failure change state back to the starting state */
721 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 744 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
722 break; 745 break;
746 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
747 /* Extend the timeout value */
748 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
749
750 /* Start the oob/sn state machine over again */
751 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
752 break;
723 default: 753 default:
724 phy_event_warn(iphy, state, event_code); 754 phy_event_warn(iphy, state, event_code);
725 return SCI_FAILURE; 755 return SCI_FAILURE;
@@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
740 sci_phy_start_sata_link_training(iphy); 770 sci_phy_start_sata_link_training(iphy);
741 break; 771 break;
742 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: 772 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
773 /* Extend the timeout value */
774 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
775
776 /* Start the oob/sn state machine over again */
777 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
778 break;
743 case SCU_EVENT_LINK_FAILURE: 779 case SCU_EVENT_LINK_FAILURE:
780 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
744 case SCU_EVENT_HARD_RESET_RECEIVED: 781 case SCU_EVENT_HARD_RESET_RECEIVED:
745 /* Start the oob/sn state machine over again */ 782 /* Start the oob/sn state machine over again */
746 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 783 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
@@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
753 case SCI_PHY_SUB_AWAIT_SAS_POWER: 790 case SCI_PHY_SUB_AWAIT_SAS_POWER:
754 switch (scu_get_event_code(event_code)) { 791 switch (scu_get_event_code(event_code)) {
755 case SCU_EVENT_LINK_FAILURE: 792 case SCU_EVENT_LINK_FAILURE:
793 /* Change the timeout value to default */
794 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
795
756 /* Link failure change state back to the starting state */ 796 /* Link failure change state back to the starting state */
757 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 797 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
758 break; 798 break;
@@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
764 case SCI_PHY_SUB_AWAIT_SATA_POWER: 804 case SCI_PHY_SUB_AWAIT_SATA_POWER:
765 switch (scu_get_event_code(event_code)) { 805 switch (scu_get_event_code(event_code)) {
766 case SCU_EVENT_LINK_FAILURE: 806 case SCU_EVENT_LINK_FAILURE:
807 /* Change the timeout value to default */
808 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
809
767 /* Link failure change state back to the starting state */ 810 /* Link failure change state back to the starting state */
768 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 811 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
769 break; 812 break;
@@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
788 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: 831 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
789 switch (scu_get_event_code(event_code)) { 832 switch (scu_get_event_code(event_code)) {
790 case SCU_EVENT_LINK_FAILURE: 833 case SCU_EVENT_LINK_FAILURE:
834 /* Change the timeout value to default */
835 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
836
791 /* Link failure change state back to the starting state */ 837 /* Link failure change state back to the starting state */
792 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 838 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
793 break; 839 break;
@@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
797 */ 843 */
798 break; 844 break;
799 case SCU_EVENT_SATA_PHY_DETECTED: 845 case SCU_EVENT_SATA_PHY_DETECTED:
800 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; 846 iphy->protocol = SAS_PROTOCOL_SATA;
801 847
802 /* We have received the SATA PHY notification change state */ 848 /* We have received the SATA PHY notification change state */
803 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); 849 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
@@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
836 SCI_PHY_SUB_AWAIT_SIG_FIS_UF); 882 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
837 break; 883 break;
838 case SCU_EVENT_LINK_FAILURE: 884 case SCU_EVENT_LINK_FAILURE:
885 /* Change the timeout value to default */
886 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
887
839 /* Link failure change state back to the starting state */ 888 /* Link failure change state back to the starting state */
840 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 889 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
841 break; 890 break;
@@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
859 break; 908 break;
860 909
861 case SCU_EVENT_LINK_FAILURE: 910 case SCU_EVENT_LINK_FAILURE:
911 /* Change the timeout value to default */
912 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
913
862 /* Link failure change state back to the starting state */ 914 /* Link failure change state back to the starting state */
863 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 915 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
864 break; 916 break;
@@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
871 case SCI_PHY_READY: 923 case SCI_PHY_READY:
872 switch (scu_get_event_code(event_code)) { 924 switch (scu_get_event_code(event_code)) {
873 case SCU_EVENT_LINK_FAILURE: 925 case SCU_EVENT_LINK_FAILURE:
926 /* Set default timeout */
927 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
928
874 /* Link failure change state back to the starting state */ 929 /* Link failure change state back to the starting state */
875 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 930 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
876 break; 931 break;
877 case SCU_EVENT_BROADCAST_CHANGE: 932 case SCU_EVENT_BROADCAST_CHANGE:
933 case SCU_EVENT_BROADCAST_SES:
934 case SCU_EVENT_BROADCAST_RESERVED0:
935 case SCU_EVENT_BROADCAST_RESERVED1:
936 case SCU_EVENT_BROADCAST_EXPANDER:
937 case SCU_EVENT_BROADCAST_AEN:
878 /* Broadcast change received. Notify the port. */ 938 /* Broadcast change received. Notify the port. */
879 if (phy_get_non_dummy_port(iphy) != NULL) 939 if (phy_get_non_dummy_port(iphy) != NULL)
880 sci_port_broadcast_change_received(iphy->owning_port, iphy); 940 sci_port_broadcast_change_received(iphy->owning_port, iphy);
881 else 941 else
882 iphy->bcn_received_while_port_unassigned = true; 942 iphy->bcn_received_while_port_unassigned = true;
883 break; 943 break;
944 case SCU_EVENT_BROADCAST_RESERVED3:
945 case SCU_EVENT_BROADCAST_RESERVED4:
884 default: 946 default:
885 phy_event_warn(iphy, state, event_code); 947 phy_event_warn(iphy, state, event_code);
886 return SCI_FAILURE_INVALID_STATE; 948 return SCI_FAILURE_INVALID_STATE;
@@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
1215 scu_link_layer_start_oob(iphy); 1277 scu_link_layer_start_oob(iphy);
1216 1278
1217 /* We don't know what kind of phy we are going to be just yet */ 1279 /* We don't know what kind of phy we are going to be just yet */
1218 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; 1280 iphy->protocol = SAS_PROTOCOL_NONE;
1219 iphy->bcn_received_while_port_unassigned = false; 1281 iphy->bcn_received_while_port_unassigned = false;
1220 1282
1221 if (iphy->sm.previous_state_id == SCI_PHY_READY) 1283 if (iphy->sm.previous_state_id == SCI_PHY_READY)
@@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
1250 */ 1312 */
1251 sci_port_deactivate_phy(iphy->owning_port, iphy, false); 1313 sci_port_deactivate_phy(iphy->owning_port, iphy, false);
1252 1314
1253 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { 1315 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1254 scu_link_layer_tx_hard_reset(iphy); 1316 scu_link_layer_tx_hard_reset(iphy);
1255 } else { 1317 } else {
1256 /* The SCU does not need to have a discrete reset state so 1318 /* The SCU does not need to have a discrete reset state so
@@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy,
1316 iphy->owning_port = iport; 1378 iphy->owning_port = iport;
1317 iphy->phy_index = phy_index; 1379 iphy->phy_index = phy_index;
1318 iphy->bcn_received_while_port_unassigned = false; 1380 iphy->bcn_received_while_port_unassigned = false;
1319 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; 1381 iphy->protocol = SAS_PROTOCOL_NONE;
1320 iphy->link_layer_registers = NULL; 1382 iphy->link_layer_registers = NULL;
1321 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; 1383 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
1322 1384
@@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
1380 switch (func) { 1442 switch (func) {
1381 case PHY_FUNC_DISABLE: 1443 case PHY_FUNC_DISABLE:
1382 spin_lock_irqsave(&ihost->scic_lock, flags); 1444 spin_lock_irqsave(&ihost->scic_lock, flags);
1445 scu_link_layer_start_oob(iphy);
1383 sci_phy_stop(iphy); 1446 sci_phy_stop(iphy);
1384 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1447 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1385 break; 1448 break;
1386 1449
1387 case PHY_FUNC_LINK_RESET: 1450 case PHY_FUNC_LINK_RESET:
1388 spin_lock_irqsave(&ihost->scic_lock, flags); 1451 spin_lock_irqsave(&ihost->scic_lock, flags);
1452 scu_link_layer_start_oob(iphy);
1389 sci_phy_stop(iphy); 1453 sci_phy_stop(iphy);
1390 sci_phy_start(iphy); 1454 sci_phy_start(iphy);
1391 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1455 spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 0e45833ba06d..45fecfa36a98 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -76,13 +76,6 @@
76 */ 76 */
77#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 77#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
78 78
79enum sci_phy_protocol {
80 SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
81 SCIC_SDS_PHY_PROTOCOL_SAS,
82 SCIC_SDS_PHY_PROTOCOL_SATA,
83 SCIC_SDS_MAX_PHY_PROTOCOLS
84};
85
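
With the private enum above gone, iphy->protocol becomes a standard enum sas_protocol, and the rest of the patch substitutes values one for one: SCIC_SDS_PHY_PROTOCOL_SAS becomes SAS_PROTOCOL_SSP, SCIC_SDS_PHY_PROTOCOL_SATA becomes SAS_PROTOCOL_SATA, and SCIC_SDS_PHY_PROTOCOL_UNKNOWN becomes SAS_PROTOCOL_NONE. A self-contained sketch of that mapping, using stand-in enums rather than the real SAS headers:

#include <stdio.h>

enum old_protocol { OLD_UNKNOWN, OLD_SAS, OLD_SATA };           /* the removed enum        */
enum sas_protocol_stub { STUB_NONE, STUB_SSP, STUB_SATA };      /* stand-ins, not real values */

static enum sas_protocol_stub translate(enum old_protocol old)
{
        switch (old) {
        case OLD_SAS:                   /* -> SAS_PROTOCOL_SSP  */
                return STUB_SSP;
        case OLD_SATA:                  /* -> SAS_PROTOCOL_SATA */
                return STUB_SATA;
        case OLD_UNKNOWN:
        default:                        /* -> SAS_PROTOCOL_NONE */
                return STUB_NONE;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               translate(OLD_UNKNOWN), translate(OLD_SAS), translate(OLD_SATA));
        return 0;
}
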
86/** 79/**
87 * isci_phy - hba local phy infrastructure 80 * isci_phy - hba local phy infrastructure
88 * @sm: 81 * @sm:
@@ -95,7 +88,7 @@ struct isci_phy {
95 struct sci_base_state_machine sm; 88 struct sci_base_state_machine sm;
96 struct isci_port *owning_port; 89 struct isci_port *owning_port;
97 enum sas_linkrate max_negotiated_speed; 90 enum sas_linkrate max_negotiated_speed;
98 enum sci_phy_protocol protocol; 91 enum sas_protocol protocol;
99 u8 phy_index; 92 u8 phy_index;
100 bool bcn_received_while_port_unassigned; 93 bool bcn_received_while_port_unassigned;
101 bool is_in_link_training; 94 bool is_in_link_training;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5fada73b71ff..2fb85bf75449 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
184 184
185 sci_port_get_properties(iport, &properties); 185 sci_port_get_properties(iport, &properties);
186 186
187 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { 187 if (iphy->protocol == SAS_PROTOCOL_SATA) {
188 u64 attached_sas_address; 188 u64 attached_sas_address;
189 189
190 iphy->sas_phy.oob_mode = SATA_OOB_MODE; 190 iphy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
204 204
205 memcpy(&iphy->sas_phy.attached_sas_addr, 205 memcpy(&iphy->sas_phy.attached_sas_addr,
206 &attached_sas_address, sizeof(attached_sas_address)); 206 &attached_sas_address, sizeof(attached_sas_address));
207 } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { 207 } else if (iphy->protocol == SAS_PROTOCOL_SSP) {
208 iphy->sas_phy.oob_mode = SAS_OOB_MODE; 208 iphy->sas_phy.oob_mode = SAS_OOB_MODE;
209 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); 209 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
210 210
@@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
251 if (isci_phy->sas_phy.port && 251 if (isci_phy->sas_phy.port &&
252 isci_phy->sas_phy.port->num_phys == 1) { 252 isci_phy->sas_phy.port->num_phys == 1) {
253 /* change the state for all devices on this port. The 253 /* change the state for all devices on this port. The
254 * next task sent to this device will be returned as 254 * next task sent to this device will be returned as
255 * SAS_TASK_UNDELIVERED, and the scsi mid layer will 255 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
256 * remove the target 256 * remove the target
257 */ 257 */
258 list_for_each_entry(isci_device, 258 list_for_each_entry(isci_device,
259 &isci_port->remote_dev_list, 259 &isci_port->remote_dev_list,
260 node) { 260 node) {
@@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
517 */ 517 */
518 iphy = sci_port_get_a_connected_phy(iport); 518 iphy = sci_port_get_a_connected_phy(iport);
519 if (iphy) { 519 if (iphy) {
520 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { 520 if (iphy->protocol != SAS_PROTOCOL_SATA) {
521 sci_phy_get_attached_sas_address(iphy, sas); 521 sci_phy_get_attached_sas_address(iphy, sas);
522 } else { 522 } else {
523 sci_phy_get_sas_address(iphy, sas); 523 sci_phy_get_sas_address(iphy, sas);
@@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport,
624{ 624{
625 struct isci_host *ihost = iport->owning_controller; 625 struct isci_host *ihost = iport->owning_controller;
626 626
627 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME)) 627 if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
628 sci_phy_resume(iphy); 628 sci_phy_resume(iphy);
629 629
630 iport->active_phy_mask |= 1 << iphy->phy_index; 630 iport->active_phy_mask |= 1 << iphy->phy_index;
@@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport)
751 * wide ports and direct attached phys. Since there are no wide ported SATA 751 * wide ports and direct attached phys. Since there are no wide ported SATA
752 * devices this could become an invalid port configuration. 752 * devices this could become an invalid port configuration.
753 */ 753 */
754bool sci_port_link_detected( 754bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
755 struct isci_port *iport,
756 struct isci_phy *iphy)
757{ 755{
758 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && 756 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
759 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) { 757 (iphy->protocol == SAS_PROTOCOL_SATA)) {
760 if (sci_port_is_wide(iport)) { 758 if (sci_port_is_wide(iport)) {
761 sci_port_invalid_link_up(iport, iphy); 759 sci_port_invalid_link_up(iport, iphy);
762 return false; 760 return false;
@@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1201 enum sci_status status; 1199 enum sci_status status;
1202 enum sci_port_states state; 1200 enum sci_port_states state;
1203 1201
1202 sci_port_bcn_enable(iport);
1203
1204 state = iport->sm.current_state_id; 1204 state = iport->sm.current_state_id;
1205 switch (state) { 1205 switch (state) {
1206 case SCI_PORT_STOPPED: { 1206 case SCI_PORT_STOPPED: {
@@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1548 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); 1548 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1549} 1549}
1550 1550
1551void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
1552{
1553 int phy_index;
1554 u32 phy_mask = iport->active_phy_mask;
1555
1556 if (timeout)
1557 ++iport->hang_detect_users;
1558 else if (iport->hang_detect_users > 1)
1559 --iport->hang_detect_users;
1560 else
1561 iport->hang_detect_users = 0;
1562
1563 if (timeout || (iport->hang_detect_users == 0)) {
1564 for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
1565 if ((phy_mask >> phy_index) & 1) {
1566 writel(timeout,
1567 &iport->phy_table[phy_index]
1568 ->link_layer_registers
1569 ->link_layer_hang_detection_timeout);
1570 }
1571 }
1572 }
1573}
1551/* --------------------------------------------------------------------------- */ 1574/* --------------------------------------------------------------------------- */
1552 1575
1553static const struct sci_base_state sci_port_state_table[] = { 1576static const struct sci_base_state sci_port_state_table[] = {
@@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
1596 1619
1597 iport->started_request_count = 0; 1620 iport->started_request_count = 0;
1598 iport->assigned_device_count = 0; 1621 iport->assigned_device_count = 0;
1622 iport->hang_detect_users = 0;
1599 1623
1600 iport->reserved_rni = SCU_DUMMY_INDEX; 1624 iport->reserved_rni = SCU_DUMMY_INDEX;
1601 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; 1625 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
@@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index,
1608 iport->phy_table[index] = NULL; 1632 iport->phy_table[index] = NULL;
1609} 1633}
1610 1634
1611void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
1612{
1613 INIT_LIST_HEAD(&iport->remote_dev_list);
1614 INIT_LIST_HEAD(&iport->domain_dev_list);
1615 iport->isci_host = ihost;
1616}
1617
1618void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) 1635void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1619{ 1636{
1620 struct isci_host *ihost = iport->owning_controller; 1637 struct isci_host *ihost = iport->owning_controller;
@@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1671 __func__, iport, status); 1688 __func__, iport, status);
1672 1689
1673 } 1690 }
1674
1675 /* If the hard reset for the port has failed, consider this
1676 * the same as link failures on all phys in the port.
1677 */
1678 if (ret != TMF_RESP_FUNC_COMPLETE) {
1679
1680 dev_err(&ihost->pdev->dev,
1681 "%s: iport = %p; hard reset failed "
1682 "(0x%x) - driving explicit link fail for all phys\n",
1683 __func__, iport, iport->hard_reset_status);
1684 }
1685 return ret; 1691 return ret;
1686} 1692}
1687 1693
@@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy)
1740 struct isci_host *ihost = phy->ha->lldd_ha; 1746 struct isci_host *ihost = phy->ha->lldd_ha;
1741 struct isci_phy *iphy = to_iphy(phy); 1747 struct isci_phy *iphy = to_iphy(phy);
1742 struct asd_sas_port *port = phy->port; 1748 struct asd_sas_port *port = phy->port;
1743 struct isci_port *iport; 1749 struct isci_port *iport = NULL;
1744 unsigned long flags; 1750 unsigned long flags;
1745 int i; 1751 int i;
1746 1752
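[Editor's note] The sci_port_set_hang_detection_timeout() addition above reference-counts its callers in hang_detect_users and only writes the per-phy link-layer hang-detection registers when a caller enables the timeout or when the last user disables it. Below is a minimal standalone C model of that counting behaviour, for illustration only — the struct and names are stand-ins, not driver code.

/* Standalone model (not kernel code) of the hang_detect_users counting. */
#include <stdio.h>

#define MAX_PHYS 4

struct model_port {
	unsigned int active_phy_mask;
	unsigned int hang_detect_users;
	unsigned int phy_timeout[MAX_PHYS];   /* stands in for the per-phy register */
};

static void model_set_hang_detection_timeout(struct model_port *port,
					      unsigned int timeout)
{
	int phy;

	if (timeout)
		++port->hang_detect_users;
	else if (port->hang_detect_users > 1)
		--port->hang_detect_users;
	else
		port->hang_detect_users = 0;

	/* Only touch the "registers" when enabling, or on the final disable. */
	if (timeout || port->hang_detect_users == 0)
		for (phy = 0; phy < MAX_PHYS; phy++)
			if ((port->active_phy_mask >> phy) & 1)
				port->phy_timeout[phy] = timeout;
}

int main(void)
{
	struct model_port port = { .active_phy_mask = 0x3 };

	model_set_hang_detection_timeout(&port, 500);  /* first user enables */
	model_set_hang_detection_timeout(&port, 500);  /* second user re-programs */
	model_set_hang_detection_timeout(&port, 0);    /* one user left, timeout kept */
	printf("users=%u phy0=%u\n", port.hang_detect_users, port.phy_timeout[0]);
	model_set_hang_detection_timeout(&port, 0);    /* last user, timeout cleared */
	printf("users=%u phy0=%u\n", port.hang_detect_users, port.phy_timeout[0]);
	return 0;
}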
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 6b56240c2051..861e8f72811b 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -97,7 +97,6 @@ enum isci_status {
97struct isci_port { 97struct isci_port {
98 struct isci_host *isci_host; 98 struct isci_host *isci_host;
99 struct list_head remote_dev_list; 99 struct list_head remote_dev_list;
100 struct list_head domain_dev_list;
101 #define IPORT_RESET_PENDING 0 100 #define IPORT_RESET_PENDING 0
102 unsigned long state; 101 unsigned long state;
103 enum sci_status hard_reset_status; 102 enum sci_status hard_reset_status;
@@ -112,6 +111,7 @@ struct isci_port {
112 u16 reserved_tag; 111 u16 reserved_tag;
113 u32 started_request_count; 112 u32 started_request_count;
114 u32 assigned_device_count; 113 u32 assigned_device_count;
114 u32 hang_detect_users;
115 u32 not_ready_reason; 115 u32 not_ready_reason;
116 struct isci_phy *phy_table[SCI_MAX_PHYS]; 116 struct isci_phy *phy_table[SCI_MAX_PHYS];
117 struct isci_host *owning_controller; 117 struct isci_host *owning_controller;
@@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address(
270 struct isci_port *iport, 270 struct isci_port *iport,
271 struct sci_sas_address *sas_address); 271 struct sci_sas_address *sas_address);
272 272
273void sci_port_set_hang_detection_timeout(
274 struct isci_port *isci_port,
275 u32 timeout);
276
273void isci_port_formed(struct asd_sas_phy *); 277void isci_port_formed(struct asd_sas_phy *);
274void isci_port_deformed(struct asd_sas_phy *); 278void isci_port_deformed(struct asd_sas_phy *);
275 279
276void isci_port_init(
277 struct isci_port *port,
278 struct isci_host *host,
279 int index);
280
281int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 280int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
282 struct isci_phy *iphy); 281 struct isci_phy *iphy);
283int isci_ata_check_ready(struct domain_device *dev); 282int isci_ata_check_ready(struct domain_device *dev);
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e9544cbe5..cd962da4a57a 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
57 57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) 58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) 59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250) 60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000)
61 61
62enum SCIC_SDS_APC_ACTIVITY { 62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY, 63 SCIC_SDS_APC_SKIP_PHY,
@@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
 472 * down event or a link up event where we cannot yet tell to which port a phy 472 * down event or a link up event where we cannot yet tell to which port a phy
473 * belongs. 473 * belongs.
474 */ 474 */
475static void sci_apc_agent_start_timer( 475static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
476 struct sci_port_configuration_agent *port_agent, 476 u32 timeout)
477 u32 timeout)
478{ 477{
479 if (port_agent->timer_pending)
480 sci_del_timer(&port_agent->timer);
481
482 port_agent->timer_pending = true; 478 port_agent->timer_pending = true;
483 sci_mod_timer(&port_agent->timer, timeout); 479 sci_mod_timer(&port_agent->timer, timeout);
484} 480}
@@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data)
697 &ihost->phys[index], false); 693 &ihost->phys[index], false);
698 } 694 }
699 695
696 if (is_controller_start_complete(ihost))
697 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
698
700done: 699done:
701 spin_unlock_irqrestore(&ihost->scic_lock, flags); 700 spin_unlock_irqrestore(&ihost->scic_lock, flags);
702} 701}
@@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct(
732 } 731 }
733} 732}
734 733
734bool is_port_config_apc(struct isci_host *ihost)
735{
736 return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
737}
738
735enum sci_status sci_port_configuration_agent_initialize( 739enum sci_status sci_port_configuration_agent_initialize(
736 struct isci_host *ihost, 740 struct isci_host *ihost,
737 struct sci_port_configuration_agent *port_agent) 741 struct sci_port_configuration_agent *port_agent)
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 9b8117b9d756..4d95654c3fd4 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
112 return rom; 112 return rom;
113} 113}
114 114
115enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
116 struct isci_orom *orom, int scu_index)
117{
118 /* check for valid inputs */
119 if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
120 scu_index > orom->hdr.num_elements || !oem)
121 return -EINVAL;
122
123 *oem = orom->ctrl[scu_index];
124 return 0;
125}
126
127struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) 115struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
128{ 116{
129 struct isci_orom *orom = NULL, *data; 117 struct isci_orom *orom = NULL, *data;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index bb0e9d4d97c9..e08b578241f8 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
156 156
157struct isci_orom; 157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev); 158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
159enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
160 struct isci_orom *orom, int scu_index);
161struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); 159struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
162struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); 160struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
163 161
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 7eb0ccd45fe6..97f3ceb8d724 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers {
1239#define SCU_SAS_LLCTL_GEN_BIT(name) \ 1239#define SCU_SAS_LLCTL_GEN_BIT(name) \
1240 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) 1240 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
1241 1241
1242#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0)
1243#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF)
1244#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0)
1245#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF)
1246
1247#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
1248 SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
1249
1242 1250
1243/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ 1251/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
1244#define SCU_PSZGCR_OFFSET 0x00E4 1252#define SCU_PSZGCR_OFFSET 0x00E4
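[Editor's note] The TXCOMSAS_NEGTIME definitions added above follow the driver's usual SHIFT/MASK pairing, with SCU_SAS_LLTXCOMSAS_GEN_VAL delegating to SCU_GEN_VALUE, which is defined elsewhere in the header. The sketch below assumes the conventional encode pattern (value shifted into position, then clamped by the mask); GEN_VAL here is a hypothetical stand-in, not the driver macro.

/* Illustrative userspace model of the assumed SHIFT/MASK field encoding. */
#include <stdio.h>

#define TXCOMSAS_NEGTIME_SHIFT    (0)
#define TXCOMSAS_NEGTIME_MASK     (0x3FF)
#define TXCOMSAS_NEGTIME_DEFAULT  (0xF0)
#define TXCOMSAS_NEGTIME_EXTENDED (0x1FF)

/* Typical "generate value" helper: shift into place, then clamp to the mask. */
#define GEN_VAL(name, value) \
	(((value) << name##_SHIFT) & name##_MASK)

int main(void)
{
	printf("default  encoding: 0x%x\n",
	       GEN_VAL(TXCOMSAS_NEGTIME, TXCOMSAS_NEGTIME_DEFAULT));
	printf("extended encoding: 0x%x\n",
	       GEN_VAL(TXCOMSAS_NEGTIME, TXCOMSAS_NEGTIME_EXTENDED));
	return 0;
}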
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8f501b0a81d6..c3aa6c5457b9 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state)
72} 72}
73#undef C 73#undef C
74 74
75/** 75enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
76 * isci_remote_device_not_ready() - This function is called by the ihost when 76 enum sci_remote_node_suspension_reasons reason)
77 * the remote device is not ready. We mark the isci device as ready (not
78 * "ready_for_io") and signal the waiting proccess.
79 * @isci_host: This parameter specifies the isci host object.
80 * @isci_device: This parameter specifies the remote device
81 *
82 * sci_lock is held on entrance to this function.
83 */
84static void isci_remote_device_not_ready(struct isci_host *ihost,
85 struct isci_remote_device *idev, u32 reason)
86{ 77{
87 struct isci_request *ireq; 78 return sci_remote_node_context_suspend(&idev->rnc, reason,
88 79 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
89 dev_dbg(&ihost->pdev->dev,
90 "%s: isci_device = %p\n", __func__, idev);
91
92 switch (reason) {
93 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
94 set_bit(IDEV_GONE, &idev->flags);
95 break;
96 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
97 set_bit(IDEV_IO_NCQERROR, &idev->flags);
98
99 /* Kill all outstanding requests for the device. */
100 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
101
102 dev_dbg(&ihost->pdev->dev,
103 "%s: isci_device = %p request = %p\n",
104 __func__, idev, ireq);
105
106 sci_controller_terminate_request(ihost,
107 idev,
108 ireq);
109 }
110 /* Fall through into the default case... */
111 default:
112 clear_bit(IDEV_IO_READY, &idev->flags);
113 break;
114 }
115} 80}
116 81
117/** 82/**
@@ -133,18 +98,29 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
133 wake_up(&ihost->eventq); 98 wake_up(&ihost->eventq);
134} 99}
135 100
136/* called once the remote node context is ready to be freed. 101static enum sci_status sci_remote_device_terminate_req(
137 * The remote device can now report that its stop operation is complete. none 102 struct isci_host *ihost,
138 */ 103 struct isci_remote_device *idev,
139static void rnc_destruct_done(void *_dev) 104 int check_abort,
105 struct isci_request *ireq)
140{ 106{
141 struct isci_remote_device *idev = _dev; 107 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
108 (ireq->target_device != idev) ||
109 (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
110 return SCI_SUCCESS;
142 111
143 BUG_ON(idev->started_request_count != 0); 112 dev_dbg(&ihost->pdev->dev,
144 sci_change_state(&idev->sm, SCI_DEV_STOPPED); 113 "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
114 __func__, idev, idev->flags, ireq, ireq->target_device);
115
116 set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
117
118 return sci_controller_terminate_request(ihost, idev, ireq);
145} 119}
146 120
147static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) 121static enum sci_status sci_remote_device_terminate_reqs_checkabort(
122 struct isci_remote_device *idev,
123 int chk)
148{ 124{
149 struct isci_host *ihost = idev->owning_port->owning_controller; 125 struct isci_host *ihost = idev->owning_port->owning_controller;
150 enum sci_status status = SCI_SUCCESS; 126 enum sci_status status = SCI_SUCCESS;
@@ -154,18 +130,210 @@ static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_d
154 struct isci_request *ireq = ihost->reqs[i]; 130 struct isci_request *ireq = ihost->reqs[i];
155 enum sci_status s; 131 enum sci_status s;
156 132
157 if (!test_bit(IREQ_ACTIVE, &ireq->flags) || 133 s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
158 ireq->target_device != idev)
159 continue;
160
161 s = sci_controller_terminate_request(ihost, idev, ireq);
162 if (s != SCI_SUCCESS) 134 if (s != SCI_SUCCESS)
163 status = s; 135 status = s;
164 } 136 }
137 return status;
138}
139
140static bool isci_compare_suspendcount(
141 struct isci_remote_device *idev,
142 u32 localcount)
143{
144 smp_rmb();
145
146 /* Check for a change in the suspend count, or the RNC
147 * being destroyed.
148 */
149 return (localcount != idev->rnc.suspend_count)
150 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
151}
152
153static bool isci_check_reqterm(
154 struct isci_host *ihost,
155 struct isci_remote_device *idev,
156 struct isci_request *ireq,
157 u32 localcount)
158{
159 unsigned long flags;
160 bool res;
165 161
162 spin_lock_irqsave(&ihost->scic_lock, flags);
163 res = isci_compare_suspendcount(idev, localcount)
164 && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
165 spin_unlock_irqrestore(&ihost->scic_lock, flags);
166
167 return res;
168}
169
170static bool isci_check_devempty(
171 struct isci_host *ihost,
172 struct isci_remote_device *idev,
173 u32 localcount)
174{
175 unsigned long flags;
176 bool res;
177
178 spin_lock_irqsave(&ihost->scic_lock, flags);
179 res = isci_compare_suspendcount(idev, localcount)
180 && idev->started_request_count == 0;
181 spin_unlock_irqrestore(&ihost->scic_lock, flags);
182
183 return res;
184}
185
186enum sci_status isci_remote_device_terminate_requests(
187 struct isci_host *ihost,
188 struct isci_remote_device *idev,
189 struct isci_request *ireq)
190{
191 enum sci_status status = SCI_SUCCESS;
192 unsigned long flags;
193 u32 rnc_suspend_count;
194
195 spin_lock_irqsave(&ihost->scic_lock, flags);
196
197 if (isci_get_device(idev) == NULL) {
198 dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
199 __func__, idev);
200 spin_unlock_irqrestore(&ihost->scic_lock, flags);
201 status = SCI_FAILURE;
202 } else {
203 /* If already suspended, don't wait for another suspension. */
204 smp_rmb();
205 rnc_suspend_count
206 = sci_remote_node_context_is_suspended(&idev->rnc)
207 ? 0 : idev->rnc.suspend_count;
208
209 dev_dbg(&ihost->pdev->dev,
210 "%s: idev=%p, ireq=%p; started_request_count=%d, "
211 "rnc_suspend_count=%d, rnc.suspend_count=%d"
212 "about to wait\n",
213 __func__, idev, ireq, idev->started_request_count,
214 rnc_suspend_count, idev->rnc.suspend_count);
215
216 #define MAX_SUSPEND_MSECS 10000
217 if (ireq) {
218 /* Terminate a specific TC. */
219 set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
220 sci_remote_device_terminate_req(ihost, idev, 0, ireq);
221 spin_unlock_irqrestore(&ihost->scic_lock, flags);
222 if (!wait_event_timeout(ihost->eventq,
223 isci_check_reqterm(ihost, idev, ireq,
224 rnc_suspend_count),
225 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
226
227 dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
228 __func__, ihost->id);
229 dev_dbg(&ihost->pdev->dev,
230 "%s: ******* Timeout waiting for "
231 "suspend; idev=%p, current state %s; "
232 "started_request_count=%d, flags=%lx\n\t"
233 "rnc_suspend_count=%d, rnc.suspend_count=%d "
234 "RNC: current state %s, current "
235 "suspend_type %x dest state %d;\n"
236 "ireq=%p, ireq->flags = %lx\n",
237 __func__, idev,
238 dev_state_name(idev->sm.current_state_id),
239 idev->started_request_count, idev->flags,
240 rnc_suspend_count, idev->rnc.suspend_count,
241 rnc_state_name(idev->rnc.sm.current_state_id),
242 idev->rnc.suspend_type,
243 idev->rnc.destination_state,
244 ireq, ireq->flags);
245 }
246 spin_lock_irqsave(&ihost->scic_lock, flags);
247 clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
248 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
249 isci_free_tag(ihost, ireq->io_tag);
250 spin_unlock_irqrestore(&ihost->scic_lock, flags);
251 } else {
252 /* Terminate all TCs. */
253 sci_remote_device_terminate_requests(idev);
254 spin_unlock_irqrestore(&ihost->scic_lock, flags);
255 if (!wait_event_timeout(ihost->eventq,
256 isci_check_devempty(ihost, idev,
257 rnc_suspend_count),
258 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
259
260 dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
261 __func__, ihost->id);
262 dev_dbg(&ihost->pdev->dev,
263 "%s: ******* Timeout waiting for "
264 "suspend; idev=%p, current state %s; "
265 "started_request_count=%d, flags=%lx\n\t"
266 "rnc_suspend_count=%d, "
267 "RNC: current state %s, "
268 "rnc.suspend_count=%d, current "
269 "suspend_type %x dest state %d\n",
270 __func__, idev,
271 dev_state_name(idev->sm.current_state_id),
272 idev->started_request_count, idev->flags,
273 rnc_suspend_count,
274 rnc_state_name(idev->rnc.sm.current_state_id),
275 idev->rnc.suspend_count,
276 idev->rnc.suspend_type,
277 idev->rnc.destination_state);
278 }
279 }
280 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
281 __func__, idev);
282 isci_put_device(idev);
283 }
166 return status; 284 return status;
167} 285}
168 286
287/**
288* isci_remote_device_not_ready() - This function is called by the ihost when
289* the remote device is not ready. We mark the isci device as ready (not
290* "ready_for_io") and signal the waiting proccess.
291* @isci_host: This parameter specifies the isci host object.
292* @isci_device: This parameter specifies the remote device
293*
294* sci_lock is held on entrance to this function.
295*/
296static void isci_remote_device_not_ready(struct isci_host *ihost,
297 struct isci_remote_device *idev,
298 u32 reason)
299{
300 dev_dbg(&ihost->pdev->dev,
301 "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
302
303 switch (reason) {
304 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
305 set_bit(IDEV_IO_NCQERROR, &idev->flags);
306
307 /* Suspend the remote device so the I/O can be terminated. */
308 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
309
310 /* Kill all outstanding requests for the device. */
311 sci_remote_device_terminate_requests(idev);
312
313 /* Fall through into the default case... */
314 default:
315 clear_bit(IDEV_IO_READY, &idev->flags);
316 break;
317 }
318}
319
320/* called once the remote node context is ready to be freed.
 321 * The remote device can now report that its stop operation is complete.
322 */
323static void rnc_destruct_done(void *_dev)
324{
325 struct isci_remote_device *idev = _dev;
326
327 BUG_ON(idev->started_request_count != 0);
328 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
329}
330
331enum sci_status sci_remote_device_terminate_requests(
332 struct isci_remote_device *idev)
333{
334 return sci_remote_device_terminate_reqs_checkabort(idev, 0);
335}
336
169enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, 337enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
170 u32 timeout) 338 u32 timeout)
171{ 339{
@@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
201 case SCI_SMP_DEV_IDLE: 369 case SCI_SMP_DEV_IDLE:
202 case SCI_SMP_DEV_CMD: 370 case SCI_SMP_DEV_CMD:
203 sci_change_state(sm, SCI_DEV_STOPPING); 371 sci_change_state(sm, SCI_DEV_STOPPING);
204 if (idev->started_request_count == 0) { 372 if (idev->started_request_count == 0)
205 sci_remote_node_context_destruct(&idev->rnc, 373 sci_remote_node_context_destruct(&idev->rnc,
206 rnc_destruct_done, idev); 374 rnc_destruct_done,
207 return SCI_SUCCESS; 375 idev);
208 } else 376 else {
209 return sci_remote_device_terminate_requests(idev); 377 sci_remote_device_suspend(
210 break; 378 idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
379 sci_remote_device_terminate_requests(idev);
380 }
381 return SCI_SUCCESS;
211 case SCI_DEV_STOPPING: 382 case SCI_DEV_STOPPING:
212 /* All requests should have been terminated, but if there is an 383 /* All requests should have been terminated, but if there is an
213 * attempt to stop a device already in the stopping state, then 384 * attempt to stop a device already in the stopping state, then
@@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
265 return SCI_SUCCESS; 436 return SCI_SUCCESS;
266} 437}
267 438
268enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
269 u32 suspend_type)
270{
271 struct sci_base_state_machine *sm = &idev->sm;
272 enum sci_remote_device_states state = sm->current_state_id;
273
274 if (state != SCI_STP_DEV_CMD) {
275 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
276 __func__, dev_state_name(state));
277 return SCI_FAILURE_INVALID_STATE;
278 }
279
280 return sci_remote_node_context_suspend(&idev->rnc,
281 suspend_type, NULL, NULL);
282}
283
284enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, 439enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
285 u32 frame_index) 440 u32 frame_index)
286{ 441{
@@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev)
412enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, 567enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
413 u32 event_code) 568 u32 event_code)
414{ 569{
570 enum sci_status status;
415 struct sci_base_state_machine *sm = &idev->sm; 571 struct sci_base_state_machine *sm = &idev->sm;
416 enum sci_remote_device_states state = sm->current_state_id; 572 enum sci_remote_device_states state = sm->current_state_id;
417 enum sci_status status;
418 573
419 switch (scu_get_event_type(event_code)) { 574 switch (scu_get_event_type(event_code)) {
420 case SCU_EVENT_TYPE_RNC_OPS_MISC: 575 case SCU_EVENT_TYPE_RNC_OPS_MISC:
@@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
427 status = SCI_SUCCESS; 582 status = SCI_SUCCESS;
428 583
429 /* Suspend the associated RNC */ 584 /* Suspend the associated RNC */
430 sci_remote_node_context_suspend(&idev->rnc, 585 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
431 SCI_SOFTWARE_SUSPENSION,
432 NULL, NULL);
433 586
434 dev_dbg(scirdev_to_dev(idev), 587 dev_dbg(scirdev_to_dev(idev),
435 "%s: device: %p event code: %x: %s\n", 588 "%s: device: %p event code: %x: %s\n",
@@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
455 if (status != SCI_SUCCESS) 608 if (status != SCI_SUCCESS)
456 return status; 609 return status;
457 610
611 /* Decode device-specific states that may require an RNC resume during
612 * normal operation. When the abort path is active, these resumes are
613 * managed when the abort path exits.
614 */
458 if (state == SCI_STP_DEV_ATAPI_ERROR) { 615 if (state == SCI_STP_DEV_ATAPI_ERROR) {
459 /* For ATAPI error state resume the RNC right away. */ 616 /* For ATAPI error state resume the RNC right away. */
460 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || 617 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
@@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
743 if (status != SCI_SUCCESS) 900 if (status != SCI_SUCCESS)
744 return status; 901 return status;
745 902
746 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
747 if (status != SCI_SUCCESS)
748 goto out;
749
750 status = sci_request_start(ireq); 903 status = sci_request_start(ireq);
751 if (status != SCI_SUCCESS) 904 if (status != SCI_SUCCESS)
752 goto out; 905 goto out;
@@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
765 * the correct action when the remote node context is suspended 918 * the correct action when the remote node context is suspended
766 * and later resumed. 919 * and later resumed.
767 */ 920 */
768 sci_remote_node_context_suspend(&idev->rnc, 921 sci_remote_device_suspend(idev,
769 SCI_SOFTWARE_SUSPENSION, NULL, NULL); 922 SCI_SW_SUSPEND_LINKHANG_DETECT);
770 sci_remote_node_context_resume(&idev->rnc, 923
771 sci_remote_device_continue_request, 924 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
772 idev); 925 sci_remote_device_continue_request, idev);
773 926
774 out: 927 out:
775 sci_remote_device_start_request(idev, ireq, status); 928 sci_remote_device_start_request(idev, ireq, status);
@@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
783 if (status != SCI_SUCCESS) 936 if (status != SCI_SUCCESS)
784 return status; 937 return status;
785 938
786 status = sci_remote_node_context_start_task(&idev->rnc, ireq); 939 /* Resume the RNC as needed: */
940 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
941 NULL, NULL);
787 if (status != SCI_SUCCESS) 942 if (status != SCI_SUCCESS)
788 break; 943 break;
789 944
@@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
892 * here should go through isci_remote_device_nuke_requests. 1047 * here should go through isci_remote_device_nuke_requests.
893 * If we hit this condition, we will need a way to complete 1048 * If we hit this condition, we will need a way to complete
894 * io requests in process */ 1049 * io requests in process */
895 BUG_ON(!list_empty(&idev->reqs_in_process)); 1050 BUG_ON(idev->started_request_count > 0);
896 1051
897 sci_remote_device_destruct(idev); 1052 sci_remote_device_destruct(idev);
898 list_del_init(&idev->node); 1053 list_del_init(&idev->node);
@@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
954static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) 1109static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
955{ 1110{
956 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1111 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1112 struct isci_host *ihost = idev->owning_port->owning_controller;
957 1113
958 sci_remote_node_context_suspend( 1114 dev_dbg(&ihost->pdev->dev,
959 &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); 1115 "%s: isci_device = %p\n", __func__, idev);
1116
1117 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
960} 1118}
961 1119
962static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) 1120static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
963{ 1121{
964 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1122 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1123 struct isci_host *ihost = idev->owning_port->owning_controller;
1124
1125 dev_dbg(&ihost->pdev->dev,
1126 "%s: isci_device = %p\n", __func__, idev);
965 1127
966 sci_remote_node_context_resume(&idev->rnc, NULL, NULL); 1128 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
967} 1129}
@@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1113{ 1275{
1114 enum sci_status status; 1276 enum sci_status status;
1115 struct sci_port_properties properties; 1277 struct sci_port_properties properties;
1116 struct domain_device *dev = idev->domain_dev;
1117 1278
1118 sci_remote_device_construct(iport, idev); 1279 sci_remote_device_construct(iport, idev);
1119 1280
1120 /*
1121 * This information is request to determine how many remote node context
1122 * entries will be needed to store the remote node.
1123 */
1124 idev->is_direct_attached = true;
1125
1126 sci_port_get_properties(iport, &properties); 1281 sci_port_get_properties(iport, &properties);
1127 /* Get accurate port width from port's phy mask for a DA device. */ 1282 /* Get accurate port width from port's phy mask for a DA device. */
1128 idev->device_port_width = hweight32(properties.phy_mask); 1283 idev->device_port_width = hweight32(properties.phy_mask);
1129 1284
1130 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1285 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1131 idev, 1286 idev,
1132 &idev->rnc.remote_node_index); 1287 &idev->rnc.remote_node_index);
1133 1288
1134 if (status != SCI_SUCCESS) 1289 if (status != SCI_SUCCESS)
1135 return status; 1290 return status;
1136 1291
1137 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1138 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1139 /* pass */;
1140 else
1141 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1142
1143 idev->connection_rate = sci_port_get_max_allowed_speed(iport); 1292 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1144 1293
1145 return SCI_SUCCESS; 1294 return SCI_SUCCESS;
@@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1171 if (status != SCI_SUCCESS) 1320 if (status != SCI_SUCCESS)
1172 return status; 1321 return status;
1173 1322
1174 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || 1323 /* For SAS-2 the physical link rate is actually a logical link
1175 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1176 /* pass */;
1177 else
1178 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1179
1180 /*
1181 * For SAS-2 the physical link rate is actually a logical link
1182 * rate that incorporates multiplexing. The SCU doesn't 1324 * rate that incorporates multiplexing. The SCU doesn't
1183 * incorporate multiplexing and for the purposes of the 1325 * incorporate multiplexing and for the purposes of the
1184 * connection the logical link rate is that same as the 1326 * connection the logical link rate is that same as the
1185 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay 1327 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1186 * one another, so this code works for both situations. */ 1328 * one another, so this code works for both situations.
1329 */
1187 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), 1330 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1188 dev->linkrate); 1331 dev->linkrate);
1189 1332
@@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1193 return SCI_SUCCESS; 1336 return SCI_SUCCESS;
1194} 1337}
1195 1338
1339enum sci_status sci_remote_device_resume(
1340 struct isci_remote_device *idev,
1341 scics_sds_remote_node_context_callback cb_fn,
1342 void *cb_p)
1343{
1344 enum sci_status status;
1345
1346 status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1347 if (status != SCI_SUCCESS)
1348 dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1349 __func__, status);
1350 return status;
1351}
1352
1353static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1354{
1355 struct isci_remote_device *idev = cbparam;
1356 struct isci_host *ihost = idev->owning_port->owning_controller;
1357 scics_sds_remote_node_context_callback abort_resume_cb =
1358 idev->abort_resume_cb;
1359
1360 dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1361 __func__, abort_resume_cb);
1362
1363 if (abort_resume_cb != NULL) {
1364 idev->abort_resume_cb = NULL;
1365 abort_resume_cb(idev->abort_resume_cbparam);
1366 }
1367 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1368 wake_up(&ihost->eventq);
1369}
1370
1371static bool isci_remote_device_test_resume_done(
1372 struct isci_host *ihost,
1373 struct isci_remote_device *idev)
1374{
1375 unsigned long flags;
1376 bool done;
1377
1378 spin_lock_irqsave(&ihost->scic_lock, flags);
1379 done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1380 || test_bit(IDEV_STOP_PENDING, &idev->flags)
1381 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
1382 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1383
1384 return done;
1385}
1386
1387void isci_remote_device_wait_for_resume_from_abort(
1388 struct isci_host *ihost,
1389 struct isci_remote_device *idev)
1390{
1391 dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1392 __func__, idev);
1393
1394 #define MAX_RESUME_MSECS 10000
1395 if (!wait_event_timeout(ihost->eventq,
1396 isci_remote_device_test_resume_done(ihost, idev),
1397 msecs_to_jiffies(MAX_RESUME_MSECS))) {
1398
1399 dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1400 "resume: %p\n", __func__, idev);
1401 }
1402 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1403
1404 dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1405 __func__, idev);
1406}
1407
1408enum sci_status isci_remote_device_resume_from_abort(
1409 struct isci_host *ihost,
1410 struct isci_remote_device *idev)
1411{
1412 unsigned long flags;
1413 enum sci_status status = SCI_SUCCESS;
1414 int destroyed;
1415
1416 spin_lock_irqsave(&ihost->scic_lock, flags);
1417 /* Preserve any current resume callbacks, for instance from other
1418 * resumptions.
1419 */
1420 idev->abort_resume_cb = idev->rnc.user_callback;
1421 idev->abort_resume_cbparam = idev->rnc.user_cookie;
1422 set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1423 clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1424 destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1425 if (!destroyed)
1426 status = sci_remote_device_resume(
1427 idev, isci_remote_device_resume_from_abort_complete,
1428 idev);
1429 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1430 if (!destroyed && (status == SCI_SUCCESS))
1431 isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1432 else
1433 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1434
1435 return status;
1436}
1437
1196/** 1438/**
1197 * sci_remote_device_start() - This method will start the supplied remote 1439 * sci_remote_device_start() - This method will start the supplied remote
1198 * device. This method enables normal IO requests to flow through to the 1440 * device. This method enables normal IO requests to flow through to the
@@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1207 * the device when there have been no phys added to it. 1449 * the device when there have been no phys added to it.
1208 */ 1450 */
1209static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, 1451static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1210 u32 timeout) 1452 u32 timeout)
1211{ 1453{
1212 struct sci_base_state_machine *sm = &idev->sm; 1454 struct sci_base_state_machine *sm = &idev->sm;
1213 enum sci_remote_device_states state = sm->current_state_id; 1455 enum sci_remote_device_states state = sm->current_state_id;
@@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1219 return SCI_FAILURE_INVALID_STATE; 1461 return SCI_FAILURE_INVALID_STATE;
1220 } 1462 }
1221 1463
1222 status = sci_remote_node_context_resume(&idev->rnc, 1464 status = sci_remote_device_resume(idev, remote_device_resume_done,
1223 remote_device_resume_done, 1465 idev);
1224 idev);
1225 if (status != SCI_SUCCESS) 1466 if (status != SCI_SUCCESS)
1226 return status; 1467 return status;
1227 1468
@@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1259 return status; 1500 return status;
1260} 1501}
1261 1502
1262void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
1263{
1264 DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
1265
1266 dev_dbg(&ihost->pdev->dev,
1267 "%s: idev = %p\n", __func__, idev);
1268
1269 /* Cleanup all requests pending for this device. */
1270 isci_terminate_pending_requests(ihost, idev);
1271
1272 dev_dbg(&ihost->pdev->dev,
1273 "%s: idev = %p, done\n", __func__, idev);
1274}
1275
1276/** 1503/**
1277 * This function builds the isci_remote_device when a libsas dev_found message 1504 * This function builds the isci_remote_device when a libsas dev_found message
1278 * is received. 1505 * is received.
@@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1297 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); 1524 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1298 return NULL; 1525 return NULL;
1299 } 1526 }
1300
1301 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
1302 return NULL;
1303
1304 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) 1527 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1305 return NULL; 1528 return NULL;
1306 1529
@@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
1342 spin_lock_irqsave(&ihost->scic_lock, flags); 1565 spin_lock_irqsave(&ihost->scic_lock, flags);
1343 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ 1566 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1344 set_bit(IDEV_GONE, &idev->flags); 1567 set_bit(IDEV_GONE, &idev->flags);
1345 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1346
1347 /* Kill all outstanding requests. */
1348 isci_remote_device_nuke_requests(ihost, idev);
1349 1568
1350 set_bit(IDEV_STOP_PENDING, &idev->flags); 1569 set_bit(IDEV_STOP_PENDING, &idev->flags);
1351
1352 spin_lock_irqsave(&ihost->scic_lock, flags);
1353 status = sci_remote_device_stop(idev, 50); 1570 status = sci_remote_device_stop(idev, 50);
1354 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1571 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1355 1572
@@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
1359 else 1576 else
1360 wait_for_device_stop(ihost, idev); 1577 wait_for_device_stop(ihost, idev);
1361 1578
1579 dev_dbg(&ihost->pdev->dev,
1580 "%s: isci_device = %p, waiting done.\n", __func__, idev);
1581
1362 return status; 1582 return status;
1363} 1583}
1364 1584
@@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev)
1434 1654
1435 return status == SCI_SUCCESS ? 0 : -ENODEV; 1655 return status == SCI_SUCCESS ? 0 : -ENODEV;
1436} 1656}
1657
1658enum sci_status isci_remote_device_suspend_terminate(
1659 struct isci_host *ihost,
1660 struct isci_remote_device *idev,
1661 struct isci_request *ireq)
1662{
1663 unsigned long flags;
1664 enum sci_status status;
1665
1666 /* Put the device into suspension. */
1667 spin_lock_irqsave(&ihost->scic_lock, flags);
1668 set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1669 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1670 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1671
1672 /* Terminate and wait for the completions. */
1673 status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1674 if (status != SCI_SUCCESS)
1675 dev_dbg(&ihost->pdev->dev,
1676 "%s: isci_remote_device_terminate_requests(%p) "
1677 "returned %d!\n",
1678 __func__, idev, status);
1679
1680 /* NOTE: RNC resumption is left to the caller! */
1681 return status;
1682}
1683
1684int isci_remote_device_is_safe_to_abort(
1685 struct isci_remote_device *idev)
1686{
1687 return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1688}
1689
1690enum sci_status sci_remote_device_abort_requests_pending_abort(
1691 struct isci_remote_device *idev)
1692{
1693 return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1694}
1695
1696enum sci_status isci_remote_device_reset_complete(
1697 struct isci_host *ihost,
1698 struct isci_remote_device *idev)
1699{
1700 unsigned long flags;
1701 enum sci_status status;
1702
1703 spin_lock_irqsave(&ihost->scic_lock, flags);
1704 status = sci_remote_device_reset_complete(idev);
1705 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706
1707 return status;
1708}
1709
1710void isci_dev_set_hang_detection_timeout(
1711 struct isci_remote_device *idev,
1712 u32 timeout)
1713{
1714 if (dev_is_sata(idev->domain_dev)) {
1715 if (timeout) {
1716 if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1717 &idev->flags))
1718 return; /* Already enabled. */
1719 } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1720 &idev->flags))
1721 return; /* Not enabled. */
1722
1723 sci_port_set_hang_detection_timeout(idev->owning_port,
1724 timeout);
1725 }
1726}
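[Editor's note] isci_remote_device_terminate_requests() above snapshots the RNC suspend count (treating an already-suspended RNC as needing no further suspension), issues the terminations, and then waits until the count advances, the RNC is being destroyed, or the started-request count drains. Below is a compact userspace model of just those wait predicates, with plain fields standing in for the driver's lock-protected state; it is a sketch, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct model_rnc {
	unsigned int suspend_count;
	bool suspended;
	bool being_destroyed;
};

struct model_dev {
	struct model_rnc rnc;
	unsigned int started_request_count;
};

/* Mirrors isci_compare_suspendcount(): a new suspension was observed, or the
 * RNC is going away entirely. */
static bool suspend_progressed(const struct model_dev *dev, unsigned int snapshot)
{
	return dev->rnc.suspend_count != snapshot || dev->rnc.being_destroyed;
}

/* Mirrors isci_check_devempty(): suspension seen and all requests drained. */
static bool device_drained(const struct model_dev *dev, unsigned int snapshot)
{
	return suspend_progressed(dev, snapshot) && dev->started_request_count == 0;
}

int main(void)
{
	struct model_dev dev = {
		.rnc = { .suspend_count = 3 },
		.started_request_count = 2,
	};
	/* If the RNC is already suspended, wait on "any" suspension (snapshot 0). */
	unsigned int snapshot = dev.rnc.suspended ? 0 : dev.rnc.suspend_count;

	printf("drained before events: %d\n", device_drained(&dev, snapshot));
	dev.rnc.suspend_count++;           /* hardware reports the suspension */
	dev.started_request_count = 0;     /* terminations complete */
	printf("drained after events:  %d\n", device_drained(&dev, snapshot));
	return 0;
}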
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 58637ee08f55..7674caae1d88 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,27 +85,38 @@ struct isci_remote_device {
85 #define IDEV_GONE 3 85 #define IDEV_GONE 3
86 #define IDEV_IO_READY 4 86 #define IDEV_IO_READY 4
87 #define IDEV_IO_NCQERROR 5 87 #define IDEV_IO_NCQERROR 5
88 #define IDEV_RNC_LLHANG_ENABLED 6
89 #define IDEV_ABORT_PATH_ACTIVE 7
90 #define IDEV_ABORT_PATH_RESUME_PENDING 8
88 unsigned long flags; 91 unsigned long flags;
89 struct kref kref; 92 struct kref kref;
90 struct isci_port *isci_port; 93 struct isci_port *isci_port;
91 struct domain_device *domain_dev; 94 struct domain_device *domain_dev;
92 struct list_head node; 95 struct list_head node;
93 struct list_head reqs_in_process;
94 struct sci_base_state_machine sm; 96 struct sci_base_state_machine sm;
95 u32 device_port_width; 97 u32 device_port_width;
96 enum sas_linkrate connection_rate; 98 enum sas_linkrate connection_rate;
97 bool is_direct_attached;
98 struct isci_port *owning_port; 99 struct isci_port *owning_port;
99 struct sci_remote_node_context rnc; 100 struct sci_remote_node_context rnc;
100 /* XXX unify with device reference counting and delete */ 101 /* XXX unify with device reference counting and delete */
101 u32 started_request_count; 102 u32 started_request_count;
102 struct isci_request *working_request; 103 struct isci_request *working_request;
103 u32 not_ready_reason; 104 u32 not_ready_reason;
105 scics_sds_remote_node_context_callback abort_resume_cb;
106 void *abort_resume_cbparam;
104}; 107};
105 108
106#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 109#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
107 110
108/* device reference routines must be called under sci_lock */ 111/* device reference routines must be called under sci_lock */
112static inline struct isci_remote_device *isci_get_device(
113 struct isci_remote_device *idev)
114{
115 if (idev)
116 kref_get(&idev->kref);
117 return idev;
118}
119
109static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) 120static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
110{ 121{
111 struct isci_remote_device *idev = dev->lldd_dev; 122 struct isci_remote_device *idev = dev->lldd_dev;
@@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_
302 idev->started_request_count--; 313 idev->started_request_count--;
303} 314}
304 315
316void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
317
305enum sci_status sci_remote_device_frame_handler( 318enum sci_status sci_remote_device_frame_handler(
306 struct isci_remote_device *idev, 319 struct isci_remote_device *idev,
307 u32 frame_index); 320 u32 frame_index);
@@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io(
325 struct isci_remote_device *idev, 338 struct isci_remote_device *idev,
326 struct isci_request *ireq); 339 struct isci_request *ireq);
327 340
328enum sci_status sci_remote_device_suspend(
329 struct isci_remote_device *idev,
330 u32 suspend_type);
331
332void sci_remote_device_post_request( 341void sci_remote_device_post_request(
333 struct isci_remote_device *idev, 342 struct isci_remote_device *idev,
334 u32 request); 343 u32 request);
335 344
345enum sci_status sci_remote_device_terminate_requests(
346 struct isci_remote_device *idev);
347
348int isci_remote_device_is_safe_to_abort(
349 struct isci_remote_device *idev);
350
351enum sci_status
352sci_remote_device_abort_requests_pending_abort(
353 struct isci_remote_device *idev);
354
355enum sci_status isci_remote_device_suspend(
356 struct isci_host *ihost,
357 struct isci_remote_device *idev);
358
359enum sci_status sci_remote_device_resume(
360 struct isci_remote_device *idev,
361 scics_sds_remote_node_context_callback cb_fn,
362 void *cb_p);
363
364enum sci_status isci_remote_device_resume_from_abort(
365 struct isci_host *ihost,
366 struct isci_remote_device *idev);
367
368enum sci_status isci_remote_device_reset(
369 struct isci_host *ihost,
370 struct isci_remote_device *idev);
371
372enum sci_status isci_remote_device_reset_complete(
373 struct isci_host *ihost,
374 struct isci_remote_device *idev);
375
376enum sci_status isci_remote_device_suspend_terminate(
377 struct isci_host *ihost,
378 struct isci_remote_device *idev,
379 struct isci_request *ireq);
380
381enum sci_status isci_remote_device_terminate_requests(
382 struct isci_host *ihost,
383 struct isci_remote_device *idev,
384 struct isci_request *ireq);
385enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
386 enum sci_remote_node_suspension_reasons reason);
336#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ 387#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 3a9463481f38..1910100638a2 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -52,7 +52,7 @@
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55#include <scsi/sas_ata.h>
56#include "host.h" 56#include "host.h"
57#include "isci.h" 57#include "isci.h"
58#include "remote_device.h" 58#include "remote_device.h"
@@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready(
90 return false; 90 return false;
91} 91}
92 92
93bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
94{
95 u32 current_state = sci_rnc->sm.current_state_id;
96
97 if (current_state == SCI_RNC_TX_RX_SUSPENDED)
98 return true;
99 return false;
100}
101
93static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) 102static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
94{ 103{
95 if (id < ihost->remote_node_entries && 104 if (id < ihost->remote_node_entries &&
@@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
131 140
132 rnc->ssp.arbitration_wait_time = 0; 141 rnc->ssp.arbitration_wait_time = 0;
133 142
134 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 143 if (dev_is_sata(dev)) {
135 rnc->ssp.connection_occupancy_timeout = 144 rnc->ssp.connection_occupancy_timeout =
136 ihost->user_parameters.stp_max_occupancy_timeout; 145 ihost->user_parameters.stp_max_occupancy_timeout;
137 rnc->ssp.connection_inactivity_timeout = 146 rnc->ssp.connection_inactivity_timeout =
@@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
151 rnc->ssp.oaf_source_zone_group = 0; 160 rnc->ssp.oaf_source_zone_group = 0;
152 rnc->ssp.oaf_more_compatibility_features = 0; 161 rnc->ssp.oaf_more_compatibility_features = 0;
153} 162}
154
155/** 163/**
156 * 164 *
157 * @sci_rnc: 165 * @sci_rnc:
@@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
165static void sci_remote_node_context_setup_to_resume( 173static void sci_remote_node_context_setup_to_resume(
166 struct sci_remote_node_context *sci_rnc, 174 struct sci_remote_node_context *sci_rnc,
167 scics_sds_remote_node_context_callback callback, 175 scics_sds_remote_node_context_callback callback,
168 void *callback_parameter) 176 void *callback_parameter,
177 enum sci_remote_node_context_destination_state dest_param)
169{ 178{
170 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { 179 if (sci_rnc->destination_state != RNC_DEST_FINAL) {
171 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; 180 sci_rnc->destination_state = dest_param;
172 sci_rnc->user_callback = callback; 181 if (callback != NULL) {
173 sci_rnc->user_cookie = callback_parameter; 182 sci_rnc->user_callback = callback;
183 sci_rnc->user_cookie = callback_parameter;
184 }
174 } 185 }
175} 186}
176 187
177static void sci_remote_node_context_setup_to_destory( 188static void sci_remote_node_context_setup_to_destroy(
178 struct sci_remote_node_context *sci_rnc, 189 struct sci_remote_node_context *sci_rnc,
179 scics_sds_remote_node_context_callback callback, 190 scics_sds_remote_node_context_callback callback,
180 void *callback_parameter) 191 void *callback_parameter)
181{ 192{
182 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; 193 struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
194
195 sci_rnc->destination_state = RNC_DEST_FINAL;
183 sci_rnc->user_callback = callback; 196 sci_rnc->user_callback = callback;
184 sci_rnc->user_cookie = callback_parameter; 197 sci_rnc->user_cookie = callback_parameter;
198
199 wake_up(&ihost->eventq);
185} 200}
186 201
187/** 202/**
@@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user(
203 218
204static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) 219static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
205{ 220{
206 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 221 switch (rnc->destination_state) {
222 case RNC_DEST_READY:
223 case RNC_DEST_SUSPENDED_RESUME:
224 rnc->destination_state = RNC_DEST_READY;
225 /* Fall through... */
226 case RNC_DEST_FINAL:
207 sci_remote_node_context_resume(rnc, rnc->user_callback, 227 sci_remote_node_context_resume(rnc, rnc->user_callback,
208 rnc->user_cookie); 228 rnc->user_cookie);
229 break;
230 default:
231 rnc->destination_state = RNC_DEST_UNSPECIFIED;
232 break;
233 }
209} 234}
210 235
211static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) 236static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
@@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no
219 244
220 rnc_buffer->ssp.is_valid = true; 245 rnc_buffer->ssp.is_valid = true;
221 246
222 if (!idev->is_direct_attached && 247 if (dev_is_sata(dev) && dev->parent) {
223 (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
224 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); 248 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
225 } else { 249 } else {
226 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); 250 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
227 251
228 if (idev->is_direct_attached) 252 if (!dev->parent)
229 sci_port_setup_transports(idev->owning_port, 253 sci_port_setup_transports(idev->owning_port,
230 sci_rnc->remote_node_index); 254 sci_rnc->remote_node_index);
231 } 255 }
@@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_
248static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) 272static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
249{ 273{
250 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 274 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
275 struct isci_remote_device *idev = rnc_to_dev(rnc);
276 struct isci_host *ihost = idev->owning_port->owning_controller;
251 277
252 /* Check to see if we have gotten back to the initial state because 278 /* Check to see if we have gotten back to the initial state because
253 * someone requested to destroy the remote node context object. 279 * someone requested to destroy the remote node context object.
254 */ 280 */
255 if (sm->previous_state_id == SCI_RNC_INVALIDATING) { 281 if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
256 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 282 rnc->destination_state = RNC_DEST_UNSPECIFIED;
257 sci_remote_node_context_notify_user(rnc); 283 sci_remote_node_context_notify_user(rnc);
284
285 smp_wmb();
286 wake_up(&ihost->eventq);
258 } 287 }
259} 288}
260 289
@@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta
269{ 298{
270 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 299 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
271 300
301 /* Terminate all outstanding requests. */
302 sci_remote_device_terminate_requests(rnc_to_dev(rnc));
272 sci_remote_node_context_invalidate_context_buffer(rnc); 303 sci_remote_node_context_invalidate_context_buffer(rnc);
273} 304}
274 305
@@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
287 * resume because of a target reset we also need to update 318 * resume because of a target reset we also need to update
288 * the STPTLDARNI register with the RNi of the device 319 * the STPTLDARNI register with the RNi of the device
289 */ 320 */
290 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && 321 if (dev_is_sata(dev) && !dev->parent)
291 idev->is_direct_attached) 322 sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
292 sci_port_setup_transports(idev->owning_port,
293 rnc->remote_node_index);
294 323
295 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); 324 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
296} 325}
@@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
298static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) 327static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
299{ 328{
300 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 329 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
330 enum sci_remote_node_context_destination_state dest_select;
331 int tell_user = 1;
332
333 dest_select = rnc->destination_state;
334 rnc->destination_state = RNC_DEST_UNSPECIFIED;
301 335
302 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 336 if ((dest_select == RNC_DEST_SUSPENDED) ||
337 (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
338 sci_remote_node_context_suspend(
339 rnc, rnc->suspend_reason,
340 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
303 341
304 if (rnc->user_callback) 342 if (dest_select == RNC_DEST_SUSPENDED_RESUME)
343 tell_user = 0; /* Wait until ready again. */
344 }
345 if (tell_user)
305 sci_remote_node_context_notify_user(rnc); 346 sci_remote_node_context_notify_user(rnc);
306} 347}
307 348
@@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta
315static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) 356static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
316{ 357{
317 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 358 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
359 struct isci_remote_device *idev = rnc_to_dev(rnc);
360 struct isci_host *ihost = idev->owning_port->owning_controller;
361 u32 new_count = rnc->suspend_count + 1;
362
363 if (new_count == 0)
364 rnc->suspend_count = 1;
365 else
366 rnc->suspend_count = new_count;
367 smp_wmb();
318 368
369 /* Terminate outstanding requests pending abort. */
370 sci_remote_device_abort_requests_pending_abort(idev);
371
372 wake_up(&ihost->eventq);
319 sci_remote_node_context_continue_state_transitions(rnc); 373 sci_remote_node_context_continue_state_transitions(rnc);
320} 374}
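
Note on the TX/RX-suspended entry above: the new suspend_count acts as a generation counter that waiters on ihost->eventq can compare against, and the increment skips zero so that a wrapped counter never reads as "never suspended". A small sketch of that wrap-safe bump in plain C, with the kernel's smp_wmb()/wake_up() pair left out; the intent attributed to the skip is an assumption, not stated in the patch.

#include <stdint.h>
#include <stdio.h>

/* Bump a 32-bit generation counter, never letting it land back on 0.
 * Callers that cached an old value can then always detect that at
 * least one new suspension happened, even across wrap-around.
 */
static uint32_t bump_suspend_count(uint32_t count)
{
	uint32_t next = count + 1;

	return next ? next : 1;
}

int main(void)
{
	uint32_t c = UINT32_MAX;        /* about to wrap */

	c = bump_suspend_count(c);
	printf("after wrap: %u\n", c);  /* prints 1, not 0 */
	c = bump_suspend_count(5);
	printf("normal: %u\n", c);      /* prints 6 */
	return 0;
}
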
321 375
376static void sci_remote_node_context_await_suspend_state_exit(
377 struct sci_base_state_machine *sm)
378{
379 struct sci_remote_node_context *rnc
380 = container_of(sm, typeof(*rnc), sm);
381 struct isci_remote_device *idev = rnc_to_dev(rnc);
382
383 if (dev_is_sata(idev->domain_dev))
384 isci_dev_set_hang_detection_timeout(idev, 0);
385}
386
322static const struct sci_base_state sci_remote_node_context_state_table[] = { 387static const struct sci_base_state sci_remote_node_context_state_table[] = {
323 [SCI_RNC_INITIAL] = { 388 [SCI_RNC_INITIAL] = {
324 .enter_state = sci_remote_node_context_initial_state_enter, 389 .enter_state = sci_remote_node_context_initial_state_enter,
@@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = {
341 [SCI_RNC_TX_RX_SUSPENDED] = { 406 [SCI_RNC_TX_RX_SUSPENDED] = {
342 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, 407 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
343 }, 408 },
344 [SCI_RNC_AWAIT_SUSPENSION] = { }, 409 [SCI_RNC_AWAIT_SUSPENSION] = {
410 .exit_state = sci_remote_node_context_await_suspend_state_exit,
411 },
345}; 412};
346 413
347void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, 414void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
@@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
350 memset(rnc, 0, sizeof(struct sci_remote_node_context)); 417 memset(rnc, 0, sizeof(struct sci_remote_node_context));
351 418
352 rnc->remote_node_index = remote_node_index; 419 rnc->remote_node_index = remote_node_index;
353 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 420 rnc->destination_state = RNC_DEST_UNSPECIFIED;
354 421
355 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); 422 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
356} 423}
@@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
359 u32 event_code) 426 u32 event_code)
360{ 427{
361 enum scis_sds_remote_node_context_states state; 428 enum scis_sds_remote_node_context_states state;
429 u32 next_state;
362 430
363 state = sci_rnc->sm.current_state_id; 431 state = sci_rnc->sm.current_state_id;
364 switch (state) { 432 switch (state) {
@@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
373 break; 441 break;
374 case SCI_RNC_INVALIDATING: 442 case SCI_RNC_INVALIDATING:
375 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { 443 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
376 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) 444 if (sci_rnc->destination_state == RNC_DEST_FINAL)
377 state = SCI_RNC_INITIAL; 445 next_state = SCI_RNC_INITIAL;
378 else 446 else
379 state = SCI_RNC_POSTING; 447 next_state = SCI_RNC_POSTING;
380 sci_change_state(&sci_rnc->sm, state); 448 sci_change_state(&sci_rnc->sm, next_state);
381 } else { 449 } else {
382 switch (scu_get_event_type(event_code)) { 450 switch (scu_get_event_type(event_code)) {
383 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 451 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
384 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 452 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
385 /* We really don't care if the hardware is going to suspend 453 /* We really don't care if the hardware is going to suspend
386 * the device since it's being invalidated anyway */ 454 * the device since it's being invalidated anyway */
387 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 455 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
388 "%s: SCIC Remote Node Context 0x%p was " 456 "%s: SCIC Remote Node Context 0x%p was "
389 "suspeneded by hardware while being " 457 "suspeneded by hardware while being "
390 "invalidated.\n", __func__, sci_rnc); 458 "invalidated.\n", __func__, sci_rnc);
@@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
403 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 471 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
404 /* We really don't care if the hardware is going to suspend 472 /* We really don't care if the hardware is going to suspend
405 * the device since it's being resumed anyway */ 473 * the device since it's being resumed anyway */
406 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 474 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
407 "%s: SCIC Remote Node Context 0x%p was " 475 "%s: SCIC Remote Node Context 0x%p was "
408 "suspeneded by hardware while being resumed.\n", 476 "suspeneded by hardware while being resumed.\n",
409 __func__, sci_rnc); 477 __func__, sci_rnc);
@@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
417 switch (scu_get_event_type(event_code)) { 485 switch (scu_get_event_type(event_code)) {
418 case SCU_EVENT_TL_RNC_SUSPEND_TX: 486 case SCU_EVENT_TL_RNC_SUSPEND_TX:
419 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); 487 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
420 sci_rnc->suspension_code = scu_get_event_specifier(event_code); 488 sci_rnc->suspend_type = scu_get_event_type(event_code);
421 break; 489 break;
422 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 490 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
423 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); 491 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
424 sci_rnc->suspension_code = scu_get_event_specifier(event_code); 492 sci_rnc->suspend_type = scu_get_event_type(event_code);
425 break; 493 break;
426 default: 494 default:
427 goto out; 495 goto out;
@@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
430 case SCI_RNC_AWAIT_SUSPENSION: 498 case SCI_RNC_AWAIT_SUSPENSION:
431 switch (scu_get_event_type(event_code)) { 499 switch (scu_get_event_type(event_code)) {
432 case SCU_EVENT_TL_RNC_SUSPEND_TX: 500 case SCU_EVENT_TL_RNC_SUSPEND_TX:
433 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); 501 next_state = SCI_RNC_TX_SUSPENDED;
434 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
435 break; 502 break;
436 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 503 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
437 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); 504 next_state = SCI_RNC_TX_RX_SUSPENDED;
438 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
439 break; 505 break;
440 default: 506 default:
441 goto out; 507 goto out;
442 } 508 }
509 if (sci_rnc->suspend_type == scu_get_event_type(event_code))
510 sci_change_state(&sci_rnc->sm, next_state);
443 break; 511 break;
444 default: 512 default:
445 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 513 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
446 "%s: invalid state %d\n", __func__, state); 514 "%s: invalid state: %s\n", __func__,
515 rnc_state_name(state));
447 return SCI_FAILURE_INVALID_STATE; 516 return SCI_FAILURE_INVALID_STATE;
448 } 517 }
449 return SCI_SUCCESS; 518 return SCI_SUCCESS;
450 519
451 out: 520 out:
452 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 521 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
453 "%s: code: %#x state: %d\n", __func__, event_code, state); 522 "%s: code: %#x state: %s\n", __func__, event_code,
523 rnc_state_name(state));
454 return SCI_FAILURE; 524 return SCI_FAILURE;
455 525
456} 526}
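
Note on the event handler above: the AWAIT_SUSPENSION arm now remembers which suspension event was requested (suspend_type) and only leaves the wait state when the hardware reports that same event type; a mismatched suspension flavor is consumed without a transition. A standalone sketch of that gate, using invented event and state names:

#include <stdio.h>

enum sk_event { EV_SUSPEND_TX, EV_SUSPEND_TX_RX, EV_OTHER };
enum sk_state { ST_AWAIT_SUSPENSION, ST_TX_SUSPENDED, ST_TX_RX_SUSPENDED };

struct sk_rnc {
	enum sk_state state;
	enum sk_event expected;  /* the suspension the driver asked for */
};

/* Returns 0 for a suspension event (handled), -1 for anything else. */
static int sk_await_suspension_event(struct sk_rnc *rnc, enum sk_event ev)
{
	enum sk_state next;

	switch (ev) {
	case EV_SUSPEND_TX:
		next = ST_TX_SUSPENDED;
		break;
	case EV_SUSPEND_TX_RX:
		next = ST_TX_RX_SUSPENDED;
		break;
	default:
		return -1;
	}
	/* Only transition when this is the suspension we were waiting for;
	 * an unexpected flavor leaves the state machine where it is.
	 */
	if (rnc->expected == ev)
		rnc->state = next;
	return 0;
}

int main(void)
{
	struct sk_rnc rnc = { ST_AWAIT_SUSPENSION, EV_SUSPEND_TX_RX };

	sk_await_suspension_event(&rnc, EV_SUSPEND_TX);     /* ignored  */
	printf("state after TX event: %d\n", rnc.state);
	sk_await_suspension_event(&rnc, EV_SUSPEND_TX_RX);  /* matches  */
	printf("state after TX/RX event: %d\n", rnc.state);
	return 0;
}
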
@@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
464 state = sci_rnc->sm.current_state_id; 534 state = sci_rnc->sm.current_state_id;
465 switch (state) { 535 switch (state) {
466 case SCI_RNC_INVALIDATING: 536 case SCI_RNC_INVALIDATING:
467 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); 537 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
468 return SCI_SUCCESS; 538 return SCI_SUCCESS;
469 case SCI_RNC_POSTING: 539 case SCI_RNC_POSTING:
470 case SCI_RNC_RESUMING: 540 case SCI_RNC_RESUMING:
471 case SCI_RNC_READY: 541 case SCI_RNC_READY:
472 case SCI_RNC_TX_SUSPENDED: 542 case SCI_RNC_TX_SUSPENDED:
473 case SCI_RNC_TX_RX_SUSPENDED: 543 case SCI_RNC_TX_RX_SUSPENDED:
474 case SCI_RNC_AWAIT_SUSPENSION: 544 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
475 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
476 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 545 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
477 return SCI_SUCCESS; 546 return SCI_SUCCESS;
547 case SCI_RNC_AWAIT_SUSPENSION:
548 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
549 return SCI_SUCCESS;
478 case SCI_RNC_INITIAL: 550 case SCI_RNC_INITIAL:
479 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 551 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
480 "%s: invalid state %d\n", __func__, state); 552 "%s: invalid state: %s\n", __func__,
553 rnc_state_name(state));
481 /* We have decided that the destruct request on the remote node context 554 /* We have decided that the destruct request on the remote node context
482 * cannot fail since it is either in the initial/destroyed state or it 555 * cannot fail since it is either in the initial/destroyed state or it
483 * can be destroyed. 556 * can be destroyed.
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
485 return SCI_SUCCESS; 558 return SCI_SUCCESS;
486 default: 559 default:
487 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 560 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
488 "%s: invalid state %d\n", __func__, state); 561 "%s: invalid state %s\n", __func__,
562 rnc_state_name(state));
489 return SCI_FAILURE_INVALID_STATE; 563 return SCI_FAILURE_INVALID_STATE;
490 } 564 }
491} 565}
492 566
493enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 567enum sci_status sci_remote_node_context_suspend(
494 u32 suspend_type, 568 struct sci_remote_node_context *sci_rnc,
495 scics_sds_remote_node_context_callback cb_fn, 569 enum sci_remote_node_suspension_reasons suspend_reason,
496 void *cb_p) 570 u32 suspend_type)
497{ 571{
498 enum scis_sds_remote_node_context_states state; 572 enum scis_sds_remote_node_context_states state
573 = sci_rnc->sm.current_state_id;
574 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
575 enum sci_status status = SCI_FAILURE_INVALID_STATE;
576 enum sci_remote_node_context_destination_state dest_param =
577 RNC_DEST_UNSPECIFIED;
578
579 dev_dbg(scirdev_to_dev(idev),
580 "%s: current state %s, current suspend_type %x dest state %d,"
581 " arg suspend_reason %d, arg suspend_type %x",
582 __func__, rnc_state_name(state), sci_rnc->suspend_type,
583 sci_rnc->destination_state, suspend_reason,
584 suspend_type);
585
586 /* Disable automatic state continuations if explicitly suspending. */
587 if ((suspend_reason == SCI_HW_SUSPEND) ||
588 (sci_rnc->destination_state == RNC_DEST_FINAL))
589 dest_param = sci_rnc->destination_state;
499 590
500 state = sci_rnc->sm.current_state_id; 591 switch (state) {
501 if (state != SCI_RNC_READY) { 592 case SCI_RNC_READY:
593 break;
594 case SCI_RNC_INVALIDATING:
595 if (sci_rnc->destination_state == RNC_DEST_FINAL) {
596 dev_warn(scirdev_to_dev(idev),
597 "%s: already destroying %p\n",
598 __func__, sci_rnc);
599 return SCI_FAILURE_INVALID_STATE;
600 }
601 /* Fall through and handle like SCI_RNC_POSTING */
602 case SCI_RNC_RESUMING:
603 /* Fall through and handle like SCI_RNC_POSTING */
604 case SCI_RNC_POSTING:
605 /* Set the destination state to AWAIT - this signals the
606 * entry into the SCI_RNC_READY state that a suspension
607 * needs to be done immediately.
608 */
609 if (sci_rnc->destination_state != RNC_DEST_FINAL)
610 sci_rnc->destination_state = RNC_DEST_SUSPENDED;
611 sci_rnc->suspend_type = suspend_type;
612 sci_rnc->suspend_reason = suspend_reason;
613 return SCI_SUCCESS;
614
615 case SCI_RNC_TX_SUSPENDED:
616 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
617 status = SCI_SUCCESS;
618 break;
619 case SCI_RNC_TX_RX_SUSPENDED:
620 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
621 status = SCI_SUCCESS;
622 break;
623 case SCI_RNC_AWAIT_SUSPENSION:
624 if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
625 || (suspend_type == sci_rnc->suspend_type))
626 return SCI_SUCCESS;
627 break;
628 default:
502 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 629 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
503 "%s: invalid state %d\n", __func__, state); 630 "%s: invalid state %s\n", __func__,
631 rnc_state_name(state));
504 return SCI_FAILURE_INVALID_STATE; 632 return SCI_FAILURE_INVALID_STATE;
505 } 633 }
634 sci_rnc->destination_state = dest_param;
635 sci_rnc->suspend_type = suspend_type;
636 sci_rnc->suspend_reason = suspend_reason;
637
638 if (status == SCI_SUCCESS) { /* Already in the destination state? */
639 struct isci_host *ihost = idev->owning_port->owning_controller;
640
641 wake_up_all(&ihost->eventq); /* Let observers look. */
642 return SCI_SUCCESS;
643 }
644 if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
645 (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
506 646
507 sci_rnc->user_callback = cb_fn; 647 if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
508 sci_rnc->user_cookie = cb_p; 648 isci_dev_set_hang_detection_timeout(idev, 0x00000001);
509 sci_rnc->suspension_code = suspend_type;
510 649
511 if (suspend_type == SCI_SOFTWARE_SUSPENSION) { 650 sci_remote_device_post_request(
512 sci_remote_device_post_request(rnc_to_dev(sci_rnc), 651 idev, SCI_SOFTWARE_SUSPEND_CMD);
513 SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
514 } 652 }
653 if (state != SCI_RNC_AWAIT_SUSPENSION)
654 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
515 655
516 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
517 return SCI_SUCCESS; 656 return SCI_SUCCESS;
518} 657}
519 658
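
Note on the reworked suspend routine above: beyond recording the destination state, the suspend reason decides what actually gets done. Hardware-initiated suspensions are only bookkeeping, software suspensions post SCI_SOFTWARE_SUSPEND_CMD, and the link-hang variant additionally arms the SATA hang-detection timeout. A rough sketch of that reason-to-action mapping with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative reasons, mirroring the three cases the new API takes. */
enum sk_reason { SK_HW_SUSPEND, SK_SW_SUSPEND_NORMAL, SK_SW_SUSPEND_LINKHANG };

struct sk_actions {
	bool post_suspend_cmd;  /* tell the hardware to suspend the node   */
	bool arm_hang_detect;   /* shorten the SATA hang-detection timeout */
};

/* Decide what a suspend request has to do besides bookkeeping:
 * hardware-initiated suspensions are only recorded, software ones
 * must be posted, and the link-hang flavor also arms hang detection.
 */
static struct sk_actions sk_plan_suspend(enum sk_reason reason)
{
	struct sk_actions act = { false, false };

	if (reason == SK_SW_SUSPEND_NORMAL || reason == SK_SW_SUSPEND_LINKHANG) {
		act.post_suspend_cmd = true;
		if (reason == SK_SW_SUSPEND_LINKHANG)
			act.arm_hang_detect = true;
	}
	return act;
}

int main(void)
{
	struct sk_actions a = sk_plan_suspend(SK_SW_SUSPEND_LINKHANG);

	printf("post=%d hang_detect=%d\n", a.post_suspend_cmd, a.arm_hang_detect);
	return 0;
}
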
@@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
522 void *cb_p) 661 void *cb_p)
523{ 662{
524 enum scis_sds_remote_node_context_states state; 663 enum scis_sds_remote_node_context_states state;
664 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
525 665
526 state = sci_rnc->sm.current_state_id; 666 state = sci_rnc->sm.current_state_id;
667 dev_dbg(scirdev_to_dev(idev),
668 "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
669 "dev resume path %s\n",
670 __func__, rnc_state_name(state), cb_fn, cb_p,
671 sci_rnc->destination_state,
672 test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
673 ? "<abort active>" : "<normal>");
674
527 switch (state) { 675 switch (state) {
528 case SCI_RNC_INITIAL: 676 case SCI_RNC_INITIAL:
529 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 677 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
530 return SCI_FAILURE_INVALID_STATE; 678 return SCI_FAILURE_INVALID_STATE;
531 679
532 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 680 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
533 sci_remote_node_context_construct_buffer(sci_rnc); 681 RNC_DEST_READY);
534 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); 682 if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
683 sci_remote_node_context_construct_buffer(sci_rnc);
684 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
685 }
535 return SCI_SUCCESS; 686 return SCI_SUCCESS;
687
536 case SCI_RNC_POSTING: 688 case SCI_RNC_POSTING:
537 case SCI_RNC_INVALIDATING: 689 case SCI_RNC_INVALIDATING:
538 case SCI_RNC_RESUMING: 690 case SCI_RNC_RESUMING:
539 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 691 /* We are still waiting to post when a resume was
540 return SCI_FAILURE_INVALID_STATE; 692 * requested.
541 693 */
542 sci_rnc->user_callback = cb_fn; 694 switch (sci_rnc->destination_state) {
543 sci_rnc->user_cookie = cb_p; 695 case RNC_DEST_SUSPENDED:
696 case RNC_DEST_SUSPENDED_RESUME:
697 /* Previously waiting to suspend after posting.
698 * Now continue onto resumption.
699 */
700 sci_remote_node_context_setup_to_resume(
701 sci_rnc, cb_fn, cb_p,
702 RNC_DEST_SUSPENDED_RESUME);
703 break;
704 default:
705 sci_remote_node_context_setup_to_resume(
706 sci_rnc, cb_fn, cb_p,
707 RNC_DEST_READY);
708 break;
709 }
544 return SCI_SUCCESS; 710 return SCI_SUCCESS;
545 case SCI_RNC_TX_SUSPENDED: { 711
546 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 712 case SCI_RNC_TX_SUSPENDED:
547 struct domain_device *dev = idev->domain_dev; 713 case SCI_RNC_TX_RX_SUSPENDED:
548 714 {
549 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 715 struct domain_device *dev = idev->domain_dev;
550 716 /* If this is an expander attached SATA device we must
551 /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ 717 * invalidate and repost the RNC since this is the only
552 if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) 718 * way to clear the TCi to NCQ tag mapping table for
553 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); 719 * the RNi. All other device types we can just resume.
554 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 720 */
555 if (idev->is_direct_attached) { 721 sci_remote_node_context_setup_to_resume(
556 /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ 722 sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
557 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); 723
558 } else { 724 if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
559 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 725 if ((dev_is_sata(dev) && dev->parent) ||
726 (sci_rnc->destination_state == RNC_DEST_FINAL))
727 sci_change_state(&sci_rnc->sm,
728 SCI_RNC_INVALIDATING);
729 else
730 sci_change_state(&sci_rnc->sm,
731 SCI_RNC_RESUMING);
560 } 732 }
561 } else 733 }
562 return SCI_FAILURE;
563 return SCI_SUCCESS; 734 return SCI_SUCCESS;
564 } 735
565 case SCI_RNC_TX_RX_SUSPENDED:
566 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
567 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
568 return SCI_FAILURE_INVALID_STATE;
569 case SCI_RNC_AWAIT_SUSPENSION: 736 case SCI_RNC_AWAIT_SUSPENSION:
570 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 737 sci_remote_node_context_setup_to_resume(
738 sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
571 return SCI_SUCCESS; 739 return SCI_SUCCESS;
572 default: 740 default:
573 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 741 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
574 "%s: invalid state %d\n", __func__, state); 742 "%s: invalid state %s\n", __func__,
743 rnc_state_name(state));
575 return SCI_FAILURE_INVALID_STATE; 744 return SCI_FAILURE_INVALID_STATE;
576 } 745 }
577} 746}
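
Note on the resume path above: for a suspended context the next state depends on the device. An expander-attached SATA device (or a context already marked for destruction) must go through invalidate-and-repost, everything else resumes directly, and nothing moves while the abort path owns the context. A condensed sketch of that decision, with made-up field names:

#include <stdbool.h>
#include <stdio.h>

enum sk_next { SK_STAY_PUT, SK_INVALIDATE, SK_RESUME };

struct sk_dev {
	bool is_sata;
	bool behind_expander;    /* i.e. the domain device has a parent  */
	bool abort_path_active;  /* an abort/terminate flow owns the RNC */
	bool destroy_pending;    /* destination state is "final"         */
};

/* Resume decision for a suspended remote node context: expander-attached
 * SATA must be invalidated and reposted (the only way to clear the
 * TCi-to-NCQ-tag mapping for that RNi); everything else resumes directly,
 * and nothing moves while an abort path is driving the context.
 */
static enum sk_next sk_resume_suspended(const struct sk_dev *dev)
{
	if (dev->abort_path_active)
		return SK_STAY_PUT;
	if ((dev->is_sata && dev->behind_expander) || dev->destroy_pending)
		return SK_INVALIDATE;
	return SK_RESUME;
}

int main(void)
{
	struct sk_dev stp = { .is_sata = true, .behind_expander = true };
	struct sk_dev ssp = { 0 };

	printf("expander STP -> %d\n", sk_resume_suspended(&stp)); /* invalidate */
	printf("direct SSP   -> %d\n", sk_resume_suspended(&ssp)); /* resume     */
	return 0;
}
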
@@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context
590 case SCI_RNC_TX_RX_SUSPENDED: 759 case SCI_RNC_TX_RX_SUSPENDED:
591 case SCI_RNC_AWAIT_SUSPENSION: 760 case SCI_RNC_AWAIT_SUSPENSION:
592 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 761 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
593 "%s: invalid state %d\n", __func__, state); 762 "%s: invalid state %s\n", __func__,
763 rnc_state_name(state));
594 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 764 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
595 default: 765 default:
596 break; 766 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
767 "%s: invalid state %s\n", __func__,
768 rnc_state_name(state));
769 return SCI_FAILURE_INVALID_STATE;
597 } 770 }
598 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
599 "%s: requested to start IO while still resuming, %d\n",
600 __func__, state);
601 return SCI_FAILURE_INVALID_STATE;
602} 771}
603 772
604enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 773enum sci_status sci_remote_node_context_start_task(
605 struct isci_request *ireq) 774 struct sci_remote_node_context *sci_rnc,
775 struct isci_request *ireq,
776 scics_sds_remote_node_context_callback cb_fn,
777 void *cb_p)
778{
779 enum sci_status status = sci_remote_node_context_resume(sci_rnc,
780 cb_fn, cb_p);
781 if (status != SCI_SUCCESS)
782 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
783 "%s: resume failed: %d\n", __func__, status);
784 return status;
785}
786
787int sci_remote_node_context_is_safe_to_abort(
788 struct sci_remote_node_context *sci_rnc)
606{ 789{
607 enum scis_sds_remote_node_context_states state; 790 enum scis_sds_remote_node_context_states state;
608 791
609 state = sci_rnc->sm.current_state_id; 792 state = sci_rnc->sm.current_state_id;
610 switch (state) { 793 switch (state) {
794 case SCI_RNC_INVALIDATING:
795 case SCI_RNC_TX_RX_SUSPENDED:
796 return 1;
797 case SCI_RNC_POSTING:
611 case SCI_RNC_RESUMING: 798 case SCI_RNC_RESUMING:
612 case SCI_RNC_READY: 799 case SCI_RNC_READY:
613 case SCI_RNC_AWAIT_SUSPENSION:
614 return SCI_SUCCESS;
615 case SCI_RNC_TX_SUSPENDED: 800 case SCI_RNC_TX_SUSPENDED:
616 case SCI_RNC_TX_RX_SUSPENDED: 801 case SCI_RNC_AWAIT_SUSPENSION:
617 sci_remote_node_context_resume(sci_rnc, NULL, NULL); 802 case SCI_RNC_INITIAL:
618 return SCI_SUCCESS; 803 return 0;
619 default: 804 default:
620 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 805 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
621 "%s: invalid state %d\n", __func__, state); 806 "%s: invalid state %d\n", __func__, state);
622 return SCI_FAILURE_INVALID_STATE; 807 return 0;
623 } 808 }
624} 809}
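
Note on sci_remote_node_context_is_safe_to_abort() above: it feeds the terminate path in request.c later in this patch (presumably through a device-level wrapper), where a request is parked with a pending-abort marker until the remote node reaches a state in which aborting is safe. A rough caller-side sketch with invented flag and helper names:

#include <stdbool.h>
#include <stdio.h>

struct sk_req {
	bool pending_abort;  /* parked until the device reaches a safe state */
};

/* Stub: in the driver this inspects the RNC state machine. */
static bool sk_safe_to_abort(bool rnc_quiesced)
{
	return rnc_quiesced;
}

/* Terminate-time bookkeeping: if the device is not yet safe to abort,
 * remember that an abort is wanted and let the caller wait; otherwise
 * drop the marker and proceed.
 */
static void sk_mark_abort(struct sk_req *req, bool rnc_quiesced)
{
	if (!sk_safe_to_abort(rnc_quiesced))
		req->pending_abort = true;
	else
		req->pending_abort = false;
}

int main(void)
{
	struct sk_req req = { false };

	sk_mark_abort(&req, false);
	printf("not quiesced: pending=%d\n", req.pending_abort);
	sk_mark_abort(&req, true);
	printf("quiesced:     pending=%d\n", req.pending_abort);
	return 0;
}
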
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a241e0f4c865..a703b9ce0c2c 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -75,8 +75,13 @@
75 */ 75 */
76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF 76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
77 77
78#define SCU_HARDWARE_SUSPENSION (0) 78enum sci_remote_node_suspension_reasons {
79#define SCI_SOFTWARE_SUSPENSION (1) 79 SCI_HW_SUSPEND,
80 SCI_SW_SUSPEND_NORMAL,
81 SCI_SW_SUSPEND_LINKHANG_DETECT
82};
83#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
84#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
80 85
81struct isci_request; 86struct isci_request;
82struct isci_remote_device; 87struct isci_remote_device;
@@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
137 * node context. 142 * node context.
138 */ 143 */
139enum sci_remote_node_context_destination_state { 144enum sci_remote_node_context_destination_state {
140 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, 145 RNC_DEST_UNSPECIFIED,
141 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, 146 RNC_DEST_READY,
142 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL 147 RNC_DEST_FINAL,
148 RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */
149 RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
150 * or invalidating and already suspending.
151 */
143}; 152};
144 153
145/** 154/**
@@ -156,10 +165,12 @@ struct sci_remote_node_context {
156 u16 remote_node_index; 165 u16 remote_node_index;
157 166
158 /** 167 /**
159 * This field is the recorded suspension code or the reason for the remote node 168 * This field is the recorded suspension type of the remote node
160 * context suspension. 169 * context suspension.
161 */ 170 */
162 u32 suspension_code; 171 u32 suspend_type;
172 enum sci_remote_node_suspension_reasons suspend_reason;
173 u32 suspend_count;
163 174
164 /** 175 /**
165 * This field is true if the remote node context is resuming from its current 176 * This field is true if the remote node context is resuming from its current
@@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
193bool sci_remote_node_context_is_ready( 204bool sci_remote_node_context_is_ready(
194 struct sci_remote_node_context *sci_rnc); 205 struct sci_remote_node_context *sci_rnc);
195 206
207bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
208
196enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, 209enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
197 u32 event_code); 210 u32 event_code);
198enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, 211enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
@@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
200 void *callback_parameter); 213 void *callback_parameter);
201enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 214enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
202 u32 suspend_type, 215 u32 suspend_type,
203 scics_sds_remote_node_context_callback cb_fn, 216 u32 suspension_code);
204 void *cb_p);
205enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, 217enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
206 scics_sds_remote_node_context_callback cb_fn, 218 scics_sds_remote_node_context_callback cb_fn,
207 void *cb_p); 219 void *cb_p);
208enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 220enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
209 struct isci_request *ireq); 221 struct isci_request *ireq,
222 scics_sds_remote_node_context_callback cb_fn,
223 void *cb_p);
210enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, 224enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
211 struct isci_request *ireq); 225 struct isci_request *ireq);
226int sci_remote_node_context_is_safe_to_abort(
227 struct sci_remote_node_context *sci_rnc);
212 228
229static inline bool sci_remote_node_context_is_being_destroyed(
230 struct sci_remote_node_context *sci_rnc)
231{
232 return (sci_rnc->destination_state == RNC_DEST_FINAL)
233 || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
234 && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
235}
213#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ 236#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2def1e3960f6..7a0431c73493 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
92 if (idx == 0) { 92 if (idx == 0) {
93 offset = (void *) &ireq->tc->sgl_pair_ab - 93 offset = (void *) &ireq->tc->sgl_pair_ab -
94 (void *) &ihost->task_context_table[0]; 94 (void *) &ihost->task_context_table[0];
95 return ihost->task_context_dma + offset; 95 return ihost->tc_dma + offset;
96 } else if (idx == 1) { 96 } else if (idx == 1) {
97 offset = (void *) &ireq->tc->sgl_pair_cd - 97 offset = (void *) &ireq->tc->sgl_pair_cd -
98 (void *) &ihost->task_context_table[0]; 98 (void *) &ihost->task_context_table[0];
99 return ihost->task_context_dma + offset; 99 return ihost->tc_dma + offset;
100 } 100 }
101 101
102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
@@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i
730{ 730{
731 struct sas_task *task = isci_request_access_task(ireq); 731 struct sas_task *task = isci_request_access_task(ireq);
732 732
733 ireq->protocol = SCIC_SSP_PROTOCOL; 733 ireq->protocol = SAS_PROTOCOL_SSP;
734 734
735 scu_ssp_io_request_construct_task_context(ireq, 735 scu_ssp_io_request_construct_task_context(ireq,
736 task->data_dir, 736 task->data_dir,
@@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
763 bool copy = false; 763 bool copy = false;
764 struct sas_task *task = isci_request_access_task(ireq); 764 struct sas_task *task = isci_request_access_task(ireq);
765 765
766 ireq->protocol = SCIC_STP_PROTOCOL; 766 ireq->protocol = SAS_PROTOCOL_STP;
767 767
768 copy = (task->data_dir == DMA_NONE) ? false : true; 768 copy = (task->data_dir == DMA_NONE) ? false : true;
769 769
@@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq)
863 863
864 switch (state) { 864 switch (state) {
865 case SCI_REQ_CONSTRUCTED: 865 case SCI_REQ_CONSTRUCTED:
866 /* Set to make sure no HW terminate posting is done: */
867 set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
866 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 868 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
867 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 869 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
868 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 870 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq)
883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 885 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
884 case SCI_REQ_ATAPI_WAIT_D2H: 886 case SCI_REQ_ATAPI_WAIT_D2H:
885 case SCI_REQ_ATAPI_WAIT_TC_COMP: 887 case SCI_REQ_ATAPI_WAIT_TC_COMP:
886 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 888 /* Fall through and change state to ABORTING... */
887 return SCI_SUCCESS;
888 case SCI_REQ_TASK_WAIT_TC_RESP: 889 case SCI_REQ_TASK_WAIT_TC_RESP:
889 /* The task frame was already confirmed to have been 890 /* The task frame was already confirmed to have been
890 * sent by the SCU HW. Since the state machine is 891 * sent by the SCU HW. Since the state machine is
@@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq)
893 * and don't wait for the task response. 894 * and don't wait for the task response.
894 */ 895 */
895 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
896 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 897 /* Fall through and handle like ABORTING... */
897 return SCI_SUCCESS;
898 case SCI_REQ_ABORTING: 898 case SCI_REQ_ABORTING:
899 /* If a request has a termination requested twice, return 899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
900 * a failure indication, since HW confirmation of the first 900 set_bit(IREQ_PENDING_ABORT, &ireq->flags);
901 * abort is still outstanding. 901 else
902 clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
903 /* If the request is only waiting on the remote device
904 * suspension, return SUCCESS so the caller will wait too.
902 */ 905 */
906 return SCI_SUCCESS;
903 case SCI_REQ_COMPLETED: 907 case SCI_REQ_COMPLETED:
904 default: 908 default:
905 dev_warn(&ireq->owning_controller->pdev->dev, 909 dev_warn(&ireq->owning_controller->pdev->dev,
906 "%s: SCIC IO Request requested to abort while in wrong " 910 "%s: SCIC IO Request requested to abort while in wrong "
907 "state %d\n", 911 "state %d\n", __func__, ireq->sm.current_state_id);
908 __func__,
909 ireq->sm.current_state_id);
910 break; 912 break;
911 } 913 }
912 914
@@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq,
1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1071 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1073 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
1073 if (ireq->protocol == SCIC_STP_PROTOCOL) { 1075 if (ireq->protocol == SAS_PROTOCOL_STP) {
1074 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1076 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1075 SCU_COMPLETION_TL_STATUS_SHIFT; 1077 SCU_COMPLETION_TL_STATUS_SHIFT;
1076 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1078 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
@@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2117 */ 2119 */
2118 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2120 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2119 sci_remote_device_suspend(ireq->target_device, 2121 sci_remote_device_suspend(ireq->target_device,
2120 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2122 SCI_SW_SUSPEND_NORMAL);
2121 2123
2122 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2124 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2123 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2125 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
@@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2138 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2140 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2139 * - this comes only for B0 2141 * - this comes only for B0
2140 */ 2142 */
2141 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2142 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2143 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2144 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2145 sci_remote_device_suspend(ireq->target_device,
2146 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2147 /* Fall through to the default case */
2148 default: 2143 default:
2149 /* All other completion status cause the IO to be complete. */ 2144 /* All other completion status cause the IO to be complete. */
2150 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2145 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
2262 return status; 2257 return status;
2263} 2258}
2264 2259
2260static int sci_request_smp_completion_status_is_tx_suspend(
2261 unsigned int completion_status)
2262{
2263 switch (completion_status) {
2264 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2265 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2266 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2267 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2268 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2269 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2270 return 1;
2271 }
2272 return 0;
2273}
2274
2275static int sci_request_smp_completion_status_is_tx_rx_suspend(
2276 unsigned int completion_status)
2277{
2278 return 0; /* There are no Tx/Rx SMP suspend conditions. */
2279}
2280
2281static int sci_request_ssp_completion_status_is_tx_suspend(
2282 unsigned int completion_status)
2283{
2284 switch (completion_status) {
2285 case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2286 case SCU_TASK_DONE_LF_ERR:
2287 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2288 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2289 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2290 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2291 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2292 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2293 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2294 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2295 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2296 return 1;
2297 }
2298 return 0;
2299}
2300
2301static int sci_request_ssp_completion_status_is_tx_rx_suspend(
2302 unsigned int completion_status)
2303{
2304 return 0; /* There are no Tx/Rx SSP suspend conditions. */
2305}
2306
2307static int sci_request_stpsata_completion_status_is_tx_suspend(
2308 unsigned int completion_status)
2309{
2310 switch (completion_status) {
2311 case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2312 case SCU_TASK_DONE_LL_R_ERR:
2313 case SCU_TASK_DONE_LL_PERR:
2314 case SCU_TASK_DONE_REG_ERR:
2315 case SCU_TASK_DONE_SDB_ERR:
2316 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2317 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2318 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2319 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2320 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2321 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2322 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2323 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2324 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2325 return 1;
2326 }
2327 return 0;
2328}
2329
2330
2331static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
2332 unsigned int completion_status)
2333{
2334 switch (completion_status) {
2335 case SCU_TASK_DONE_LF_ERR:
2336 case SCU_TASK_DONE_LL_SY_TERM:
2337 case SCU_TASK_DONE_LL_LF_TERM:
2338 case SCU_TASK_DONE_BREAK_RCVD:
2339 case SCU_TASK_DONE_INV_FIS_LEN:
2340 case SCU_TASK_DONE_UNEXP_FIS:
2341 case SCU_TASK_DONE_UNEXP_SDBFIS:
2342 case SCU_TASK_DONE_MAX_PLD_ERR:
2343 return 1;
2344 }
2345 return 0;
2346}
2347
2348static void sci_request_handle_suspending_completions(
2349 struct isci_request *ireq,
2350 u32 completion_code)
2351{
2352 int is_tx = 0;
2353 int is_tx_rx = 0;
2354
2355 switch (ireq->protocol) {
2356 case SAS_PROTOCOL_SMP:
2357 is_tx = sci_request_smp_completion_status_is_tx_suspend(
2358 completion_code);
2359 is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
2360 completion_code);
2361 break;
2362 case SAS_PROTOCOL_SSP:
2363 is_tx = sci_request_ssp_completion_status_is_tx_suspend(
2364 completion_code);
2365 is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
2366 completion_code);
2367 break;
2368 case SAS_PROTOCOL_STP:
2369 is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
2370 completion_code);
2371 is_tx_rx =
2372 sci_request_stpsata_completion_status_is_tx_rx_suspend(
2373 completion_code);
2374 break;
2375 default:
2376 dev_warn(&ireq->isci_host->pdev->dev,
2377 "%s: request %p has no valid protocol\n",
2378 __func__, ireq);
2379 break;
2380 }
2381 if (is_tx || is_tx_rx) {
2382 BUG_ON(is_tx && is_tx_rx);
2383
2384 sci_remote_node_context_suspend(
2385 &ireq->target_device->rnc,
2386 SCI_HW_SUSPEND,
2387 (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
2388 : SCU_EVENT_TL_RNC_SUSPEND_TX);
2389 }
2390}
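
Note on sci_request_handle_suspending_completions() above: each protocol has its own set of completion codes that predict a TX-only or TX/RX suspension, and the matching kind is fed back into the RNC suspend machinery as a hardware-initiated suspend. The sketch below shows the same classify-then-dispatch shape with a couple of stand-in codes; the real driver switches over many SCU_TASK_* values, none of which are reproduced here.

#include <stdio.h>

enum sk_proto { SK_SMP, SK_SSP, SK_STP };
enum sk_kind  { SK_NONE, SK_TX_ONLY, SK_TX_RX };

/* A couple of stand-in completion codes; the real tables are larger. */
enum sk_code { SK_OK, SK_OPEN_REJECT_BAD_DEST, SK_UNEXPECTED_FIS };

/* Classify a completion: does it mean the hardware is about to suspend
 * the node for transmit only, for both directions, or not at all?
 */
static enum sk_kind sk_classify(enum sk_proto proto, enum sk_code code)
{
	switch (proto) {
	case SK_SMP:
	case SK_SSP:
		/* Open-reject style failures stop outgoing traffic only. */
		return code == SK_OPEN_REJECT_BAD_DEST ? SK_TX_ONLY : SK_NONE;
	case SK_STP:
		if (code == SK_UNEXPECTED_FIS)
			return SK_TX_RX;  /* protocol stream broken both ways */
		if (code == SK_OPEN_REJECT_BAD_DEST)
			return SK_TX_ONLY;
		return SK_NONE;
	}
	return SK_NONE;
}

int main(void)
{
	printf("STP unexpected FIS  -> %d\n",
	       sk_classify(SK_STP, SK_UNEXPECTED_FIS));
	printf("SSP bad destination -> %d\n",
	       sk_classify(SK_SSP, SK_OPEN_REJECT_BAD_DEST));
	return 0;
}
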
2391
2265enum sci_status 2392enum sci_status
2266sci_io_request_tc_completion(struct isci_request *ireq, 2393sci_io_request_tc_completion(struct isci_request *ireq,
2267 u32 completion_code) 2394 u32 completion_code)
2268{ 2395{
2269 enum sci_base_request_states state; 2396 enum sci_base_request_states state;
2270 struct isci_host *ihost = ireq->owning_controller; 2397 struct isci_host *ihost = ireq->owning_controller;
2271 2398
2272 state = ireq->sm.current_state_id; 2399 state = ireq->sm.current_state_id;
2273 2400
2401 /* Decode those completions that signal upcoming suspension events. */
2402 sci_request_handle_suspending_completions(
2403 ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
2404
2274 switch (state) { 2405 switch (state) {
2275 case SCI_REQ_STARTED: 2406 case SCI_REQ_STARTED:
2276 return request_started_state_tc_event(ireq, completion_code); 2407 return request_started_state_tc_event(ireq, completion_code);
@@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu(
2362 * @request: This parameter is the completed isci_request object. 2493 * @request: This parameter is the completed isci_request object.
2363 * @response_ptr: This parameter specifies the service response for the I/O. 2494 * @response_ptr: This parameter specifies the service response for the I/O.
2364 * @status_ptr: This parameter specifies the exec status for the I/O. 2495 * @status_ptr: This parameter specifies the exec status for the I/O.
2365 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2366 * the LLDD with respect to completing this request or forcing an abort
2367 * condition on the I/O.
2368 * @open_rej_reason: This parameter specifies the encoded reason for the 2496 * @open_rej_reason: This parameter specifies the encoded reason for the
2369 * abandon-class reject. 2497 * abandon-class reject.
2370 * 2498 *
@@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status(
2375 struct sas_task *task, 2503 struct sas_task *task,
2376 enum service_response *response_ptr, 2504 enum service_response *response_ptr,
2377 enum exec_status *status_ptr, 2505 enum exec_status *status_ptr,
2378 enum isci_completion_selection *complete_to_host_ptr,
2379 enum sas_open_rej_reason open_rej_reason) 2506 enum sas_open_rej_reason open_rej_reason)
2380{ 2507{
2381 /* Task in the target is done. */ 2508 /* Task in the target is done. */
2382 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2509 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2383 *response_ptr = SAS_TASK_UNDELIVERED; 2510 *response_ptr = SAS_TASK_UNDELIVERED;
2384 *status_ptr = SAS_OPEN_REJECT; 2511 *status_ptr = SAS_OPEN_REJECT;
2385 *complete_to_host_ptr = isci_perform_normal_io_completion;
2386 task->task_status.open_rej_reason = open_rej_reason; 2512 task->task_status.open_rej_reason = open_rej_reason;
2387} 2513}
2388 2514
@@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status(
2392 * @request: This parameter is the completed isci_request object. 2518 * @request: This parameter is the completed isci_request object.
2393 * @response_ptr: This parameter specifies the service response for the I/O. 2519 * @response_ptr: This parameter specifies the service response for the I/O.
2394 * @status_ptr: This parameter specifies the exec status for the I/O. 2520 * @status_ptr: This parameter specifies the exec status for the I/O.
2395 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2396 * the LLDD with respect to completing this request or forcing an abort
2397 * condition on the I/O.
2398 * 2521 *
2399 * none. 2522 * none.
2400 */ 2523 */
@@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors(
2403 struct isci_request *request, 2526 struct isci_request *request,
2404 struct sas_task *task, 2527 struct sas_task *task,
2405 enum service_response *response_ptr, 2528 enum service_response *response_ptr,
2406 enum exec_status *status_ptr, 2529 enum exec_status *status_ptr)
2407 enum isci_completion_selection *complete_to_host_ptr)
2408{ 2530{
2409 unsigned int cstatus; 2531 unsigned int cstatus;
2410 2532
@@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors(
2445 *status_ptr = SAS_ABORTED_TASK; 2567 *status_ptr = SAS_ABORTED_TASK;
2446 2568
2447 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2569 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2448
2449 *complete_to_host_ptr =
2450 isci_perform_normal_io_completion;
2451 } else { 2570 } else {
2452 /* Task in the target is not done. */ 2571 /* Task in the target is not done. */
2453 *response_ptr = SAS_TASK_UNDELIVERED; 2572 *response_ptr = SAS_TASK_UNDELIVERED;
@@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors(
2458 *status_ptr = SAM_STAT_TASK_ABORTED; 2577 *status_ptr = SAM_STAT_TASK_ABORTED;
2459 2578
2460 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2579 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2461
2462 *complete_to_host_ptr =
2463 isci_perform_error_io_completion;
2464 } 2580 }
2465 2581
2466 break; 2582 break;
@@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors(
2489 *status_ptr = SAS_ABORTED_TASK; 2605 *status_ptr = SAS_ABORTED_TASK;
2490 2606
2491 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2607 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2492
2493 *complete_to_host_ptr = isci_perform_normal_io_completion;
2494 break; 2608 break;
2495 2609
2496 2610
@@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors(
2501 2615
2502 isci_request_set_open_reject_status( 2616 isci_request_set_open_reject_status(
2503 request, task, response_ptr, status_ptr, 2617 request, task, response_ptr, status_ptr,
2504 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2618 SAS_OREJ_WRONG_DEST);
2505 break; 2619 break;
2506 2620
2507 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2621 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
@@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors(
2511 */ 2625 */
2512 isci_request_set_open_reject_status( 2626 isci_request_set_open_reject_status(
2513 request, task, response_ptr, status_ptr, 2627 request, task, response_ptr, status_ptr,
2514 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2628 SAS_OREJ_RESV_AB0);
2515 break; 2629 break;
2516 2630
2517 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2631 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2518 2632
2519 isci_request_set_open_reject_status( 2633 isci_request_set_open_reject_status(
2520 request, task, response_ptr, status_ptr, 2634 request, task, response_ptr, status_ptr,
2521 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2635 SAS_OREJ_RESV_AB1);
2522 break; 2636 break;
2523 2637
2524 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2638 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2525 2639
2526 isci_request_set_open_reject_status( 2640 isci_request_set_open_reject_status(
2527 request, task, response_ptr, status_ptr, 2641 request, task, response_ptr, status_ptr,
2528 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2642 SAS_OREJ_RESV_AB2);
2529 break; 2643 break;
2530 2644
2531 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2645 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2532 2646
2533 isci_request_set_open_reject_status( 2647 isci_request_set_open_reject_status(
2534 request, task, response_ptr, status_ptr, 2648 request, task, response_ptr, status_ptr,
2535 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2649 SAS_OREJ_RESV_AB3);
2536 break; 2650 break;
2537 2651
2538 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2652 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2539 2653
2540 isci_request_set_open_reject_status( 2654 isci_request_set_open_reject_status(
2541 request, task, response_ptr, status_ptr, 2655 request, task, response_ptr, status_ptr,
2542 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2656 SAS_OREJ_BAD_DEST);
2543 break; 2657 break;
2544 2658
2545 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2659 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2546 2660
2547 isci_request_set_open_reject_status( 2661 isci_request_set_open_reject_status(
2548 request, task, response_ptr, status_ptr, 2662 request, task, response_ptr, status_ptr,
2549 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2663 SAS_OREJ_STP_NORES);
2550 break; 2664 break;
2551 2665
2552 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2666 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2553 2667
2554 isci_request_set_open_reject_status( 2668 isci_request_set_open_reject_status(
2555 request, task, response_ptr, status_ptr, 2669 request, task, response_ptr, status_ptr,
2556 complete_to_host_ptr, SAS_OREJ_EPROTO); 2670 SAS_OREJ_EPROTO);
2557 break; 2671 break;
2558 2672
2559 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2673 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2560 2674
2561 isci_request_set_open_reject_status( 2675 isci_request_set_open_reject_status(
2562 request, task, response_ptr, status_ptr, 2676 request, task, response_ptr, status_ptr,
2563 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2677 SAS_OREJ_CONN_RATE);
2564 break; 2678 break;
2565 2679
2566 case SCU_TASK_DONE_LL_R_ERR: 2680 case SCU_TASK_DONE_LL_R_ERR:
@@ -2592,95 +2706,12 @@ static void isci_request_handle_controller_specific_errors(
2592 *response_ptr = SAS_TASK_UNDELIVERED; 2706 *response_ptr = SAS_TASK_UNDELIVERED;
2593 *status_ptr = SAM_STAT_TASK_ABORTED; 2707 *status_ptr = SAM_STAT_TASK_ABORTED;
2594 2708
2595 if (task->task_proto == SAS_PROTOCOL_SMP) { 2709 if (task->task_proto == SAS_PROTOCOL_SMP)
2596 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2710 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2597 2711 else
2598 *complete_to_host_ptr = isci_perform_normal_io_completion;
2599 } else {
2600 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2712 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2601
2602 *complete_to_host_ptr = isci_perform_error_io_completion;
2603 }
2604 break;
2605 }
2606}
2607
2608/**
2609 * isci_task_save_for_upper_layer_completion() - This function saves the
2610 * request for later completion to the upper layer driver.
2611 * @host: This parameter is a pointer to the host on which the request
2612 * should be queued (either as an error or success).
2613 * @request: This parameter is the completed request.
2614 * @response: This parameter is the response code for the completed task.
2615 * @status: This parameter is the status code for the completed task.
2616 *
2617 * none.
2618 */
2619static void isci_task_save_for_upper_layer_completion(
2620 struct isci_host *host,
2621 struct isci_request *request,
2622 enum service_response response,
2623 enum exec_status status,
2624 enum isci_completion_selection task_notification_selection)
2625{
2626 struct sas_task *task = isci_request_access_task(request);
2627
2628 task_notification_selection
2629 = isci_task_set_completion_status(task, response, status,
2630 task_notification_selection);
2631
2632 /* Tasks aborted specifically by a call to the lldd_abort_task
2633 * function should not be completed to the host in the regular path.
2634 */
2635 switch (task_notification_selection) {
2636
2637 case isci_perform_normal_io_completion:
2638 /* Normal notification (task_done) */
2639
2640 /* Add to the completed list. */
2641 list_add(&request->completed_node,
2642 &host->requests_to_complete);
2643
2644 /* Take the request off the device's pending request list. */
2645 list_del_init(&request->dev_node);
2646 break;
2647
2648 case isci_perform_aborted_io_completion:
2649 /* No notification to libsas because this request is
2650 * already in the abort path.
2651 */
2652 /* Wake up whatever process was waiting for this
2653 * request to complete.
2654 */
2655 WARN_ON(request->io_request_completion == NULL);
2656
2657 if (request->io_request_completion != NULL) {
2658
2659 /* Signal whoever is waiting that this
2660 * request is complete.
2661 */
2662 complete(request->io_request_completion);
2663 }
2664 break;
2665
2666 case isci_perform_error_io_completion:
2667 /* Use sas_task_abort */
2668 /* Add to the aborted list. */
2669 list_add(&request->completed_node,
2670 &host->requests_to_errorback);
2671 break;
2672
2673 default:
2674 /* Add to the error to libsas list. */
2675 list_add(&request->completed_node,
2676 &host->requests_to_errorback);
2677 break; 2713 break;
2678 } 2714 }
2679 dev_dbg(&host->pdev->dev,
2680 "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
2681 __func__, task_notification_selection, task,
2682 (task) ? task->task_status.resp : 0, response,
2683 (task) ? task->task_status.stat : 0, status);
2684} 2715}
2685 2716
2686static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2717static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2715 struct isci_remote_device *idev = request->target_device; 2746 struct isci_remote_device *idev = request->target_device;
2716 enum service_response response = SAS_TASK_UNDELIVERED; 2747 enum service_response response = SAS_TASK_UNDELIVERED;
2717 enum exec_status status = SAS_ABORTED_TASK; 2748 enum exec_status status = SAS_ABORTED_TASK;
2718 enum isci_request_status request_status;
2719 enum isci_completion_selection complete_to_host
2720 = isci_perform_normal_io_completion;
2721 2749
2722 dev_dbg(&ihost->pdev->dev, 2750 dev_dbg(&ihost->pdev->dev,
2723 "%s: request = %p, task = %p,\n" 2751 "%s: request = %p, task = %p, "
2724 "task->data_dir = %d completion_status = 0x%x\n", 2752 "task->data_dir = %d completion_status = 0x%x\n",
2725 __func__, 2753 __func__, request, task, task->data_dir, completion_status);
2726 request,
2727 task,
2728 task->data_dir,
2729 completion_status);
2730 2754
2731 spin_lock(&request->state_lock); 2755 /* The request is done from an SCU HW perspective. */
2732 request_status = request->status;
2733 2756
2734 /* Decode the request status. Note that if the request has been 2757 /* This is an active request being completed from the core. */
2735 * aborted by a task management function, we don't care 2758 switch (completion_status) {
2736 * what the status is.
2737 */
2738 switch (request_status) {
2739
2740 case aborted:
2741 /* "aborted" indicates that the request was aborted by a task
2742 * management function, since once a task management request is
2743 * perfomed by the device, the request only completes because
2744 * of the subsequent driver terminate.
2745 *
2746 * Aborted also means an external thread is explicitly managing
2747 * this request, so that we do not complete it up the stack.
2748 *
2749 * The target is still there (since the TMF was successful).
2750 */
2751 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2752 response = SAS_TASK_COMPLETE;
2753 2759
2754 /* See if the device has been/is being stopped. Note 2760 case SCI_IO_FAILURE_RESPONSE_VALID:
2755 * that we ignore the quiesce state, since we are 2761 dev_dbg(&ihost->pdev->dev,
2756 * concerned about the actual device state. 2762 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2757 */ 2763 __func__, request, task);
2758 if (!idev) 2764
2759 status = SAS_DEVICE_UNKNOWN; 2765 if (sas_protocol_ata(task->task_proto)) {
2760 else 2766 isci_process_stp_response(task, &request->stp.rsp);
2761 status = SAS_ABORTED_TASK; 2767 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2768
2769 /* crack the iu response buffer. */
2770 resp_iu = &request->ssp.rsp;
2771 isci_request_process_response_iu(task, resp_iu,
2772 &ihost->pdev->dev);
2773
2774 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2775
2776 dev_err(&ihost->pdev->dev,
2777 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2778 "SAS_PROTOCOL_SMP protocol\n",
2779 __func__);
2762 2780
2763 complete_to_host = isci_perform_aborted_io_completion; 2781 } else
2764 /* This was an aborted request. */ 2782 dev_err(&ihost->pdev->dev,
2783 "%s: unknown protocol\n", __func__);
2765 2784
2766 spin_unlock(&request->state_lock); 2785 /* use the task status set in the task struct by the
2786 * isci_request_process_response_iu call.
2787 */
2788 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2789 response = task->task_status.resp;
2790 status = task->task_status.stat;
2767 break; 2791 break;
2768 2792
2769 case aborting: 2793 case SCI_IO_SUCCESS:
2770 /* aborting means that the task management function tried and 2794 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2771 * failed to abort the request. We need to note the request 2795
2772 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2796 response = SAS_TASK_COMPLETE;
2773 * target as down. 2797 status = SAM_STAT_GOOD;
2774 *
2775 * Aborting also means an external thread is explicitly managing
2776 * this request, so that we do not complete it up the stack.
2777 */
2778 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2798 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2779 response = SAS_TASK_UNDELIVERED;
2780 2799
2781 if (!idev) 2800 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2782 /* The device has been /is being stopped. Note that
2783 * we ignore the quiesce state, since we are
2784 * concerned about the actual device state.
2785 */
2786 status = SAS_DEVICE_UNKNOWN;
2787 else
2788 status = SAS_PHY_DOWN;
2789 2801
2790 complete_to_host = isci_perform_aborted_io_completion; 2802 /* This was an SSP / STP / SATA transfer.
2803 * There is a possibility that less data than
2804 * the maximum was transferred.
2805 */
2806 u32 transferred_length = sci_req_tx_bytes(request);
2791 2807
2792 /* This was an aborted request. */ 2808 task->task_status.residual
2809 = task->total_xfer_len - transferred_length;
2810
2811 /* If there were residual bytes, call this an
2812 * underrun.
2813 */
2814 if (task->task_status.residual != 0)
2815 status = SAS_DATA_UNDERRUN;
2793 2816
2794 spin_unlock(&request->state_lock); 2817 dev_dbg(&ihost->pdev->dev,
2818 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2819 __func__, status);
2820
2821 } else
2822 dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
2823 __func__);
2795 break; 2824 break;
2796 2825
2797 case terminating: 2826 case SCI_IO_FAILURE_TERMINATED:
2798 2827
2799 /* This was an terminated request. This happens when 2828 dev_dbg(&ihost->pdev->dev,
2800 * the I/O is being terminated because of an action on 2829 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2801 * the device (reset, tear down, etc.), and the I/O needs 2830 __func__, request, task);
2802 * to be completed up the stack. 2831
2803 */ 2832 /* The request was terminated explicitly. */
2804 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2833 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2805 response = SAS_TASK_UNDELIVERED; 2834 response = SAS_TASK_UNDELIVERED;
2806 2835
2807 /* See if the device has been/is being stopped. Note 2836 /* See if the device has been/is being stopped. Note
2808 * that we ignore the quiesce state, since we are 2837 * that we ignore the quiesce state, since we are
2809 * concerned about the actual device state. 2838 * concerned about the actual device state.
2810 */ 2839 */
2811 if (!idev) 2840 if (!idev)
2812 status = SAS_DEVICE_UNKNOWN; 2841 status = SAS_DEVICE_UNKNOWN;
2813 else 2842 else
2814 status = SAS_ABORTED_TASK; 2843 status = SAS_ABORTED_TASK;
2815
2816 complete_to_host = isci_perform_aborted_io_completion;
2817
2818 /* This was a terminated request. */
2819
2820 spin_unlock(&request->state_lock);
2821 break; 2844 break;
2822 2845
2823 case dead: 2846 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2824 /* This was a terminated request that timed-out during the
2825 * termination process. There is no task to complete to
2826 * libsas.
2827 */
2828 complete_to_host = isci_perform_normal_io_completion;
2829 spin_unlock(&request->state_lock);
2830 break;
2831 2847
2832 default: 2848 isci_request_handle_controller_specific_errors(idev, request,
2833 2849 task, &response,
2834 /* The request is done from an SCU HW perspective. */ 2850 &status);
2835 request->status = completed; 2851 break;
2836
2837 spin_unlock(&request->state_lock);
2838
2839 /* This is an active request being completed from the core. */
2840 switch (completion_status) {
2841
2842 case SCI_IO_FAILURE_RESPONSE_VALID:
2843 dev_dbg(&ihost->pdev->dev,
2844 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2845 __func__,
2846 request,
2847 task);
2848
2849 if (sas_protocol_ata(task->task_proto)) {
2850 isci_process_stp_response(task, &request->stp.rsp);
2851 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2852
2853 /* crack the iu response buffer. */
2854 resp_iu = &request->ssp.rsp;
2855 isci_request_process_response_iu(task, resp_iu,
2856 &ihost->pdev->dev);
2857
2858 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2859
2860 dev_err(&ihost->pdev->dev,
2861 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2862 "SAS_PROTOCOL_SMP protocol\n",
2863 __func__);
2864
2865 } else
2866 dev_err(&ihost->pdev->dev,
2867 "%s: unknown protocol\n", __func__);
2868
2869 /* use the task status set in the task struct by the
2870 * isci_request_process_response_iu call.
2871 */
2872 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2873 response = task->task_status.resp;
2874 status = task->task_status.stat;
2875 break;
2876 2852
2877 case SCI_IO_SUCCESS: 2853 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2878 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2854 /* This is a special case, in that the I/O completion
2855 * is telling us that the device needs a reset.
2856 * In order for the device reset condition to be
2857 * noticed, the I/O has to be handled in the error
2858 * handler. Set the reset flag and cause the
2859 * SCSI error thread to be scheduled.
2860 */
2861 spin_lock_irqsave(&task->task_state_lock, task_flags);
2862 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2863 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2879 2864
2880 response = SAS_TASK_COMPLETE; 2865 /* Fail the I/O. */
2881 status = SAM_STAT_GOOD; 2866 response = SAS_TASK_UNDELIVERED;
2882 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2867 status = SAM_STAT_TASK_ABORTED;
2883 2868
2884 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2869 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2870 break;
2885 2871
2886 /* This was an SSP / STP / SATA transfer. 2872 case SCI_FAILURE_RETRY_REQUIRED:
2887 * There is a possibility that less data than
2888 * the maximum was transferred.
2889 */
2890 u32 transferred_length = sci_req_tx_bytes(request);
2891 2873
2892 task->task_status.residual 2874 /* Fail the I/O so it can be retried. */
2893 = task->total_xfer_len - transferred_length; 2875 response = SAS_TASK_UNDELIVERED;
2876 if (!idev)
2877 status = SAS_DEVICE_UNKNOWN;
2878 else
2879 status = SAS_ABORTED_TASK;
2894 2880
2895 /* If there were residual bytes, call this an 2881 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2896 * underrun. 2882 break;
2897 */
2898 if (task->task_status.residual != 0)
2899 status = SAS_DATA_UNDERRUN;
2900 2883
2901 dev_dbg(&ihost->pdev->dev,
2902 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2903 __func__,
2904 status);
2905 2884
2906 } else 2885 default:
2907 dev_dbg(&ihost->pdev->dev, 2886 /* Catch any otherwise unhandled error codes here. */
2908 "%s: SCI_IO_SUCCESS\n", 2887 dev_dbg(&ihost->pdev->dev,
2909 __func__); 2888 "%s: invalid completion code: 0x%x - "
2889 "isci_request = %p\n",
2890 __func__, completion_status, request);
2910 2891
2911 break; 2892 response = SAS_TASK_UNDELIVERED;
2912 2893
2913 case SCI_IO_FAILURE_TERMINATED: 2894 /* See if the device has been/is being stopped. Note
2914 dev_dbg(&ihost->pdev->dev, 2895 * that we ignore the quiesce state, since we are
2915 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2896 * concerned about the actual device state.
2916 __func__, 2897 */
2917 request, 2898 if (!idev)
2918 task); 2899 status = SAS_DEVICE_UNKNOWN;
2900 else
2901 status = SAS_ABORTED_TASK;
2919 2902
2920 /* The request was terminated explicitly. No handling 2903 if (SAS_PROTOCOL_SMP == task->task_proto)
2921 * is needed in the SCSI error handler path.
2922 */
2923 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2904 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2924 response = SAS_TASK_UNDELIVERED; 2905 else
2925
2926 /* See if the device has been/is being stopped. Note
2927 * that we ignore the quiesce state, since we are
2928 * concerned about the actual device state.
2929 */
2930 if (!idev)
2931 status = SAS_DEVICE_UNKNOWN;
2932 else
2933 status = SAS_ABORTED_TASK;
2934
2935 complete_to_host = isci_perform_normal_io_completion;
2936 break;
2937
2938 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2939
2940 isci_request_handle_controller_specific_errors(
2941 idev, request, task, &response, &status,
2942 &complete_to_host);
2943
2944 break;
2945
2946 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2947 /* This is a special case, in that the I/O completion
2948 * is telling us that the device needs a reset.
2949 * In order for the device reset condition to be
2950 * noticed, the I/O has to be handled in the error
2951 * handler. Set the reset flag and cause the
2952 * SCSI error thread to be scheduled.
2953 */
2954 spin_lock_irqsave(&task->task_state_lock, task_flags);
2955 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2956 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2957
2958 /* Fail the I/O. */
2959 response = SAS_TASK_UNDELIVERED;
2960 status = SAM_STAT_TASK_ABORTED;
2961
2962 complete_to_host = isci_perform_error_io_completion;
2963 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2906 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2964 break;
2965
2966 case SCI_FAILURE_RETRY_REQUIRED:
2967
2968 /* Fail the I/O so it can be retried. */
2969 response = SAS_TASK_UNDELIVERED;
2970 if (!idev)
2971 status = SAS_DEVICE_UNKNOWN;
2972 else
2973 status = SAS_ABORTED_TASK;
2974
2975 complete_to_host = isci_perform_normal_io_completion;
2976 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2977 break;
2978
2979
2980 default:
2981 /* Catch any otherwise unhandled error codes here. */
2982 dev_dbg(&ihost->pdev->dev,
2983 "%s: invalid completion code: 0x%x - "
2984 "isci_request = %p\n",
2985 __func__, completion_status, request);
2986
2987 response = SAS_TASK_UNDELIVERED;
2988
2989 /* See if the device has been/is being stopped. Note
2990 * that we ignore the quiesce state, since we are
2991 * concerned about the actual device state.
2992 */
2993 if (!idev)
2994 status = SAS_DEVICE_UNKNOWN;
2995 else
2996 status = SAS_ABORTED_TASK;
2997
2998 if (SAS_PROTOCOL_SMP == task->task_proto) {
2999 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3000 complete_to_host = isci_perform_normal_io_completion;
3001 } else {
3002 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3003 complete_to_host = isci_perform_error_io_completion;
3004 }
3005 break;
3006 }
3007 break; 2907 break;
3008 } 2908 }
3009 2909
@@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
3038 break; 2938 break;
3039 } 2939 }
3040 2940
-3041	/* Put the completed request on the correct list */
-3042	isci_task_save_for_upper_layer_completion(ihost, request, response,
-3043						  status, complete_to_host
-3044		);
+2941	spin_lock_irqsave(&task->task_state_lock, task_flags);
+2942
+2943	task->task_status.resp = response;
+2944	task->task_status.stat = status;
+2945
+2946	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+2947		/* Normal notification (task_done) */
+2948		task->task_state_flags |= SAS_TASK_STATE_DONE;
+2949		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+2950					    SAS_TASK_STATE_PENDING);
+2951	}
+2952	spin_unlock_irqrestore(&task->task_state_lock, task_flags);
3045 2953
3046 /* complete the io request to the core. */ 2954 /* complete the io request to the core. */
3047 sci_controller_complete_io(ihost, request->target_device, request); 2955 sci_controller_complete_io(ihost, request->target_device, request);
@@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
3051 * task to recognize the already completed case. 2959 * task to recognize the already completed case.
3052 */ 2960 */
3053 set_bit(IREQ_TERMINATED, &request->flags); 2961 set_bit(IREQ_TERMINATED, &request->flags);
2962
2963 ireq_done(ihost, request, task);
3054} 2964}
3055 2965
3056static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 2966static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
@@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost,
3169 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3079 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3170 3080
3171 ireq->target_device = idev; 3081 ireq->target_device = idev;
3172 ireq->protocol = SCIC_NO_PROTOCOL; 3082 ireq->protocol = SAS_PROTOCOL_NONE;
3173 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3083 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3174 3084
3175 ireq->sci_status = SCI_SUCCESS; 3085 ireq->sci_status = SCI_SUCCESS;
@@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost,
3193 3103
3194 if (dev->dev_type == SAS_END_DEV) 3104 if (dev->dev_type == SAS_END_DEV)
3195 /* pass */; 3105 /* pass */;
3196 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 3106 else if (dev_is_sata(dev))
3197 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3107 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3198 else if (dev_is_expander(dev)) 3108 else if (dev_is_expander(dev))
3199 /* pass */; 3109 /* pass */;
@@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
3215 /* Build the common part of the request */ 3125 /* Build the common part of the request */
3216 sci_general_request_construct(ihost, idev, ireq); 3126 sci_general_request_construct(ihost, idev, ireq);
3217 3127
3218 if (dev->dev_type == SAS_END_DEV || 3128 if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
3219 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3220 set_bit(IREQ_TMF, &ireq->flags); 3129 set_bit(IREQ_TMF, &ireq->flags);
3221 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3130 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3131
3132 /* Set the protocol indicator. */
3133 if (dev_is_sata(dev))
3134 ireq->protocol = SAS_PROTOCOL_STP;
3135 else
3136 ireq->protocol = SAS_PROTOCOL_SSP;
3222 } else 3137 } else
3223 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3138 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3224 3139
@@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev,
3311 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3226 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3312 return SCI_FAILURE; 3227 return SCI_FAILURE;
3313 3228
3314 ireq->protocol = SCIC_SMP_PROTOCOL; 3229 ireq->protocol = SAS_PROTOCOL_SMP;
3315 3230
3316 /* byte swap the smp request. */ 3231 /* byte swap the smp request. */
3317 3232
@@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
3496 ireq->io_request_completion = NULL; 3411 ireq->io_request_completion = NULL;
3497 ireq->flags = 0; 3412 ireq->flags = 0;
3498 ireq->num_sg_entries = 0; 3413 ireq->num_sg_entries = 0;
3499 INIT_LIST_HEAD(&ireq->completed_node);
3500 INIT_LIST_HEAD(&ireq->dev_node);
3501 isci_request_change_state(ireq, allocated);
3502 3414
3503 return ireq; 3415 return ireq;
3504} 3416}
@@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3582 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3494 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3583 return status; 3495 return status;
3584 } 3496 }
3585
3586 /* Either I/O started OK, or the core has signaled that 3497 /* Either I/O started OK, or the core has signaled that
3587 * the device needs a target reset. 3498 * the device needs a target reset.
3588 *
3589 * In either case, hold onto the I/O for later.
3590 *
3591 * Update it's status and add it to the list in the
3592 * remote device object.
3593 */ 3499 */
3594 list_add(&ireq->dev_node, &idev->reqs_in_process); 3500 if (status != SCI_SUCCESS) {
3595
3596 if (status == SCI_SUCCESS) {
3597 isci_request_change_state(ireq, started);
3598 } else {
3599 /* The request did not really start in the 3501 /* The request did not really start in the
3600 * hardware, so clear the request handle 3502 * hardware, so clear the request handle
3601 * here so no terminations will be done. 3503 * here so no terminations will be done.
3602 */ 3504 */
3603 set_bit(IREQ_TERMINATED, &ireq->flags); 3505 set_bit(IREQ_TERMINATED, &ireq->flags);
3604 isci_request_change_state(ireq, completed);
3605 } 3506 }
3606 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3507 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3607 3508
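With isci_task_save_for_upper_layer_completion() and the per-request status enum gone, the request.c hunks above report completion by writing the result straight into the sas_task under task->task_state_lock and only then calling back into libsas. A minimal sketch of that notification pattern follows; the helper name is hypothetical, and only the field, flag and lock names are taken from the hunks above.

static void sketch_notify_libsas(struct sas_task *task,
				 enum service_response response,
				 enum exec_status status,
				 bool complete_in_target)
{
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_status.resp = response;
	task->task_status.stat = status;
	if (complete_in_target) {
		/* Normal notification (task_done) */
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	/* the actual completion callback (ireq_done()/task_done()) runs
	 * after the lock is dropped, as in the hunk above.
	 */
}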
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f2378452d..aff95317fcf4 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@
61#include "scu_task_context.h" 61#include "scu_task_context.h"
62 62
63/** 63/**
64 * struct isci_request_status - This enum defines the possible states of an I/O
65 * request.
66 *
67 *
68 */
69enum isci_request_status {
70 unallocated = 0x00,
71 allocated = 0x01,
72 started = 0x02,
73 completed = 0x03,
74 aborting = 0x04,
75 aborted = 0x05,
76 terminating = 0x06,
77 dead = 0x07
78};
79
80enum sci_request_protocol {
81 SCIC_NO_PROTOCOL,
82 SCIC_SMP_PROTOCOL,
83 SCIC_SSP_PROTOCOL,
84 SCIC_STP_PROTOCOL
85}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
86
87/**
88 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol 64 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
89 * @pio_len - number of bytes requested at PIO setup 65 * @pio_len - number of bytes requested at PIO setup
90 * @status - pio setup ending status value to tell us if we need 66 * @status - pio setup ending status value to tell us if we need
@@ -104,11 +80,14 @@ struct isci_stp_request {
104}; 80};
105 81
106struct isci_request { 82struct isci_request {
107 enum isci_request_status status;
108 #define IREQ_COMPLETE_IN_TARGET 0 83 #define IREQ_COMPLETE_IN_TARGET 0
109 #define IREQ_TERMINATED 1 84 #define IREQ_TERMINATED 1
110 #define IREQ_TMF 2 85 #define IREQ_TMF 2
111 #define IREQ_ACTIVE 3 86 #define IREQ_ACTIVE 3
87 #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
88 #define IREQ_TC_ABORT_POSTED 5
89 #define IREQ_ABORT_PATH_ACTIVE 6
90 #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
112 unsigned long flags; 91 unsigned long flags;
113 /* XXX kill ttype and ttype_ptr, allocate full sas_task */ 92 /* XXX kill ttype and ttype_ptr, allocate full sas_task */
114 union ttype_ptr_union { 93 union ttype_ptr_union {
@@ -116,11 +95,6 @@ struct isci_request {
116 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ 95 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
117 } ttype_ptr; 96 } ttype_ptr;
118 struct isci_host *isci_host; 97 struct isci_host *isci_host;
119 /* For use in the requests_to_{complete|abort} lists: */
120 struct list_head completed_node;
121 /* For use in the reqs_in_process list: */
122 struct list_head dev_node;
123 spinlock_t state_lock;
124 dma_addr_t request_daddr; 98 dma_addr_t request_daddr;
125 dma_addr_t zero_scatter_daddr; 99 dma_addr_t zero_scatter_daddr;
126 unsigned int num_sg_entries; 100 unsigned int num_sg_entries;
@@ -140,7 +114,7 @@ struct isci_request {
140 struct isci_host *owning_controller; 114 struct isci_host *owning_controller;
141 struct isci_remote_device *target_device; 115 struct isci_remote_device *target_device;
142 u16 io_tag; 116 u16 io_tag;
143 enum sci_request_protocol protocol; 117 enum sas_protocol protocol;
144 u32 scu_status; /* hardware result */ 118 u32 scu_status; /* hardware result */
145 u32 sci_status; /* upper layer disposition */ 119 u32 sci_status; /* upper layer disposition */
146 u32 post_context; 120 u32 post_context;
@@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
309 return ireq->request_daddr + (requested_addr - base_addr); 283 return ireq->request_daddr + (requested_addr - base_addr);
310} 284}
311 285
312/**
313 * isci_request_change_state() - This function sets the status of the request
314 * object.
315 * @request: This parameter points to the isci_request object
316 * @status: This Parameter is the new status of the object
317 *
318 */
319static inline enum isci_request_status
320isci_request_change_state(struct isci_request *isci_request,
321 enum isci_request_status status)
322{
323 enum isci_request_status old_state;
324 unsigned long flags;
325
326 dev_dbg(&isci_request->isci_host->pdev->dev,
327 "%s: isci_request = %p, state = 0x%x\n",
328 __func__,
329 isci_request,
330 status);
331
332 BUG_ON(isci_request == NULL);
333
334 spin_lock_irqsave(&isci_request->state_lock, flags);
335 old_state = isci_request->status;
336 isci_request->status = status;
337 spin_unlock_irqrestore(&isci_request->state_lock, flags);
338
339 return old_state;
340}
341
342/**
343 * isci_request_change_started_to_newstate() - This function sets the status of
344 * the request object.
345 * @request: This parameter points to the isci_request object
346 * @status: This Parameter is the new status of the object
347 *
348 * state previous to any change.
349 */
350static inline enum isci_request_status
351isci_request_change_started_to_newstate(struct isci_request *isci_request,
352 struct completion *completion_ptr,
353 enum isci_request_status newstate)
354{
355 enum isci_request_status old_state;
356 unsigned long flags;
357
358 spin_lock_irqsave(&isci_request->state_lock, flags);
359
360 old_state = isci_request->status;
361
362 if (old_state == started || old_state == aborting) {
363 BUG_ON(isci_request->io_request_completion != NULL);
364
365 isci_request->io_request_completion = completion_ptr;
366 isci_request->status = newstate;
367 }
368
369 spin_unlock_irqrestore(&isci_request->state_lock, flags);
370
371 dev_dbg(&isci_request->isci_host->pdev->dev,
372 "%s: isci_request = %p, old_state = 0x%x\n",
373 __func__,
374 isci_request,
375 old_state);
376
377 return old_state;
378}
379
380/**
381 * isci_request_change_started_to_aborted() - This function sets the status of
382 * the request object.
383 * @request: This parameter points to the isci_request object
384 * @completion_ptr: This parameter is saved as the kernel completion structure
385 * signalled when the old request completes.
386 *
387 * state previous to any change.
388 */
389static inline enum isci_request_status
390isci_request_change_started_to_aborted(struct isci_request *isci_request,
391 struct completion *completion_ptr)
392{
393 return isci_request_change_started_to_newstate(isci_request,
394 completion_ptr,
395 aborted);
396}
397
398#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) 286#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
399 287
400#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) 288#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
@@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
404 u16 tag); 292 u16 tag);
405int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 293int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
406 struct sas_task *task, u16 tag); 294 struct sas_task *task, u16 tag);
407void isci_terminate_pending_requests(struct isci_host *ihost,
408 struct isci_remote_device *idev);
409enum sci_status 295enum sci_status
410sci_task_request_construct(struct isci_host *ihost, 296sci_task_request_construct(struct isci_host *ihost,
411 struct isci_remote_device *idev, 297 struct isci_remote_device *idev,
@@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task)
421 task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); 307 task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
422 308
423} 309}
424
425#endif /* !defined(_ISCI_REQUEST_H_) */ 310#endif /* !defined(_ISCI_REQUEST_H_) */
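request.h now tracks request lifetime purely with the IREQ_* bits in ireq->flags; the status enum and per-request state_lock are removed. The sketch below illustrates the intended usage with the kernel's atomic bitops. The helper is hypothetical; only the bit names, the flags word and isci_free_tag() come from the hunks above.

static void sketch_mark_terminated(struct isci_host *ihost,
				   struct isci_request *ireq)
{
	/* set_bit()/test_bit()/clear_bit() are atomic, so no per-request
	 * spinlock is needed to publish this lifetime event.
	 */
	set_bit(IREQ_TERMINATED, &ireq->flags);

	/* Recycle the tag unless an abort/TMF path has taken explicit
	 * ownership of it (IREQ_NO_AUTO_FREE_TAG).
	 */
	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
		isci_free_tag(ihost, ireq->io_tag);
}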
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c695f9..071cb74a211c 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@
224 * 32-bit value like we want, each immediate value must be cast to a u32. 224 * 32-bit value like we want, each immediate value must be cast to a u32.
225 */ 225 */
226#define SCU_TASK_DONE_GOOD ((u32)0x00) 226#define SCU_TASK_DONE_GOOD ((u32)0x00)
227#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08)
227#define SCU_TASK_DONE_CRC_ERR ((u32)0x14) 228#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
228#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) 229#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
229#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) 230#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
@@ -237,6 +238,7 @@
237#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) 238#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
238#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) 239#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
239#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) 240#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
241#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B)
240#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) 242#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
241#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) 243#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
242#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) 244#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254ede9d4..6bc74eb012c9 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
78 enum exec_status status) 78 enum exec_status status)
79 79
80{ 80{
81 enum isci_completion_selection disposition; 81 unsigned long flags;
82 82
83 disposition = isci_perform_normal_io_completion; 83 /* Normal notification (task_done) */
84 disposition = isci_task_set_completion_status(task, response, status, 84 dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
85 disposition); 85 __func__, task, response, status);
86 86
87 /* Tasks aborted specifically by a call to the lldd_abort_task 87 spin_lock_irqsave(&task->task_state_lock, flags);
88 * function should not be completed to the host in the regular path.
89 */
90 switch (disposition) {
91 case isci_perform_normal_io_completion:
92 /* Normal notification (task_done) */
93 dev_dbg(&ihost->pdev->dev,
94 "%s: Normal - task = %p, response=%d, "
95 "status=%d\n",
96 __func__, task, response, status);
97
98 task->lldd_task = NULL;
99 task->task_done(task);
100 break;
101
102 case isci_perform_aborted_io_completion:
103 /*
104 * No notification because this request is already in the
105 * abort path.
106 */
107 dev_dbg(&ihost->pdev->dev,
108 "%s: Aborted - task = %p, response=%d, "
109 "status=%d\n",
110 __func__, task, response, status);
111 break;
112 88
113 case isci_perform_error_io_completion: 89 task->task_status.resp = response;
114 /* Use sas_task_abort */ 90 task->task_status.stat = status;
115 dev_dbg(&ihost->pdev->dev,
116 "%s: Error - task = %p, response=%d, "
117 "status=%d\n",
118 __func__, task, response, status);
119 sas_task_abort(task);
120 break;
121 91
122 default: 92 /* Normal notification (task_done) */
123 dev_dbg(&ihost->pdev->dev, 93 task->task_state_flags |= SAS_TASK_STATE_DONE;
124 "%s: isci task notification default case!", 94 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
125 __func__); 95 SAS_TASK_STATE_PENDING);
126 sas_task_abort(task); 96 task->lldd_task = NULL;
127 break; 97 spin_unlock_irqrestore(&task->task_state_lock, flags);
128 } 98
99 task->task_done(task);
129} 100}
130 101
131#define for_each_sas_task(num, task) \ 102#define for_each_sas_task(num, task) \
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
289 return ireq; 260 return ireq;
290} 261}
291 262
292/**
293* isci_request_mark_zombie() - This function must be called with scic_lock held.
294*/
295static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
296{
297 struct completion *tmf_completion = NULL;
298 struct completion *req_completion;
299
300 /* Set the request state to "dead". */
301 ireq->status = dead;
302
303 req_completion = ireq->io_request_completion;
304 ireq->io_request_completion = NULL;
305
306 if (test_bit(IREQ_TMF, &ireq->flags)) {
307 /* Break links with the TMF request. */
308 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
309
310 /* In the case where a task request is dying,
311 * the thread waiting on the complete will sit and
312 * timeout unless we wake it now. Since the TMF
313 * has a default error status, complete it here
314 * to wake the waiting thread.
315 */
316 if (tmf) {
317 tmf_completion = tmf->complete;
318 tmf->complete = NULL;
319 }
320 ireq->ttype_ptr.tmf_task_ptr = NULL;
321 dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
322 __func__, tmf->tmf_code, tmf->io_tag);
323 } else {
324 /* Break links with the sas_task - the callback is done
325 * elsewhere.
326 */
327 struct sas_task *task = isci_request_access_task(ireq);
328
329 if (task)
330 task->lldd_task = NULL;
331
332 ireq->ttype_ptr.io_task_ptr = NULL;
333 }
334
335 dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
336 ireq->io_tag);
337
338 /* Don't force waiting threads to timeout. */
339 if (req_completion)
340 complete(req_completion);
341
342 if (tmf_completion != NULL)
343 complete(tmf_completion);
344}
345
346static int isci_task_execute_tmf(struct isci_host *ihost, 263static int isci_task_execute_tmf(struct isci_host *ihost,
347 struct isci_remote_device *idev, 264 struct isci_remote_device *idev,
348 struct isci_tmf *tmf, unsigned long timeout_ms) 265 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
400 spin_unlock_irqrestore(&ihost->scic_lock, flags); 317 spin_unlock_irqrestore(&ihost->scic_lock, flags);
401 goto err_tci; 318 goto err_tci;
402 } 319 }
403
404 if (tmf->cb_state_func != NULL)
405 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
406
407 isci_request_change_state(ireq, started);
408
409 /* add the request to the remote device request list. */
410 list_add(&ireq->dev_node, &idev->reqs_in_process);
411
412 spin_unlock_irqrestore(&ihost->scic_lock, flags); 320 spin_unlock_irqrestore(&ihost->scic_lock, flags);
413 321
322 /* The RNC must be unsuspended before the TMF can get a response. */
323 isci_remote_device_resume_from_abort(ihost, idev);
324
414 /* Wait for the TMF to complete, or a timeout. */ 325 /* Wait for the TMF to complete, or a timeout. */
415 timeleft = wait_for_completion_timeout(&completion, 326 timeleft = wait_for_completion_timeout(&completion,
416 msecs_to_jiffies(timeout_ms)); 327 msecs_to_jiffies(timeout_ms));
@@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
419 /* The TMF did not complete - this could be because 330 /* The TMF did not complete - this could be because
420 * of an unplug. Terminate the TMF request now. 331 * of an unplug. Terminate the TMF request now.
421 */ 332 */
422 spin_lock_irqsave(&ihost->scic_lock, flags); 333 isci_remote_device_suspend_terminate(ihost, idev, ireq);
423
424 if (tmf->cb_state_func != NULL)
425 tmf->cb_state_func(isci_tmf_timed_out, tmf,
426 tmf->cb_data);
427
428 sci_controller_terminate_request(ihost, idev, ireq);
429
430 spin_unlock_irqrestore(&ihost->scic_lock, flags);
431
432 timeleft = wait_for_completion_timeout(
433 &completion,
434 msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
435
436 if (!timeleft) {
437 /* Strange condition - the termination of the TMF
438 * request timed-out.
439 */
440 spin_lock_irqsave(&ihost->scic_lock, flags);
441
442 /* If the TMF status has not changed, kill it. */
443 if (tmf->status == SCI_FAILURE_TIMEOUT)
444 isci_request_mark_zombie(ihost, ireq);
445
446 spin_unlock_irqrestore(&ihost->scic_lock, flags);
447 }
448 } 334 }
449 335
450 isci_print_tmf(ihost, tmf); 336 isci_print_tmf(ihost, tmf);
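The two hunks above shrink the TMF wait path to: post the request, resume the RNC from its abort/suspend state so the target can reply, wait with a timeout, and fall back to suspend-and-terminate if the wait expires. A condensed outline follows; the wrapper is hypothetical and drops the scic_lock handling and error paths shown in the real diff.

static void sketch_tmf_wait(struct isci_host *ihost,
			    struct isci_remote_device *idev,
			    struct isci_request *ireq,
			    struct completion *tmf_done,
			    unsigned long timeout_ms)
{
	/* The RNC must be unsuspended before the TMF can get a response. */
	isci_remote_device_resume_from_abort(ihost, idev);

	if (!wait_for_completion_timeout(tmf_done,
					 msecs_to_jiffies(timeout_ms)))
		/* The TMF did not complete (e.g. an unplug): suspend the
		 * RNC again and terminate the outstanding task context.
		 */
		isci_remote_device_suspend_terminate(ihost, idev, ireq);
}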
@@ -476,315 +362,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
476} 362}
477 363
478static void isci_task_build_tmf(struct isci_tmf *tmf, 364static void isci_task_build_tmf(struct isci_tmf *tmf,
479 enum isci_tmf_function_codes code, 365 enum isci_tmf_function_codes code)
480 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
481 struct isci_tmf *,
482 void *),
483 void *cb_data)
484{ 366{
485 memset(tmf, 0, sizeof(*tmf)); 367 memset(tmf, 0, sizeof(*tmf));
486 368 tmf->tmf_code = code;
487 tmf->tmf_code = code;
488 tmf->cb_state_func = tmf_sent_cb;
489 tmf->cb_data = cb_data;
490} 369}
491 370
492static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, 371static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
493 enum isci_tmf_function_codes code, 372 enum isci_tmf_function_codes code,
494 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
495 struct isci_tmf *,
496 void *),
497 struct isci_request *old_request) 373 struct isci_request *old_request)
498{ 374{
499 isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); 375 isci_task_build_tmf(tmf, code);
500 tmf->io_tag = old_request->io_tag; 376 tmf->io_tag = old_request->io_tag;
501} 377}
502 378
503/** 379/**
504 * isci_task_validate_request_to_abort() - This function checks the given I/O
505 * against the "started" state. If the request is still "started", it's
506 * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
507 * BEFORE CALLING THIS FUNCTION.
508 * @isci_request: This parameter specifies the request object to control.
509 * @isci_host: This parameter specifies the ISCI host object
510 * @isci_device: This is the device to which the request is pending.
511 * @aborted_io_completion: This is a completion structure that will be added to
512 * the request in case it is changed to aborting; this completion is
513 * triggered when the request is fully completed.
514 *
515 * Either "started" on successful change of the task status to "aborted", or
516 * "unallocated" if the task cannot be controlled.
517 */
518static enum isci_request_status isci_task_validate_request_to_abort(
519 struct isci_request *isci_request,
520 struct isci_host *isci_host,
521 struct isci_remote_device *isci_device,
522 struct completion *aborted_io_completion)
523{
524 enum isci_request_status old_state = unallocated;
525
526 /* Only abort the task if it's in the
527 * device's request_in_process list
528 */
529 if (isci_request && !list_empty(&isci_request->dev_node)) {
530 old_state = isci_request_change_started_to_aborted(
531 isci_request, aborted_io_completion);
532
533 }
534
535 return old_state;
536}
537
538static int isci_request_is_dealloc_managed(enum isci_request_status stat)
539{
540 switch (stat) {
541 case aborted:
542 case aborting:
543 case terminating:
544 case completed:
545 case dead:
546 return true;
547 default:
548 return false;
549 }
550}
551
552/**
553 * isci_terminate_request_core() - This function will terminate the given
554 * request, and wait for it to complete. This function must only be called
555 * from a thread that can wait. Note that the request is terminated and
556 * completed (back to the host, if started there).
557 * @ihost: This SCU.
558 * @idev: The target.
559 * @isci_request: The I/O request to be terminated.
560 *
561 */
562static void isci_terminate_request_core(struct isci_host *ihost,
563 struct isci_remote_device *idev,
564 struct isci_request *isci_request)
565{
566 enum sci_status status = SCI_SUCCESS;
567 bool was_terminated = false;
568 bool needs_cleanup_handling = false;
569 unsigned long flags;
570 unsigned long termination_completed = 1;
571 struct completion *io_request_completion;
572
573 dev_dbg(&ihost->pdev->dev,
574 "%s: device = %p; request = %p\n",
575 __func__, idev, isci_request);
576
577 spin_lock_irqsave(&ihost->scic_lock, flags);
578
579 io_request_completion = isci_request->io_request_completion;
580
581 /* Note that we are not going to control
582 * the target to abort the request.
583 */
584 set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
585
586 /* Make sure the request wasn't just sitting around signalling
587 * device condition (if the request handle is NULL, then the
588 * request completed but needed additional handling here).
589 */
590 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
591 was_terminated = true;
592 needs_cleanup_handling = true;
593 status = sci_controller_terminate_request(ihost,
594 idev,
595 isci_request);
596 }
597 spin_unlock_irqrestore(&ihost->scic_lock, flags);
598
599 /*
600 * The only time the request to terminate will
601 * fail is when the io request is completed and
602 * being aborted.
603 */
604 if (status != SCI_SUCCESS) {
605 dev_dbg(&ihost->pdev->dev,
606 "%s: sci_controller_terminate_request"
607 " returned = 0x%x\n",
608 __func__, status);
609
610 isci_request->io_request_completion = NULL;
611
612 } else {
613 if (was_terminated) {
614 dev_dbg(&ihost->pdev->dev,
615 "%s: before completion wait (%p/%p)\n",
616 __func__, isci_request, io_request_completion);
617
618 /* Wait here for the request to complete. */
619 termination_completed
620 = wait_for_completion_timeout(
621 io_request_completion,
622 msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
623
624 if (!termination_completed) {
625
626 /* The request to terminate has timed out. */
627 spin_lock_irqsave(&ihost->scic_lock, flags);
628
629 /* Check for state changes. */
630 if (!test_bit(IREQ_TERMINATED,
631 &isci_request->flags)) {
632
633 /* The best we can do is to have the
634 * request die a silent death if it
635 * ever really completes.
636 */
637 isci_request_mark_zombie(ihost,
638 isci_request);
639 needs_cleanup_handling = true;
640 } else
641 termination_completed = 1;
642
643 spin_unlock_irqrestore(&ihost->scic_lock,
644 flags);
645
646 if (!termination_completed) {
647
648 dev_dbg(&ihost->pdev->dev,
649 "%s: *** Timeout waiting for "
650 "termination(%p/%p)\n",
651 __func__, io_request_completion,
652 isci_request);
653
654 /* The request can no longer be referenced
655 * safely since it may go away if the
656 * termination every really does complete.
657 */
658 isci_request = NULL;
659 }
660 }
661 if (termination_completed)
662 dev_dbg(&ihost->pdev->dev,
663 "%s: after completion wait (%p/%p)\n",
664 __func__, isci_request, io_request_completion);
665 }
666
667 if (termination_completed) {
668
669 isci_request->io_request_completion = NULL;
670
671 /* Peek at the status of the request. This will tell
672 * us if there was special handling on the request such that it
673 * needs to be detached and freed here.
674 */
675 spin_lock_irqsave(&isci_request->state_lock, flags);
676
677 needs_cleanup_handling
678 = isci_request_is_dealloc_managed(
679 isci_request->status);
680
681 spin_unlock_irqrestore(&isci_request->state_lock, flags);
682
683 }
684 if (needs_cleanup_handling) {
685
686 dev_dbg(&ihost->pdev->dev,
687 "%s: cleanup isci_device=%p, request=%p\n",
688 __func__, idev, isci_request);
689
690 if (isci_request != NULL) {
691 spin_lock_irqsave(&ihost->scic_lock, flags);
692 isci_free_tag(ihost, isci_request->io_tag);
693 isci_request_change_state(isci_request, unallocated);
694 list_del_init(&isci_request->dev_node);
695 spin_unlock_irqrestore(&ihost->scic_lock, flags);
696 }
697 }
698 }
699}
700
701/**
702 * isci_terminate_pending_requests() - This function will change the all of the
703 * requests on the given device's state to "aborting", will terminate the
704 * requests, and wait for them to complete. This function must only be
705 * called from a thread that can wait. Note that the requests are all
706 * terminated and completed (back to the host, if started there).
707 * @isci_host: This parameter specifies SCU.
708 * @idev: This parameter specifies the target.
709 *
710 */
711void isci_terminate_pending_requests(struct isci_host *ihost,
712 struct isci_remote_device *idev)
713{
714 struct completion request_completion;
715 enum isci_request_status old_state;
716 unsigned long flags;
717 LIST_HEAD(list);
718
719 spin_lock_irqsave(&ihost->scic_lock, flags);
720 list_splice_init(&idev->reqs_in_process, &list);
721
722 /* assumes that isci_terminate_request_core deletes from the list */
723 while (!list_empty(&list)) {
724 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
725
726 /* Change state to "terminating" if it is currently
727 * "started".
728 */
729 old_state = isci_request_change_started_to_newstate(ireq,
730 &request_completion,
731 terminating);
732 switch (old_state) {
733 case started:
734 case completed:
735 case aborting:
736 break;
737 default:
738 /* termination in progress, or otherwise dispositioned.
739 * We know the request was on 'list' so should be safe
740 * to move it back to reqs_in_process
741 */
742 list_move(&ireq->dev_node, &idev->reqs_in_process);
743 ireq = NULL;
744 break;
745 }
746
747 if (!ireq)
748 continue;
749 spin_unlock_irqrestore(&ihost->scic_lock, flags);
750
751 init_completion(&request_completion);
752
753 dev_dbg(&ihost->pdev->dev,
754 "%s: idev=%p request=%p; task=%p old_state=%d\n",
755 __func__, idev, ireq,
756 (!test_bit(IREQ_TMF, &ireq->flags)
757 ? isci_request_access_task(ireq)
758 : NULL),
759 old_state);
760
761 /* If the old_state is started:
762 * This request was not already being aborted. If it had been,
763 * then the aborting I/O (ie. the TMF request) would not be in
764 * the aborting state, and thus would be terminated here. Note
765 * that since the TMF completion's call to the kernel function
766 * "complete()" does not happen until the pending I/O request
767 * terminate fully completes, we do not have to implement a
768 * special wait here for already aborting requests - the
769 * termination of the TMF request will force the request
770 * to finish it's already started terminate.
771 *
772 * If old_state == completed:
773 * This request completed from the SCU hardware perspective
774 * and now just needs cleaning up in terms of freeing the
775 * request and potentially calling up to libsas.
776 *
777 * If old_state == aborting:
778 * This request has already gone through a TMF timeout, but may
779 * not have been terminated; needs cleaning up at least.
780 */
781 isci_terminate_request_core(ihost, idev, ireq);
782 spin_lock_irqsave(&ihost->scic_lock, flags);
783 }
784 spin_unlock_irqrestore(&ihost->scic_lock, flags);
785}
786
787/**
788 * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain 380 * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
789 * Template functions. 381 * Template functions.
790 * @lun: This parameter specifies the lun to be reset. 382 * @lun: This parameter specifies the lun to be reset.
@@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas(
807 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or 399 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
808 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). 400 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
809 */ 401 */
810 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); 402 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
811 403
812 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ 404 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
813 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); 405 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
@@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas(
826 418
827int isci_task_lu_reset(struct domain_device *dev, u8 *lun) 419int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
828{ 420{
829 struct isci_host *isci_host = dev_to_ihost(dev); 421 struct isci_host *ihost = dev_to_ihost(dev);
830 struct isci_remote_device *isci_device; 422 struct isci_remote_device *idev;
831 unsigned long flags; 423 unsigned long flags;
832 int ret; 424 int ret = TMF_RESP_FUNC_COMPLETE;
833 425
834 spin_lock_irqsave(&isci_host->scic_lock, flags); 426 spin_lock_irqsave(&ihost->scic_lock, flags);
835 isci_device = isci_lookup_device(dev); 427 idev = isci_get_device(dev->lldd_dev);
836 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 428 spin_unlock_irqrestore(&ihost->scic_lock, flags);
837 429
838 dev_dbg(&isci_host->pdev->dev, 430 dev_dbg(&ihost->pdev->dev,
839 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", 431 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
840 __func__, dev, isci_host, isci_device); 432 __func__, dev, ihost, idev);
841 433
842 if (!isci_device) { 434 if (!idev) {
843 /* If the device is gone, stop the escalations. */ 435 /* If the device is gone, escalate to I_T_Nexus_Reset. */
844 dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__); 436 dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
845 437
846 ret = TMF_RESP_FUNC_COMPLETE; 438 ret = TMF_RESP_FUNC_FAILED;
847 goto out; 439 goto out;
848 } 440 }
849 441
850 /* Send the task management part of the reset. */ 442 /* Suspend the RNC, kill all TCs */
851 if (dev_is_sata(dev)) { 443 if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
852 sas_ata_schedule_reset(dev); 444 != SCI_SUCCESS) {
853 ret = TMF_RESP_FUNC_COMPLETE; 445 /* The suspend/terminate only fails if isci_get_device fails */
854 } else 446 ret = TMF_RESP_FUNC_FAILED;
855 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); 447 goto out;
856 448 }
857 /* If the LUN reset worked, all the I/O can now be terminated. */ 449 /* All pending I/Os have been terminated and cleaned up. */
858 if (ret == TMF_RESP_FUNC_COMPLETE) 450 if (!test_bit(IDEV_GONE, &idev->flags)) {
859 /* Terminate all I/O now. */ 451 if (dev_is_sata(dev))
860 isci_terminate_pending_requests(isci_host, 452 sas_ata_schedule_reset(dev);
861 isci_device); 453 else
862 454 /* Send the task management part of the reset. */
455 ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
456 }
863 out: 457 out:
864 isci_put_device(isci_device); 458 isci_put_device(idev);
865 return ret; 459 return ret;
866} 460}
867 461
@@ -882,63 +476,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
882/* Task Management Functions. Must be called from process context. */ 476/* Task Management Functions. Must be called from process context. */
883 477
884/** 478/**
885 * isci_abort_task_process_cb() - This is a helper function for the abort task
886 * TMF command. It manages the request state with respect to the successful
887 * transmission / completion of the abort task request.
888 * @cb_state: This parameter specifies when this function was called - after
889 * the TMF request has been started and after it has timed-out.
890 * @tmf: This parameter specifies the TMF in progress.
891 *
892 *
893 */
894static void isci_abort_task_process_cb(
895 enum isci_tmf_cb_state cb_state,
896 struct isci_tmf *tmf,
897 void *cb_data)
898{
899 struct isci_request *old_request;
900
901 old_request = (struct isci_request *)cb_data;
902
903 dev_dbg(&old_request->isci_host->pdev->dev,
904 "%s: tmf=%p, old_request=%p\n",
905 __func__, tmf, old_request);
906
907 switch (cb_state) {
908
909 case isci_tmf_started:
910 /* The TMF has been started. Nothing to do here, since the
911 * request state was already set to "aborted" by the abort
912 * task function.
913 */
914 if ((old_request->status != aborted)
915 && (old_request->status != completed))
916 dev_dbg(&old_request->isci_host->pdev->dev,
917 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
918 __func__, old_request->status, tmf, old_request);
919 break;
920
921 case isci_tmf_timed_out:
922
923 /* Set the task's state to "aborting", since the abort task
924 * function thread set it to "aborted" (above) in anticipation
925 * of the task management request working correctly. Since the
926 * timeout has now fired, the TMF request failed. We set the
927 * state such that the request completion will indicate the
928 * device is no longer present.
929 */
930 isci_request_change_state(old_request, aborting);
931 break;
932
933 default:
934 dev_dbg(&old_request->isci_host->pdev->dev,
935 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
936 __func__, cb_state, tmf, old_request);
937 break;
938 }
939}
940
941/**
942 * isci_task_abort_task() - This function is one of the SAS Domain Template 479 * isci_task_abort_task() - This function is one of the SAS Domain Template
943 * functions. This function is called by libsas to abort a specified task. 480 * functions. This function is called by libsas to abort a specified task.
944 * @task: This parameter specifies the SAS task to abort. 481 * @task: This parameter specifies the SAS task to abort.
@@ -947,22 +484,20 @@ static void isci_abort_task_process_cb(
947 */ 484 */
948int isci_task_abort_task(struct sas_task *task) 485int isci_task_abort_task(struct sas_task *task)
949{ 486{
950 struct isci_host *isci_host = dev_to_ihost(task->dev); 487 struct isci_host *ihost = dev_to_ihost(task->dev);
951 DECLARE_COMPLETION_ONSTACK(aborted_io_completion); 488 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
952 struct isci_request *old_request = NULL; 489 struct isci_request *old_request = NULL;
953 enum isci_request_status old_state; 490 struct isci_remote_device *idev = NULL;
954 struct isci_remote_device *isci_device = NULL;
955 struct isci_tmf tmf; 491 struct isci_tmf tmf;
956 int ret = TMF_RESP_FUNC_FAILED; 492 int ret = TMF_RESP_FUNC_FAILED;
957 unsigned long flags; 493 unsigned long flags;
958 int perform_termination = 0;
959 494
960 /* Get the isci_request reference from the task. Note that 495 /* Get the isci_request reference from the task. Note that
961 * this check does not depend on the pending request list 496 * this check does not depend on the pending request list
962 * in the device, because tasks driving resets may land here 497 * in the device, because tasks driving resets may land here
963 * after completion in the core. 498 * after completion in the core.
964 */ 499 */
965 spin_lock_irqsave(&isci_host->scic_lock, flags); 500 spin_lock_irqsave(&ihost->scic_lock, flags);
966 spin_lock(&task->task_state_lock); 501 spin_lock(&task->task_state_lock);
967 502
968 old_request = task->lldd_task; 503 old_request = task->lldd_task;
@@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task)
971 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && 506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
972 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && 507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
973 old_request) 508 old_request)
974 isci_device = isci_lookup_device(task->dev); 509 idev = isci_get_device(task->dev->lldd_dev);
975 510
976 spin_unlock(&task->task_state_lock); 511 spin_unlock(&task->task_state_lock);
977 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 512 spin_unlock_irqrestore(&ihost->scic_lock, flags);
978 513
979 dev_dbg(&isci_host->pdev->dev, 514 dev_warn(&ihost->pdev->dev,
980 "%s: dev = %p, task = %p, old_request == %p\n", 515 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
981 __func__, isci_device, task, old_request); 516 __func__, idev,
517 (dev_is_sata(task->dev) ? "STP/SATA"
518 : ((dev_is_expander(task->dev))
519 ? "SMP"
520 : "SSP")),
521 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
522 ? " IDEV_GONE"
523 : "")
524 : " <NULL>"),
525 task, old_request);
982 526
983 /* Device reset conditions signalled in task_state_flags are the 527 /* Device reset conditions signalled in task_state_flags are the
984 * responsbility of libsas to observe at the start of the error 528 * responsbility of libsas to observe at the start of the error
985 * handler thread. 529 * handler thread.
986 */ 530 */
987 if (!isci_device || !old_request) { 531 if (!idev || !old_request) {
988 /* The request has already completed and there 532 /* The request has already completed and there
989 * is nothing to do here other than to set the task 533 * is nothing to do here other than to set the task
990 * done bit, and indicate that the task abort function 534 * done bit, and indicate that the task abort function
@@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task)
998 542
999 ret = TMF_RESP_FUNC_COMPLETE; 543 ret = TMF_RESP_FUNC_COMPLETE;
1000 544
1001 dev_dbg(&isci_host->pdev->dev, 545 dev_warn(&ihost->pdev->dev,
1002 "%s: abort task not needed for %p\n", 546 "%s: abort task not needed for %p\n",
1003 __func__, task); 547 __func__, task);
1004 goto out; 548 goto out;
1005 } 549 }
1006 550 /* Suspend the RNC, kill the TC */
1007 spin_lock_irqsave(&isci_host->scic_lock, flags); 551 if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
1008 552 != SCI_SUCCESS) {
1009 /* Check the request status and change to "aborted" if currently 553 dev_warn(&ihost->pdev->dev,
1010 * "starting"; if true then set the I/O kernel completion 554 "%s: isci_remote_device_reset_terminate(dev=%p, "
1011 * struct that will be triggered when the request completes. 555 "req=%p, task=%p) failed\n",
1012 */ 556 __func__, idev, old_request, task);
1013 old_state = isci_task_validate_request_to_abort( 557 ret = TMF_RESP_FUNC_FAILED;
1014 old_request, isci_host, isci_device,
1015 &aborted_io_completion);
1016 if ((old_state != started) &&
1017 (old_state != completed) &&
1018 (old_state != aborting)) {
1019
1020 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1021
1022 /* The request was already being handled by someone else (because
1023 * they got to set the state away from started).
1024 */
1025 dev_dbg(&isci_host->pdev->dev,
1026 "%s: device = %p; old_request %p already being aborted\n",
1027 __func__,
1028 isci_device, old_request);
1029 ret = TMF_RESP_FUNC_COMPLETE;
1030 goto out; 558 goto out;
1031 } 559 }
560 spin_lock_irqsave(&ihost->scic_lock, flags);
561
1032 if (task->task_proto == SAS_PROTOCOL_SMP || 562 if (task->task_proto == SAS_PROTOCOL_SMP ||
1033 sas_protocol_ata(task->task_proto) || 563 sas_protocol_ata(task->task_proto) ||
1034 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { 564 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
565 test_bit(IDEV_GONE, &idev->flags)) {
1035 566
1036 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 567 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1037 568
1038 dev_dbg(&isci_host->pdev->dev, 569 /* No task to send, so explicitly resume the device here */
1039 "%s: %s request" 570 isci_remote_device_resume_from_abort(ihost, idev);
1040 " or complete_in_target (%d), thus no TMF\n",
1041 __func__,
1042 ((task->task_proto == SAS_PROTOCOL_SMP)
1043 ? "SMP"
1044 : (sas_protocol_ata(task->task_proto)
1045 ? "SATA/STP"
1046 : "<other>")
1047 ),
1048 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1049
1050 if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1051 spin_lock_irqsave(&task->task_state_lock, flags);
1052 task->task_state_flags |= SAS_TASK_STATE_DONE;
1053 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1054 SAS_TASK_STATE_PENDING);
1055 spin_unlock_irqrestore(&task->task_state_lock, flags);
1056 ret = TMF_RESP_FUNC_COMPLETE;
1057 } else {
1058 spin_lock_irqsave(&task->task_state_lock, flags);
1059 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1060 SAS_TASK_STATE_PENDING);
1061 spin_unlock_irqrestore(&task->task_state_lock, flags);
1062 }
1063 571
1064 /* STP and SMP devices are not sent a TMF, but the 572 dev_warn(&ihost->pdev->dev,
1065 * outstanding I/O request is terminated below. This is 573 "%s: %s request"
1066 * because SATA/STP and SMP discovery path timeouts directly 574 " or complete_in_target (%d), "
1067 * call the abort task interface for cleanup. 575 "or IDEV_GONE (%d), thus no TMF\n",
1068 */ 576 __func__,
1069 perform_termination = 1; 577 ((task->task_proto == SAS_PROTOCOL_SMP)
578 ? "SMP"
579 : (sas_protocol_ata(task->task_proto)
580 ? "SATA/STP"
581 : "<other>")
582 ),
583 test_bit(IREQ_COMPLETE_IN_TARGET,
584 &old_request->flags),
585 test_bit(IDEV_GONE, &idev->flags));
586
587 spin_lock_irqsave(&task->task_state_lock, flags);
588 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
589 SAS_TASK_STATE_PENDING);
590 task->task_state_flags |= SAS_TASK_STATE_DONE;
591 spin_unlock_irqrestore(&task->task_state_lock, flags);
1070 592
593 ret = TMF_RESP_FUNC_COMPLETE;
1071 } else { 594 } else {
1072 /* Fill in the tmf stucture */ 595 /* Fill in the tmf stucture */
1073 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, 596 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1074 isci_abort_task_process_cb,
1075 old_request); 597 old_request);
1076 598
1077 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 599 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1078 600
601 /* Send the task management request. */
1079 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ 602 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
1080 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, 603 ret = isci_task_execute_tmf(ihost, idev, &tmf,
1081 ISCI_ABORT_TASK_TIMEOUT_MS); 604 ISCI_ABORT_TASK_TIMEOUT_MS);
1082
1083 if (ret == TMF_RESP_FUNC_COMPLETE)
1084 perform_termination = 1;
1085 else
1086 dev_dbg(&isci_host->pdev->dev,
1087 "%s: isci_task_send_tmf failed\n", __func__);
1088 } 605 }
1089 if (perform_termination) { 606out:
1090 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); 607 dev_warn(&ihost->pdev->dev,
1091 608 "%s: Done; dev = %p, task = %p , old_request == %p\n",
1092 /* Clean up the request on our side, and wait for the aborted 609 __func__, idev, task, old_request);
1093 * I/O to complete. 610 isci_put_device(idev);
1094 */
1095 isci_terminate_request_core(isci_host, isci_device,
1096 old_request);
1097 }
1098
1099 /* Make sure we do not leave a reference to aborted_io_completion */
1100 old_request->io_request_completion = NULL;
1101 out:
1102 isci_put_device(isci_device);
1103 return ret; 611 return ret;
1104} 612}
1105 613
@@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost,
1195{ 703{
1196 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 704 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1197 struct completion *tmf_complete = NULL; 705 struct completion *tmf_complete = NULL;
1198 struct completion *request_complete = ireq->io_request_completion;
1199 706
1200 dev_dbg(&ihost->pdev->dev, 707 dev_dbg(&ihost->pdev->dev,
1201 "%s: request = %p, status=%d\n", 708 "%s: request = %p, status=%d\n",
1202 __func__, ireq, completion_status); 709 __func__, ireq, completion_status);
1203 710
1204 isci_request_change_state(ireq, completed);
1205
1206 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); 711 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1207 712
1208 if (tmf) { 713 if (tmf) {
@@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost,
1226 */ 731 */
1227 set_bit(IREQ_TERMINATED, &ireq->flags); 732 set_bit(IREQ_TERMINATED, &ireq->flags);
1228 733
1229 /* As soon as something is in the terminate path, deallocation is
1230 * managed there. Note that the final non-managed state of a task
1231 * request is "completed".
1232 */
1233 if ((ireq->status == completed) ||
1234 !isci_request_is_dealloc_managed(ireq->status)) {
1235 isci_request_change_state(ireq, unallocated);
1236 isci_free_tag(ihost, ireq->io_tag);
1237 list_del_init(&ireq->dev_node);
1238 }
734 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
735 wake_up_all(&ihost->eventq);
1239 736
1240 /* "request_complete" is set if the task was being terminated. */
1241 if (request_complete)
1242 complete(request_complete);
737 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
738 isci_free_tag(ihost, ireq->io_tag);
1243 739
1244 /* The task management part completes last. */ 740 /* The task management part completes last. */
1245 if (tmf_complete) 741 if (tmf_complete)
@@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost,
1250 struct domain_device *dev, 746 struct domain_device *dev,
1251 struct isci_remote_device *idev) 747 struct isci_remote_device *idev)
1252{ 748{
1253 int rc;
1254 unsigned long flags;
1255 enum sci_status status;
749 int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
1256 struct sas_phy *phy = sas_get_local_phy(dev); 750 struct sas_phy *phy = sas_get_local_phy(dev);
1257 struct isci_port *iport = dev->port->lldd_port; 751 struct isci_port *iport = dev->port->lldd_port;
1258 752
1259 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); 753 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1260 754
1261 spin_lock_irqsave(&ihost->scic_lock, flags);
1262 status = sci_remote_device_reset(idev);
1263 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1264
1265 if (status != SCI_SUCCESS) {
1266 dev_dbg(&ihost->pdev->dev,
1267 "%s: sci_remote_device_reset(%p) returned %d!\n",
1268 __func__, idev, status);
755 /* Suspend the RNC, terminate all outstanding TCs. */
756 if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
757 != SCI_SUCCESS) {
1269 rc = TMF_RESP_FUNC_FAILED; 758 rc = TMF_RESP_FUNC_FAILED;
1270 goto out; 759 goto out;
1271 } 760 }
1272
1273 if (scsi_is_sas_phy_local(phy)) {
1274 struct isci_phy *iphy = &ihost->phys[phy->number];
1275
1276 rc = isci_port_perform_hard_reset(ihost, iport, iphy);
1277 } else
1278 rc = sas_phy_reset(phy, !dev_is_sata(dev));
1279
1280 /* Terminate in-progress I/O now. */
1281 isci_remote_device_nuke_requests(ihost, idev);
1282
1283 /* Since all pending TCs have been cleaned, resume the RNC. */
1284 spin_lock_irqsave(&ihost->scic_lock, flags);
1285 status = sci_remote_device_reset_complete(idev);
1286 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1287
1288 if (status != SCI_SUCCESS) {
1289 dev_dbg(&ihost->pdev->dev,
1290 "%s: sci_remote_device_reset_complete(%p) "
1291 "returned %d!\n", __func__, idev, status);
1292 }
761 /* Note that since the termination for outstanding requests succeeded,
762 * this function will return success. This is because the resets will
763 * only fail if the device has been removed (ie. hotplug), and the
764 * primary duty of this function is to cleanup tasks, so that is the
765 * relevant status.
766 */
767 if (!test_bit(IDEV_GONE, &idev->flags)) {
768 if (scsi_is_sas_phy_local(phy)) {
769 struct isci_phy *iphy = &ihost->phys[phy->number];
770
771 reset_stat = isci_port_perform_hard_reset(ihost, iport,
772 iphy);
773 } else
774 reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
775 }
776 /* Explicitly resume the RNC here, since there was no task sent. */
777 isci_remote_device_resume_from_abort(ihost, idev);
1293 778
1294 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
779 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
780 __func__, idev, reset_stat);
1295 out: 781 out:
1296 sas_put_local_phy(phy); 782 sas_put_local_phy(phy);
1297 return rc; 783 return rc;
@@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
1305 int ret; 791 int ret;
1306 792
1307 spin_lock_irqsave(&ihost->scic_lock, flags); 793 spin_lock_irqsave(&ihost->scic_lock, flags);
1308 idev = isci_lookup_device(dev);
794 idev = isci_get_device(dev->lldd_dev);
1309 spin_unlock_irqrestore(&ihost->scic_lock, flags); 795 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1310 796
1311 if (!idev) { 797 if (!idev) {
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 7b6d0e32fd9b..9c06cbad1d26 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -63,19 +63,6 @@
63struct isci_request; 63struct isci_request;
64 64
65/** 65/**
66 * enum isci_tmf_cb_state - This enum defines the possible states in which the
67 * TMF callback function is invoked during the TMF execution process.
68 *
69 *
70 */
71enum isci_tmf_cb_state {
72
73 isci_tmf_init_state = 0,
74 isci_tmf_started,
75 isci_tmf_timed_out
76};
77
78/**
79 * enum isci_tmf_function_codes - This enum defines the possible preparations 66 * enum isci_tmf_function_codes - This enum defines the possible preparations
80 * of task management requests. 67 * of task management requests.
81 * 68 *
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes {
87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK, 74 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
88 isci_tmf_ssp_lun_reset = TMF_LU_RESET, 75 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
89}; 76};
77
90/** 78/**
91 * struct isci_tmf - This class represents the task management object which 79 * struct isci_tmf - This class represents the task management object which
92 * acts as an interface to libsas for processing task management requests 80 * acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@ struct isci_tmf {
106 u16 io_tag; 94 u16 io_tag;
107 enum isci_tmf_function_codes tmf_code; 95 enum isci_tmf_function_codes tmf_code;
108 int status; 96 int status;
109
110 /* The optional callback function allows the user process to
111 * track the TMF transmit / timeout conditions.
112 */
113 void (*cb_state_func)(
114 enum isci_tmf_cb_state,
115 struct isci_tmf *, void *);
116 void *cb_data;
117
118}; 97};
119 98
120static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) 99static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@ int isci_queuecommand(
208 struct scsi_cmnd *scsi_cmd, 187 struct scsi_cmnd *scsi_cmd,
209 void (*donefunc)(struct scsi_cmnd *)); 188 void (*donefunc)(struct scsi_cmnd *));
210 189
211/**
212 * enum isci_completion_selection - This enum defines the possible actions to
213 * take with respect to a given request's notification back to libsas.
214 *
215 *
216 */
217enum isci_completion_selection {
218
219 isci_perform_normal_io_completion, /* Normal notify (task_done) */
220 isci_perform_aborted_io_completion, /* No notification. */
221 isci_perform_error_io_completion /* Use sas_task_abort */
222};
223
224/**
225 * isci_task_set_completion_status() - This function sets the completion status
226 * for the request.
227 * @task: This parameter is the completed request.
228 * @response: This parameter is the response code for the completed task.
229 * @status: This parameter is the status code for the completed task.
230 *
231* @return The new notification mode for the request.
232*/
233static inline enum isci_completion_selection
234isci_task_set_completion_status(
235 struct sas_task *task,
236 enum service_response response,
237 enum exec_status status,
238 enum isci_completion_selection task_notification_selection)
239{
240 unsigned long flags;
241
242 spin_lock_irqsave(&task->task_state_lock, flags);
243
244 /* If a device reset is being indicated, make sure the I/O
245 * is in the error path.
246 */
247 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
248 /* Fail the I/O to make sure it goes into the error path. */
249 response = SAS_TASK_UNDELIVERED;
250 status = SAM_STAT_TASK_ABORTED;
251
252 task_notification_selection = isci_perform_error_io_completion;
253 }
254 task->task_status.resp = response;
255 task->task_status.stat = status;
256
257 switch (task->task_proto) {
258
259 case SAS_PROTOCOL_SATA:
260 case SAS_PROTOCOL_STP:
261 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
262
263 if (task_notification_selection
264 == isci_perform_error_io_completion) {
265 /* SATA/STP I/O has it's own means of scheduling device
266 * error handling on the normal path.
267 */
268 task_notification_selection
269 = isci_perform_normal_io_completion;
270 }
271 break;
272 default:
273 break;
274 }
275
276 switch (task_notification_selection) {
277
278 case isci_perform_error_io_completion:
279
280 if (task->task_proto == SAS_PROTOCOL_SMP) {
281 /* There is no error escalation in the SMP case.
282 * Convert to a normal completion to avoid the
283 * timeout in the discovery path and to let the
284 * next action take place quickly.
285 */
286 task_notification_selection
287 = isci_perform_normal_io_completion;
288
289 /* Fall through to the normal case... */
290 } else {
291 /* Use sas_task_abort */
292 /* Leave SAS_TASK_STATE_DONE clear
293 * Leave SAS_TASK_AT_INITIATOR set.
294 */
295 break;
296 }
297
298 case isci_perform_aborted_io_completion:
299 /* This path can occur with task-managed requests as well as
300 * requests terminated because of LUN or device resets.
301 */
302 /* Fall through to the normal case... */
303 case isci_perform_normal_io_completion:
304 /* Normal notification (task_done) */
305 task->task_state_flags |= SAS_TASK_STATE_DONE;
306 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
307 SAS_TASK_STATE_PENDING);
308 break;
309 default:
310 WARN_ONCE(1, "unknown task_notification_selection: %d\n",
311 task_notification_selection);
312 break;
313 }
314
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316
317 return task_notification_selection;
318
319}
320#endif /* !defined(_SCI_TASK_H_) */ 190#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index 16f88ab939c8..04a6d0d59a22 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -57,31 +57,19 @@
57#include "unsolicited_frame_control.h" 57#include "unsolicited_frame_control.h"
58#include "registers.h" 58#include "registers.h"
59 59
60int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
60void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
61{ 61{
62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; 62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
63 struct sci_unsolicited_frame *uf; 63 struct sci_unsolicited_frame *uf;
64 u32 buf_len, header_len, i;
65 dma_addr_t dma;
66 size_t size;
67 void *virt;
68
64 dma_addr_t dma = ihost->ufi_dma;
65 void *virt = ihost->ufi_buf;
66 int i;
69 /*
70 * Prepare all of the memory sizes for the UF headers, UF address
71 * table, and UF buffers themselves.
72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
76 67
77 /* 68 /*
78 * The Unsolicited Frame buffers are set at the start of the UF 69 * The Unsolicited Frame buffers are set at the start of the UF
79 * memory descriptor entry. The headers and address table will be 70 * memory descriptor entry. The headers and address table will be
80 * placed after the buffers. 71 * placed after the buffers.
81 */ 72 */
82 virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
83 if (!virt)
84 return -ENOMEM;
85 73
86 /* 74 /*
87 * Program the location of the UF header table into the SCU. 75 * Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
93 * headers, since we program the UF address table pointers to 81 * headers, since we program the UF address table pointers to
94 * NULL. 82 * NULL.
95 */ 83 */
96 uf_control->headers.physical_address = dma + buf_len;
97 uf_control->headers.array = virt + buf_len;
84 uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
85 uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
98 86
99 /* 87 /*
100 * Program the location of the UF address table into the SCU. 88 * Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
103 * byte boundary already due to above programming headers being on a 91 * byte boundary already due to above programming headers being on a
104 * 64-bit boundary and headers are on a 64-bytes in size. 92 * 64-bit boundary and headers are on a 64-bytes in size.
105 */ 93 */
106 uf_control->address_table.physical_address = dma + buf_len + header_len;
107 uf_control->address_table.array = virt + buf_len + header_len;
94 uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
95 uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
108 uf_control->get = 0; 96 uf_control->get = 0;
109 97
110 /* 98 /*
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
135 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 123 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
136 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 124 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
137 } 125 }
138
139 return 0;
140} 126}
141 127
142enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, 128enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 75d896686f5a..1bc551ec611f 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control {
257 257
258}; 258};
259 259
260#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
261#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
262#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
263
260struct isci_host; 264struct isci_host;
261 265
262int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
266void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
263 267
264enum sci_status sci_unsolicited_frame_control_get_header( 268enum sci_status sci_unsolicited_frame_control_get_header(
265 struct sci_unsolicited_frame_control *uf_control, 269 struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index fb6610b249e1..c1402fb499ab 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1742 1742
1743 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1743 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1744 FC_SP_BB_DATA_MASK; 1744 FC_SP_BB_DATA_MASK;
1745 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1746 mfs <= lport->mfs) {
1747 lport->mfs = mfs;
1748 fc_host_maxframe_size(lport->host) = mfs;
1749 } else {
1745
1746 if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
1750 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1747 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1751 "lport->mfs:%hu\n", mfs, lport->mfs); 1748 "lport->mfs:%hu\n", mfs, lport->mfs);
1752 fc_lport_error(lport, fp); 1749 fc_lport_error(lport, fp);
1753 goto err; 1750 goto err;
1754 } 1751 }
1755 1752
1753 if (mfs <= lport->mfs) {
1754 lport->mfs = mfs;
1755 fc_host_maxframe_size(lport->host) = mfs;
1756 }
1757
1756 csp_flags = ntohs(flp->fl_csp.sp_features); 1758 csp_flags = ntohs(flp->fl_csp.sp_features);
1757 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); 1759 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1758 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); 1760 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bc0cecc6ad62..441d88ad99a7 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = {
546 .port_ops = &sas_sata_ops 546 .port_ops = &sas_sata_ops
547}; 547};
548 548
549int sas_ata_init_host_and_port(struct domain_device *found_dev)
549int sas_ata_init(struct domain_device *found_dev)
550{ 550{
551 struct sas_ha_struct *ha = found_dev->port->ha; 551 struct sas_ha_struct *ha = found_dev->port->ha;
552 struct Scsi_Host *shost = ha->core.shost; 552 struct Scsi_Host *shost = ha->core.shost;
553 struct ata_port *ap; 553 struct ata_port *ap;
554 int rc;
554 555
555 ata_host_init(&found_dev->sata_dev.ata_host, 556 ata_host_init(&found_dev->sata_dev.ata_host,
556 ha->dev, 557 ha->dev,
@@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev)
567 ap->private_data = found_dev; 568 ap->private_data = found_dev;
568 ap->cbl = ATA_CBL_SATA; 569 ap->cbl = ATA_CBL_SATA;
569 ap->scsi_host = shost; 570 ap->scsi_host = shost;
570 /* publish initialized ata port */
571 smp_wmb();
571 rc = ata_sas_port_init(ap);
572 if (rc) {
573 ata_sas_port_destroy(ap);
574 return rc;
575 }
572 found_dev->sata_dev.ap = ap; 576 found_dev->sata_dev.ap = ap;
573 577
574 return 0; 578 return 0;
@@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev)
648void sas_probe_sata(struct asd_sas_port *port) 652void sas_probe_sata(struct asd_sas_port *port)
649{ 653{
650 struct domain_device *dev, *n; 654 struct domain_device *dev, *n;
651 int err;
652 655
653 mutex_lock(&port->ha->disco_mutex); 656 mutex_lock(&port->ha->disco_mutex);
654 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
657 list_for_each_entry(dev, &port->disco_list, disco_list_node) {
655 if (!dev_is_sata(dev)) 658 if (!dev_is_sata(dev))
656 continue; 659 continue;
657 660
658 err = sas_ata_init_host_and_port(dev);
659 if (err)
660 sas_fail_probe(dev, __func__, err);
661 else
662 ata_sas_async_port_init(dev->sata_dev.ap);
661 ata_sas_async_probe(dev->sata_dev.ap);
663 } 662 }
664 mutex_unlock(&port->ha->disco_mutex); 663 mutex_unlock(&port->ha->disco_mutex);
665 664
@@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
718 sas_put_device(dev); 717 sas_put_device(dev);
719} 718}
720 719
721static bool sas_ata_dev_eh_valid(struct domain_device *dev)
722{
723 struct ata_port *ap;
724
725 if (!dev_is_sata(dev))
726 return false;
727 ap = dev->sata_dev.ap;
728 /* consume fully initialized ata ports */
729 smp_rmb();
730 return !!ap;
731}
732
733void sas_ata_strategy_handler(struct Scsi_Host *shost) 720void sas_ata_strategy_handler(struct Scsi_Host *shost)
734{ 721{
735 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 722 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
@@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
753 740
754 spin_lock(&port->dev_list_lock); 741 spin_lock(&port->dev_list_lock);
755 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 742 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
756 if (!sas_ata_dev_eh_valid(dev)) 743 if (!dev_is_sata(dev))
757 continue; 744 continue;
758 async_schedule_domain(async_sas_ata_eh, dev, &async); 745 async_schedule_domain(async_sas_ata_eh, dev, &async);
759 } 746 }
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 364679675602..629a0865b130 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
72 struct asd_sas_phy *phy; 72 struct asd_sas_phy *phy;
73 struct sas_rphy *rphy; 73 struct sas_rphy *rphy;
74 struct domain_device *dev; 74 struct domain_device *dev;
75 int rc = -ENODEV;
75 76
76 dev = sas_alloc_device(); 77 dev = sas_alloc_device();
77 if (!dev) 78 if (!dev)
@@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port)
110 111
111 sas_init_dev(dev); 112 sas_init_dev(dev);
112 113
114 dev->port = port;
113 switch (dev->dev_type) { 115 switch (dev->dev_type) {
114 case SAS_END_DEV:
115 case SATA_DEV: 116 case SATA_DEV:
117 rc = sas_ata_init(dev);
118 if (rc) {
119 rphy = NULL;
120 break;
121 }
122 /* fall through */
123 case SAS_END_DEV:
116 rphy = sas_end_device_alloc(port->port); 124 rphy = sas_end_device_alloc(port->port);
117 break; 125 break;
118 case EDGE_DEV: 126 case EDGE_DEV:
@@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
131 139
132 if (!rphy) { 140 if (!rphy) {
133 sas_put_device(dev); 141 sas_put_device(dev);
134 return -ENODEV;
142 return rc;
135 } 143 }
136 144
137 spin_lock_irq(&port->phy_list_lock);
138 list_for_each_entry(phy, &port->phy_list, port_phy_el)
139 sas_phy_set_target(phy, dev);
140 spin_unlock_irq(&port->phy_list_lock);
141 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; 145 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
142 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); 146 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
143 sas_fill_in_rphy(dev, rphy); 147 sas_fill_in_rphy(dev, rphy);
144 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); 148 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
145 port->port_dev = dev; 149 port->port_dev = dev;
146 dev->port = port;
147 dev->linkrate = port->linkrate; 150 dev->linkrate = port->linkrate;
148 dev->min_linkrate = port->linkrate; 151 dev->min_linkrate = port->linkrate;
149 dev->max_linkrate = port->linkrate; 152 dev->max_linkrate = port->linkrate;
@@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
155 sas_device_set_phy(dev, port->port); 158 sas_device_set_phy(dev, port->port);
156 159
157 dev->rphy = rphy; 160 dev->rphy = rphy;
161 get_device(&dev->rphy->dev);
158 162
159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) 163 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
160 list_add_tail(&dev->disco_list_node, &port->disco_list); 164 list_add_tail(&dev->disco_list_node, &port->disco_list);
@@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port)
164 spin_unlock_irq(&port->dev_list_lock); 168 spin_unlock_irq(&port->dev_list_lock);
165 } 169 }
166 170
171 spin_lock_irq(&port->phy_list_lock);
172 list_for_each_entry(phy, &port->phy_list, port_phy_el)
173 sas_phy_set_target(phy, dev);
174 spin_unlock_irq(&port->phy_list_lock);
175
167 return 0; 176 return 0;
168} 177}
169 178
@@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
205static void sas_probe_devices(struct work_struct *work) 214static void sas_probe_devices(struct work_struct *work)
206{ 215{
207 struct domain_device *dev, *n; 216 struct domain_device *dev, *n;
208 struct sas_discovery_event *ev =
209 container_of(work, struct sas_discovery_event, work);
217 struct sas_discovery_event *ev = to_sas_discovery_event(work);
210 struct asd_sas_port *port = ev->port; 218 struct asd_sas_port *port = ev->port;
211 219
212 clear_bit(DISCE_PROBE, &port->disc.pending); 220 clear_bit(DISCE_PROBE, &port->disc.pending);
@@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref)
255{ 263{
256 struct domain_device *dev = container_of(kref, typeof(*dev), kref); 264 struct domain_device *dev = container_of(kref, typeof(*dev), kref);
257 265
266 put_device(&dev->rphy->dev);
267 dev->rphy = NULL;
268
258 if (dev->parent) 269 if (dev->parent)
259 sas_put_device(dev->parent); 270 sas_put_device(dev->parent);
260 271
@@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
291static void sas_destruct_devices(struct work_struct *work) 302static void sas_destruct_devices(struct work_struct *work)
292{ 303{
293 struct domain_device *dev, *n; 304 struct domain_device *dev, *n;
294 struct sas_discovery_event *ev =
295 container_of(work, struct sas_discovery_event, work);
305 struct sas_discovery_event *ev = to_sas_discovery_event(work);
296 struct asd_sas_port *port = ev->port; 306 struct asd_sas_port *port = ev->port;
297 307
298 clear_bit(DISCE_DESTRUCT, &port->disc.pending); 308 clear_bit(DISCE_DESTRUCT, &port->disc.pending);
@@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work)
302 312
303 sas_remove_children(&dev->rphy->dev); 313 sas_remove_children(&dev->rphy->dev);
304 sas_rphy_delete(dev->rphy); 314 sas_rphy_delete(dev->rphy);
305 dev->rphy = NULL;
306 sas_unregister_common_dev(port, dev); 315 sas_unregister_common_dev(port, dev);
307 } 316 }
308} 317}
@@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
314 /* this rphy never saw sas_rphy_add */ 323 /* this rphy never saw sas_rphy_add */
315 list_del_init(&dev->disco_list_node); 324 list_del_init(&dev->disco_list_node);
316 sas_rphy_free(dev->rphy); 325 sas_rphy_free(dev->rphy);
317 dev->rphy = NULL;
318 sas_unregister_common_dev(port, dev); 326 sas_unregister_common_dev(port, dev);
327 return;
319 } 328 }
320 329
321 if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
330 if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
322 sas_rphy_unlink(dev->rphy); 331 sas_rphy_unlink(dev->rphy);
323 list_move_tail(&dev->disco_list_node, &port->destroy_list); 332 list_move_tail(&dev->disco_list_node, &port->destroy_list);
324 sas_discover_event(dev->port, DISCE_DESTRUCT); 333 sas_discover_event(dev->port, DISCE_DESTRUCT);
@@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work)
377{ 386{
378 struct domain_device *dev; 387 struct domain_device *dev;
379 int error = 0; 388 int error = 0;
380 struct sas_discovery_event *ev =
381 container_of(work, struct sas_discovery_event, work);
389 struct sas_discovery_event *ev = to_sas_discovery_event(work);
382 struct asd_sas_port *port = ev->port; 390 struct asd_sas_port *port = ev->port;
383 391
384 clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); 392 clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
@@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work)
419 427
420 if (error) { 428 if (error) {
421 sas_rphy_free(dev->rphy); 429 sas_rphy_free(dev->rphy);
422 dev->rphy = NULL;
423
424 list_del_init(&dev->disco_list_node); 430 list_del_init(&dev->disco_list_node);
425 spin_lock_irq(&port->dev_list_lock); 431 spin_lock_irq(&port->dev_list_lock);
426 list_del_init(&dev->dev_list_node); 432 list_del_init(&dev->dev_list_node);
@@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work)
437static void sas_revalidate_domain(struct work_struct *work) 443static void sas_revalidate_domain(struct work_struct *work)
438{ 444{
439 int res = 0; 445 int res = 0;
440 struct sas_discovery_event *ev =
441 container_of(work, struct sas_discovery_event, work);
446 struct sas_discovery_event *ev = to_sas_discovery_event(work);
442 struct asd_sas_port *port = ev->port; 447 struct asd_sas_port *port = ev->port;
443 struct sas_ha_struct *ha = port->ha; 448 struct sas_ha_struct *ha = port->ha;
444 449
@@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work)
466 471
467/* ---------- Events ---------- */ 472/* ---------- Events ---------- */
468 473
469static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
474static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
470{ 475{
471 /* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */
472 scsi_queue_work(ha->core.shost, work);
476 /* chained work is not subject to SA_HA_DRAINING or
477 * SAS_HA_REGISTERED, because it is either submitted in the
478 * workqueue, or known to be submitted from a context that is
479 * not racing against draining
480 */
481 scsi_queue_work(ha->core.shost, &sw->work);
473} 482}
474 483
475static void sas_chain_event(int event, unsigned long *pending, 484static void sas_chain_event(int event, unsigned long *pending,
476 struct work_struct *work,
485 struct sas_work *sw,
477 struct sas_ha_struct *ha) 486 struct sas_ha_struct *ha)
478{ 487{
479 if (!test_and_set_bit(event, pending)) { 488 if (!test_and_set_bit(event, pending)) {
480 unsigned long flags; 489 unsigned long flags;
481 490
482 spin_lock_irqsave(&ha->state_lock, flags); 491 spin_lock_irqsave(&ha->state_lock, flags);
483 sas_chain_work(ha, work);
492 sas_chain_work(ha, sw);
484 spin_unlock_irqrestore(&ha->state_lock, flags); 493 spin_unlock_irqrestore(&ha->state_lock, flags);
485 } 494 }
486} 495}
@@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
519 528
520 disc->pending = 0; 529 disc->pending = 0;
521 for (i = 0; i < DISC_NUM_EVENTS; i++) { 530 for (i = 0; i < DISC_NUM_EVENTS; i++) {
522 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
531 INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
523 disc->disc_work[i].port = port; 532 disc->disc_work[i].port = port;
524 } 533 }
525} 534}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 16639bbae629..4e4292d210c1 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,19 +27,21 @@
27#include "sas_internal.h" 27#include "sas_internal.h"
28#include "sas_dump.h" 28#include "sas_dump.h"
29 29
30void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
30void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
31{ 31{
32 if (!test_bit(SAS_HA_REGISTERED, &ha->state)) 32 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
33 return; 33 return;
34 34
35 if (test_bit(SAS_HA_DRAINING, &ha->state))
36 list_add(&work->entry, &ha->defer_q);
37 else
38 scsi_queue_work(ha->core.shost, work);
35 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
36 /* add it to the defer list, if not already pending */
37 if (list_empty(&sw->drain_node))
38 list_add(&sw->drain_node, &ha->defer_q);
39 } else
40 scsi_queue_work(ha->core.shost, &sw->work);
39} 41}
40 42
41static void sas_queue_event(int event, unsigned long *pending, 43static void sas_queue_event(int event, unsigned long *pending,
42 struct work_struct *work,
44 struct sas_work *work,
43 struct sas_ha_struct *ha) 45 struct sas_ha_struct *ha)
44{ 46{
45 if (!test_and_set_bit(event, pending)) { 47 if (!test_and_set_bit(event, pending)) {
@@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending,
55void __sas_drain_work(struct sas_ha_struct *ha) 57void __sas_drain_work(struct sas_ha_struct *ha)
56{ 58{
57 struct workqueue_struct *wq = ha->core.shost->work_q; 59 struct workqueue_struct *wq = ha->core.shost->work_q;
58 struct work_struct *w, *_w;
60 struct sas_work *sw, *_sw;
59 61
60 set_bit(SAS_HA_DRAINING, &ha->state); 62 set_bit(SAS_HA_DRAINING, &ha->state);
61 /* flush submitters */ 63 /* flush submitters */
@@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha)
66 68
67 spin_lock_irq(&ha->state_lock); 69 spin_lock_irq(&ha->state_lock);
68 clear_bit(SAS_HA_DRAINING, &ha->state); 70 clear_bit(SAS_HA_DRAINING, &ha->state);
69 list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
70 list_del_init(&w->entry);
71 sas_queue_work(ha, w);
71 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
72 list_del_init(&sw->drain_node);
73 sas_queue_work(ha, sw);
72 } 74 }
73 spin_unlock_irq(&ha->state_lock); 75 spin_unlock_irq(&ha->state_lock);
74} 76}
@@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
151 int i; 153 int i;
152 154
153 for (i = 0; i < HA_NUM_EVENTS; i++) { 155 for (i = 0; i < HA_NUM_EVENTS; i++) {
154 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
156 INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
155 sas_ha->ha_events[i].ha = sas_ha; 157 sas_ha->ha_events[i].ha = sas_ha;
156 } 158 }
157 159
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 05acd9e35fc4..caa0525d2523 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
202 u8 sas_addr[SAS_ADDR_SIZE]; 202 u8 sas_addr[SAS_ADDR_SIZE];
203 struct smp_resp *resp = rsp; 203 struct smp_resp *resp = rsp;
204 struct discover_resp *dr = &resp->disc; 204 struct discover_resp *dr = &resp->disc;
205 struct sas_ha_struct *ha = dev->port->ha;
205 struct expander_device *ex = &dev->ex_dev; 206 struct expander_device *ex = &dev->ex_dev;
206 struct ex_phy *phy = &ex->ex_phy[phy_id]; 207 struct ex_phy *phy = &ex->ex_phy[phy_id];
207 struct sas_rphy *rphy = dev->rphy; 208 struct sas_rphy *rphy = dev->rphy;
@@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
209 char *type; 210 char *type;
210 211
211 if (new_phy) { 212 if (new_phy) {
213 if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
214 return;
212 phy->phy = sas_phy_alloc(&rphy->dev, phy_id); 215 phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
213 216
214 /* FIXME: error_handling */ 217 /* FIXME: error_handling */
@@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
233 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
234 237
235 phy->attached_dev_type = to_dev_type(dr); 238 phy->attached_dev_type = to_dev_type(dr);
239 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
240 goto out;
236 phy->phy_id = phy_id; 241 phy->phy_id = phy_id;
237 phy->linkrate = dr->linkrate; 242 phy->linkrate = dr->linkrate;
238 phy->attached_sata_host = dr->attached_sata_host; 243 phy->attached_sata_host = dr->attached_sata_host;
@@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
240 phy->attached_sata_ps = dr->attached_sata_ps; 245 phy->attached_sata_ps = dr->attached_sata_ps;
241 phy->attached_iproto = dr->iproto << 1; 246 phy->attached_iproto = dr->iproto << 1;
242 phy->attached_tproto = dr->tproto << 1; 247 phy->attached_tproto = dr->tproto << 1;
243 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
248 /* help some expanders that fail to zero sas_address in the 'no
249 * device' case
250 */
251 if (phy->attached_dev_type == NO_DEVICE ||
252 phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
253 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
254 else
255 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
244 phy->attached_phy_id = dr->attached_phy_id; 256 phy->attached_phy_id = dr->attached_phy_id;
245 phy->phy_change_count = dr->change_count; 257 phy->phy_change_count = dr->change_count;
246 phy->routing_attr = dr->routing_attr; 258 phy->routing_attr = dr->routing_attr;
@@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
266 return; 278 return;
267 } 279 }
268 280
281 out:
269 switch (phy->attached_dev_type) { 282 switch (phy->attached_dev_type) {
270 case SATA_PENDING: 283 case SATA_PENDING:
271 type = "stp pending"; 284 type = "stp pending";
@@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
304 else 317 else
305 return; 318 return;
306 319
307 SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
320 /* if the attached device type changed and ata_eh is active,
321 * make sure we run revalidation when eh completes (see:
322 * sas_enable_revalidation)
323 */
324 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
325 set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
326
327 SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
328 test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
308 SAS_ADDR(dev->sas_addr), phy->phy_id, 329 SAS_ADDR(dev->sas_addr), phy->phy_id,
309 sas_route_char(dev, phy), phy->linkrate, 330 sas_route_char(dev, phy), phy->linkrate,
310 SAS_ADDR(phy->attached_sas_addr), type); 331 SAS_ADDR(phy->attached_sas_addr), type);
@@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev(
776 if (res) 797 if (res)
777 goto out_free; 798 goto out_free;
778 799
800 sas_init_dev(child);
801 res = sas_ata_init(child);
802 if (res)
803 goto out_free;
779 rphy = sas_end_device_alloc(phy->port); 804 rphy = sas_end_device_alloc(phy->port);
780 if (unlikely(!rphy))
805 if (!rphy)
781 goto out_free; 806 goto out_free;
782 807
783 sas_init_dev(child);
784
785 child->rphy = rphy; 808 child->rphy = rphy;
809 get_device(&rphy->dev);
786 810
787 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 811 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
788 812
@@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev(
806 sas_init_dev(child); 830 sas_init_dev(child);
807 831
808 child->rphy = rphy; 832 child->rphy = rphy;
833 get_device(&rphy->dev);
809 sas_fill_in_rphy(child, rphy); 834 sas_fill_in_rphy(child, rphy);
810 835
811 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 836 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
@@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev(
830 855
831 out_list_del: 856 out_list_del:
832 sas_rphy_free(child->rphy); 857 sas_rphy_free(child->rphy);
833 child->rphy = NULL;
834
835 list_del(&child->disco_list_node); 858 list_del(&child->disco_list_node);
836 spin_lock_irq(&parent->port->dev_list_lock); 859 spin_lock_irq(&parent->port->dev_list_lock);
837 list_del(&child->dev_list_node); 860 list_del(&child->dev_list_node);
@@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander(
911 } 934 }
912 port = parent->port; 935 port = parent->port;
913 child->rphy = rphy; 936 child->rphy = rphy;
937 get_device(&rphy->dev);
914 edev = rphy_to_expander_device(rphy); 938 edev = rphy_to_expander_device(rphy);
915 child->dev_type = phy->attached_dev_type; 939 child->dev_type = phy->attached_dev_type;
916 kref_get(&parent->kref); 940 kref_get(&parent->kref);
@@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander(
934 958
935 res = sas_discover_expander(child); 959 res = sas_discover_expander(child);
936 if (res) { 960 if (res) {
961 sas_rphy_delete(rphy);
937 spin_lock_irq(&parent->port->dev_list_lock); 962 spin_lock_irq(&parent->port->dev_list_lock);
938 list_del(&child->dev_list_node); 963 list_del(&child->dev_list_node);
939 spin_unlock_irq(&parent->port->dev_list_lock); 964 spin_unlock_irq(&parent->port->dev_list_lock);
@@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1718 int phy_change_count = 0; 1743 int phy_change_count = 0;
1719 1744
1720 res = sas_get_phy_change_count(dev, i, &phy_change_count); 1745 res = sas_get_phy_change_count(dev, i, &phy_change_count);
1721 if (res)
1722 goto out;
1723 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1746 switch (res) {
1747 case SMP_RESP_PHY_VACANT:
1748 case SMP_RESP_NO_PHY:
1749 continue;
1750 case SMP_RESP_FUNC_ACC:
1751 break;
1752 default:
1753 return res;
1754 }
1755
1756 if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1724 if (update) 1757 if (update)
1725 ex->ex_phy[i].phy_change_count = 1758 ex->ex_phy[i].phy_change_count =
1726 phy_change_count; 1759 phy_change_count;
@@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1728 return 0; 1761 return 0;
1729 } 1762 }
1730 } 1763 }
1731out:
1732 return res;
1764 return 0;
1733} 1765}
1734 1766
1735static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) 1767static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 120bff64be30..10cb5ae30977 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
94 94
95void sas_hae_reset(struct work_struct *work) 95void sas_hae_reset(struct work_struct *work)
96{ 96{
97 struct sas_ha_event *ev =
98 container_of(work, struct sas_ha_event, work);
97 struct sas_ha_event *ev = to_sas_ha_event(work);
99 struct sas_ha_struct *ha = ev->ha; 98 struct sas_ha_struct *ha = ev->ha;
100 99
101 clear_bit(HAE_RESET, &ha->pending); 100 clear_bit(HAE_RESET, &ha->pending);
@@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy)
369 368
370static void phy_reset_work(struct work_struct *work) 369static void phy_reset_work(struct work_struct *work)
371{ 370{
372 struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
371 struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
373 372
374 d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); 373 d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
375} 374}
376 375
377static void phy_enable_work(struct work_struct *work) 376static void phy_enable_work(struct work_struct *work)
378{ 377{
379 struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
378 struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
380 379
381 d->enable_result = sas_phy_enable(d->phy, d->enable); 380 d->enable_result = sas_phy_enable(d->phy, d->enable);
382} 381}
@@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy)
389 return -ENOMEM; 388 return -ENOMEM;
390 389
391 mutex_init(&d->event_lock); 390 mutex_init(&d->event_lock);
392 INIT_WORK(&d->reset_work, phy_reset_work);
393 INIT_WORK(&d->enable_work, phy_enable_work);
391 INIT_SAS_WORK(&d->reset_work, phy_reset_work);
392 INIT_SAS_WORK(&d->enable_work, phy_enable_work);
394 d->phy = phy; 393 d->phy = phy;
395 phy->hostdata = d; 394 phy->hostdata = d;
396 395
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index f05c63879949..507e4cf12e56 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,10 +45,10 @@ struct sas_phy_data {
45 struct mutex event_lock; 45 struct mutex event_lock;
46 int hard_reset; 46 int hard_reset;
47 int reset_result; 47 int reset_result;
48 struct work_struct reset_work;
48 struct sas_work reset_work;
49 int enable; 49 int enable;
50 int enable_result; 50 int enable_result;
51 struct work_struct enable_work;
51 struct sas_work enable_work;
52}; 52};
53 53
54void sas_scsi_recover_host(struct Scsi_Host *shost); 54void sas_scsi_recover_host(struct Scsi_Host *shost);
@@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
80void sas_porte_link_reset_err(struct work_struct *work); 80void sas_porte_link_reset_err(struct work_struct *work);
81void sas_porte_timer_event(struct work_struct *work); 81void sas_porte_timer_event(struct work_struct *work);
82void sas_porte_hard_reset(struct work_struct *work); 82void sas_porte_hard_reset(struct work_struct *work);
83void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
83void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
84 84
85int sas_notify_lldd_dev_found(struct domain_device *); 85int sas_notify_lldd_dev_found(struct domain_device *);
86void sas_notify_lldd_dev_gone(struct domain_device *); 86void sas_notify_lldd_dev_gone(struct domain_device *);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index dcfd4a9105c5..521422e857ab 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -32,8 +32,7 @@
32 32
33static void sas_phye_loss_of_signal(struct work_struct *work) 33static void sas_phye_loss_of_signal(struct work_struct *work)
34{ 34{
35 struct asd_sas_event *ev =
36 container_of(work, struct asd_sas_event, work);
35 struct asd_sas_event *ev = to_asd_sas_event(work);
37 struct asd_sas_phy *phy = ev->phy; 36 struct asd_sas_phy *phy = ev->phy;
38 37
39 clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); 38 clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
@@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
43 42
44static void sas_phye_oob_done(struct work_struct *work) 43static void sas_phye_oob_done(struct work_struct *work)
45{ 44{
46 struct asd_sas_event *ev =
47 container_of(work, struct asd_sas_event, work);
45 struct asd_sas_event *ev = to_asd_sas_event(work);
48 struct asd_sas_phy *phy = ev->phy; 46 struct asd_sas_phy *phy = ev->phy;
49 47
50 clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); 48 clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
@@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work)
53 51
54static void sas_phye_oob_error(struct work_struct *work) 52static void sas_phye_oob_error(struct work_struct *work)
55{ 53{
56 struct asd_sas_event *ev =
57 container_of(work, struct asd_sas_event, work);
54 struct asd_sas_event *ev = to_asd_sas_event(work);
58 struct asd_sas_phy *phy = ev->phy; 55 struct asd_sas_phy *phy = ev->phy;
59 struct sas_ha_struct *sas_ha = phy->ha; 56 struct sas_ha_struct *sas_ha = phy->ha;
60 struct asd_sas_port *port = phy->port; 57 struct asd_sas_port *port = phy->port;
@@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work)
85 82
86static void sas_phye_spinup_hold(struct work_struct *work) 83static void sas_phye_spinup_hold(struct work_struct *work)
87{ 84{
88 struct asd_sas_event *ev =
89 container_of(work, struct asd_sas_event, work);
85 struct asd_sas_event *ev = to_asd_sas_event(work);
90 struct asd_sas_phy *phy = ev->phy; 86 struct asd_sas_phy *phy = ev->phy;
91 struct sas_ha_struct *sas_ha = phy->ha; 87 struct sas_ha_struct *sas_ha = phy->ha;
92 struct sas_internal *i = 88 struct sas_internal *i =
@@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
127 phy->error = 0; 123 phy->error = 0;
128 INIT_LIST_HEAD(&phy->port_phy_el); 124 INIT_LIST_HEAD(&phy->port_phy_el);
129 for (k = 0; k < PORT_NUM_EVENTS; k++) { 125 for (k = 0; k < PORT_NUM_EVENTS; k++) {
130 INIT_WORK(&phy->port_events[k].work,
131 sas_port_event_fns[k]);
126 INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
132 phy->port_events[k].phy = phy; 127 phy->port_events[k].phy = phy;
133 } 128 }
134 129
135 for (k = 0; k < PHY_NUM_EVENTS; k++) { 130 for (k = 0; k < PHY_NUM_EVENTS; k++) {
136 INIT_WORK(&phy->phy_events[k].work,
137 sas_phy_event_fns[k]);
131 INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
138 phy->phy_events[k].phy = phy; 132 phy->phy_events[k].phy = phy;
139 } 133 }
140 134
@@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
144 spin_lock_init(&phy->sas_prim_lock); 138 spin_lock_init(&phy->sas_prim_lock);
145 phy->frame_rcvd_size = 0; 139 phy->frame_rcvd_size = 0;
146 140
147 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
148 i);
141 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
149 if (!phy->phy) 142 if (!phy->phy)
150 return -ENOMEM; 143 return -ENOMEM;
151 144
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index eb19c016d500..e884a8c58a0c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
123 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 123 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
124 124
125 if (!port->port) { 125 if (!port->port) {
126 port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
126 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
127 BUG_ON(!port->port); 127 BUG_ON(!port->port);
128 sas_port_add(port->port); 128 sas_port_add(port->port);
129 } 129 }
@@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
208 208
209void sas_porte_bytes_dmaed(struct work_struct *work) 209void sas_porte_bytes_dmaed(struct work_struct *work)
210{ 210{
211 struct asd_sas_event *ev =
212 container_of(work, struct asd_sas_event, work);
211 struct asd_sas_event *ev = to_asd_sas_event(work);
213 struct asd_sas_phy *phy = ev->phy; 212 struct asd_sas_phy *phy = ev->phy;
214 213
215 clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); 214 clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
@@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
219 218
220void sas_porte_broadcast_rcvd(struct work_struct *work) 219void sas_porte_broadcast_rcvd(struct work_struct *work)
221{ 220{
222 struct asd_sas_event *ev =
223 container_of(work, struct asd_sas_event, work);
221 struct asd_sas_event *ev = to_asd_sas_event(work);
224 struct asd_sas_phy *phy = ev->phy; 222 struct asd_sas_phy *phy = ev->phy;
225 unsigned long flags; 223 unsigned long flags;
226 u32 prim; 224 u32 prim;
@@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
237 235
238void sas_porte_link_reset_err(struct work_struct *work) 236void sas_porte_link_reset_err(struct work_struct *work)
239{ 237{
240 struct asd_sas_event *ev =
241 container_of(work, struct asd_sas_event, work);
238 struct asd_sas_event *ev = to_asd_sas_event(work);
242 struct asd_sas_phy *phy = ev->phy; 239 struct asd_sas_phy *phy = ev->phy;
243 240
244 clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); 241 clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
@@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
248 245
249void sas_porte_timer_event(struct work_struct *work) 246void sas_porte_timer_event(struct work_struct *work)
250{ 247{
251 struct asd_sas_event *ev =
252 container_of(work, struct asd_sas_event, work);
248 struct asd_sas_event *ev = to_asd_sas_event(work);
253 struct asd_sas_phy *phy = ev->phy; 249 struct asd_sas_phy *phy = ev->phy;
254 250
255 clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); 251 clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
@@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work)
259 255
260void sas_porte_hard_reset(struct work_struct *work) 256void sas_porte_hard_reset(struct work_struct *work)
261{ 257{
262 struct asd_sas_event *ev =
263 container_of(work, struct asd_sas_event, work);
258 struct asd_sas_event *ev = to_asd_sas_event(work);
264 struct asd_sas_phy *phy = ev->phy; 259 struct asd_sas_phy *phy = ev->phy;
265 260
266 clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); 261 clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f74cc0602f3b..bc3cc6d91117 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1367 struct qla_hw_data *ha = vha->hw; 1367 struct qla_hw_data *ha = vha->hw;
1368 int rval = 0; 1368 int rval = 0;
1369 1369
1370 if (ha->flags.isp82xx_reset_hdlr_active)
1371 return -EBUSY;
1372
1370 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1373 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1371 if (rval) 1374 if (rval)
1372 return rval; 1375 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 897731b93df2..62324a1d5573 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 15 * | Mailbox commands | 0x113e | 0x112c-0x112e |
16 * | | | 0x113a | 16 * | | | 0x113a |
17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 17 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
18 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e | 19 * | | | 0x302d-0x302e |
20 * | DPC Thread | 0x401c | | 20 * | DPC Thread | 0x401c | |
21 * | Async Events | 0x505d | 0x502b-0x502f | 21 * | Async Events | 0x505d | 0x502b-0x502f |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f79844ce7122..ce42288049b5 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1715,13 +1715,24 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1715 res = DID_ERROR << 16; 1715 res = DID_ERROR << 16;
1716 break; 1716 break;
1717 } 1717 }
1718 } else {
1718 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
1719 lscsi_status != SAM_STAT_BUSY) {
1720 /*
1721 * scsi status of task set and busy are considered to be
1722 * task not completed.
1723 */
1724
1719 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 1725 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
1720 "Dropped frame(s) detected (0x%x " 1726 "Dropped frame(s) detected (0x%x "
1721 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1727 "of 0x%x bytes).\n", resid,
1728 scsi_bufflen(cp));
1722 1729
1723 res = DID_ERROR << 16 | lscsi_status; 1730 res = DID_ERROR << 16 | lscsi_status;
1724 goto check_scsi_status; 1731 goto check_scsi_status;
1732 } else {
1733 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
1734 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
1735 scsi_status, lscsi_status);
1725 } 1736 }
1726 1737
1727 res = DID_OK << 16 | lscsi_status; 1738 res = DID_OK << 16 | lscsi_status;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f0528539bbbc..de722a933438 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3125,6 +3125,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3125 ql_log(ql_log_info, vha, 0x00b7, 3125 ql_log(ql_log_info, vha, 0x00b7,
3126 "HW State: COLD/RE-INIT.\n"); 3126 "HW State: COLD/RE-INIT.\n");
3127 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3127 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3128 qla82xx_set_rst_ready(ha);
3128 if (ql2xmdenable) { 3129 if (ql2xmdenable) {
3129 if (qla82xx_md_collect(vha)) 3130 if (qla82xx_md_collect(vha))
3130 ql_log(ql_log_warn, vha, 0xb02c, 3131 ql_log(ql_log_warn, vha, 0xb02c,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a2f999273a5f..7db803377c64 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3577,9 +3577,25 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3577 continue; 3577 continue;
3578 /* Attempt a retry. */ 3578 /* Attempt a retry. */
3579 status = 1; 3579 status = 1;
3580 } else
3580 } else {
3581 status = qla2x00_fabric_login(vha, 3581 status = qla2x00_fabric_login(vha,
3582 fcport, &next_loopid); 3582 fcport, &next_loopid);
3583 if (status == QLA_SUCCESS) {
3584 int status2;
3585 uint8_t opts;
3586
3587 opts = 0;
3588 if (fcport->flags &
3589 FCF_FCP2_DEVICE)
3590 opts |= BIT_1;
3591 status2 =
3592 qla2x00_get_port_database(
3593 vha, fcport,
3594 opts);
3595 if (status2 != QLA_SUCCESS)
3596 status = 1;
3597 }
3598 }
3583 } else 3599 } else
3584 status = qla2x00_local_device_login(vha, 3600 status = qla2x00_local_device_login(vha,
3585 fcport); 3601 fcport);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3c13c0a6be63..a683e766d1ae 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1017,6 +1017,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
1018 return; 1018 return;
1019 1019
1020 if (ha->flags.isp82xx_reset_hdlr_active)
1021 return;
1022
1020 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1023 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
1021 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 1024 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
1022 if (hdr.version == __constant_cpu_to_le16(0xffff)) 1025 if (hdr.version == __constant_cpu_to_le16(0xffff))
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 29d780c38040..f5fdb16bec9b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.13-k"
10#define QLA2XXX_VERSION "8.04.00.03-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 4
14#define QLA_DRIVER_PATCH_VER 7 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 3 15#define QLA_DRIVER_BETA_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dbe43924d7ae..62ddfd31d4ce 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1638 request_fn_proc *request_fn) 1638 request_fn_proc *request_fn)
1639{ 1639{
1640 struct request_queue *q; 1640 struct request_queue *q;
1641 struct device *dev = shost->shost_gendev.parent; 1641 struct device *dev = shost->dma_dev;
1642 1642
1643 q = blk_init_queue(request_fn, NULL); 1643 q = blk_init_queue(request_fn, NULL);
1644 if (!q) 1644 if (!q)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index efccd72c4a3e..1b3843117268 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -175,7 +175,8 @@ static void virtscsi_complete_free(void *buf)
175 175
176 if (cmd->comp) 176 if (cmd->comp)
177 complete_all(cmd->comp); 177 complete_all(cmd->comp);
178 mempool_free(cmd, virtscsi_cmd_pool); 178 else
179 mempool_free(cmd, virtscsi_cmd_pool);
179} 180}
180 181
181static void virtscsi_ctrl_done(struct virtqueue *vq) 182static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -311,21 +312,22 @@ out:
311static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) 312static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
312{ 313{
313 DECLARE_COMPLETION_ONSTACK(comp); 314 DECLARE_COMPLETION_ONSTACK(comp);
314 int ret; 315 int ret = FAILED;
315 316
316 cmd->comp = &comp; 317 cmd->comp = &comp;
317 ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, 318 if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
318 sizeof cmd->req.tmf, sizeof cmd->resp.tmf, 319 sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
319 GFP_NOIO); 320 GFP_NOIO) < 0)
320 if (ret < 0) 321 goto out;
321 return FAILED;
322 322
323 wait_for_completion(&comp); 323 wait_for_completion(&comp);
324 if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK && 324 if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
325 cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) 325 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
326 return FAILED; 326 ret = SUCCESS;
327 327
328 return SUCCESS; 328out:
329 mempool_free(cmd, virtscsi_cmd_pool);
330 return ret;
329} 331}
330 332
331static int virtscsi_device_reset(struct scsi_cmnd *sc) 333static int virtscsi_device_reset(struct scsi_cmnd *sc)
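
[Editor's note: the virtio_scsi hunk converts the early returns into a single exit path so the command is always returned to the mempool, and initializes the return value to FAILED so only the success branch changes it. A compact userspace sketch of that shape; malloc/free stand in for the mempool, kick() and response_ok() are stubs, and FAILED/SUCCESS are illustrative values.]

#include <stdlib.h>
#include <stdio.h>

#define FAILED  0
#define SUCCESS 1

struct cmd { int response; };

/* Hypothetical stand-ins for the virtqueue kick and the host's reply. */
static int kick(struct cmd *c)        { c->response = 0; return 0; }
static int response_ok(struct cmd *c) { return c->response == 0; }

static int do_tmf(void)
{
        struct cmd *cmd = malloc(sizeof(*cmd));
        int ret = FAILED;               /* pessimistic default */

        if (!cmd)
                return FAILED;

        if (kick(cmd) < 0)
                goto out;               /* kick failed: still free cmd */

        /* (the driver waits on a completion here) */
        if (response_ok(cmd))
                ret = SUCCESS;
out:
        free(cmd);                      /* every path releases the command */
        return ret;
}

int main(void)
{
        printf("tmf: %s\n", do_tmf() == SUCCESS ? "ok" : "failed");
        return 0;
}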
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3ed748355b98..00c024039c97 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -74,7 +74,7 @@ config SPI_ATMEL
74 This selects a driver for the Atmel SPI Controller, present on 74 This selects a driver for the Atmel SPI Controller, present on
75 many AT32 (AVR32) and AT91 (ARM) chips. 75 many AT32 (AVR32) and AT91 (ARM) chips.
76 76
77config SPI_BFIN 77config SPI_BFIN5XX
78 tristate "SPI controller driver for ADI Blackfin5xx" 78 tristate "SPI controller driver for ADI Blackfin5xx"
79 depends on BLACKFIN 79 depends on BLACKFIN
80 help 80 help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a1d48e0ba3dc..9d75d2198ff5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
15obj-$(CONFIG_SPI_ATH79) += spi-ath79.o 15obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
16obj-$(CONFIG_SPI_AU1550) += spi-au1550.o 16obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
17obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o 17obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
18obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o 18obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
19obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o 19obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
20obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o 20obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
21obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 21obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index f01b2648452e..7491971139a6 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom BCM63xx SPI controller support 2 * Broadcom BCM63xx SPI controller support
3 * 3 *
4 * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org> 4 * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com> 5 * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -30,6 +30,8 @@
30#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <linux/workqueue.h>
34#include <linux/pm_runtime.h>
33 35
34#include <bcm63xx_dev_spi.h> 36#include <bcm63xx_dev_spi.h>
35 37
@@ -37,8 +39,6 @@
37#define DRV_VER "0.1.2" 39#define DRV_VER "0.1.2"
38 40
39struct bcm63xx_spi { 41struct bcm63xx_spi {
40 spinlock_t lock;
41 int stopping;
42 struct completion done; 42 struct completion done;
43 43
44 void __iomem *regs; 44 void __iomem *regs;
@@ -96,17 +96,12 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
96 { 391000, SPI_CLK_0_391MHZ } 96 { 391000, SPI_CLK_0_391MHZ }
97}; 97};
98 98
99static int bcm63xx_spi_setup_transfer(struct spi_device *spi, 99static int bcm63xx_spi_check_transfer(struct spi_device *spi,
100 struct spi_transfer *t) 100 struct spi_transfer *t)
101{ 101{
102 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
103 u8 bits_per_word; 102 u8 bits_per_word;
104 u8 clk_cfg, reg;
105 u32 hz;
106 int i;
107 103
108 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 104 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
109 hz = (t) ? t->speed_hz : spi->max_speed_hz;
110 if (bits_per_word != 8) { 105 if (bits_per_word != 8) {
111 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", 106 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
112 __func__, bits_per_word); 107 __func__, bits_per_word);
@@ -119,6 +114,19 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
119 return -EINVAL; 114 return -EINVAL;
120 } 115 }
121 116
117 return 0;
118}
119
120static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
121 struct spi_transfer *t)
122{
123 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
124 u32 hz;
125 u8 clk_cfg, reg;
126 int i;
127
128 hz = (t) ? t->speed_hz : spi->max_speed_hz;
129
122 /* Find the closest clock configuration */ 130 /* Find the closest clock configuration */
123 for (i = 0; i < SPI_CLK_MASK; i++) { 131 for (i = 0; i < SPI_CLK_MASK; i++) {
124 if (hz <= bcm63xx_spi_freq_table[i][0]) { 132 if (hz <= bcm63xx_spi_freq_table[i][0]) {
@@ -139,8 +147,6 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
139 bcm_spi_writeb(bs, reg, SPI_CLK_CFG); 147 bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
140 dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", 148 dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
141 clk_cfg, hz); 149 clk_cfg, hz);
142
143 return 0;
144} 150}
145 151
146/* the spi->mode bits understood by this driver: */ 152/* the spi->mode bits understood by this driver: */
@@ -153,9 +159,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
153 159
154 bs = spi_master_get_devdata(spi->master); 160 bs = spi_master_get_devdata(spi->master);
155 161
156 if (bs->stopping)
157 return -ESHUTDOWN;
158
159 if (!spi->bits_per_word) 162 if (!spi->bits_per_word)
160 spi->bits_per_word = 8; 163 spi->bits_per_word = 8;
161 164
@@ -165,7 +168,7 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
165 return -EINVAL; 168 return -EINVAL;
166 } 169 }
167 170
168 ret = bcm63xx_spi_setup_transfer(spi, NULL); 171 ret = bcm63xx_spi_check_transfer(spi, NULL);
169 if (ret < 0) { 172 if (ret < 0) {
170 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 173 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
171 spi->mode & ~MODEBITS); 174 spi->mode & ~MODEBITS);
@@ -190,28 +193,29 @@ static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
190 bs->remaining_bytes -= size; 193 bs->remaining_bytes -= size;
191} 194}
192 195
193static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) 196static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
197 struct spi_transfer *t)
194{ 198{
195 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); 199 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
196 u16 msg_ctl; 200 u16 msg_ctl;
197 u16 cmd; 201 u16 cmd;
198 202
203 /* Disable the CMD_DONE interrupt */
204 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
205
199 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", 206 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
200 t->tx_buf, t->rx_buf, t->len); 207 t->tx_buf, t->rx_buf, t->len);
201 208
202 /* Transmitter is inhibited */ 209 /* Transmitter is inhibited */
203 bs->tx_ptr = t->tx_buf; 210 bs->tx_ptr = t->tx_buf;
204 bs->rx_ptr = t->rx_buf; 211 bs->rx_ptr = t->rx_buf;
205 init_completion(&bs->done);
206 212
207 if (t->tx_buf) { 213 if (t->tx_buf) {
208 bs->remaining_bytes = t->len; 214 bs->remaining_bytes = t->len;
209 bcm63xx_spi_fill_tx_fifo(bs); 215 bcm63xx_spi_fill_tx_fifo(bs);
210 } 216 }
211 217
212 /* Enable the command done interrupt which 218 init_completion(&bs->done);
213 * we use to determine completion of a command */
214 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
215 219
216 /* Fill in the Message control register */ 220 /* Fill in the Message control register */
217 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); 221 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
@@ -230,33 +234,76 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
230 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); 234 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
231 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); 235 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
232 bcm_spi_writew(bs, cmd, SPI_CMD); 236 bcm_spi_writew(bs, cmd, SPI_CMD);
233 wait_for_completion(&bs->done);
234 237
235 /* Disable the CMD_DONE interrupt */ 238 /* Enable the CMD_DONE interrupt */
236 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 239 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
237 240
238 return t->len - bs->remaining_bytes; 241 return t->len - bs->remaining_bytes;
239} 242}
240 243
241static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m) 244static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
242{ 245{
243 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); 246 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
244 struct spi_transfer *t;
245 int ret = 0;
246 247
247 if (unlikely(list_empty(&m->transfers))) 248 pm_runtime_get_sync(&bs->pdev->dev);
248 return -EINVAL;
249 249
250 if (bs->stopping) 250 return 0;
251 return -ESHUTDOWN; 251}
252
253static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
254{
255 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
256
257 pm_runtime_put(&bs->pdev->dev);
258
259 return 0;
260}
261
262static int bcm63xx_spi_transfer_one(struct spi_master *master,
263 struct spi_message *m)
264{
265 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
266 struct spi_transfer *t;
267 struct spi_device *spi = m->spi;
268 int status = 0;
269 unsigned int timeout = 0;
252 270
253 list_for_each_entry(t, &m->transfers, transfer_list) { 271 list_for_each_entry(t, &m->transfers, transfer_list) {
254 ret += bcm63xx_txrx_bufs(spi, t); 272 unsigned int len = t->len;
255 } 273 u8 rx_tail;
256 274
257 m->complete(m->context); 275 status = bcm63xx_spi_check_transfer(spi, t);
276 if (status < 0)
277 goto exit;
258 278
259 return ret; 279 /* configure adapter for a new transfer */
280 bcm63xx_spi_setup_transfer(spi, t);
281
282 while (len) {
283 /* send the data */
284 len -= bcm63xx_txrx_bufs(spi, t);
285
286 timeout = wait_for_completion_timeout(&bs->done, HZ);
287 if (!timeout) {
288 status = -ETIMEDOUT;
289 goto exit;
290 }
291
292 /* read out all data */
293 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
294
295 /* Read out all the data */
296 if (rx_tail)
297 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
298 }
299
300 m->actual_length += t->len;
301 }
302exit:
303 m->status = status;
304 spi_finalize_current_message(master);
305
306 return 0;
260} 307}
261 308
262/* This driver supports single master mode only. Hence 309/* This driver supports single master mode only. Hence
@@ -267,39 +314,15 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
267 struct spi_master *master = (struct spi_master *)dev_id; 314 struct spi_master *master = (struct spi_master *)dev_id;
268 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 315 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
269 u8 intr; 316 u8 intr;
270 u16 cmd;
271 317
272 /* Read interupts and clear them immediately */ 318 /* Read interupts and clear them immediately */
273 intr = bcm_spi_readb(bs, SPI_INT_STATUS); 319 intr = bcm_spi_readb(bs, SPI_INT_STATUS);
274 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); 320 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
275 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 321 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
276 322
277 /* A tansfer completed */ 323 /* A transfer completed */
278 if (intr & SPI_INTR_CMD_DONE) { 324 if (intr & SPI_INTR_CMD_DONE)
279 u8 rx_tail; 325 complete(&bs->done);
280
281 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
282
283 /* Read out all the data */
284 if (rx_tail)
285 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
286
287 /* See if there is more data to send */
288 if (bs->remaining_bytes > 0) {
289 bcm63xx_spi_fill_tx_fifo(bs);
290
291 /* Start the transfer */
292 bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
293 SPI_MSG_CTL);
294 cmd = bcm_spi_readw(bs, SPI_CMD);
295 cmd |= SPI_CMD_START_IMMEDIATE;
296 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
297 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
298 bcm_spi_writew(bs, cmd, SPI_CMD);
299 } else {
300 complete(&bs->done);
301 }
302 }
303 326
304 return IRQ_HANDLED; 327 return IRQ_HANDLED;
305} 328}
@@ -345,7 +368,6 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
345 } 368 }
346 369
347 bs = spi_master_get_devdata(master); 370 bs = spi_master_get_devdata(master);
348 init_completion(&bs->done);
349 371
350 platform_set_drvdata(pdev, master); 372 platform_set_drvdata(pdev, master);
351 bs->pdev = pdev; 373 bs->pdev = pdev;
@@ -379,12 +401,13 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
379 master->bus_num = pdata->bus_num; 401 master->bus_num = pdata->bus_num;
380 master->num_chipselect = pdata->num_chipselect; 402 master->num_chipselect = pdata->num_chipselect;
381 master->setup = bcm63xx_spi_setup; 403 master->setup = bcm63xx_spi_setup;
382 master->transfer = bcm63xx_transfer; 404 master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
405 master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
406 master->transfer_one_message = bcm63xx_spi_transfer_one;
407 master->mode_bits = MODEBITS;
383 bs->speed_hz = pdata->speed_hz; 408 bs->speed_hz = pdata->speed_hz;
384 bs->stopping = 0;
385 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); 409 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
386 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); 410 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
387 spin_lock_init(&bs->lock);
388 411
389 /* Initialize hardware */ 412 /* Initialize hardware */
390 clk_enable(bs->clk); 413 clk_enable(bs->clk);
@@ -418,18 +441,16 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
418 struct spi_master *master = platform_get_drvdata(pdev); 441 struct spi_master *master = platform_get_drvdata(pdev);
419 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 442 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
420 443
444 spi_unregister_master(master);
445
421 /* reset spi block */ 446 /* reset spi block */
422 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 447 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
423 spin_lock(&bs->lock);
424 bs->stopping = 1;
425 448
426 /* HW shutdown */ 449 /* HW shutdown */
427 clk_disable(bs->clk); 450 clk_disable(bs->clk);
428 clk_put(bs->clk); 451 clk_put(bs->clk);
429 452
430 spin_unlock(&bs->lock);
431 platform_set_drvdata(pdev, 0); 453 platform_set_drvdata(pdev, 0);
432 spi_unregister_master(master);
433 454
434 return 0; 455 return 0;
435} 456}
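
[Editor's note: the spi-bcm63xx rework splits the old setup_transfer() into a validation-only check_transfer() and a routine that actually programs the clock, and moves the per-transfer work out of the interrupt handler into a transfer_one_message() path that waits on a completion. A rough userspace sketch of the check/setup split only; the clock encoding, names, and register write are made up.]

#include <stdio.h>

static int check_transfer(unsigned bits_per_word)
{
        /* pure validation, no hardware access (the controller is 8-bit only) */
        return bits_per_word == 8 ? 0 : -1;
}

static void setup_transfer(unsigned hz)
{
        /* stand-in for the driver's frequency-table walk and register write */
        unsigned clk_cfg = hz >= 20000000 ? 0x0 : hz >= 6250000 ? 0x1 : 0x2;

        printf("SPI_CLK_CFG <= 0x%x for %u Hz\n", clk_cfg, hz);
}

static int transfer_one(unsigned bits_per_word, unsigned hz)
{
        if (check_transfer(bits_per_word) < 0)
                return -1;              /* reject before touching the chip */
        setup_transfer(hz);             /* reconfigure per transfer */
        /* ... fill the FIFO, kick, wait for completion with a timeout ... */
        return 0;
}

int main(void)
{
        return transfer_one(8, 1000000) == 0 ? 0 : 1;
}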
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 248a2cc671a9..1fe51198a622 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -252,19 +252,15 @@ static void
252bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) 252bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
253{ 253{
254 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; 254 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
255 unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
256 255
257 bfin_sport_spi_disable(drv_data); 256 bfin_sport_spi_disable(drv_data);
258 dev_dbg(drv_data->dev, "restoring spi ctl state\n"); 257 dev_dbg(drv_data->dev, "restoring spi ctl state\n");
259 258
260 bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); 259 bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
261 bfin_write(&drv_data->regs->tcr2, bits);
262 bfin_write(&drv_data->regs->tclkdiv, chip->baud); 260 bfin_write(&drv_data->regs->tclkdiv, chip->baud);
263 bfin_write(&drv_data->regs->tfsdiv, bits);
264 SSYNC(); 261 SSYNC();
265 262
266 bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); 263 bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
267 bfin_write(&drv_data->regs->rcr2, bits);
268 SSYNC(); 264 SSYNC();
269 265
270 bfin_sport_spi_cs_active(chip); 266 bfin_sport_spi_cs_active(chip);
@@ -420,11 +416,15 @@ bfin_sport_spi_pump_transfers(unsigned long data)
420 drv_data->cs_change = transfer->cs_change; 416 drv_data->cs_change = transfer->cs_change;
421 417
422 /* Bits per word setup */ 418 /* Bits per word setup */
423 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; 419 bits_per_word = transfer->bits_per_word ? :
424 if (bits_per_word == 8) 420 message->spi->bits_per_word ? : 8;
425 drv_data->ops = &bfin_sport_transfer_ops_u8; 421 if (bits_per_word % 16 == 0)
426 else
427 drv_data->ops = &bfin_sport_transfer_ops_u16; 422 drv_data->ops = &bfin_sport_transfer_ops_u16;
423 else
424 drv_data->ops = &bfin_sport_transfer_ops_u8;
425 bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
426 bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
427 bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
428 428
429 drv_data->state = RUNNING_STATE; 429 drv_data->state = RUNNING_STATE;
430 430
@@ -598,11 +598,12 @@ bfin_sport_spi_setup(struct spi_device *spi)
598 } 598 }
599 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 599 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
600 chip->idle_tx_val = chip_info->idle_tx_val; 600 chip->idle_tx_val = chip_info->idle_tx_val;
601 spi->bits_per_word = chip_info->bits_per_word;
602 } 601 }
603 } 602 }
604 603
605 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { 604 if (spi->bits_per_word % 8) {
605 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
606 spi->bits_per_word);
606 ret = -EINVAL; 607 ret = -EINVAL;
607 goto error; 608 goto error;
608 } 609 }
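
[Editor's note: the spi-bfin-sport hunk derives bits_per_word with a fallback chain (transfer value, then device default, then 8), programs the word-length registers from it, and uses the 16-bit transfer ops only for multiples of 16. A tiny sketch of that selection; the ops structs and the printed "register field" are illustrative only.]

#include <stdio.h>

struct xfer_ops { const char *name; };
static const struct xfer_ops ops_u8  = { "u8"  };
static const struct xfer_ops ops_u16 = { "u16" };

/* Mirrors the fallback chain and op selection in the hunk: use the
 * per-transfer value if set, else the device default, else 8; word
 * sizes that are multiples of 16 go through the 16-bit path. */
static const struct xfer_ops *pick_ops(unsigned t_bpw, unsigned dev_bpw)
{
        unsigned bpw = t_bpw ? t_bpw : dev_bpw ? dev_bpw : 8;

        printf("word length field = %u\n", bpw - 1);
        return (bpw % 16 == 0) ? &ops_u16 : &ops_u8;
}

int main(void)
{
        printf("%s\n", pick_ops(0, 16)->name);  /* device default 16 -> u16 */
        printf("%s\n", pick_ops(0, 0)->name);   /* fall back to 8    -> u8  */
        return 0;
}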
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 3b83ff8b1e2b..9bb4d4af8547 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -396,7 +396,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
396 /* last read */ 396 /* last read */
397 if (drv_data->rx) { 397 if (drv_data->rx) {
398 dev_dbg(&drv_data->pdev->dev, "last read\n"); 398 dev_dbg(&drv_data->pdev->dev, "last read\n");
399 if (n_bytes % 2) { 399 if (!(n_bytes % 2)) {
400 u16 *buf = (u16 *)drv_data->rx; 400 u16 *buf = (u16 *)drv_data->rx;
401 for (loop = 0; loop < n_bytes / 2; loop++) 401 for (loop = 0; loop < n_bytes / 2; loop++)
402 *buf++ = bfin_read(&drv_data->regs->rdbr); 402 *buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -424,7 +424,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
424 if (drv_data->rx && drv_data->tx) { 424 if (drv_data->rx && drv_data->tx) {
425 /* duplex */ 425 /* duplex */
426 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); 426 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
427 if (n_bytes % 2) { 427 if (!(n_bytes % 2)) {
428 u16 *buf = (u16 *)drv_data->rx; 428 u16 *buf = (u16 *)drv_data->rx;
429 u16 *buf2 = (u16 *)drv_data->tx; 429 u16 *buf2 = (u16 *)drv_data->tx;
430 for (loop = 0; loop < n_bytes / 2; loop++) { 430 for (loop = 0; loop < n_bytes / 2; loop++) {
@@ -442,7 +442,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
442 } else if (drv_data->rx) { 442 } else if (drv_data->rx) {
443 /* read */ 443 /* read */
444 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); 444 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
445 if (n_bytes % 2) { 445 if (!(n_bytes % 2)) {
446 u16 *buf = (u16 *)drv_data->rx; 446 u16 *buf = (u16 *)drv_data->rx;
447 for (loop = 0; loop < n_bytes / 2; loop++) { 447 for (loop = 0; loop < n_bytes / 2; loop++) {
448 *buf++ = bfin_read(&drv_data->regs->rdbr); 448 *buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -458,7 +458,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
458 } else if (drv_data->tx) { 458 } else if (drv_data->tx) {
459 /* write */ 459 /* write */
460 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); 460 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
461 if (n_bytes % 2) { 461 if (!(n_bytes % 2)) {
462 u16 *buf = (u16 *)drv_data->tx; 462 u16 *buf = (u16 *)drv_data->tx;
463 for (loop = 0; loop < n_bytes / 2; loop++) { 463 for (loop = 0; loop < n_bytes / 2; loop++) {
464 bfin_read(&drv_data->regs->rdbr); 464 bfin_read(&drv_data->regs->rdbr);
@@ -587,6 +587,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
587 if (message->state == DONE_STATE) { 587 if (message->state == DONE_STATE) {
588 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); 588 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
589 message->status = 0; 589 message->status = 0;
590 bfin_spi_flush(drv_data);
590 bfin_spi_giveback(drv_data); 591 bfin_spi_giveback(drv_data);
591 return; 592 return;
592 } 593 }
@@ -870,8 +871,10 @@ static void bfin_spi_pump_transfers(unsigned long data)
870 message->actual_length += drv_data->len_in_bytes; 871 message->actual_length += drv_data->len_in_bytes;
871 /* Move to next transfer of this msg */ 872 /* Move to next transfer of this msg */
872 message->state = bfin_spi_next_transfer(drv_data); 873 message->state = bfin_spi_next_transfer(drv_data);
873 if (drv_data->cs_change) 874 if (drv_data->cs_change && message->state != DONE_STATE) {
875 bfin_spi_flush(drv_data);
874 bfin_spi_cs_deactive(drv_data, chip); 876 bfin_spi_cs_deactive(drv_data, chip);
877 }
875 } 878 }
876 879
877 /* Schedule next transfer tasklet */ 880 /* Schedule next transfer tasklet */
@@ -1026,7 +1029,6 @@ static int bfin_spi_setup(struct spi_device *spi)
1026 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1029 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
1027 chip->idle_tx_val = chip_info->idle_tx_val; 1030 chip->idle_tx_val = chip_info->idle_tx_val;
1028 chip->pio_interrupt = chip_info->pio_interrupt; 1031 chip->pio_interrupt = chip_info->pio_interrupt;
1029 spi->bits_per_word = chip_info->bits_per_word;
1030 } else { 1032 } else {
1031 /* force a default base state */ 1033 /* force a default base state */
1032 chip->ctl_reg &= bfin_ctl_reg; 1034 chip->ctl_reg &= bfin_ctl_reg;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 6db2887852d6..e8055073e84d 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -545,13 +545,12 @@ static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
545 * in case of failure. 545 * in case of failure.
546 */ 546 */
547static struct dma_async_tx_descriptor * 547static struct dma_async_tx_descriptor *
548ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) 548ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
549{ 549{
550 struct spi_transfer *t = espi->current_msg->state; 550 struct spi_transfer *t = espi->current_msg->state;
551 struct dma_async_tx_descriptor *txd; 551 struct dma_async_tx_descriptor *txd;
552 enum dma_slave_buswidth buswidth; 552 enum dma_slave_buswidth buswidth;
553 struct dma_slave_config conf; 553 struct dma_slave_config conf;
554 enum dma_transfer_direction slave_dirn;
555 struct scatterlist *sg; 554 struct scatterlist *sg;
556 struct sg_table *sgt; 555 struct sg_table *sgt;
557 struct dma_chan *chan; 556 struct dma_chan *chan;
@@ -567,14 +566,13 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
567 memset(&conf, 0, sizeof(conf)); 566 memset(&conf, 0, sizeof(conf));
568 conf.direction = dir; 567 conf.direction = dir;
569 568
570 if (dir == DMA_FROM_DEVICE) { 569 if (dir == DMA_DEV_TO_MEM) {
571 chan = espi->dma_rx; 570 chan = espi->dma_rx;
572 buf = t->rx_buf; 571 buf = t->rx_buf;
573 sgt = &espi->rx_sgt; 572 sgt = &espi->rx_sgt;
574 573
575 conf.src_addr = espi->sspdr_phys; 574 conf.src_addr = espi->sspdr_phys;
576 conf.src_addr_width = buswidth; 575 conf.src_addr_width = buswidth;
577 slave_dirn = DMA_DEV_TO_MEM;
578 } else { 576 } else {
579 chan = espi->dma_tx; 577 chan = espi->dma_tx;
580 buf = t->tx_buf; 578 buf = t->tx_buf;
@@ -582,7 +580,6 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
582 580
583 conf.dst_addr = espi->sspdr_phys; 581 conf.dst_addr = espi->sspdr_phys;
584 conf.dst_addr_width = buswidth; 582 conf.dst_addr_width = buswidth;
585 slave_dirn = DMA_MEM_TO_DEV;
586 } 583 }
587 584
588 ret = dmaengine_slave_config(chan, &conf); 585 ret = dmaengine_slave_config(chan, &conf);
@@ -633,8 +630,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
633 if (!nents) 630 if (!nents)
634 return ERR_PTR(-ENOMEM); 631 return ERR_PTR(-ENOMEM);
635 632
636 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, 633 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
637 slave_dirn, DMA_CTRL_ACK);
638 if (!txd) { 634 if (!txd) {
639 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 635 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
640 return ERR_PTR(-ENOMEM); 636 return ERR_PTR(-ENOMEM);
@@ -651,12 +647,12 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
651 * unmapped. 647 * unmapped.
652 */ 648 */
653static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, 649static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
654 enum dma_data_direction dir) 650 enum dma_transfer_direction dir)
655{ 651{
656 struct dma_chan *chan; 652 struct dma_chan *chan;
657 struct sg_table *sgt; 653 struct sg_table *sgt;
658 654
659 if (dir == DMA_FROM_DEVICE) { 655 if (dir == DMA_DEV_TO_MEM) {
660 chan = espi->dma_rx; 656 chan = espi->dma_rx;
661 sgt = &espi->rx_sgt; 657 sgt = &espi->rx_sgt;
662 } else { 658 } else {
@@ -677,16 +673,16 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
677 struct spi_message *msg = espi->current_msg; 673 struct spi_message *msg = espi->current_msg;
678 struct dma_async_tx_descriptor *rxd, *txd; 674 struct dma_async_tx_descriptor *rxd, *txd;
679 675
680 rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); 676 rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
681 if (IS_ERR(rxd)) { 677 if (IS_ERR(rxd)) {
682 dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); 678 dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
683 msg->status = PTR_ERR(rxd); 679 msg->status = PTR_ERR(rxd);
684 return; 680 return;
685 } 681 }
686 682
687 txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); 683 txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
688 if (IS_ERR(txd)) { 684 if (IS_ERR(txd)) {
689 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); 685 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
690 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); 686 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
691 msg->status = PTR_ERR(txd); 687 msg->status = PTR_ERR(txd);
692 return; 688 return;
@@ -705,8 +701,8 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
705 701
706 wait_for_completion(&espi->wait); 702 wait_for_completion(&espi->wait);
707 703
708 ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); 704 ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
709 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); 705 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
710} 706}
711 707
712/** 708/**
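
[Editor's note: the spi-ep93xx hunk drops the translation from the dma-mapping direction to the dmaengine direction by passing the dmaengine's own enum through the prepare/finish helpers. A minimal illustration of that simplification; the enums and function names below are hypothetical, not the kernel's.]

#include <stdio.h>

/* Hypothetical stand-ins for the two kernel enums involved. */
enum map_dir  { FROM_DEVICE, TO_DEVICE };       /* dma_data_direction     */
enum xfer_dir { DEV_TO_MEM, MEM_TO_DEV };       /* dma_transfer_direction */

/* Before: callers passed map_dir and the helper re-derived xfer_dir. */
static enum xfer_dir old_translate(enum map_dir d)
{
        return d == FROM_DEVICE ? DEV_TO_MEM : MEM_TO_DEV;
}

/* After: callers hand over the enum the DMA engine actually wants. */
static void prepare(enum xfer_dir dir)
{
        printf("prep %s channel\n", dir == DEV_TO_MEM ? "rx" : "tx");
}

int main(void)
{
        prepare(old_translate(FROM_DEVICE));    /* old, two-step shape */
        prepare(DEV_TO_MEM);                    /* new, direct shape   */
        return 0;
}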
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 09c925aaf320..400ae2121a2a 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1667,9 +1667,15 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
1667 /* cpsdvsr = 254 & scr = 255 */ 1667 /* cpsdvsr = 254 & scr = 255 */
1668 min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); 1668 min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
1669 1669
1670 if (!((freq <= max_tclk) && (freq >= min_tclk))) { 1670 if (freq > max_tclk)
1671 dev_warn(&pl022->adev->dev,
1672 "Max speed that can be programmed is %d Hz, you requested %d\n",
1673 max_tclk, freq);
1674
1675 if (freq < min_tclk) {
1671 dev_err(&pl022->adev->dev, 1676 dev_err(&pl022->adev->dev,
1672 "controller data is incorrect: out of range frequency"); 1677 "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
1678 freq, min_tclk);
1673 return -EINVAL; 1679 return -EINVAL;
1674 } 1680 }
1675 1681
@@ -1681,26 +1687,37 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
1681 while (scr <= SCR_MAX) { 1687 while (scr <= SCR_MAX) {
1682 tmp = spi_rate(rate, cpsdvsr, scr); 1688 tmp = spi_rate(rate, cpsdvsr, scr);
1683 1689
1684 if (tmp > freq) 1690 if (tmp > freq) {
1691 /* we need lower freq */
1685 scr++; 1692 scr++;
1693 continue;
1694 }
1695
1686 /* 1696 /*
1687 * If found exact value, update and break. 1697 * If found exact value, mark found and break.
1688 * If found more closer value, update and continue. 1698 * If found more closer value, update and break.
1689 */ 1699 */
1690 else if ((tmp == freq) || (tmp > best_freq)) { 1700 if (tmp > best_freq) {
1691 best_freq = tmp; 1701 best_freq = tmp;
1692 best_cpsdvsr = cpsdvsr; 1702 best_cpsdvsr = cpsdvsr;
1693 best_scr = scr; 1703 best_scr = scr;
1694 1704
1695 if (tmp == freq) 1705 if (tmp == freq)
1696 break; 1706 found = 1;
1697 } 1707 }
1698 scr++; 1708 /*
1709 * increased scr will give lower rates, which are not
1710 * required
1711 */
1712 break;
1699 } 1713 }
1700 cpsdvsr += 2; 1714 cpsdvsr += 2;
1701 scr = SCR_MIN; 1715 scr = SCR_MIN;
1702 } 1716 }
1703 1717
1718 WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n",
1719 freq);
1720
1704 clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); 1721 clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
1705 clk_freq->scr = (u8) (best_scr & 0xFF); 1722 clk_freq->scr = (u8) (best_scr & 0xFF);
1706 dev_dbg(&pl022->adev->dev, 1723 dev_dbg(&pl022->adev->dev,
@@ -1823,9 +1840,12 @@ static int pl022_setup(struct spi_device *spi)
1823 } else 1840 } else
1824 chip->cs_control = chip_info->cs_control; 1841 chip->cs_control = chip_info->cs_control;
1825 1842
1826 if (bits <= 3) { 1843 /* Check bits per word with vendor specific range */
1827 /* PL022 doesn't support less than 4-bits */ 1844 if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
1828 status = -ENOTSUPP; 1845 status = -ENOTSUPP;
1846 dev_err(&spi->dev, "illegal data size for this controller!\n");
1847 dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
1848 pl022->vendor->max_bpw);
1829 goto err_config_params; 1849 goto err_config_params;
1830 } else if (bits <= 8) { 1850 } else if (bits <= 8) {
1831 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); 1851 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
@@ -1838,20 +1858,10 @@ static int pl022_setup(struct spi_device *spi)
1838 chip->read = READING_U16; 1858 chip->read = READING_U16;
1839 chip->write = WRITING_U16; 1859 chip->write = WRITING_U16;
1840 } else { 1860 } else {
1841 if (pl022->vendor->max_bpw >= 32) { 1861 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1842 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); 1862 chip->n_bytes = 4;
1843 chip->n_bytes = 4; 1863 chip->read = READING_U32;
1844 chip->read = READING_U32; 1864 chip->write = WRITING_U32;
1845 chip->write = WRITING_U32;
1846 } else {
1847 dev_err(&spi->dev,
1848 "illegal data size for this controller!\n");
1849 dev_err(&spi->dev,
1850 "a standard pl022 can only handle "
1851 "1 <= n <= 16 bit words\n");
1852 status = -ENOTSUPP;
1853 goto err_config_params;
1854 }
1855 } 1865 }
1856 1866
1857 /* Now Initialize all register settings required for this chip */ 1867 /* Now Initialize all register settings required for this chip */
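
[Editor's note: the reworked pl022 calculate_effective_freq() warns and settles for the maximum when the request is too fast, errors only when it is below the minimum, and otherwise scans even CPSDVSR values and SCR values for the fastest achievable rate that does not exceed the request. A standalone sketch of that search, assuming the usual PL022 relation rate = SSPCLK / (CPSDVSR * (1 + SCR)) for spi_rate(); the clock value in main() is arbitrary.]

#include <stdio.h>

#define CPSDVSR_MIN 2
#define CPSDVSR_MAX 254
#define SCR_MIN     0
#define SCR_MAX     255

/* Assumed PL022 relation: bit rate = SSPCLK / (CPSDVSR * (1 + SCR)). */
static unsigned spi_rate(unsigned clk, unsigned cpsdvsr, unsigned scr)
{
        return clk / (cpsdvsr * (1 + scr));
}

/* Find the fastest rate <= freq, as in the reworked loop: for each even
 * cpsdvsr, raise scr until the rate drops to or below the target, record
 * it if it beats the best so far, then move on (a larger scr only slows). */
static int pick_divisors(unsigned clk, unsigned freq,
                         unsigned *best_cpsdvsr, unsigned *best_scr)
{
        unsigned best = 0, cpsdvsr, scr;

        for (cpsdvsr = CPSDVSR_MIN; cpsdvsr <= CPSDVSR_MAX && best != freq;
             cpsdvsr += 2) {
                for (scr = SCR_MIN; scr <= SCR_MAX; scr++) {
                        unsigned rate = spi_rate(clk, cpsdvsr, scr);

                        if (rate > freq)
                                continue;       /* need a lower rate */
                        if (rate > best) {
                                best = rate;
                                *best_cpsdvsr = cpsdvsr;
                                *best_scr = scr;
                        }
                        break;                  /* higher scr only slows */
                }
        }
        return best ? 0 : -1;
}

int main(void)
{
        unsigned c = 0, s = 0;

        if (pick_divisors(24000000, 1000000, &c, &s) == 0)
                printf("cpsdvsr=%u scr=%u -> %u Hz\n", c, s,
                       spi_rate(24000000, c, s));
        return 0;
}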
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8cbee53..d91751f9ffe8 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -36,6 +36,7 @@
36#include <linux/prefetch.h> 36#include <linux/prefetch.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38#include <linux/smp.h> 38#include <linux/smp.h>
39#include <linux/interrupt.h>
39#include <net/dst.h> 40#include <net/dst.h>
40#ifdef CONFIG_XFRM 41#ifdef CONFIG_XFRM
41#include <linux/xfrm.h> 42#include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc2fbd5..91a97b3e45c6 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -32,6 +32,7 @@
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/ratelimit.h> 33#include <linux/ratelimit.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/interrupt.h>
35#include <net/dst.h> 36#include <net/dst.h>
36#ifdef CONFIG_XFRM 37#ifdef CONFIG_XFRM
37#include <linux/xfrm.h> 38#include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9112cd882154..60cba8194de3 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -31,6 +31,7 @@
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/phy.h> 32#include <linux/phy.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/interrupt.h>
34 35
35#include <net/dst.h> 36#include <net/dst.h>
36 37
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 2b45d3d1800c..04cd57f2a6da 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -383,8 +383,6 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
383 pd->tx_pool = &f->link; 383 pd->tx_pool = &f->link;
384 pd->tx_pool_count++; 384 pd->tx_pool_count++;
385 f = 0; 385 f = 0;
386 } else {
387 kfree(f);
388 } 386 }
389 spin_unlock_bh(&pd->tx_frame_lock); 387 spin_unlock_bh(&pd->tx_frame_lock);
390 if (f) 388 if (f)
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 7862513cc295..9cf29fcea11e 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -79,10 +79,6 @@
79#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) 79#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
80#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) 80#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
81 81
82#define OMAP343X_CTRL_REGADDR(reg) \
83 OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
84
85
86/* Forward Declarations: */ 82/* Forward Declarations: */
87static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); 83static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
88static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, 84static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
@@ -418,19 +414,27 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
418 414
419 /* Assert RST1 i.e only the RST only for DSP megacell */ 415 /* Assert RST1 i.e only the RST only for DSP megacell */
420 if (!status) { 416 if (!status) {
417 /*
418 * XXX: ioremapping MUST be removed once ctrl
419 * function is made available.
420 */
421 void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
422 if (!ctrl)
423 return -ENOMEM;
424
421 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 425 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
422 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, 426 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
423 OMAP2_RM_RSTCTRL); 427 OMAP2_RM_RSTCTRL);
424 /* Mask address with 1K for compatibility */ 428 /* Mask address with 1K for compatibility */
425 __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, 429 __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
426 OMAP343X_CTRL_REGADDR( 430 ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
427 OMAP343X_CONTROL_IVA2_BOOTADDR));
428 /* 431 /*
429 * Set bootmode to self loop if dsp_debug flag is true 432 * Set bootmode to self loop if dsp_debug flag is true
430 */ 433 */
431 __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, 434 __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
432 OMAP343X_CTRL_REGADDR( 435 ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);
433 OMAP343X_CONTROL_IVA2_BOOTMOD)); 436
437 iounmap(ctrl);
434 } 438 }
435 } 439 }
436 if (!status) { 440 if (!status) {
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 70055c8111ed..870f934f4f3b 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -53,7 +53,10 @@ int dsp_wdt_init(void)
53 int ret = 0; 53 int ret = 0;
54 54
55 dsp_wdt.sm_wdt = NULL; 55 dsp_wdt.sm_wdt = NULL;
56 dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE); 56 dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K);
57 if (!dsp_wdt.reg_base)
58 return -ENOMEM;
59
57 tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); 60 tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
58 61
59 dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); 62 dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
@@ -99,6 +102,9 @@ void dsp_wdt_exit(void)
99 dsp_wdt.fclk = NULL; 102 dsp_wdt.fclk = NULL;
100 dsp_wdt.iclk = NULL; 103 dsp_wdt.iclk = NULL;
101 dsp_wdt.sm_wdt = NULL; 104 dsp_wdt.sm_wdt = NULL;
105
106 if (dsp_wdt.reg_base)
107 iounmap(dsp_wdt.reg_base);
102 dsp_wdt.reg_base = NULL; 108 dsp_wdt.reg_base = NULL;
103} 109}
104 110
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 3ed2c8f656a5..7048e01f0817 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -2,7 +2,7 @@ config ZCACHE
2 bool "Dynamic compression of swap pages and clean pagecache pages" 2 bool "Dynamic compression of swap pages and clean pagecache pages"
3 # X86 dependency is because zsmalloc uses non-portable pte/tlb 3 # X86 dependency is because zsmalloc uses non-portable pte/tlb
4 # functions 4 # functions
5 depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86 5 depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
6 select ZSMALLOC 6 select ZSMALLOC
7 select CRYPTO_LZO 7 select CRYPTO_LZO
8 default n 8 default n
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 70c3ffb981e7..e320ec24aa1b 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -60,7 +60,6 @@ static void core_clear_initiator_node_from_tpg(
60 int i; 60 int i;
61 struct se_dev_entry *deve; 61 struct se_dev_entry *deve;
62 struct se_lun *lun; 62 struct se_lun *lun;
63 struct se_lun_acl *acl, *acl_tmp;
64 63
65 spin_lock_irq(&nacl->device_list_lock); 64 spin_lock_irq(&nacl->device_list_lock);
66 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 65 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -81,28 +80,7 @@ static void core_clear_initiator_node_from_tpg(
81 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 80 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
82 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 81 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
83 82
84 spin_lock(&lun->lun_acl_lock);
85 list_for_each_entry_safe(acl, acl_tmp,
86 &lun->lun_acl_list, lacl_list) {
87 if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
88 (acl->mapped_lun == deve->mapped_lun))
89 break;
90 }
91
92 if (!acl) {
93 pr_err("Unable to locate struct se_lun_acl for %s,"
94 " mapped_lun: %u\n", nacl->initiatorname,
95 deve->mapped_lun);
96 spin_unlock(&lun->lun_acl_lock);
97 spin_lock_irq(&nacl->device_list_lock);
98 continue;
99 }
100
101 list_del(&acl->lacl_list);
102 spin_unlock(&lun->lun_acl_lock);
103
104 spin_lock_irq(&nacl->device_list_lock); 83 spin_lock_irq(&nacl->device_list_lock);
105 kfree(acl);
106 } 84 }
107 spin_unlock_irq(&nacl->device_list_lock); 85 spin_unlock_irq(&nacl->device_list_lock);
108} 86}
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 24145c30c9b0..6cc4358f68c1 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1073,8 +1073,10 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1073 (new_serial.close_delay != port->close_delay) || 1073 (new_serial.close_delay != port->close_delay) ||
1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) || 1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
1075 ((new_serial.flags & ~ASYNC_USR_MASK) != 1075 ((new_serial.flags & ~ASYNC_USR_MASK) !=
1076 (port->flags & ~ASYNC_USR_MASK))) 1076 (port->flags & ~ASYNC_USR_MASK))) {
1077 tty_unlock();
1077 return -EPERM; 1078 return -EPERM;
1079 }
1078 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
1079 (new_serial.flags & ASYNC_USR_MASK)); 1081 (new_serial.flags & ASYNC_USR_MASK));
1080 state->custom_divisor = new_serial.custom_divisor; 1082 state->custom_divisor = new_serial.custom_divisor;
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index e6c3dbd781d6..836fe2731234 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
154 port->x_char = 0; 154 port->x_char = 0;
155 return IRQ_HANDLED; 155 return IRQ_HANDLED;
156 } 156 }
157 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 157
158 clps711xuart_stop_tx(port); 158 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
159 return IRQ_HANDLED; 159 goto disable_tx_irq;
160 }
161 160
162 count = port->fifosize >> 1; 161 count = port->fifosize >> 1;
163 do { 162 do {
@@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
171 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 170 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
172 uart_write_wakeup(port); 171 uart_write_wakeup(port);
173 172
174 if (uart_circ_empty(xmit)) 173 if (uart_circ_empty(xmit)) {
175 clps711xuart_stop_tx(port); 174 disable_tx_irq:
175 disable_irq_nosync(TX_IRQ(port));
176 tx_enabled(port) = 0;
177 }
176 178
177 return IRQ_HANDLED; 179 return IRQ_HANDLED;
178} 180}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index bbbec4a74cfb..c2816f494807 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1447,9 +1447,11 @@ static int pch_uart_verify_port(struct uart_port *port,
1447 __func__); 1447 __func__);
1448 return -EOPNOTSUPP; 1448 return -EOPNOTSUPP;
1449#endif 1449#endif
1450 priv->use_dma = 1;
1451 priv->use_dma_flag = 1; 1450 priv->use_dma_flag = 1;
1452 dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n"); 1451 dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
1452 if (!priv->use_dma)
1453 pch_request_dma(port);
1454 priv->use_dma = 1;
1453 } 1455 }
1454 1456
1455 return 0; 1457 return 0;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 08ebe901bb59..654755a990df 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
469 tty = NULL; 469 tty = NULL;
470 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { 470 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
471 if (!ZS_IS_OPEN(uap_a)) { 471 if (!ZS_IS_OPEN(uap_a)) {
472 pmz_debug("ChanA interrupt while open !\n"); 472 pmz_debug("ChanA interrupt while not open !\n");
473 goto skip_a; 473 goto skip_a;
474 } 474 }
475 write_zsreg(uap_a, R0, RES_H_IUS); 475 write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
493 spin_lock(&uap_b->port.lock); 493 spin_lock(&uap_b->port.lock);
494 tty = NULL; 494 tty = NULL;
495 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { 495 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
496 if (!ZS_IS_OPEN(uap_a)) { 496 if (!ZS_IS_OPEN(uap_b)) {
497 pmz_debug("ChanB interrupt while open !\n"); 497 pmz_debug("ChanB interrupt while not open !\n");
498 goto skip_b; 498 goto skip_b;
499 } 499 }
500 write_zsreg(uap_b, R0, RES_H_IUS); 500 write_zsreg(uap_b, R0, RES_H_IUS);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 86dd1e302bb3..29ca20dbd335 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1085,15 +1085,21 @@ void vt_set_led_state(int console, int leds)
1085 * 1085 *
1086 * Handle console start. This is a wrapper for the VT layer 1086 * Handle console start. This is a wrapper for the VT layer
1087 * so that we can keep kbd knowledge internal 1087 * so that we can keep kbd knowledge internal
1088 *
1089 * FIXME: We eventually need to hold the kbd lock here to protect
1090 * the LED updating. We can't do it yet because fn_hold calls stop_tty
1091 * and start_tty under the kbd_event_lock, while normal tty paths
1092 * don't hold the lock. We probably need to split out an LED lock
1093 * but not during an -rc release!
1088 */ 1094 */
1089void vt_kbd_con_start(int console) 1095void vt_kbd_con_start(int console)
1090{ 1096{
1091 struct kbd_struct * kbd = kbd_table + console; 1097 struct kbd_struct * kbd = kbd_table + console;
1092 unsigned long flags; 1098/* unsigned long flags; */
1093 spin_lock_irqsave(&kbd_event_lock, flags); 1099/* spin_lock_irqsave(&kbd_event_lock, flags); */
1094 clr_vc_kbd_led(kbd, VC_SCROLLOCK); 1100 clr_vc_kbd_led(kbd, VC_SCROLLOCK);
1095 set_leds(); 1101 set_leds();
1096 spin_unlock_irqrestore(&kbd_event_lock, flags); 1102/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
1097} 1103}
1098 1104
1099/** 1105/**
@@ -1102,22 +1108,28 @@ void vt_kbd_con_start(int console)
1102 * 1108 *
1103 * Handle console stop. This is a wrapper for the VT layer 1109 * Handle console stop. This is a wrapper for the VT layer
1104 * so that we can keep kbd knowledge internal 1110 * so that we can keep kbd knowledge internal
1111 *
1112 * FIXME: We eventually need to hold the kbd lock here to protect
1113 * the LED updating. We can't do it yet because fn_hold calls stop_tty
1114 * and start_tty under the kbd_event_lock, while normal tty paths
1115 * don't hold the lock. We probably need to split out an LED lock
1116 * but not during an -rc release!
1105 */ 1117 */
1106void vt_kbd_con_stop(int console) 1118void vt_kbd_con_stop(int console)
1107{ 1119{
1108 struct kbd_struct * kbd = kbd_table + console; 1120 struct kbd_struct * kbd = kbd_table + console;
1109 unsigned long flags; 1121/* unsigned long flags; */
1110 spin_lock_irqsave(&kbd_event_lock, flags); 1122/* spin_lock_irqsave(&kbd_event_lock, flags); */
1111 set_vc_kbd_led(kbd, VC_SCROLLOCK); 1123 set_vc_kbd_led(kbd, VC_SCROLLOCK);
1112 set_leds(); 1124 set_leds();
1113 spin_unlock_irqrestore(&kbd_event_lock, flags); 1125/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
1114} 1126}
1115 1127
1116/* 1128/*
1117 * This is the tasklet that updates LED state on all keyboards 1129 * This is the tasklet that updates LED state on all keyboards
1118 * attached to the box. The reason we use tasklet is that we 1130 * attached to the box. The reason we use tasklet is that we
1119 * need to handle the scenario when keyboard handler is not 1131 * need to handle the scenario when keyboard handler is not
1120 * registered yet but we already getting updates form VT to 1132 * registered yet but we already getting updates from the VT to
1121 * update led state. 1133 * update led state.
1122 */ 1134 */
1123static void kbd_bh(unsigned long dummy) 1135static void kbd_bh(unsigned long dummy)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c6f6560d436c..0bb2b3248dad 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -157,8 +157,9 @@ static void wdm_out_callback(struct urb *urb)
157 spin_lock(&desc->iuspin); 157 spin_lock(&desc->iuspin);
158 desc->werr = urb->status; 158 desc->werr = urb->status;
159 spin_unlock(&desc->iuspin); 159 spin_unlock(&desc->iuspin);
160 clear_bit(WDM_IN_USE, &desc->flags);
161 kfree(desc->outbuf); 160 kfree(desc->outbuf);
161 desc->outbuf = NULL;
162 clear_bit(WDM_IN_USE, &desc->flags);
162 wake_up(&desc->wait); 163 wake_up(&desc->wait);
163} 164}
164 165
@@ -338,7 +339,7 @@ static ssize_t wdm_write
338 if (we < 0) 339 if (we < 0)
339 return -EIO; 340 return -EIO;
340 341
341 desc->outbuf = buf = kmalloc(count, GFP_KERNEL); 342 buf = kmalloc(count, GFP_KERNEL);
342 if (!buf) { 343 if (!buf) {
343 rv = -ENOMEM; 344 rv = -ENOMEM;
344 goto outnl; 345 goto outnl;
@@ -406,10 +407,12 @@ static ssize_t wdm_write
406 req->wIndex = desc->inum; 407 req->wIndex = desc->inum;
407 req->wLength = cpu_to_le16(count); 408 req->wLength = cpu_to_le16(count);
408 set_bit(WDM_IN_USE, &desc->flags); 409 set_bit(WDM_IN_USE, &desc->flags);
410 desc->outbuf = buf;
409 411
410 rv = usb_submit_urb(desc->command, GFP_KERNEL); 412 rv = usb_submit_urb(desc->command, GFP_KERNEL);
411 if (rv < 0) { 413 if (rv < 0) {
412 kfree(buf); 414 kfree(buf);
415 desc->outbuf = NULL;
413 clear_bit(WDM_IN_USE, &desc->flags); 416 clear_bit(WDM_IN_USE, &desc->flags);
414 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); 417 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
415 } else { 418 } else {
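
[Editor's note: the cdc-wdm hunk reorders the buffer bookkeeping around the in-use flag: the output buffer pointer is published only after the write is marked in flight, cleared again if the submit fails, and on completion it is freed and NULLed before the flag is released. A rough userspace sketch of that ordering with a C11 atomic flag; everything here is illustrative, not the USB API.]

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static atomic_flag in_use = ATOMIC_FLAG_INIT;
static char *outbuf;            /* only valid while in_use is set */

static int submit(const char *data, size_t len)
{
        char *buf;

        if (atomic_flag_test_and_set(&in_use))
                return -1;      /* previous write still in flight */

        buf = malloc(len);
        if (!buf) {
                atomic_flag_clear(&in_use);
                return -1;
        }
        memcpy(buf, data, len);
        outbuf = buf;           /* publish only while we own in_use */
        return 0;
}

static void complete(void)
{
        /* Free and clear the pointer *before* releasing the flag, so the
         * next submit() can never observe a stale, already-freed buffer. */
        free(outbuf);
        outbuf = NULL;
        atomic_flag_clear(&in_use);
}

int main(void)
{
        if (submit("hello", 5) == 0)
                complete();
        printf("done\n");
        return 0;
}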
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a48e732..57ed9e400c06 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,6 +493,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
493 493
494 pci_save_state(pci_dev); 494 pci_save_state(pci_dev);
495 495
496 /*
497 * Some systems crash if an EHCI controller is in D3 during
498 * a sleep transition. We have to leave such controllers in D0.
499 */
500 if (hcd->broken_pci_sleep) {
501 dev_dbg(dev, "Staying in PCI D0\n");
502 return retval;
503 }
504
496 /* If the root hub is dead rather than suspended, disallow remote 505 /* If the root hub is dead rather than suspended, disallow remote
497 * wakeup. usb_hc_died() should ensure that both hosts are marked as 506 * wakeup. usb_hc_died() should ensure that both hosts are marked as
498 * dying, so we only need to check the primary roothub. 507 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a2aa9d652c67..ec6c97dadbe4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1667,7 +1667,6 @@ void usb_disconnect(struct usb_device **pdev)
1667{ 1667{
1668 struct usb_device *udev = *pdev; 1668 struct usb_device *udev = *pdev;
1669 int i; 1669 int i;
1670 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1671 1670
1672 /* mark the device as inactive, so any further urb submissions for 1671 /* mark the device as inactive, so any further urb submissions for
1673 * this device (and any of its children) will fail immediately. 1672 * this device (and any of its children) will fail immediately.
@@ -1690,9 +1689,7 @@ void usb_disconnect(struct usb_device **pdev)
1690 * so that the hardware is now fully quiesced. 1689 * so that the hardware is now fully quiesced.
1691 */ 1690 */
1692 dev_dbg (&udev->dev, "unregistering device\n"); 1691 dev_dbg (&udev->dev, "unregistering device\n");
1693 mutex_lock(hcd->bandwidth_mutex);
1694 usb_disable_device(udev, 0); 1692 usb_disable_device(udev, 0);
1695 mutex_unlock(hcd->bandwidth_mutex);
1696 usb_hcd_synchronize_unlinks(udev); 1693 usb_hcd_synchronize_unlinks(udev);
1697 1694
1698 usb_remove_ep_devs(&udev->ep0); 1695 usb_remove_ep_devs(&udev->ep0);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index aed3e07942d4..ca717da3be95 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1136,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
1136 * Deallocates hcd/hardware state for the endpoints (nuking all or most 1136 * Deallocates hcd/hardware state for the endpoints (nuking all or most
1137 * pending urbs) and usbcore state for the interfaces, so that usbcore 1137 * pending urbs) and usbcore state for the interfaces, so that usbcore
1138 * must usb_set_configuration() before any interfaces could be used. 1138 * must usb_set_configuration() before any interfaces could be used.
1139 *
1140 * Must be called with hcd->bandwidth_mutex held.
1141 */ 1139 */
1142void usb_disable_device(struct usb_device *dev, int skip_ep0) 1140void usb_disable_device(struct usb_device *dev, int skip_ep0)
1143{ 1141{
@@ -1190,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1190 usb_disable_endpoint(dev, i + USB_DIR_IN, false); 1188 usb_disable_endpoint(dev, i + USB_DIR_IN, false);
1191 } 1189 }
1192 /* Remove endpoints from the host controller internal state */ 1190 /* Remove endpoints from the host controller internal state */
1191 mutex_lock(hcd->bandwidth_mutex);
1193 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); 1192 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1193 mutex_unlock(hcd->bandwidth_mutex);
1194 /* Second pass: remove endpoint pointers */ 1194 /* Second pass: remove endpoint pointers */
1195 } 1195 }
1196 for (i = skip_ep0; i < 16; ++i) { 1196 for (i = skip_ep0; i < 16; ++i) {
@@ -1750,7 +1750,6 @@ free_interfaces:
1750 /* if it's already configured, clear out old state first. 1750 /* if it's already configured, clear out old state first.
1751 * getting rid of old interfaces means unbinding their drivers. 1751 * getting rid of old interfaces means unbinding their drivers.
1752 */ 1752 */
1753 mutex_lock(hcd->bandwidth_mutex);
1754 if (dev->state != USB_STATE_ADDRESS) 1753 if (dev->state != USB_STATE_ADDRESS)
1755 usb_disable_device(dev, 1); /* Skip ep0 */ 1754 usb_disable_device(dev, 1); /* Skip ep0 */
1756 1755
@@ -1763,6 +1762,7 @@ free_interfaces:
1763 * host controller will not allow submissions to dropped endpoints. If 1762 * host controller will not allow submissions to dropped endpoints. If
1764 * this call fails, the device state is unchanged. 1763 * this call fails, the device state is unchanged.
1765 */ 1764 */
1765 mutex_lock(hcd->bandwidth_mutex);
1766 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1766 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1767 if (ret < 0) { 1767 if (ret < 0) {
1768 mutex_unlock(hcd->bandwidth_mutex); 1768 mutex_unlock(hcd->bandwidth_mutex);
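
[Editor's note: the usb core hunks narrow the bandwidth_mutex scope: callers no longer wrap usb_disable_device() in it, and the function itself takes the mutex only around the bandwidth-table update it actually needs. A small pthread sketch of pushing a lock from callers down into the callee; the names and the "bandwidth" variable are hypothetical.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bandwidth_mutex = PTHREAD_MUTEX_INITIALIZER;
static int allocated_bandwidth = 42;

/* After the change: the callee locks only the shared state it touches;
 * callers no longer hold the mutex across the whole teardown. */
static void disable_device(void)
{
        /* ... endpoint teardown that needs no lock ... */

        pthread_mutex_lock(&bandwidth_mutex);
        allocated_bandwidth = 0;        /* the part that is actually shared */
        pthread_mutex_unlock(&bandwidth_mutex);

        /* ... more unlocked cleanup ... */
}

int main(void)
{
        disable_device();               /* caller holds nothing */
        printf("bandwidth=%d\n", allocated_bandwidth);
        return 0;
}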
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7bd815a507e8..99b58d84553a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -206,11 +206,11 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
206 206
207 for (i = 0; i < dwc->num_event_buffers; i++) { 207 for (i = 0; i < dwc->num_event_buffers; i++) {
208 evt = dwc->ev_buffs[i]; 208 evt = dwc->ev_buffs[i];
209 if (evt) { 209 if (evt)
210 dwc3_free_one_event_buffer(dwc, evt); 210 dwc3_free_one_event_buffer(dwc, evt);
211 dwc->ev_buffs[i] = NULL;
212 }
213 } 211 }
212
213 kfree(dwc->ev_buffs);
214} 214}
215 215
216/** 216/**
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 25910e251c04..3584a169886f 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -353,6 +353,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
353 353
354 dwc->test_mode_nr = wIndex >> 8; 354 dwc->test_mode_nr = wIndex >> 8;
355 dwc->test_mode = true; 355 dwc->test_mode = true;
356 break;
357 default:
358 return -EINVAL;
356 } 359 }
357 break; 360 break;
358 361
@@ -559,15 +562,20 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
559 length = trb->size & DWC3_TRB_SIZE_MASK; 562 length = trb->size & DWC3_TRB_SIZE_MASK;
560 563
561 if (dwc->ep0_bounced) { 564 if (dwc->ep0_bounced) {
565 unsigned transfer_size = ur->length;
566 unsigned maxp = ep0->endpoint.maxpacket;
567
568 transfer_size += (maxp - (transfer_size % maxp));
562 transferred = min_t(u32, ur->length, 569 transferred = min_t(u32, ur->length,
563 ep0->endpoint.maxpacket - length); 570 transfer_size - length);
564 memcpy(ur->buf, dwc->ep0_bounce, transferred); 571 memcpy(ur->buf, dwc->ep0_bounce, transferred);
565 dwc->ep0_bounced = false; 572 dwc->ep0_bounced = false;
566 } else { 573 } else {
567 transferred = ur->length - length; 574 transferred = ur->length - length;
568 ur->actual += transferred;
569 } 575 }
570 576
577 ur->actual += transferred;
578
571 if ((epnum & 1) && ur->actual < ur->length) { 579 if ((epnum & 1) && ur->actual < ur->length) {
572 /* for some reason we did not get everything out */ 580 /* for some reason we did not get everything out */
573 581
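
Note: the ep0_bounced branch above pads the request length to the next wMaxPacketSize boundary before subtracting the residue reported in the TRB, because the bounce-buffer transfer was programmed with that padded size; ur->actual is then updated on both paths. A small stand-alone sketch of the arithmetic (example values are assumptions, not taken from real hardware):

    #include <stdio.h>

    /* Pad the requested length to the next max-packet boundary, subtract
     * the residue the controller reports, and clamp at the request length. */
    static unsigned int ep0_transferred(unsigned int req_len,
                                        unsigned int maxp,
                                        unsigned int residue)
    {
            unsigned int padded = req_len + (maxp - (req_len % maxp));
            unsigned int done = padded - residue;

            return done < req_len ? done : req_len;  /* min(req_len, done) */
    }

    int main(void)
    {
            /* e.g. 100-byte request, 64-byte packets */
            printf("%u\n", ep0_transferred(100, 64, 28));  /* prints 100 */
            printf("%u\n", ep0_transferred(100, 64, 60));  /* prints 68 */
            return 0;
    }
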
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0c935d7c65bd..9d7bcd910074 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1863,8 +1863,8 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
1863 mod_timer(&udc->vbus_timer, 1863 mod_timer(&udc->vbus_timer,
1864 jiffies + VBUS_POLL_TIMEOUT); 1864 jiffies + VBUS_POLL_TIMEOUT);
1865 } else { 1865 } else {
1866 if (request_irq(udc->board.vbus_pin, at91_vbus_irq, 1866 if (request_irq(gpio_to_irq(udc->board.vbus_pin),
1867 0, driver_name, udc)) { 1867 at91_vbus_irq, 0, driver_name, udc)) {
1868 DBG("request vbus irq %d failed\n", 1868 DBG("request vbus irq %d failed\n",
1869 udc->board.vbus_pin); 1869 udc->board.vbus_pin);
1870 retval = -EBUSY; 1870 retval = -EBUSY;
@@ -1886,7 +1886,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
1886 return 0; 1886 return 0;
1887fail4: 1887fail4:
1888 if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled) 1888 if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
1889 free_irq(udc->board.vbus_pin, udc); 1889 free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
1890fail3: 1890fail3:
1891 if (gpio_is_valid(udc->board.vbus_pin)) 1891 if (gpio_is_valid(udc->board.vbus_pin))
1892 gpio_free(udc->board.vbus_pin); 1892 gpio_free(udc->board.vbus_pin);
@@ -1924,7 +1924,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
1924 device_init_wakeup(&pdev->dev, 0); 1924 device_init_wakeup(&pdev->dev, 0);
1925 remove_debug_file(udc); 1925 remove_debug_file(udc);
1926 if (gpio_is_valid(udc->board.vbus_pin)) { 1926 if (gpio_is_valid(udc->board.vbus_pin)) {
1927 free_irq(udc->board.vbus_pin, udc); 1927 free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
1928 gpio_free(udc->board.vbus_pin); 1928 gpio_free(udc->board.vbus_pin);
1929 } 1929 }
1930 free_irq(udc->udp_irq, udc); 1930 free_irq(udc->udp_irq, udc);
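
Note: the at91_udc hunks stop passing a raw GPIO number to the IRQ API; request_irq() and free_irq() expect a Linux interrupt number, so every call now goes through gpio_to_irq(). A minimal sketch of the corrected pairing (error handling trimmed; the handler name is a placeholder):

    #include <linux/gpio.h>
    #include <linux/interrupt.h>

    /* Request and release a VBUS-sense interrupt identified by GPIO:
     * the GPIO number must be translated to an IRQ number first. */
    static int vbus_irq_setup(unsigned int gpio, irq_handler_t handler,
                              void *dev_id)
    {
            return request_irq(gpio_to_irq(gpio), handler, 0, "vbus", dev_id);
    }

    static void vbus_irq_teardown(unsigned int gpio, void *dev_id)
    {
            free_irq(gpio_to_irq(gpio), dev_id);   /* same translation on free */
    }
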
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a6dfd2164166..170cbe89d9f8 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -927,7 +927,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
927 927
928 dum->driver = NULL; 928 dum->driver = NULL;
929 929
930 dummy_pullup(&dum->gadget, 0);
931 return 0; 930 return 0;
932} 931}
933 932
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1cbba70836bc..f52cb1ae45d9 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -712,7 +712,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
712 if (code == FUNCTIONFS_INTERFACE_REVMAP) { 712 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
713 struct ffs_function *func = ffs->func; 713 struct ffs_function *func = ffs->func;
714 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; 714 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
715 } else if (gadget->ops->ioctl) { 715 } else if (gadget && gadget->ops->ioctl) {
716 ret = gadget->ops->ioctl(gadget, code, value); 716 ret = gadget->ops->ioctl(gadget, code, value);
717 } else { 717 } else {
718 ret = -ENOTTY; 718 ret = -ENOTTY;
@@ -1382,6 +1382,7 @@ static void functionfs_unbind(struct ffs_data *ffs)
1382 ffs->ep0req = NULL; 1382 ffs->ep0req = NULL;
1383 ffs->gadget = NULL; 1383 ffs->gadget = NULL;
1384 ffs_data_put(ffs); 1384 ffs_data_put(ffs);
1385 clear_bit(FFS_FL_BOUND, &ffs->flags);
1385 } 1386 }
1386} 1387}
1387 1388
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a371e966425f..cb8c162cae5a 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2189,7 +2189,7 @@ unknown_cmnd:
2189 common->data_size_from_cmnd = 0; 2189 common->data_size_from_cmnd = 0;
2190 sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2190 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2191 reply = check_command(common, common->cmnd_size, 2191 reply = check_command(common, common->cmnd_size,
2192 DATA_DIR_UNKNOWN, 0xff, 0, unknown); 2192 DATA_DIR_UNKNOWN, ~0, 0, unknown);
2193 if (reply == 0) { 2193 if (reply == 0) {
2194 common->curlun->sense_data = SS_INVALID_COMMAND; 2194 common->curlun->sense_data = SS_INVALID_COMMAND;
2195 reply = -EINVAL; 2195 reply = -EINVAL;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 7b1cf18df5e3..52343654f5df 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -500,6 +500,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
500 if (buf) { 500 if (buf) {
501 memcpy(req->buf, buf, n); 501 memcpy(req->buf, buf, n);
502 req->complete = rndis_response_complete; 502 req->complete = rndis_response_complete;
503 req->context = rndis;
503 rndis_free_response(rndis->config, buf); 504 rndis_free_response(rndis->config, buf);
504 value = n; 505 value = n;
505 } 506 }
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 4fac56927741..a896d73f7a93 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2579 fsg->data_size_from_cmnd = 0; 2579 fsg->data_size_from_cmnd = 0;
2580 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); 2580 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2581 if ((reply = check_command(fsg, fsg->cmnd_size, 2581 if ((reply = check_command(fsg, fsg->cmnd_size,
2582 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { 2582 DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
2583 fsg->curlun->sense_data = SS_INVALID_COMMAND; 2583 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2584 reply = -EINVAL; 2584 reply = -EINVAL;
2585 } 2585 }
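
Note: both mass-storage hunks widen the "allowed non-zero CDB bytes" mask passed to check_command() for unknown commands from 0xff to ~0. The mask is tested bit-per-command-byte, so 0xff only tolerated bytes 1-7 and a longer unknown CDB with non-zero upper bytes would fail for the wrong reason. A stand-alone sketch of that style of check (a simplified stand-in, not the gadget's actual function):

    #include <stdio.h>

    /* Return 0 if every non-zero command byte past byte 0 is permitted by
     * the mask; bit i of the mask corresponds to command byte i. */
    static int check_cdb_bytes(const unsigned char *cmnd, int cmnd_size,
                               unsigned int mask)
    {
            int i;

            for (i = 1; i < cmnd_size; ++i)
                    if (cmnd[i] && !(mask & (1u << i)))
                            return -1;
            return 0;
    }

    int main(void)
    {
            unsigned char cdb[12] = { 0xC1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 };

            printf("%d\n", check_cdb_bytes(cdb, 12, 0xff)); /* -1: byte 10 set */
            printf("%d\n", check_cdb_bytes(cdb, 12, ~0u));  /*  0: all allowed */
            return 0;
    }
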
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 5f94e79cd6b9..55abfb6bd612 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -730,7 +730,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
730 : (1 << (ep_index(ep))); 730 : (1 << (ep_index(ep)));
731 731
732 /* check if the pipe is empty */ 732 /* check if the pipe is empty */
733 if (!(list_empty(&ep->queue))) { 733 if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
734 /* Add td to the end */ 734 /* Add td to the end */
735 struct fsl_req *lastreq; 735 struct fsl_req *lastreq;
736 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); 736 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
@@ -918,10 +918,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
918 return -ENOMEM; 918 return -ENOMEM;
919 } 919 }
920 920
921 /* Update ep0 state */
922 if ((ep_index(ep) == 0))
923 udc->ep0_state = DATA_STATE_XMIT;
924
925 /* irq handler advances the queue */ 921 /* irq handler advances the queue */
926 if (req != NULL) 922 if (req != NULL)
927 list_add_tail(&req->queue, &ep->queue); 923 list_add_tail(&req->queue, &ep->queue);
@@ -1279,7 +1275,8 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
1279 udc->ep0_dir = USB_DIR_OUT; 1275 udc->ep0_dir = USB_DIR_OUT;
1280 1276
1281 ep = &udc->eps[0]; 1277 ep = &udc->eps[0];
1282 udc->ep0_state = WAIT_FOR_OUT_STATUS; 1278 if (udc->ep0_state != DATA_STATE_XMIT)
1279 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1283 1280
1284 req->ep = ep; 1281 req->ep = ep;
1285 req->req.length = 0; 1282 req->req.length = 0;
@@ -1384,6 +1381,9 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
1384 1381
1385 list_add_tail(&req->queue, &ep->queue); 1382 list_add_tail(&req->queue, &ep->queue);
1386 udc->ep0_state = DATA_STATE_XMIT; 1383 udc->ep0_state = DATA_STATE_XMIT;
1384 if (ep0_prime_status(udc, EP_DIR_OUT))
1385 ep0stall(udc);
1386
1387 return; 1387 return;
1388stall: 1388stall:
1389 ep0stall(udc); 1389 ep0stall(udc);
@@ -1492,6 +1492,14 @@ static void setup_received_irq(struct fsl_udc *udc,
1492 spin_lock(&udc->lock); 1492 spin_lock(&udc->lock);
1493 udc->ep0_state = (setup->bRequestType & USB_DIR_IN) 1493 udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1494 ? DATA_STATE_XMIT : DATA_STATE_RECV; 1494 ? DATA_STATE_XMIT : DATA_STATE_RECV;
1495 /*
1496 * If the data stage is IN, send status prime immediately.
1497 * See 2.0 Spec chapter 8.5.3.3 for detail.
1498 */
1499 if (udc->ep0_state == DATA_STATE_XMIT)
1500 if (ep0_prime_status(udc, EP_DIR_OUT))
1501 ep0stall(udc);
1502
1495 } else { 1503 } else {
1496 /* No data phase, IN status from gadget */ 1504 /* No data phase, IN status from gadget */
1497 udc->ep0_dir = USB_DIR_IN; 1505 udc->ep0_dir = USB_DIR_IN;
@@ -1520,9 +1528,8 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
1520 1528
1521 switch (udc->ep0_state) { 1529 switch (udc->ep0_state) {
1522 case DATA_STATE_XMIT: 1530 case DATA_STATE_XMIT:
1523 /* receive status phase */ 1531 /* already primed at setup_received_irq */
1524 if (ep0_prime_status(udc, EP_DIR_OUT)) 1532 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1525 ep0stall(udc);
1526 break; 1533 break;
1527 case DATA_STATE_RECV: 1534 case DATA_STATE_RECV:
1528 /* send status phase */ 1535 /* send status phase */
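
Note: the fsl_udc_core hunks change the ep0 control-transfer sequencing. When the data stage is IN, the zero-length OUT status request is now primed immediately at setup time (per the controller programming model cited in the added comment) instead of at data-stage completion, and ep0_req_complete() only advances the state. A rough stand-alone sketch of the resulting flow (enum and helpers are stand-ins, not the driver's own):

    #include <stdio.h>

    enum ep0_state { WAIT_FOR_SETUP, DATA_STATE_XMIT, DATA_STATE_RECV,
                     WAIT_FOR_OUT_STATUS };

    /* Stand-in for queueing the zero-length OUT status packet. */
    static void prime_status_out(void) { puts("prime OUT status"); }

    /* Setup with an IN data stage: queue the data and prime the OUT
     * status right away, as the hunk above does. */
    static enum ep0_state handle_setup_in(void)
    {
            puts("queue IN data");
            prime_status_out();
            return DATA_STATE_XMIT;
    }

    /* Data-stage completion no longer primes status; it only advances. */
    static enum ep0_state data_stage_done(enum ep0_state s)
    {
            return (s == DATA_STATE_XMIT) ? WAIT_FOR_OUT_STATUS : s;
    }

    int main(void)
    {
            enum ep0_state s = handle_setup_in();

            s = data_stage_done(s);
            printf("reached WAIT_FOR_OUT_STATUS: %d\n",
                   s == WAIT_FOR_OUT_STATUS);
            return 0;
    }
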
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 331cd6729d3c..a85eaf40b948 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -161,7 +161,7 @@ static struct usb_composite_driver gfs_driver = {
161static struct ffs_data *gfs_ffs_data; 161static struct ffs_data *gfs_ffs_data;
162static unsigned long gfs_registered; 162static unsigned long gfs_registered;
163 163
164static int gfs_init(void) 164static int __init gfs_init(void)
165{ 165{
166 ENTER(); 166 ENTER();
167 167
@@ -169,7 +169,7 @@ static int gfs_init(void)
169} 169}
170module_init(gfs_init); 170module_init(gfs_init);
171 171
172static void gfs_exit(void) 172static void __exit gfs_exit(void)
173{ 173{
174 ENTER(); 174 ENTER();
175 175
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 69295ba9d99a..105b206cd844 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -340,7 +340,7 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
340 /* currently we allocate TX FIFOs for all possible endpoints, 340 /* currently we allocate TX FIFOs for all possible endpoints,
341 * and assume that they are all the same size. */ 341 * and assume that they are all the same size. */
342 342
343 for (ep = 0; ep <= 15; ep++) { 343 for (ep = 1; ep <= 15; ep++) {
344 val = addr; 344 val = addr;
345 val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT; 345 val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;
346 addr += size; 346 addr += size;
@@ -741,7 +741,7 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
741 /* write size / packets */ 741 /* write size / packets */
742 writel(epsize, hsotg->regs + epsize_reg); 742 writel(epsize, hsotg->regs + epsize_reg);
743 743
744 if (using_dma(hsotg)) { 744 if (using_dma(hsotg) && !continuing) {
745 unsigned int dma_reg; 745 unsigned int dma_reg;
746 746
747 /* write DMA address to control register, buffer already 747 /* write DMA address to control register, buffer already
@@ -1696,10 +1696,12 @@ static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
1696 reg |= mpsval; 1696 reg |= mpsval;
1697 writel(reg, regs + S3C_DIEPCTL(ep)); 1697 writel(reg, regs + S3C_DIEPCTL(ep));
1698 1698
1699 reg = readl(regs + S3C_DOEPCTL(ep)); 1699 if (ep) {
1700 reg &= ~S3C_DxEPCTL_MPS_MASK; 1700 reg = readl(regs + S3C_DOEPCTL(ep));
1701 reg |= mpsval; 1701 reg &= ~S3C_DxEPCTL_MPS_MASK;
1702 writel(reg, regs + S3C_DOEPCTL(ep)); 1702 reg |= mpsval;
1703 writel(reg, regs + S3C_DOEPCTL(ep));
1704 }
1703 1705
1704 return; 1706 return;
1705 1707
@@ -1919,7 +1921,8 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
1919 ints & S3C_DIEPMSK_TxFIFOEmpty) { 1921 ints & S3C_DIEPMSK_TxFIFOEmpty) {
1920 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", 1922 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
1921 __func__, idx); 1923 __func__, idx);
1922 s3c_hsotg_trytx(hsotg, hs_ep); 1924 if (!using_dma(hsotg))
1925 s3c_hsotg_trytx(hsotg, hs_ep);
1923 } 1926 }
1924 } 1927 }
1925} 1928}
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 56da49f31d6c..e5e44f8cde9a 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -263,9 +263,9 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
263 263
264 if (udc_is_newstyle(udc)) { 264 if (udc_is_newstyle(udc)) {
265 udc->driver->disconnect(udc->gadget); 265 udc->driver->disconnect(udc->gadget);
266 usb_gadget_disconnect(udc->gadget);
266 udc->driver->unbind(udc->gadget); 267 udc->driver->unbind(udc->gadget);
267 usb_gadget_udc_stop(udc->gadget, udc->driver); 268 usb_gadget_udc_stop(udc->gadget, udc->driver);
268 usb_gadget_disconnect(udc->gadget);
269 } else { 269 } else {
270 usb_gadget_stop(udc->gadget, udc->driver); 270 usb_gadget_stop(udc->gadget, udc->driver);
271 } 271 }
@@ -411,9 +411,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
411 struct usb_udc *udc = container_of(dev, struct usb_udc, dev); 411 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
412 412
413 if (sysfs_streq(buf, "connect")) { 413 if (sysfs_streq(buf, "connect")) {
414 if (udc_is_newstyle(udc))
415 usb_gadget_udc_start(udc->gadget, udc->driver);
414 usb_gadget_connect(udc->gadget); 416 usb_gadget_connect(udc->gadget);
415 } else if (sysfs_streq(buf, "disconnect")) { 417 } else if (sysfs_streq(buf, "disconnect")) {
416 usb_gadget_disconnect(udc->gadget); 418 usb_gadget_disconnect(udc->gadget);
419 if (udc_is_newstyle(udc))
420 usb_gadget_udc_stop(udc->gadget, udc->driver);
417 } else { 421 } else {
418 dev_err(dev, "unsupported command '%s'\n", buf); 422 dev_err(dev, "unsupported command '%s'\n", buf);
419 return -EINVAL; 423 return -EINVAL;
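
Note: the udc-core hunks reorder gadget teardown for new-style UDCs so the pull-up is dropped before the gadget driver is unbound and the controller stopped, and the sysfs soft-connect path now starts/stops the UDC around connect/disconnect. A sketch of the teardown ordering only (struct layout as in udc-core above; the softconn path follows the same pairing):

    /* Sketch: disconnect first so the host sees the device drop off the
     * bus while the gadget driver is still bound, then unbind, then stop
     * the controller. */
    static void remove_newstyle_gadget(struct usb_udc *udc)
    {
            udc->driver->disconnect(udc->gadget);
            usb_gadget_disconnect(udc->gadget);             /* pull-up off */
            udc->driver->unbind(udc->gadget);
            usb_gadget_udc_stop(udc->gadget, udc->driver);
    }
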
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index bc78c606c12b..ca4e03a1c73a 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -28,7 +28,7 @@
28 28
29struct uvc_request_data 29struct uvc_request_data
30{ 30{
31 unsigned int length; 31 __s32 length;
32 __u8 data[60]; 32 __u8 data[60];
33}; 33};
34 34
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index d776adb2da67..0cdf89d32a15 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -543,11 +543,11 @@ done:
543 return ret; 543 return ret;
544} 544}
545 545
546/* called with queue->irqlock held.. */
546static struct uvc_buffer * 547static struct uvc_buffer *
547uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) 548uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
548{ 549{
549 struct uvc_buffer *nextbuf; 550 struct uvc_buffer *nextbuf;
550 unsigned long flags;
551 551
552 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && 552 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
553 buf->buf.length != buf->buf.bytesused) { 553 buf->buf.length != buf->buf.bytesused) {
@@ -556,14 +556,12 @@ uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
556 return buf; 556 return buf;
557 } 557 }
558 558
559 spin_lock_irqsave(&queue->irqlock, flags);
560 list_del(&buf->queue); 559 list_del(&buf->queue);
561 if (!list_empty(&queue->irqqueue)) 560 if (!list_empty(&queue->irqqueue))
562 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 561 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
563 queue); 562 queue);
564 else 563 else
565 nextbuf = NULL; 564 nextbuf = NULL;
566 spin_unlock_irqrestore(&queue->irqlock, flags);
567 565
568 buf->buf.sequence = queue->sequence++; 566 buf->buf.sequence = queue->sequence++;
569 do_gettimeofday(&buf->buf.timestamp); 567 do_gettimeofday(&buf->buf.timestamp);
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index f6e083b50191..54d7ca559cb2 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
39 if (data->length < 0) 39 if (data->length < 0)
40 return usb_ep_set_halt(cdev->gadget->ep0); 40 return usb_ep_set_halt(cdev->gadget->ep0);
41 41
42 req->length = min(uvc->event_length, data->length); 42 req->length = min_t(unsigned int, uvc->event_length, data->length);
43 req->zero = data->length < uvc->event_length; 43 req->zero = data->length < uvc->event_length;
44 req->dma = DMA_ADDR_INVALID; 44 req->dma = DMA_ADDR_INVALID;
45 45
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3e7345172e03..d0a84bd3f3eb 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -218,6 +218,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
218 u32 portsc; 218 u32 portsc;
219 struct usb_hcd *hcd = ehci_to_hcd(ehci); 219 struct usb_hcd *hcd = ehci_to_hcd(ehci);
220 void __iomem *non_ehci = hcd->regs; 220 void __iomem *non_ehci = hcd->regs;
221 struct fsl_usb2_platform_data *pdata;
222
223 pdata = hcd->self.controller->platform_data;
221 224
222 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); 225 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
223 portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW); 226 portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
@@ -234,7 +237,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
234 /* fall through */ 237 /* fall through */
235 case FSL_USB2_PHY_UTMI: 238 case FSL_USB2_PHY_UTMI:
236 /* enable UTMI PHY */ 239 /* enable UTMI PHY */
237 setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN); 240 if (pdata->have_sysif_regs)
241 setbits32(non_ehci + FSL_SOC_USB_CTRL,
242 CTRL_UTMI_PHY_EN);
238 portsc |= PORT_PTS_UTMI; 243 portsc |= PORT_PTS_UTMI;
239 break; 244 break;
240 case FSL_USB2_PHY_NONE: 245 case FSL_USB2_PHY_NONE:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 806cc95317aa..4a3bc5b7a06f 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -858,8 +858,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
858 goto dead; 858 goto dead;
859 } 859 }
860 860
861 /*
862 * We don't use STS_FLR, but some controllers don't like it to
863 * remain on, so mask it out along with the other status bits.
864 */
865 masked_status = status & (INTR_MASK | STS_FLR);
866
861 /* Shared IRQ? */ 867 /* Shared IRQ? */
862 masked_status = status & INTR_MASK;
863 if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { 868 if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
864 spin_unlock(&ehci->lock); 869 spin_unlock(&ehci->lock);
865 return IRQ_NONE; 870 return IRQ_NONE;
@@ -910,7 +915,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
910 pcd_status = status; 915 pcd_status = status;
911 916
912 /* resume root hub? */ 917 /* resume root hub? */
913 if (!(cmd & CMD_RUN)) 918 if (ehci->rh_state == EHCI_RH_SUSPENDED)
914 usb_hcd_resume_root_hub(hcd); 919 usb_hcd_resume_root_hub(hcd);
915 920
916 /* get per-port change detect bits */ 921 /* get per-port change detect bits */
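
Note: the ehci_irq() change folds STS_FLR into the mask used to build masked_status, so a frame-list-rollover bit that some controllers leave set gets acknowledged along with the interrupts the driver actually handles, instead of leaving the status register permanently non-zero; the root-hub resume test also switches from the CMD_RUN bit to the driver's own rh_state. A small sketch of the masking step (pure bit arithmetic; values illustrative):

    /* Sketch: ack only the bits we masked in, including STS_FLR even
     * though no action is taken on it.  The caller writes the returned
     * value back to the STS register to acknowledge those bits. */
    static unsigned int ehci_masked_status(unsigned int status,
                                           unsigned int intr_mask,
                                           unsigned int sts_flr)
    {
            return status & (intr_mask | sts_flr);
    }
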
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index bba9850f32f0..5c78f9e71466 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -42,6 +42,7 @@
42#include <plat/usb.h> 42#include <plat/usb.h>
43#include <linux/regulator/consumer.h> 43#include <linux/regulator/consumer.h>
44#include <linux/pm_runtime.h> 44#include <linux/pm_runtime.h>
45#include <linux/gpio.h>
45 46
46/* EHCI Register Set */ 47/* EHCI Register Set */
47#define EHCI_INSNREG04 (0xA0) 48#define EHCI_INSNREG04 (0xA0)
@@ -191,6 +192,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
191 } 192 }
192 } 193 }
193 194
195 if (pdata->phy_reset) {
196 if (gpio_is_valid(pdata->reset_gpio_port[0]))
197 gpio_request_one(pdata->reset_gpio_port[0],
198 GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
199
200 if (gpio_is_valid(pdata->reset_gpio_port[1]))
201 gpio_request_one(pdata->reset_gpio_port[1],
202 GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
203
204 /* Hold the PHY in RESET for enough time till DIR is high */
205 udelay(10);
206 }
207
194 pm_runtime_enable(dev); 208 pm_runtime_enable(dev);
195 pm_runtime_get_sync(dev); 209 pm_runtime_get_sync(dev);
196 210
@@ -237,6 +251,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
237 /* root ports should always stay powered */ 251 /* root ports should always stay powered */
238 ehci_port_power(omap_ehci, 1); 252 ehci_port_power(omap_ehci, 1);
239 253
254 if (pdata->phy_reset) {
255 /* Hold the PHY in RESET for enough time till
256 * PHY is settled and ready
257 */
258 udelay(10);
259
260 if (gpio_is_valid(pdata->reset_gpio_port[0]))
261 gpio_set_value(pdata->reset_gpio_port[0], 1);
262
263 if (gpio_is_valid(pdata->reset_gpio_port[1]))
264 gpio_set_value(pdata->reset_gpio_port[1], 1);
265 }
266
240 return 0; 267 return 0;
241 268
242err_add_hcd: 269err_add_hcd:
@@ -259,8 +286,9 @@ err_io:
259 */ 286 */
260static int ehci_hcd_omap_remove(struct platform_device *pdev) 287static int ehci_hcd_omap_remove(struct platform_device *pdev)
261{ 288{
262 struct device *dev = &pdev->dev; 289 struct device *dev = &pdev->dev;
263 struct usb_hcd *hcd = dev_get_drvdata(dev); 290 struct usb_hcd *hcd = dev_get_drvdata(dev);
291 struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
264 292
265 usb_remove_hcd(hcd); 293 usb_remove_hcd(hcd);
266 disable_put_regulator(dev->platform_data); 294 disable_put_regulator(dev->platform_data);
@@ -269,6 +297,13 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
269 pm_runtime_put_sync(dev); 297 pm_runtime_put_sync(dev);
270 pm_runtime_disable(dev); 298 pm_runtime_disable(dev);
271 299
300 if (pdata->phy_reset) {
301 if (gpio_is_valid(pdata->reset_gpio_port[0]))
302 gpio_free(pdata->reset_gpio_port[0]);
303
304 if (gpio_is_valid(pdata->reset_gpio_port[1]))
305 gpio_free(pdata->reset_gpio_port[1]);
306 }
272 return 0; 307 return 0;
273} 308}
274 309
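
Note: the ehci-omap probe/remove changes add optional PHY reset handling: each reset GPIO is requested already driven low, held low across controller initialization, raised only after the root ports are powered, and freed on remove. A condensed sketch of the sequencing for a single port (the GPIO number and label are placeholders; delays follow the hunk above):

    #include <linux/gpio.h>
    #include <linux/delay.h>

    /* Hold the PHY in reset (line low) before touching the controller... */
    static int phy_reset_assert(int gpio)
    {
            int err = 0;

            if (gpio_is_valid(gpio))
                    err = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW,
                                           "PHY reset");
            udelay(10);             /* let the line settle */
            return err;
    }

    /* ...and release it only after the controller and port power are up. */
    static void phy_reset_deassert(int gpio)
    {
            udelay(10);
            if (gpio_is_valid(gpio))
                    gpio_set_value(gpio, 1);
    }
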
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 01bb7241d6ef..fe8dc069164e 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
144 hcd->has_tt = 1; 144 hcd->has_tt = 1;
145 tdi_reset(ehci); 145 tdi_reset(ehci);
146 } 146 }
147 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
148 /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
149 if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
150 ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
151 hcd->broken_pci_sleep = 1;
152 device_set_wakeup_capable(&pdev->dev, false);
153 }
154 }
147 break; 155 break;
148 case PCI_VENDOR_ID_TDI: 156 case PCI_VENDOR_ID_TDI:
149 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 157 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 73544bd440bd..f214a80cdee2 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -24,6 +24,7 @@
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27#include <linux/pm_runtime.h>
27 28
28#include <mach/usb_phy.h> 29#include <mach/usb_phy.h>
29#include <mach/iomap.h> 30#include <mach/iomap.h>
@@ -37,9 +38,7 @@ struct tegra_ehci_hcd {
37 struct clk *emc_clk; 38 struct clk *emc_clk;
38 struct usb_phy *transceiver; 39 struct usb_phy *transceiver;
39 int host_resumed; 40 int host_resumed;
40 int bus_suspended;
41 int port_resuming; 41 int port_resuming;
42 int power_down_on_bus_suspend;
43 enum tegra_usb_phy_port_speed port_speed; 42 enum tegra_usb_phy_port_speed port_speed;
44}; 43};
45 44
@@ -273,120 +272,6 @@ static void tegra_ehci_restart(struct usb_hcd *hcd)
273 up_write(&ehci_cf_port_reset_rwsem); 272 up_write(&ehci_cf_port_reset_rwsem);
274} 273}
275 274
276static int tegra_usb_suspend(struct usb_hcd *hcd)
277{
278 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
279 struct ehci_regs __iomem *hw = tegra->ehci->regs;
280 unsigned long flags;
281
282 spin_lock_irqsave(&tegra->ehci->lock, flags);
283
284 tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
285 ehci_halt(tegra->ehci);
286 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
287
288 spin_unlock_irqrestore(&tegra->ehci->lock, flags);
289
290 tegra_ehci_power_down(hcd);
291 return 0;
292}
293
294static int tegra_usb_resume(struct usb_hcd *hcd)
295{
296 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
297 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
298 struct ehci_regs __iomem *hw = ehci->regs;
299 unsigned long val;
300
301 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
302 tegra_ehci_power_up(hcd);
303
304 if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
305 /* Wait for the phy to detect new devices
306 * before we restart the controller */
307 msleep(10);
308 goto restart;
309 }
310
311 /* Force the phy to keep data lines in suspend state */
312 tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
313
314 /* Enable host mode */
315 tdi_reset(ehci);
316
317 /* Enable Port Power */
318 val = readl(&hw->port_status[0]);
319 val |= PORT_POWER;
320 writel(val, &hw->port_status[0]);
321 udelay(10);
322
323 /* Check if the phy resume from LP0. When the phy resume from LP0
324 * USB register will be reset. */
325 if (!readl(&hw->async_next)) {
326 /* Program the field PTC based on the saved speed mode */
327 val = readl(&hw->port_status[0]);
328 val &= ~PORT_TEST(~0);
329 if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
330 val |= PORT_TEST_FORCE;
331 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
332 val |= PORT_TEST(6);
333 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
334 val |= PORT_TEST(7);
335 writel(val, &hw->port_status[0]);
336 udelay(10);
337
338 /* Disable test mode by setting PTC field to NORMAL_OP */
339 val = readl(&hw->port_status[0]);
340 val &= ~PORT_TEST(~0);
341 writel(val, &hw->port_status[0]);
342 udelay(10);
343 }
344
345 /* Poll until CCS is enabled */
346 if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
347 PORT_CONNECT, 2000)) {
348 pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
349 goto restart;
350 }
351
352 /* Poll until PE is enabled */
353 if (handshake(ehci, &hw->port_status[0], PORT_PE,
354 PORT_PE, 2000)) {
355 pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
356 goto restart;
357 }
358
359 /* Clear the PCI status, to avoid an interrupt taken upon resume */
360 val = readl(&hw->status);
361 val |= STS_PCD;
362 writel(val, &hw->status);
363
364 /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
365 val = readl(&hw->port_status[0]);
366 if ((val & PORT_POWER) && (val & PORT_PE)) {
367 val |= PORT_SUSPEND;
368 writel(val, &hw->port_status[0]);
369
370 /* Wait until port suspend completes */
371 if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
372 PORT_SUSPEND, 1000)) {
373 pr_err("%s: timeout waiting for PORT_SUSPEND\n",
374 __func__);
375 goto restart;
376 }
377 }
378
379 tegra_ehci_phy_restore_end(tegra->phy);
380 return 0;
381
382restart:
383 if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
384 tegra_ehci_phy_restore_end(tegra->phy);
385
386 tegra_ehci_restart(hcd);
387 return 0;
388}
389
390static void tegra_ehci_shutdown(struct usb_hcd *hcd) 275static void tegra_ehci_shutdown(struct usb_hcd *hcd)
391{ 276{
392 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); 277 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
@@ -434,36 +319,6 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
434 return retval; 319 return retval;
435} 320}
436 321
437#ifdef CONFIG_PM
438static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
439{
440 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
441 int error_status = 0;
442
443 error_status = ehci_bus_suspend(hcd);
444 if (!error_status && tegra->power_down_on_bus_suspend) {
445 tegra_usb_suspend(hcd);
446 tegra->bus_suspended = 1;
447 }
448
449 return error_status;
450}
451
452static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
453{
454 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
455
456 if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
457 tegra_usb_resume(hcd);
458 tegra->bus_suspended = 0;
459 }
460
461 tegra_usb_phy_preresume(tegra->phy);
462 tegra->port_resuming = 1;
463 return ehci_bus_resume(hcd);
464}
465#endif
466
467struct temp_buffer { 322struct temp_buffer {
468 void *kmalloc_ptr; 323 void *kmalloc_ptr;
469 void *old_xfer_buffer; 324 void *old_xfer_buffer;
@@ -574,8 +429,8 @@ static const struct hc_driver tegra_ehci_hc_driver = {
574 .hub_control = tegra_ehci_hub_control, 429 .hub_control = tegra_ehci_hub_control,
575 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, 430 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
576#ifdef CONFIG_PM 431#ifdef CONFIG_PM
577 .bus_suspend = tegra_ehci_bus_suspend, 432 .bus_suspend = ehci_bus_suspend,
578 .bus_resume = tegra_ehci_bus_resume, 433 .bus_resume = ehci_bus_resume,
579#endif 434#endif
580 .relinquish_port = ehci_relinquish_port, 435 .relinquish_port = ehci_relinquish_port,
581 .port_handed_over = ehci_port_handed_over, 436 .port_handed_over = ehci_port_handed_over,
@@ -603,11 +458,187 @@ static int setup_vbus_gpio(struct platform_device *pdev)
603 dev_err(&pdev->dev, "can't enable vbus\n"); 458 dev_err(&pdev->dev, "can't enable vbus\n");
604 return err; 459 return err;
605 } 460 }
606 gpio_set_value(gpio, 1);
607 461
608 return err; 462 return err;
609} 463}
610 464
465#ifdef CONFIG_PM
466
467static int controller_suspend(struct device *dev)
468{
469 struct tegra_ehci_hcd *tegra =
470 platform_get_drvdata(to_platform_device(dev));
471 struct ehci_hcd *ehci = tegra->ehci;
472 struct usb_hcd *hcd = ehci_to_hcd(ehci);
473 struct ehci_regs __iomem *hw = ehci->regs;
474 unsigned long flags;
475
476 if (time_before(jiffies, ehci->next_statechange))
477 msleep(10);
478
479 spin_lock_irqsave(&ehci->lock, flags);
480
481 tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
482 ehci_halt(ehci);
483 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
484
485 spin_unlock_irqrestore(&ehci->lock, flags);
486
487 tegra_ehci_power_down(hcd);
488 return 0;
489}
490
491static int controller_resume(struct device *dev)
492{
493 struct tegra_ehci_hcd *tegra =
494 platform_get_drvdata(to_platform_device(dev));
495 struct ehci_hcd *ehci = tegra->ehci;
496 struct usb_hcd *hcd = ehci_to_hcd(ehci);
497 struct ehci_regs __iomem *hw = ehci->regs;
498 unsigned long val;
499
500 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
501 tegra_ehci_power_up(hcd);
502
503 if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
504 /* Wait for the phy to detect new devices
505 * before we restart the controller */
506 msleep(10);
507 goto restart;
508 }
509
510 /* Force the phy to keep data lines in suspend state */
511 tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
512
513 /* Enable host mode */
514 tdi_reset(ehci);
515
516 /* Enable Port Power */
517 val = readl(&hw->port_status[0]);
518 val |= PORT_POWER;
519 writel(val, &hw->port_status[0]);
520 udelay(10);
521
522 /* Check if the phy resume from LP0. When the phy resume from LP0
523 * USB register will be reset. */
524 if (!readl(&hw->async_next)) {
525 /* Program the field PTC based on the saved speed mode */
526 val = readl(&hw->port_status[0]);
527 val &= ~PORT_TEST(~0);
528 if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
529 val |= PORT_TEST_FORCE;
530 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
531 val |= PORT_TEST(6);
532 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
533 val |= PORT_TEST(7);
534 writel(val, &hw->port_status[0]);
535 udelay(10);
536
537 /* Disable test mode by setting PTC field to NORMAL_OP */
538 val = readl(&hw->port_status[0]);
539 val &= ~PORT_TEST(~0);
540 writel(val, &hw->port_status[0]);
541 udelay(10);
542 }
543
544 /* Poll until CCS is enabled */
545 if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
546 PORT_CONNECT, 2000)) {
547 pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
548 goto restart;
549 }
550
551 /* Poll until PE is enabled */
552 if (handshake(ehci, &hw->port_status[0], PORT_PE,
553 PORT_PE, 2000)) {
554 pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
555 goto restart;
556 }
557
558 /* Clear the PCI status, to avoid an interrupt taken upon resume */
559 val = readl(&hw->status);
560 val |= STS_PCD;
561 writel(val, &hw->status);
562
563 /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
564 val = readl(&hw->port_status[0]);
565 if ((val & PORT_POWER) && (val & PORT_PE)) {
566 val |= PORT_SUSPEND;
567 writel(val, &hw->port_status[0]);
568
569 /* Wait until port suspend completes */
570 if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
571 PORT_SUSPEND, 1000)) {
572 pr_err("%s: timeout waiting for PORT_SUSPEND\n",
573 __func__);
574 goto restart;
575 }
576 }
577
578 tegra_ehci_phy_restore_end(tegra->phy);
579 goto done;
580
581 restart:
582 if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
583 tegra_ehci_phy_restore_end(tegra->phy);
584
585 tegra_ehci_restart(hcd);
586
587 done:
588 tegra_usb_phy_preresume(tegra->phy);
589 tegra->port_resuming = 1;
590 return 0;
591}
592
593static int tegra_ehci_suspend(struct device *dev)
594{
595 struct tegra_ehci_hcd *tegra =
596 platform_get_drvdata(to_platform_device(dev));
597 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
598 int rc = 0;
599
600 /*
601 * When system sleep is supported and USB controller wakeup is
602 * implemented: If the controller is runtime-suspended and the
603 * wakeup setting needs to be changed, call pm_runtime_resume().
604 */
605 if (HCD_HW_ACCESSIBLE(hcd))
606 rc = controller_suspend(dev);
607 return rc;
608}
609
610static int tegra_ehci_resume(struct device *dev)
611{
612 int rc;
613
614 rc = controller_resume(dev);
615 if (rc == 0) {
616 pm_runtime_disable(dev);
617 pm_runtime_set_active(dev);
618 pm_runtime_enable(dev);
619 }
620 return rc;
621}
622
623static int tegra_ehci_runtime_suspend(struct device *dev)
624{
625 return controller_suspend(dev);
626}
627
628static int tegra_ehci_runtime_resume(struct device *dev)
629{
630 return controller_resume(dev);
631}
632
633static const struct dev_pm_ops tegra_ehci_pm_ops = {
634 .suspend = tegra_ehci_suspend,
635 .resume = tegra_ehci_resume,
636 .runtime_suspend = tegra_ehci_runtime_suspend,
637 .runtime_resume = tegra_ehci_runtime_resume,
638};
639
640#endif
641
611static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); 642static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
612 643
613static int tegra_ehci_probe(struct platform_device *pdev) 644static int tegra_ehci_probe(struct platform_device *pdev)
@@ -722,7 +753,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
722 } 753 }
723 754
724 tegra->host_resumed = 1; 755 tegra->host_resumed = 1;
725 tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
726 tegra->ehci = hcd_to_ehci(hcd); 756 tegra->ehci = hcd_to_ehci(hcd);
727 757
728 irq = platform_get_irq(pdev, 0); 758 irq = platform_get_irq(pdev, 0);
@@ -731,7 +761,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
731 err = -ENODEV; 761 err = -ENODEV;
732 goto fail; 762 goto fail;
733 } 763 }
734 set_irq_flags(irq, IRQF_VALID);
735 764
736#ifdef CONFIG_USB_OTG_UTILS 765#ifdef CONFIG_USB_OTG_UTILS
737 if (pdata->operating_mode == TEGRA_USB_OTG) { 766 if (pdata->operating_mode == TEGRA_USB_OTG) {
@@ -747,6 +776,14 @@ static int tegra_ehci_probe(struct platform_device *pdev)
747 goto fail; 776 goto fail;
748 } 777 }
749 778
779 pm_runtime_set_active(&pdev->dev);
780 pm_runtime_get_noresume(&pdev->dev);
781
782 /* Don't skip the pm_runtime_forbid call if wakeup isn't working */
783 /* if (!pdata->power_down_on_bus_suspend) */
784 pm_runtime_forbid(&pdev->dev);
785 pm_runtime_enable(&pdev->dev);
786 pm_runtime_put_sync(&pdev->dev);
750 return err; 787 return err;
751 788
752fail: 789fail:
@@ -773,33 +810,6 @@ fail_hcd:
773 return err; 810 return err;
774} 811}
775 812
776#ifdef CONFIG_PM
777static int tegra_ehci_resume(struct platform_device *pdev)
778{
779 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
780 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
781
782 if (tegra->bus_suspended)
783 return 0;
784
785 return tegra_usb_resume(hcd);
786}
787
788static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
789{
790 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
791 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
792
793 if (tegra->bus_suspended)
794 return 0;
795
796 if (time_before(jiffies, tegra->ehci->next_statechange))
797 msleep(10);
798
799 return tegra_usb_suspend(hcd);
800}
801#endif
802
803static int tegra_ehci_remove(struct platform_device *pdev) 813static int tegra_ehci_remove(struct platform_device *pdev)
804{ 814{
805 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); 815 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
@@ -808,6 +818,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
808 if (tegra == NULL || hcd == NULL) 818 if (tegra == NULL || hcd == NULL)
809 return -EINVAL; 819 return -EINVAL;
810 820
821 pm_runtime_get_sync(&pdev->dev);
822 pm_runtime_disable(&pdev->dev);
823 pm_runtime_put_noidle(&pdev->dev);
824
811#ifdef CONFIG_USB_OTG_UTILS 825#ifdef CONFIG_USB_OTG_UTILS
812 if (tegra->transceiver) { 826 if (tegra->transceiver) {
813 otg_set_host(tegra->transceiver->otg, NULL); 827 otg_set_host(tegra->transceiver->otg, NULL);
@@ -848,13 +862,12 @@ static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
848static struct platform_driver tegra_ehci_driver = { 862static struct platform_driver tegra_ehci_driver = {
849 .probe = tegra_ehci_probe, 863 .probe = tegra_ehci_probe,
850 .remove = tegra_ehci_remove, 864 .remove = tegra_ehci_remove,
851#ifdef CONFIG_PM
852 .suspend = tegra_ehci_suspend,
853 .resume = tegra_ehci_resume,
854#endif
855 .shutdown = tegra_ehci_hcd_shutdown, 865 .shutdown = tegra_ehci_hcd_shutdown,
856 .driver = { 866 .driver = {
857 .name = "tegra-ehci", 867 .name = "tegra-ehci",
858 .of_match_table = tegra_ehci_of_match, 868 .of_match_table = tegra_ehci_of_match,
869#ifdef CONFIG_PM
870 .pm = &tegra_ehci_pm_ops,
871#endif
859 } 872 }
860}; 873};
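
Note: the ehci-tegra rework deletes the driver-private bus-suspend power-down paths and the legacy platform_driver .suspend/.resume hooks, and instead shares one controller_suspend()/controller_resume() pair between system sleep and runtime PM through a dev_pm_ops table hung off the driver core. A skeleton of that wiring only (callback bodies reduced to comments; not a complete driver):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>
    #include <linux/platform_device.h>

    static int controller_suspend(struct device *dev)
    {
            /* halt the controller, power the PHY down */
            return 0;
    }

    static int controller_resume(struct device *dev)
    {
            /* power up, restore port state or restart the controller */
            return 0;
    }

    static int drv_suspend(struct device *dev)
    {
            return controller_suspend(dev);
    }

    static int drv_resume(struct device *dev)
    {
            int rc = controller_resume(dev);

            if (rc == 0) {
                    /* resync runtime-PM state after a system resume */
                    pm_runtime_disable(dev);
                    pm_runtime_set_active(dev);
                    pm_runtime_enable(dev);
            }
            return rc;
    }

    static const struct dev_pm_ops drv_pm_ops = {
            .suspend         = drv_suspend,
            .resume          = drv_resume,
            .runtime_suspend = controller_suspend,
            .runtime_resume  = controller_resume,
    };

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example-ehci",   /* placeholder name */
                    .pm   = &drv_pm_ops,
            },
    };
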
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 09f597ad6e00..13ebeca8e73e 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -94,7 +94,7 @@ static void at91_stop_hc(struct platform_device *pdev)
94 94
95/*-------------------------------------------------------------------------*/ 95/*-------------------------------------------------------------------------*/
96 96
97static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); 97static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
98 98
99/* configure so an HC device and id are always provided */ 99/* configure so an HC device and id are always provided */
100/* always called with process context; sleeping is OK */ 100/* always called with process context; sleeping is OK */
@@ -108,7 +108,7 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
108 * then invokes the start() method for the HCD associated with it 108 * then invokes the start() method for the HCD associated with it
109 * through the hotplug entry's driver_data. 109 * through the hotplug entry's driver_data.
110 */ 110 */
111static int usb_hcd_at91_probe(const struct hc_driver *driver, 111static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver,
112 struct platform_device *pdev) 112 struct platform_device *pdev)
113{ 113{
114 int retval; 114 int retval;
@@ -203,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
203 * context, "rmmod" or something similar. 203 * context, "rmmod" or something similar.
204 * 204 *
205 */ 205 */
206static void usb_hcd_at91_remove(struct usb_hcd *hcd, 206static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd,
207 struct platform_device *pdev) 207 struct platform_device *pdev)
208{ 208{
209 usb_remove_hcd(hcd); 209 usb_remove_hcd(hcd);
@@ -545,7 +545,7 @@ static int __devinit ohci_at91_of_init(struct platform_device *pdev)
545 545
546/*-------------------------------------------------------------------------*/ 546/*-------------------------------------------------------------------------*/
547 547
548static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) 548static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev)
549{ 549{
550 struct at91_usbh_data *pdata; 550 struct at91_usbh_data *pdata;
551 int i; 551 int i;
@@ -620,7 +620,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
620 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); 620 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
621} 621}
622 622
623static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) 623static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev)
624{ 624{
625 struct at91_usbh_data *pdata = pdev->dev.platform_data; 625 struct at91_usbh_data *pdata = pdev->dev.platform_data;
626 int i; 626 int i;
@@ -696,7 +696,7 @@ MODULE_ALIAS("platform:at91_ohci");
696 696
697static struct platform_driver ohci_hcd_at91_driver = { 697static struct platform_driver ohci_hcd_at91_driver = {
698 .probe = ohci_hcd_at91_drv_probe, 698 .probe = ohci_hcd_at91_drv_probe,
699 .remove = ohci_hcd_at91_drv_remove, 699 .remove = __devexit_p(ohci_hcd_at91_drv_remove),
700 .shutdown = usb_hcd_platform_shutdown, 700 .shutdown = usb_hcd_platform_shutdown,
701 .suspend = ohci_hcd_at91_drv_suspend, 701 .suspend = ohci_hcd_at91_drv_suspend,
702 .resume = ohci_hcd_at91_drv_resume, 702 .resume = ohci_hcd_at91_drv_resume,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 959145baf3cf..9dcb68f04f03 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -423,7 +423,7 @@ alloc_sglist(int nents, int max, int vary)
423 unsigned i; 423 unsigned i;
424 unsigned size = max; 424 unsigned size = max;
425 425
426 sg = kmalloc(nents * sizeof *sg, GFP_KERNEL); 426 sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
427 if (!sg) 427 if (!sg)
428 return NULL; 428 return NULL;
429 sg_init_table(sg, nents); 429 sg_init_table(sg, nents);
@@ -904,6 +904,9 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
904 struct ctrl_ctx context; 904 struct ctrl_ctx context;
905 int i; 905 int i;
906 906
907 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
908 return -EOPNOTSUPP;
909
907 spin_lock_init(&context.lock); 910 spin_lock_init(&context.lock);
908 context.dev = dev; 911 context.dev = dev;
909 init_completion(&context.complete); 912 init_completion(&context.complete);
@@ -1981,8 +1984,6 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1981 1984
1982 /* queued control messaging */ 1985 /* queued control messaging */
1983 case 10: 1986 case 10:
1984 if (param->sglen == 0)
1985 break;
1986 retval = 0; 1987 retval = 0;
1987 dev_info(&intf->dev, 1988 dev_info(&intf->dev,
1988 "TEST 10: queue %d control calls, %d times\n", 1989 "TEST 10: queue %d control calls, %d times\n",
@@ -2276,6 +2277,8 @@ usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2276 if (status < 0) { 2277 if (status < 0) {
2277 WARNING(dev, "couldn't get endpoints, %d\n", 2278 WARNING(dev, "couldn't get endpoints, %d\n",
2278 status); 2279 status);
2280 kfree(dev->buf);
2281 kfree(dev);
2279 return status; 2282 return status;
2280 } 2283 }
2281 /* may find bulk or ISO pipes */ 2284 /* may find bulk or ISO pipes */
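
Note: test_ctrl_queue() multiplies param->iterations by param->sglen further down, so the new guard rejects a zero sglen and any combination whose product would overflow an unsigned int before the multiplication happens; the now-redundant sglen check in the ioctl dispatcher is dropped. A stand-alone illustration of the overflow guard:

    #include <stdio.h>
    #include <limits.h>

    /* Reject n*m requests that cannot be represented in an unsigned int,
     * using a division so the check itself cannot overflow. */
    static int ctrl_queue_params_invalid(unsigned int iterations,
                                         unsigned int sglen)
    {
            return sglen == 0 || iterations > UINT_MAX / sglen;
    }

    int main(void)
    {
            printf("%d\n", ctrl_queue_params_invalid(1000, 32));       /* 0 */
            printf("%d\n", ctrl_queue_params_invalid(UINT_MAX / 2, 4)); /* 1 */
            printf("%d\n", ctrl_queue_params_invalid(5, 0));            /* 1 */
            return 0;
    }
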
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 897edda42270..70201462e19c 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref)
99 usb_put_dev(dev->udev); 99 usb_put_dev(dev->udev);
100 if (dev->cntl_urb) { 100 if (dev->cntl_urb) {
101 usb_kill_urb(dev->cntl_urb); 101 usb_kill_urb(dev->cntl_urb);
102 if (dev->cntl_req) 102 kfree(dev->cntl_req);
103 usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
104 dev->cntl_req, dev->cntl_urb->setup_dma);
105 if (dev->cntl_buffer) 103 if (dev->cntl_buffer)
106 usb_free_coherent(dev->udev, YUREX_BUF_SIZE, 104 usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
107 dev->cntl_buffer, dev->cntl_urb->transfer_dma); 105 dev->cntl_buffer, dev->cntl_urb->transfer_dma);
@@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
234 } 232 }
235 233
236 /* allocate buffer for control req */ 234 /* allocate buffer for control req */
237 dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, 235 dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
238 GFP_KERNEL,
239 &dev->cntl_urb->setup_dma);
240 if (!dev->cntl_req) { 236 if (!dev->cntl_req) {
241 err("Could not allocate cntl_req"); 237 err("Could not allocate cntl_req");
242 goto error; 238 goto error;
@@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
286 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), 282 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
287 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, 283 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
288 dev, 1); 284 dev, 1);
289 dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 285 dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
290 if (usb_submit_urb(dev->urb, GFP_KERNEL)) { 286 if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
291 retval = -EIO; 287 retval = -EIO;
292 err("Could not submitting URB"); 288 err("Could not submitting URB");
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 97ab975fa442..768b4b55c816 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -386,7 +386,7 @@ static int davinci_musb_init(struct musb *musb)
386 usb_nop_xceiv_register(); 386 usb_nop_xceiv_register();
387 musb->xceiv = usb_get_transceiver(); 387 musb->xceiv = usb_get_transceiver();
388 if (!musb->xceiv) 388 if (!musb->xceiv)
389 return -ENODEV; 389 goto unregister;
390 390
391 musb->mregs += DAVINCI_BASE_OFFSET; 391 musb->mregs += DAVINCI_BASE_OFFSET;
392 392
@@ -444,6 +444,7 @@ static int davinci_musb_init(struct musb *musb)
444 444
445fail: 445fail:
446 usb_put_transceiver(musb->xceiv); 446 usb_put_transceiver(musb->xceiv);
447unregister:
447 usb_nop_xceiv_unregister(); 448 usb_nop_xceiv_unregister();
448 return -ENODEV; 449 return -ENODEV;
449} 450}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0f8b82918a40..66aaccf04490 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -137,6 +137,9 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
137 int i = 0; 137 int i = 0;
138 u8 r; 138 u8 r;
139 u8 power; 139 u8 power;
140 int ret;
141
142 pm_runtime_get_sync(phy->io_dev);
140 143
141 /* Make sure the transceiver is not in low power mode */ 144 /* Make sure the transceiver is not in low power mode */
142 power = musb_readb(addr, MUSB_POWER); 145 power = musb_readb(addr, MUSB_POWER);
@@ -154,15 +157,22 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
154 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 157 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
155 & MUSB_ULPI_REG_CMPLT)) { 158 & MUSB_ULPI_REG_CMPLT)) {
156 i++; 159 i++;
157 if (i == 10000) 160 if (i == 10000) {
158 return -ETIMEDOUT; 161 ret = -ETIMEDOUT;
162 goto out;
163 }
159 164
160 } 165 }
161 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 166 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
162 r &= ~MUSB_ULPI_REG_CMPLT; 167 r &= ~MUSB_ULPI_REG_CMPLT;
163 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 168 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
164 169
165 return musb_readb(addr, MUSB_ULPI_REG_DATA); 170 ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
171
172out:
173 pm_runtime_put(phy->io_dev);
174
175 return ret;
166} 176}
167 177
168static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) 178static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
@@ -171,6 +181,9 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
171 int i = 0; 181 int i = 0;
172 u8 r = 0; 182 u8 r = 0;
173 u8 power; 183 u8 power;
184 int ret = 0;
185
186 pm_runtime_get_sync(phy->io_dev);
174 187
175 /* Make sure the transceiver is not in low power mode */ 188 /* Make sure the transceiver is not in low power mode */
176 power = musb_readb(addr, MUSB_POWER); 189 power = musb_readb(addr, MUSB_POWER);
@@ -184,15 +197,20 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
184 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 197 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
185 & MUSB_ULPI_REG_CMPLT)) { 198 & MUSB_ULPI_REG_CMPLT)) {
186 i++; 199 i++;
187 if (i == 10000) 200 if (i == 10000) {
188 return -ETIMEDOUT; 201 ret = -ETIMEDOUT;
202 goto out;
203 }
189 } 204 }
190 205
191 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 206 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
192 r &= ~MUSB_ULPI_REG_CMPLT; 207 r &= ~MUSB_ULPI_REG_CMPLT;
193 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 208 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
194 209
195 return 0; 210out:
211 pm_runtime_put(phy->io_dev);
212
213 return ret;
196} 214}
197#else 215#else
198#define musb_ulpi_read NULL 216#define musb_ulpi_read NULL
@@ -1904,14 +1922,17 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1904 1922
1905 if (!musb->isr) { 1923 if (!musb->isr) {
1906 status = -ENODEV; 1924 status = -ENODEV;
1907 goto fail3; 1925 goto fail2;
1908 } 1926 }
1909 1927
1910 if (!musb->xceiv->io_ops) { 1928 if (!musb->xceiv->io_ops) {
1929 musb->xceiv->io_dev = musb->controller;
1911 musb->xceiv->io_priv = musb->mregs; 1930 musb->xceiv->io_priv = musb->mregs;
1912 musb->xceiv->io_ops = &musb_ulpi_access; 1931 musb->xceiv->io_ops = &musb_ulpi_access;
1913 } 1932 }
1914 1933
1934 pm_runtime_get_sync(musb->controller);
1935
1915#ifndef CONFIG_MUSB_PIO_ONLY 1936#ifndef CONFIG_MUSB_PIO_ONLY
1916 if (use_dma && dev->dma_mask) { 1937 if (use_dma && dev->dma_mask) {
1917 struct dma_controller *c; 1938 struct dma_controller *c;
@@ -2023,6 +2044,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2023 goto fail5; 2044 goto fail5;
2024#endif 2045#endif
2025 2046
2047 pm_runtime_put(musb->controller);
2048
2026 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", 2049 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
2027 ({char *s; 2050 ({char *s;
2028 switch (musb->board_mode) { 2051 switch (musb->board_mode) {
@@ -2047,6 +2070,9 @@ fail4:
2047 musb_gadget_cleanup(musb); 2070 musb_gadget_cleanup(musb);
2048 2071
2049fail3: 2072fail3:
2073 pm_runtime_put_sync(musb->controller);
2074
2075fail2:
2050 if (musb->irq_wake) 2076 if (musb->irq_wake)
2051 device_init_wakeup(dev, 0); 2077 device_init_wakeup(dev, 0);
2052 musb_platform_exit(musb); 2078 musb_platform_exit(musb);
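
Note: the musb_ulpi_read()/musb_ulpi_write() changes bracket the register access with pm_runtime_get_sync()/pm_runtime_put() on the newly recorded io_dev, and convert the early timeout returns into a goto to a single exit label so the reference is always dropped. A condensed sketch of the bracketing pattern (the ULPI polling body is elided):

    #include <linux/pm_runtime.h>
    #include <linux/errno.h>

    /* Keep the device powered for the duration of the register access and
     * drop the reference on every exit path, including the timeout. */
    static int ulpi_access(struct device *io_dev)
    {
            int ret = 0;

            pm_runtime_get_sync(io_dev);

            /* ... issue the ULPI command and poll for REG_CMPLT ... */
            if (0 /* timed out */) {
                    ret = -ETIMEDOUT;
                    goto out;
            }

            /* ... read back / acknowledge ... */
    out:
            pm_runtime_put(io_dev);
            return ret;
    }
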
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 93de517a32a0..f4a40f001c88 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -449,7 +449,7 @@ struct musb {
449 * We added this flag to forcefully disable double 449 * We added this flag to forcefully disable double
450 * buffering until we get it working. 450 * buffering until we get it working.
451 */ 451 */
452 unsigned double_buffer_not_ok:1 __deprecated; 452 unsigned double_buffer_not_ok:1;
453 453
454 struct musb_hdrc_config *config; 454 struct musb_hdrc_config *config;
455 455
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 79cb0af779fa..ef8d744800ac 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2098,7 +2098,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2098 } 2098 }
2099 2099
2100 /* turn off DMA requests, discard state, stop polling ... */ 2100 /* turn off DMA requests, discard state, stop polling ... */
2101 if (is_in) { 2101 if (ep->epnum && is_in) {
2102 /* giveback saves bulk toggle */ 2102 /* giveback saves bulk toggle */
2103 csr = musb_h_flush_rxfifo(ep, 0); 2103 csr = musb_h_flush_rxfifo(ep, 0);
2104 2104
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2ae0bb309994..c7785e81254c 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -282,7 +282,8 @@ static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
282 282
283static int omap2430_musb_init(struct musb *musb) 283static int omap2430_musb_init(struct musb *musb)
284{ 284{
285 u32 l, status = 0; 285 u32 l;
286 int status = 0;
286 struct device *dev = musb->controller; 287 struct device *dev = musb->controller;
287 struct musb_hdrc_platform_data *plat = dev->platform_data; 288 struct musb_hdrc_platform_data *plat = dev->platform_data;
288 struct omap_musb_board_data *data = plat->board_data; 289 struct omap_musb_board_data *data = plat->board_data;
@@ -301,7 +302,7 @@ static int omap2430_musb_init(struct musb *musb)
301 302
302 status = pm_runtime_get_sync(dev); 303 status = pm_runtime_get_sync(dev);
303 if (status < 0) { 304 if (status < 0) {
304 dev_err(dev, "pm_runtime_get_sync FAILED"); 305 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
305 goto err1; 306 goto err1;
306 } 307 }
307 308
@@ -333,6 +334,7 @@ static int omap2430_musb_init(struct musb *musb)
333 334
334 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 335 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
335 336
337 pm_runtime_put_noidle(musb->controller);
336 return 0; 338 return 0;
337 339
338err1: 340err1:
@@ -452,14 +454,14 @@ static int __devinit omap2430_probe(struct platform_device *pdev)
452 goto err2; 454 goto err2;
453 } 455 }
454 456
457 pm_runtime_enable(&pdev->dev);
458
455 ret = platform_device_add(musb); 459 ret = platform_device_add(musb);
456 if (ret) { 460 if (ret) {
457 dev_err(&pdev->dev, "failed to register musb device\n"); 461 dev_err(&pdev->dev, "failed to register musb device\n");
458 goto err2; 462 goto err2;
459 } 463 }
460 464
461 pm_runtime_enable(&pdev->dev);
462
463 return 0; 465 return 0;
464 466
465err2: 467err2:
@@ -478,7 +480,6 @@ static int __devexit omap2430_remove(struct platform_device *pdev)
478 480
479 platform_device_del(glue->musb); 481 platform_device_del(glue->musb);
480 platform_device_put(glue->musb); 482 platform_device_put(glue->musb);
481 pm_runtime_put(&pdev->dev);
482 kfree(glue); 483 kfree(glue);
483 484
484 return 0; 485 return 0;
@@ -491,11 +492,13 @@ static int omap2430_runtime_suspend(struct device *dev)
491 struct omap2430_glue *glue = dev_get_drvdata(dev); 492 struct omap2430_glue *glue = dev_get_drvdata(dev);
492 struct musb *musb = glue_to_musb(glue); 493 struct musb *musb = glue_to_musb(glue);
493 494
494 musb->context.otg_interfsel = musb_readl(musb->mregs, 495 if (musb) {
495 OTG_INTERFSEL); 496 musb->context.otg_interfsel = musb_readl(musb->mregs,
497 OTG_INTERFSEL);
496 498
497 omap2430_low_level_exit(musb); 499 omap2430_low_level_exit(musb);
498 usb_phy_set_suspend(musb->xceiv, 1); 500 usb_phy_set_suspend(musb->xceiv, 1);
501 }
499 502
500 return 0; 503 return 0;
501} 504}
@@ -505,11 +508,13 @@ static int omap2430_runtime_resume(struct device *dev)
505 struct omap2430_glue *glue = dev_get_drvdata(dev); 508 struct omap2430_glue *glue = dev_get_drvdata(dev);
506 struct musb *musb = glue_to_musb(glue); 509 struct musb *musb = glue_to_musb(glue);
507 510
508 omap2430_low_level_init(musb); 511 if (musb) {
509 musb_writel(musb->mregs, OTG_INTERFSEL, 512 omap2430_low_level_init(musb);
510 musb->context.otg_interfsel); 513 musb_writel(musb->mregs, OTG_INTERFSEL,
514 musb->context.otg_interfsel);
511 515
512 usb_phy_set_suspend(musb->xceiv, 0); 516 usb_phy_set_suspend(musb->xceiv, 0);
517 }
513 518
514 return 0; 519 return 0;
515} 520}
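
[Editor's note on the omap2430 hunks above: pm_runtime_enable() is moved ahead of platform_device_add(), the remove path drops its pm_runtime_put(), the init-time pm_runtime_get_sync() is balanced with pm_runtime_put_noidle(), and the runtime callbacks NULL-check the musb pointer because they can fire before the musb core device has probed. A minimal sketch of that last guard, with illustrative helper names (save_musb_context is an assumption, not the driver's real code):

    static int glue_runtime_suspend(struct device *dev)
    {
            struct omap2430_glue *glue = dev_get_drvdata(dev);
            struct musb *musb = glue_to_musb(glue);

            /* Runtime PM is enabled before the child is added, so this
             * callback may run while musb is still NULL; just return. */
            if (!musb)
                    return 0;

            save_musb_context(musb);        /* hypothetical helper */
            omap2430_low_level_exit(musb);
            return 0;
    }
]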
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index 3ece43a2e4c1..a0a2178974fe 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -96,7 +96,7 @@ static void gpio_vbus_work(struct work_struct *work)
96 struct gpio_vbus_data *gpio_vbus = 96 struct gpio_vbus_data *gpio_vbus =
97 container_of(work, struct gpio_vbus_data, work); 97 container_of(work, struct gpio_vbus_data, work);
98 struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data; 98 struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data;
99 int gpio; 99 int gpio, status;
100 100
101 if (!gpio_vbus->phy.otg->gadget) 101 if (!gpio_vbus->phy.otg->gadget)
102 return; 102 return;
@@ -108,7 +108,9 @@ static void gpio_vbus_work(struct work_struct *work)
108 */ 108 */
109 gpio = pdata->gpio_pullup; 109 gpio = pdata->gpio_pullup;
110 if (is_vbus_powered(pdata)) { 110 if (is_vbus_powered(pdata)) {
111 status = USB_EVENT_VBUS;
111 gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL; 112 gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL;
113 gpio_vbus->phy.last_event = status;
112 usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget); 114 usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
113 115
114 /* drawing a "unit load" is *always* OK, except for OTG */ 116 /* drawing a "unit load" is *always* OK, except for OTG */
@@ -117,6 +119,9 @@ static void gpio_vbus_work(struct work_struct *work)
117 /* optionally enable D+ pullup */ 119 /* optionally enable D+ pullup */
118 if (gpio_is_valid(gpio)) 120 if (gpio_is_valid(gpio))
119 gpio_set_value(gpio, !pdata->gpio_pullup_inverted); 121 gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
122
123 atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
124 status, gpio_vbus->phy.otg->gadget);
120 } else { 125 } else {
121 /* optionally disable D+ pullup */ 126 /* optionally disable D+ pullup */
122 if (gpio_is_valid(gpio)) 127 if (gpio_is_valid(gpio))
@@ -125,7 +130,12 @@ static void gpio_vbus_work(struct work_struct *work)
125 set_vbus_draw(gpio_vbus, 0); 130 set_vbus_draw(gpio_vbus, 0);
126 131
127 usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget); 132 usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
133 status = USB_EVENT_NONE;
128 gpio_vbus->phy.state = OTG_STATE_B_IDLE; 134 gpio_vbus->phy.state = OTG_STATE_B_IDLE;
135 gpio_vbus->phy.last_event = status;
136
137 atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
138 status, gpio_vbus->phy.otg->gadget);
129 } 139 }
130} 140}
131 141
@@ -287,6 +297,9 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
287 irq, err); 297 irq, err);
288 goto err_irq; 298 goto err_irq;
289 } 299 }
300
301 ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);
302
290 INIT_WORK(&gpio_vbus->work, gpio_vbus_work); 303 INIT_WORK(&gpio_vbus->work, gpio_vbus_work);
291 304
292 gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw"); 305 gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
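
[Editor's note: the gpio_vbus change initializes the transceiver's atomic notifier head before the work item can run, records USB_EVENT_VBUS / USB_EVENT_NONE in phy.last_event, and reports them through atomic_notifier_call_chain() so chargers and other consumers can follow cable state. A hedged sketch of a consumer, assuming the 3.5-era struct usb_phy notifier field (registration call shown in a comment because it needs a phy handle, e.g. from usb_get_transceiver()):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/usb/otg.h>

    static int my_vbus_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
    {
            if (event == USB_EVENT_VBUS)
                    pr_info("vbus: cable attached\n");
            else if (event == USB_EVENT_NONE)
                    pr_info("vbus: cable removed\n");
            return NOTIFY_OK;
    }

    static struct notifier_block my_vbus_nb = {
            .notifier_call = my_vbus_notify,
    };

    /* atomic_notifier_chain_register(&phy->notifier, &my_vbus_nb); */
]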
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 0310e2df59f5..ec30f95ef399 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -287,7 +287,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
287 /* Issue the request, attempting to read 'size' bytes */ 287 /* Issue the request, attempting to read 'size' bytes */
288 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 288 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
289 request, REQTYPE_DEVICE_TO_HOST, 0x0000, 289 request, REQTYPE_DEVICE_TO_HOST, 0x0000,
290 port_priv->bInterfaceNumber, buf, size, 300); 290 port_priv->bInterfaceNumber, buf, size,
291 USB_CTRL_GET_TIMEOUT);
291 292
292 /* Convert data into an array of integers */ 293 /* Convert data into an array of integers */
293 for (i = 0; i < length; i++) 294 for (i = 0; i < length; i++)
@@ -340,12 +341,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
340 result = usb_control_msg(serial->dev, 341 result = usb_control_msg(serial->dev,
341 usb_sndctrlpipe(serial->dev, 0), 342 usb_sndctrlpipe(serial->dev, 0),
342 request, REQTYPE_HOST_TO_DEVICE, 0x0000, 343 request, REQTYPE_HOST_TO_DEVICE, 0x0000,
343 port_priv->bInterfaceNumber, buf, size, 300); 344 port_priv->bInterfaceNumber, buf, size,
345 USB_CTRL_SET_TIMEOUT);
344 } else { 346 } else {
345 result = usb_control_msg(serial->dev, 347 result = usb_control_msg(serial->dev,
346 usb_sndctrlpipe(serial->dev, 0), 348 usb_sndctrlpipe(serial->dev, 0),
347 request, REQTYPE_HOST_TO_DEVICE, data[0], 349 request, REQTYPE_HOST_TO_DEVICE, data[0],
348 port_priv->bInterfaceNumber, NULL, 0, 300); 350 port_priv->bInterfaceNumber, NULL, 0,
351 USB_CTRL_SET_TIMEOUT);
349 } 352 }
350 353
351 kfree(buf); 354 kfree(buf);
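
[Editor's note: cp210x replaces a hard-coded 300 ms control-transfer timeout with the USB core's USB_CTRL_GET_TIMEOUT / USB_CTRL_SET_TIMEOUT constants (5 seconds, defined in <linux/usb.h>), which slow devices and busy buses actually need. The general shape of such a request, as a sketch; udev, request and interface_num stand for values the caller already has, and the wValue is a placeholder:

    #include <linux/usb.h>

    u8 buf[2];
    int ret;

    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                          request,        /* vendor-specific request */
                          USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                          0x0000,         /* wValue */
                          interface_num,  /* wIndex */
                          buf, sizeof(buf),
                          USB_CTRL_GET_TIMEOUT);   /* 5000 ms, not 300 */
    if (ret < 0)
            dev_err(&udev->dev, "control request failed: %d\n", ret);
]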
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index fdd5aa2c8d82..8c8bf806f6fa 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = {
221}; 221};
222 222
223/* 'blacklist' of interfaces not served by this driver */ 223/* 'blacklist' of interfaces not served by this driver */
224static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; 224static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
225static const struct sierra_iface_info direct_ip_interface_blacklist = { 225static const struct sierra_iface_info direct_ip_interface_blacklist = {
226 .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), 226 .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
227 .ifaceinfo = direct_ip_non_serial_ifaces, 227 .ifaceinfo = direct_ip_non_serial_ifaces,
@@ -289,7 +289,6 @@ static const struct usb_device_id id_table[] = {
289 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ 289 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
290 { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ 290 { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
291 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ 291 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
292 { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
293 /* Sierra Wireless C885 */ 292 /* Sierra Wireless C885 */
294 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, 293 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
295 /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ 294 /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
@@ -299,6 +298,9 @@ static const struct usb_device_id id_table[] = {
299 /* Sierra Wireless HSPA Non-Composite Device */ 298 /* Sierra Wireless HSPA Non-Composite Device */
300 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, 299 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
301 { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ 300 { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
301 { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */
302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
303 },
302 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 304 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
303 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 305 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
304 }, 306 },
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 66797e9c5010..810c90ae2c55 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb)
645 dev_err(dev, "NEEP: URB error %d\n", urb->status); 645 dev_err(dev, "NEEP: URB error %d\n", urb->status);
646 } 646 }
647 result = usb_submit_urb(urb, GFP_ATOMIC); 647 result = usb_submit_urb(urb, GFP_ATOMIC);
648 if (result < 0) { 648 if (result < 0 && result != -ENODEV && result != -EPERM) {
649 /* ignoring unrecoverable errors */
649 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", 650 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
650 result); 651 result);
651 goto error; 652 goto error;
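
[Editor's note: the hwa-rc hunk stops treating -ENODEV and -EPERM from usb_submit_urb() as reasons to reset the device: those codes mean the device is already gone or the URB is being killed (for example by usb_kill_urb() during teardown), so there is nothing to recover. A generic sketch of that filter in a completion handler; reset_hardware() is a stand-in, not a real API:

    static void my_urb_complete(struct urb *urb)
    {
            int ret;

            /* ... consume urb->transfer_buffer ... */

            ret = usb_submit_urb(urb, GFP_ATOMIC);
            if (ret < 0 && ret != -ENODEV && ret != -EPERM) {
                    /* a real, unexpected failure: try to recover */
                    dev_err(&urb->dev->dev, "resubmit failed: %d\n", ret);
                    reset_hardware(urb->context);   /* hypothetical */
            }
    }
]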
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index a269937be1b8..8cb71bb333c2 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -107,6 +107,7 @@ struct uwb_rc_neh {
107 u8 evt_type; 107 u8 evt_type;
108 __le16 evt; 108 __le16 evt;
109 u8 context; 109 u8 context;
110 u8 completed;
110 uwb_rc_cmd_cb_f cb; 111 uwb_rc_cmd_cb_f cb;
111 void *arg; 112 void *arg;
112 113
@@ -409,6 +410,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
409 struct device *dev = &rc->uwb_dev.dev; 410 struct device *dev = &rc->uwb_dev.dev;
410 struct uwb_rc_neh *neh; 411 struct uwb_rc_neh *neh;
411 struct uwb_rceb *notif; 412 struct uwb_rceb *notif;
413 unsigned long flags;
412 414
413 if (rceb->bEventContext == 0) { 415 if (rceb->bEventContext == 0) {
414 notif = kmalloc(size, GFP_ATOMIC); 416 notif = kmalloc(size, GFP_ATOMIC);
@@ -422,7 +424,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
422 } else { 424 } else {
423 neh = uwb_rc_neh_lookup(rc, rceb); 425 neh = uwb_rc_neh_lookup(rc, rceb);
424 if (neh) { 426 if (neh) {
425 del_timer_sync(&neh->timer); 427 spin_lock_irqsave(&rc->neh_lock, flags);
428 /* to guard against a timeout */
429 neh->completed = 1;
430 del_timer(&neh->timer);
431 spin_unlock_irqrestore(&rc->neh_lock, flags);
426 uwb_rc_neh_cb(neh, rceb, size); 432 uwb_rc_neh_cb(neh, rceb, size);
427 } else 433 } else
428 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", 434 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
@@ -568,6 +574,10 @@ static void uwb_rc_neh_timer(unsigned long arg)
568 unsigned long flags; 574 unsigned long flags;
569 575
570 spin_lock_irqsave(&rc->neh_lock, flags); 576 spin_lock_irqsave(&rc->neh_lock, flags);
577 if (neh->completed) {
578 spin_unlock_irqrestore(&rc->neh_lock, flags);
579 return;
580 }
571 if (neh->context) 581 if (neh->context)
572 __uwb_rc_neh_rm(rc, neh); 582 __uwb_rc_neh_rm(rc, neh);
573 else 583 else
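
[Editor's note: the UWB neh change replaces del_timer_sync() with del_timer() plus a 'completed' flag checked under rc->neh_lock. The event can arrive from the URB completion path, where del_timer_sync() is not allowed; the flag lets a timer callback that has already started simply notice that the event path won the race and return without touching the handle again. Pattern sketch with illustrative names:

    /* event path */
    spin_lock_irqsave(&lock, flags);
    handle->completed = 1;          /* tell a racing timer to back off */
    del_timer(&handle->timer);      /* non-sync: the timer may be running */
    spin_unlock_irqrestore(&lock, flags);

    /* timer callback */
    spin_lock_irqsave(&lock, flags);
    if (handle->completed) {        /* event path already handled it */
            spin_unlock_irqrestore(&lock, flags);
            return;
    }
    /* ... genuine timeout handling ... */
    spin_unlock_irqrestore(&lock, flags);
]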
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f0da2c32fbde..5c170100de9c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -24,6 +24,7 @@
24#include <linux/if_arp.h> 24#include <linux/if_arp.h>
25#include <linux/if_tun.h> 25#include <linux/if_tun.h>
26#include <linux/if_macvlan.h> 26#include <linux/if_macvlan.h>
27#include <linux/if_vlan.h>
27 28
28#include <net/sock.h> 29#include <net/sock.h>
29 30
@@ -238,7 +239,7 @@ static void handle_tx(struct vhost_net *net)
238 239
239 vq->heads[vq->upend_idx].len = len; 240 vq->heads[vq->upend_idx].len = len;
240 ubuf->callback = vhost_zerocopy_callback; 241 ubuf->callback = vhost_zerocopy_callback;
241 ubuf->arg = vq->ubufs; 242 ubuf->ctx = vq->ubufs;
242 ubuf->desc = vq->upend_idx; 243 ubuf->desc = vq->upend_idx;
243 msg.msg_control = ubuf; 244 msg.msg_control = ubuf;
244 msg.msg_controllen = sizeof(ubuf); 245 msg.msg_controllen = sizeof(ubuf);
@@ -283,8 +284,12 @@ static int peek_head_len(struct sock *sk)
283 284
284 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); 285 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
285 head = skb_peek(&sk->sk_receive_queue); 286 head = skb_peek(&sk->sk_receive_queue);
286 if (likely(head)) 287 if (likely(head)) {
287 len = head->len; 288 len = head->len;
289 if (vlan_tx_tag_present(head))
290 len += VLAN_HLEN;
291 }
292
288 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); 293 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
289 return len; 294 return len;
290} 295}
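
[Editor's note: vhost-net's peek_head_len() now adds VLAN_HLEN when the peeked skb carries a hardware-accelerated VLAN tag. The tag lives in skb metadata rather than in skb->len, but it is reinserted before the frame reaches the guest, so the buffer-size estimate must cover it. Sketch of the accounting, assuming the 3.5-era vlan_tx_tag_present() helper (later kernels spell it skb_vlan_tag_present()):

    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>

    static int frame_len_for_guest(struct sk_buff *skb)
    {
            int len = skb->len;

            if (vlan_tx_tag_present(skb))   /* tag stored out of line */
                    len += VLAN_HLEN;       /* 4 bytes once reinserted */
            return len;
    }
]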
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 947f00d8e091..51e4c1eeec4f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1598 kfree(ubufs); 1598 kfree(ubufs);
1599} 1599}
1600 1600
1601void vhost_zerocopy_callback(void *arg) 1601void vhost_zerocopy_callback(struct ubuf_info *ubuf)
1602{ 1602{
1603 struct ubuf_info *ubuf = arg; 1603 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
1604 struct vhost_ubuf_ref *ubufs = ubuf->arg;
1605 struct vhost_virtqueue *vq = ubufs->vq; 1604 struct vhost_virtqueue *vq = ubufs->vq;
1606 1605
1607 /* set len = 1 to mark this desc buffers done DMA */ 1606 /* set len = 1 to mark this desc buffers done DMA */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8dcf4cca6bf2..8de1fd5b8efb 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
188 188
189int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 189int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
190 unsigned int log_num, u64 len); 190 unsigned int log_num, u64 len);
191void vhost_zerocopy_callback(void *arg); 191void vhost_zerocopy_callback(struct ubuf_info *);
192int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); 192int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
193 193
194#define vq_err(vq, fmt, ...) do { \ 194#define vq_err(vq, fmt, ...) do { \
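
[Editor's note: these vhost hunks track a networking-core change to struct ubuf_info. The zerocopy completion callback now receives the ubuf_info pointer itself, and driver state hangs off ubuf->ctx instead of a void *arg. Sketch of the producer/consumer contract under that layout; my_ref and complete_tx() are illustrative names:

    static void my_zerocopy_done(struct ubuf_info *ubuf)
    {
            struct my_ref *ref = ubuf->ctx;

            complete_tx(ref);               /* hypothetical bookkeeping */
    }

    /* when queueing the zerocopy skb: */
    ubuf->callback = my_zerocopy_done;
    ubuf->ctx = ref;
    msg.msg_control = ubuf;
]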
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 86922ac84412..353c02fe8a95 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -13,6 +13,7 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/gpio.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/types.h> 19#include <linux/types.h>
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6468a297e341..39571f9e0162 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -22,7 +22,9 @@
22#include <linux/font.h> 22#include <linux/font.h>
23 23
24#include <asm/hardware.h> 24#include <asm/hardware.h>
25#include <asm/page.h>
25#include <asm/parisc-device.h> 26#include <asm/parisc-device.h>
27#include <asm/pdc.h>
26#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
27#include <asm/grfioctl.h> 29#include <asm/grfioctl.h>
28 30
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 26e83d7fdd6f..b0e2a4261afe 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
73 struct uvesafb_task *utask; 73 struct uvesafb_task *utask;
74 struct uvesafb_ktask *task; 74 struct uvesafb_ktask *task;
75 75
76 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 76 if (!capable(CAP_SYS_ADMIN))
77 return; 77 return;
78 78
79 if (msg->seq >= UVESAFB_TASKS_MAX) 79 if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cb4529c40d74..b7f5173ff9e9 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -365,7 +365,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
365 struct fb_info *fb_info; 365 struct fb_info *fb_info;
366 int fb_size; 366 int fb_size;
367 int val; 367 int val;
368 int ret; 368 int ret = 0;
369 369
370 info = kzalloc(sizeof(*info), GFP_KERNEL); 370 info = kzalloc(sizeof(*info), GFP_KERNEL);
371 if (info == NULL) { 371 if (info == NULL) {
@@ -458,26 +458,31 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
458 xenfb_init_shared_page(info, fb_info); 458 xenfb_init_shared_page(info, fb_info);
459 459
460 ret = xenfb_connect_backend(dev, info); 460 ret = xenfb_connect_backend(dev, info);
461 if (ret < 0) 461 if (ret < 0) {
462 goto error; 462 xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
463 goto error_fb;
464 }
463 465
464 ret = register_framebuffer(fb_info); 466 ret = register_framebuffer(fb_info);
465 if (ret) { 467 if (ret) {
466 fb_deferred_io_cleanup(fb_info);
467 fb_dealloc_cmap(&fb_info->cmap);
468 framebuffer_release(fb_info);
469 xenbus_dev_fatal(dev, ret, "register_framebuffer"); 468 xenbus_dev_fatal(dev, ret, "register_framebuffer");
470 goto error; 469 goto error_fb;
471 } 470 }
472 info->fb_info = fb_info; 471 info->fb_info = fb_info;
473 472
474 xenfb_make_preferred_console(); 473 xenfb_make_preferred_console();
475 return 0; 474 return 0;
476 475
477 error_nomem: 476error_fb:
478 ret = -ENOMEM; 477 fb_deferred_io_cleanup(fb_info);
479 xenbus_dev_fatal(dev, ret, "allocating device memory"); 478 fb_dealloc_cmap(&fb_info->cmap);
480 error: 479 framebuffer_release(fb_info);
480error_nomem:
481 if (!ret) {
482 ret = -ENOMEM;
483 xenbus_dev_fatal(dev, ret, "allocating device memory");
484 }
485error:
481 xenfb_remove(dev); 486 xenfb_remove(dev);
482 return ret; 487 return ret;
483} 488}
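
[Editor's note: the xen-fbfront probe now funnels both the connect-backend failure and the register_framebuffer() failure through one error_fb label that releases the deferred-I/O state, the colormap and the fb_info before falling through to the generic error path; error_nomem only fills in ret when nothing set it yet. This is the usual kernel cleanup-ladder idiom, roughly (labels and helpers here are generic, not the driver's):

    ret = setup_a(dev);
    if (ret)
            goto err;
    ret = setup_b(dev);
    if (ret)
            goto err_undo_a;

    return 0;

    err_undo_a:
            teardown_a(dev);        /* undo exactly what succeeded */
    err:
            return ret;
]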
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index cbc7ceef2786..9f13b897fd64 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -435,16 +435,16 @@ static void hpwdt_start(void)
435{ 435{
436 reload = SECS_TO_TICKS(soft_margin); 436 reload = SECS_TO_TICKS(soft_margin);
437 iowrite16(reload, hpwdt_timer_reg); 437 iowrite16(reload, hpwdt_timer_reg);
438 iowrite16(0x85, hpwdt_timer_con); 438 iowrite8(0x85, hpwdt_timer_con);
439} 439}
440 440
441static void hpwdt_stop(void) 441static void hpwdt_stop(void)
442{ 442{
443 unsigned long data; 443 unsigned long data;
444 444
445 data = ioread16(hpwdt_timer_con); 445 data = ioread8(hpwdt_timer_con);
446 data &= 0xFE; 446 data &= 0xFE;
447 iowrite16(data, hpwdt_timer_con); 447 iowrite8(data, hpwdt_timer_con);
448} 448}
449 449
450static void hpwdt_ping(void) 450static void hpwdt_ping(void)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 94243136f6bf..ea20c51d24c7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -183,15 +183,17 @@ config XEN_ACPI_PROCESSOR
183 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ 183 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
184 default m 184 default m
185 help 185 help
186 This ACPI processor uploads Power Management information to the Xen hypervisor. 186 This ACPI processor uploads Power Management information to the Xen
187 187 hypervisor.
188 To do that the driver parses the Power Management data and uploads said 188
189 information to the Xen hypervisor. Then the Xen hypervisor can select the 189 To do that the driver parses the Power Management data and uploads
190 proper Cx and Pxx states. It also registers itslef as the SMM so that 190 said information to the Xen hypervisor. Then the Xen hypervisor can
191 other drivers (such as ACPI cpufreq scaling driver) will not load. 191 select the proper Cx and Pxx states. It also registers itslef as the
192 192 SMM so that other drivers (such as ACPI cpufreq scaling driver) will
193 To compile this driver as a module, choose M here: the 193 not load.
194 module will be called xen_acpi_processor If you do not know what to choose, 194
195 select M here. If the CPUFREQ drivers are built in, select Y here. 195 To compile this driver as a module, choose M here: the module will be
196 called xen_acpi_processor If you do not know what to choose, select
197 M here. If the CPUFREQ drivers are built in, select Y here.
196 198
197endmenu 199endmenu
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4b33acd8ed4e..0a8a17cd80be 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -274,7 +274,7 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
274 274
275static bool pirq_check_eoi_map(unsigned irq) 275static bool pirq_check_eoi_map(unsigned irq)
276{ 276{
277 return test_bit(irq, pirq_eoi_map); 277 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
278} 278}
279 279
280static bool pirq_needs_eoi_flag(unsigned irq) 280static bool pirq_needs_eoi_flag(unsigned irq)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 99d8151c824a..1ffd03bf8e10 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -722,7 +722,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
722 vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; 722 vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
723 723
724 if (use_ptemod) 724 if (use_ptemod)
725 vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; 725 vma->vm_flags |= VM_DONTCOPY;
726 726
727 vma->vm_private_data = map; 727 vma->vm_private_data = map;
728 728
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b4d4eac761db..f100ce20b16b 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1029,6 +1029,7 @@ int gnttab_init(void)
1029 int i; 1029 int i;
1030 unsigned int max_nr_glist_frames, nr_glist_frames; 1030 unsigned int max_nr_glist_frames, nr_glist_frames;
1031 unsigned int nr_init_grefs; 1031 unsigned int nr_init_grefs;
1032 int ret;
1032 1033
1033 nr_grant_frames = 1; 1034 nr_grant_frames = 1;
1034 boot_max_nr_grant_frames = __max_nr_grant_frames(); 1035 boot_max_nr_grant_frames = __max_nr_grant_frames();
@@ -1047,12 +1048,16 @@ int gnttab_init(void)
1047 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 1048 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
1048 for (i = 0; i < nr_glist_frames; i++) { 1049 for (i = 0; i < nr_glist_frames; i++) {
1049 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); 1050 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1050 if (gnttab_list[i] == NULL) 1051 if (gnttab_list[i] == NULL) {
1052 ret = -ENOMEM;
1051 goto ini_nomem; 1053 goto ini_nomem;
1054 }
1052 } 1055 }
1053 1056
1054 if (gnttab_resume() < 0) 1057 if (gnttab_resume() < 0) {
1055 return -ENODEV; 1058 ret = -ENODEV;
1059 goto ini_nomem;
1060 }
1056 1061
1057 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; 1062 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
1058 1063
@@ -1070,7 +1075,7 @@ int gnttab_init(void)
1070 for (i--; i >= 0; i--) 1075 for (i--; i >= 0; i--)
1071 free_page((unsigned long)gnttab_list[i]); 1076 free_page((unsigned long)gnttab_list[i]);
1072 kfree(gnttab_list); 1077 kfree(gnttab_list);
1073 return -ENOMEM; 1078 return ret;
1074} 1079}
1075EXPORT_SYMBOL_GPL(gnttab_init); 1080EXPORT_SYMBOL_GPL(gnttab_init);
1076 1081
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e14ae6cd49c..412b96cc5305 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -132,6 +132,7 @@ static void do_suspend(void)
132 err = dpm_suspend_end(PMSG_FREEZE); 132 err = dpm_suspend_end(PMSG_FREEZE);
133 if (err) { 133 if (err) {
134 printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); 134 printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
135 si.cancelled = 0;
135 goto out_resume; 136 goto out_resume;
136 } 137 }
137 138
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 174b5653cd8a..0b48579a9cd6 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -128,7 +128,10 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
128 pr_debug(" C%d: %s %d uS\n", 128 pr_debug(" C%d: %s %d uS\n",
129 cx->type, cx->desc, (u32)cx->latency); 129 cx->type, cx->desc, (u32)cx->latency);
130 } 130 }
131 } else 131 } else if (ret != -EINVAL)
132 /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
133 * table is referencing a non-existing CPU - which can happen
134 * with broken ACPI tables. */
132 pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", 135 pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
133 ret, _pr->acpi_id); 136 ret, _pr->acpi_id);
134 137
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index f20c5f178b40..a31b54d48839 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -135,7 +135,7 @@ static int read_backend_details(struct xenbus_device *xendev)
135 return xenbus_read_otherend_details(xendev, "backend-id", "backend"); 135 return xenbus_read_otherend_details(xendev, "backend-id", "backend");
136} 136}
137 137
138static int is_device_connecting(struct device *dev, void *data) 138static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
139{ 139{
140 struct xenbus_device *xendev = to_xenbus_device(dev); 140 struct xenbus_device *xendev = to_xenbus_device(dev);
141 struct device_driver *drv = data; 141 struct device_driver *drv = data;
@@ -152,16 +152,41 @@ static int is_device_connecting(struct device *dev, void *data)
152 if (drv && (dev->driver != drv)) 152 if (drv && (dev->driver != drv))
153 return 0; 153 return 0;
154 154
155 if (ignore_nonessential) {
156 /* With older QEMU, for PVonHVM guests the guest config files
157 * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
158 * which is nonsensical as there is no PV FB (there can be
159 * a PVKB) running as HVM guest. */
160
161 if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
162 return 0;
163
164 if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
165 return 0;
166 }
155 xendrv = to_xenbus_driver(dev->driver); 167 xendrv = to_xenbus_driver(dev->driver);
156 return (xendev->state < XenbusStateConnected || 168 return (xendev->state < XenbusStateConnected ||
157 (xendev->state == XenbusStateConnected && 169 (xendev->state == XenbusStateConnected &&
158 xendrv->is_ready && !xendrv->is_ready(xendev))); 170 xendrv->is_ready && !xendrv->is_ready(xendev)));
159} 171}
172static int essential_device_connecting(struct device *dev, void *data)
173{
174 return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */);
175}
176static int non_essential_device_connecting(struct device *dev, void *data)
177{
178 return is_device_connecting(dev, data, false);
179}
160 180
161static int exists_connecting_device(struct device_driver *drv) 181static int exists_essential_connecting_device(struct device_driver *drv)
162{ 182{
163 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 183 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
164 is_device_connecting); 184 essential_device_connecting);
185}
186static int exists_non_essential_connecting_device(struct device_driver *drv)
187{
188 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
189 non_essential_device_connecting);
165} 190}
166 191
167static int print_device_status(struct device *dev, void *data) 192static int print_device_status(struct device *dev, void *data)
@@ -192,6 +217,23 @@ static int print_device_status(struct device *dev, void *data)
192/* We only wait for device setup after most initcalls have run. */ 217/* We only wait for device setup after most initcalls have run. */
193static int ready_to_wait_for_devices; 218static int ready_to_wait_for_devices;
194 219
220static bool wait_loop(unsigned long start, unsigned int max_delay,
221 unsigned int *seconds_waited)
222{
223 if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
224 if (!*seconds_waited)
225 printk(KERN_WARNING "XENBUS: Waiting for "
226 "devices to initialise: ");
227 *seconds_waited += 5;
228 printk("%us...", max_delay - *seconds_waited);
229 if (*seconds_waited == max_delay)
230 return true;
231 }
232
233 schedule_timeout_interruptible(HZ/10);
234
235 return false;
236}
195/* 237/*
196 * On a 5-minute timeout, wait for all devices currently configured. We need 238 * On a 5-minute timeout, wait for all devices currently configured. We need
197 * to do this to guarantee that the filesystems and / or network devices 239 * to do this to guarantee that the filesystems and / or network devices
@@ -215,19 +257,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
215 if (!ready_to_wait_for_devices || !xen_domain()) 257 if (!ready_to_wait_for_devices || !xen_domain())
216 return; 258 return;
217 259
218 while (exists_connecting_device(drv)) { 260 while (exists_non_essential_connecting_device(drv))
219 if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { 261 if (wait_loop(start, 30, &seconds_waited))
220 if (!seconds_waited) 262 break;
221 printk(KERN_WARNING "XENBUS: Waiting for " 263
222 "devices to initialise: "); 264 /* Skips PVKB and PVFB check.*/
223 seconds_waited += 5; 265 while (exists_essential_connecting_device(drv))
224 printk("%us...", 300 - seconds_waited); 266 if (wait_loop(start, 270, &seconds_waited))
225 if (seconds_waited == 300) 267 break;
226 break;
227 }
228
229 schedule_timeout_interruptible(HZ/10);
230 }
231 268
232 if (seconds_waited) 269 if (seconds_waited)
233 printk("\n"); 270 printk("\n");
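
[Editor's note: the xenbus probe change splits the old single wait loop into a shared wait_loop() helper used twice: a short budget for non-essential frontends, then the remaining budget for essential ones, with vkbd/vfb ignored in the second pass because PV-on-HVM guest configs may list them without ever connecting them. A self-contained userspace sketch of the same budgeted, two-phase wait; the 30/300-second budgets and devices_ready() are illustrative, not the kernel's exact values:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static bool devices_ready(void)
    {
            return false;                   /* stand-in for the real check */
    }

    static bool wait_loop(time_t start, unsigned int max_delay,
                          unsigned int *waited)
    {
            if (time(NULL) > start + *waited + 5) {
                    *waited += 5;
                    printf("%us...\n", max_delay - *waited);
                    if (*waited >= max_delay)
                            return true;    /* budget from 'start' used up */
            }
            usleep(100 * 1000);             /* roughly HZ/10 */
            return false;
    }

    int main(void)
    {
            time_t start = time(NULL);
            unsigned int waited = 0;

            while (!devices_ready())        /* phase 1: short budget */
                    if (wait_loop(start, 30, &waited))
                            break;
            while (!devices_ready())        /* phase 2: rest of the budget */
                    if (wait_loop(start, 300, &waited))
                            break;
            return 0;
    }
]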
diff --git a/fs/aio.c b/fs/aio.c
index da887604dfc5..67a6db3e1b6f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -93,9 +93,8 @@ static void aio_free_ring(struct kioctx *ctx)
93 put_page(info->ring_pages[i]); 93 put_page(info->ring_pages[i]);
94 94
95 if (info->mmap_size) { 95 if (info->mmap_size) {
96 down_write(&ctx->mm->mmap_sem); 96 BUG_ON(ctx->mm != current->mm);
97 do_munmap(ctx->mm, info->mmap_base, info->mmap_size); 97 vm_munmap(info->mmap_base, info->mmap_size);
98 up_write(&ctx->mm->mmap_sem);
99 } 98 }
100 99
101 if (info->ring_pages && info->ring_pages != info->internal_pages) 100 if (info->ring_pages && info->ring_pages != info->internal_pages)
@@ -389,6 +388,17 @@ void exit_aio(struct mm_struct *mm)
389 "exit_aio:ioctx still alive: %d %d %d\n", 388 "exit_aio:ioctx still alive: %d %d %d\n",
390 atomic_read(&ctx->users), ctx->dead, 389 atomic_read(&ctx->users), ctx->dead,
391 ctx->reqs_active); 390 ctx->reqs_active);
391 /*
392 * We don't need to bother with munmap() here -
393 * exit_mmap(mm) is coming and it'll unmap everything.
394 * Since aio_free_ring() uses non-zero ->mmap_size
395 * as indicator that it needs to unmap the area,
396 * just set it to 0; aio_free_ring() is the only
397 * place that uses ->mmap_size, so it's safe.
398 * That way we get all munmap done to current->mm -
399 * all other callers have ctx->mm == current->mm.
400 */
401 ctx->ring_info.mmap_size = 0;
392 put_ioctx(ctx); 402 put_ioctx(ctx);
393 } 403 }
394} 404}
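
[Editor's note: two things happen in the aio hunk. aio_free_ring() switches from the open-coded down_write()/do_munmap()/up_write() sequence to vm_munmap(), which takes mmap_sem on current->mm itself (hence the new BUG_ON that the context's mm really is current's); and exit_aio() zeroes ring_info.mmap_size so aio_free_ring() skips the unmap entirely, because exit_mmap() is about to tear the whole address space down anyway. The helper's contract, as a sketch:

    /* int vm_munmap(unsigned long start, size_t len);
     *   - operates on current->mm only
     *   - acquires and releases mmap_sem internally, so the caller
     *     must NOT already hold it */
    if (info->mmap_size)            /* 0 means "nothing left to unmap" */
            vm_munmap(info->mmap_base, info->mmap_size);
]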
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index eb1cc92cd67d..908e18455413 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -110,7 +110,6 @@ struct autofs_sb_info {
110 int sub_version; 110 int sub_version;
111 int min_proto; 111 int min_proto;
112 int max_proto; 112 int max_proto;
113 int compat_daemon;
114 unsigned long exp_timeout; 113 unsigned long exp_timeout;
115 unsigned int type; 114 unsigned int type;
116 int reghost_enabled; 115 int reghost_enabled;
@@ -270,6 +269,17 @@ int autofs4_fill_super(struct super_block *, void *, int);
270struct autofs_info *autofs4_new_ino(struct autofs_sb_info *); 269struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
271void autofs4_clean_ino(struct autofs_info *); 270void autofs4_clean_ino(struct autofs_info *);
272 271
272static inline int autofs_prepare_pipe(struct file *pipe)
273{
274 if (!pipe->f_op || !pipe->f_op->write)
275 return -EINVAL;
276 if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
277 return -EINVAL;
278 /* We want a packet pipe */
279 pipe->f_flags |= O_DIRECT;
280 return 0;
281}
282
273/* Queue management functions */ 283/* Queue management functions */
274 284
275int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify); 285int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
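
[Editor's note: autofs_prepare_pipe() centralizes the old f_op->write check, insists the descriptor really is a FIFO, and sets O_DIRECT on it, turning it into a packetized pipe: every kernel write becomes one discrete packet, so a 32-bit daemon on a 64-bit kernel reads exactly one request per read() regardless of structure padding, which is why the later waitq.c hunk can drop the compat packet-size fix-up. A runnable userspace demonstration of packet-pipe semantics (requires Linux 3.4 or newer):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fds[2];
            char buf[8];
            ssize_t n;

            if (pipe2(fds, O_DIRECT) < 0) {         /* packet mode */
                    perror("pipe2(O_DIRECT)");
                    return 1;
            }
            if (write(fds[1], "first packet", 12) < 0 ||
                write(fds[1], "second", 6) < 0) {
                    perror("write");
                    return 1;
            }

            /* A short read consumes the *whole* first packet; bytes that
             * did not fit are discarded, and the next read starts cleanly
             * at "second". */
            n = read(fds[0], buf, sizeof(buf));
            printf("read %zd bytes: %.*s\n", n, (int)n, buf);
            n = read(fds[0], buf, sizeof(buf));
            printf("read %zd bytes: %.*s\n", n, (int)n, buf);
            return 0;
    }
]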
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 9dacb8586701..aa9103f8f01b 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
376 err = -EBADF; 376 err = -EBADF;
377 goto out; 377 goto out;
378 } 378 }
379 if (!pipe->f_op || !pipe->f_op->write) { 379 if (autofs_prepare_pipe(pipe) < 0) {
380 err = -EPIPE; 380 err = -EPIPE;
381 fput(pipe); 381 fput(pipe);
382 goto out; 382 goto out;
@@ -385,7 +385,6 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
385 sbi->pipefd = pipefd; 385 sbi->pipefd = pipefd;
386 sbi->pipe = pipe; 386 sbi->pipe = pipe;
387 sbi->catatonic = 0; 387 sbi->catatonic = 0;
388 sbi->compat_daemon = is_compat_task();
389 } 388 }
390out: 389out:
391 mutex_unlock(&sbi->wq_mutex); 390 mutex_unlock(&sbi->wq_mutex);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d8dc002e9cc3..6e488ebe7784 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -19,7 +19,6 @@
19#include <linux/parser.h> 19#include <linux/parser.h>
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/magic.h> 21#include <linux/magic.h>
22#include <linux/compat.h>
23#include "autofs_i.h" 22#include "autofs_i.h"
24#include <linux/module.h> 23#include <linux/module.h>
25 24
@@ -225,7 +224,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
225 set_autofs_type_indirect(&sbi->type); 224 set_autofs_type_indirect(&sbi->type);
226 sbi->min_proto = 0; 225 sbi->min_proto = 0;
227 sbi->max_proto = 0; 226 sbi->max_proto = 0;
228 sbi->compat_daemon = is_compat_task();
229 mutex_init(&sbi->wq_mutex); 227 mutex_init(&sbi->wq_mutex);
230 mutex_init(&sbi->pipe_mutex); 228 mutex_init(&sbi->pipe_mutex);
231 spin_lock_init(&sbi->fs_lock); 229 spin_lock_init(&sbi->fs_lock);
@@ -292,7 +290,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
292 printk("autofs: could not open pipe file descriptor\n"); 290 printk("autofs: could not open pipe file descriptor\n");
293 goto fail_dput; 291 goto fail_dput;
294 } 292 }
295 if (!pipe->f_op || !pipe->f_op->write) 293 if (autofs_prepare_pipe(pipe) < 0)
296 goto fail_fput; 294 goto fail_fput;
297 sbi->pipe = pipe; 295 sbi->pipe = pipe;
298 sbi->pipefd = pipefd; 296 sbi->pipefd = pipefd;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 9c098db43344..da8876d38a7b 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -91,24 +91,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
91 91
92 return (bytes > 0); 92 return (bytes > 0);
93} 93}
94 94
95/*
96 * The autofs_v5 packet was misdesigned.
97 *
98 * The packets are identical on x86-32 and x86-64, but have different
99 * alignment. Which means that 'sizeof()' will give different results.
100 * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
101 */
102static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
103{
104 size_t pktsz = sizeof(struct autofs_v5_packet);
105#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
106 if (sbi->compat_daemon > 0)
107 pktsz -= 4;
108#endif
109 return pktsz;
110}
111
112static void autofs4_notify_daemon(struct autofs_sb_info *sbi, 95static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
113 struct autofs_wait_queue *wq, 96 struct autofs_wait_queue *wq,
114 int type) 97 int type)
@@ -172,7 +155,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
172 { 155 {
173 struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; 156 struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
174 157
175 pktsz = autofs_v5_packet_size(sbi); 158 pktsz = sizeof(*packet);
159
176 packet->wait_queue_token = wq->wait_queue_token; 160 packet->wait_queue_token = wq->wait_queue_token;
177 packet->len = wq->name.len; 161 packet->len = wq->name.len;
178 memcpy(packet->name, wq->name.name, wq->name.len); 162 memcpy(packet->name, wq->name.name, wq->name.len);
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 2eb12f13593d..d146e181d10d 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -50,9 +50,7 @@ static int set_brk(unsigned long start, unsigned long end)
50 end = PAGE_ALIGN(end); 50 end = PAGE_ALIGN(end);
51 if (end > start) { 51 if (end > start) {
52 unsigned long addr; 52 unsigned long addr;
53 down_write(&current->mm->mmap_sem); 53 addr = vm_brk(start, end - start);
54 addr = do_brk(start, end - start);
55 up_write(&current->mm->mmap_sem);
56 if (BAD_ADDR(addr)) 54 if (BAD_ADDR(addr))
57 return addr; 55 return addr;
58 } 56 }
@@ -280,9 +278,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
280 pos = 32; 278 pos = 32;
281 map_size = ex.a_text+ex.a_data; 279 map_size = ex.a_text+ex.a_data;
282#endif 280#endif
283 down_write(&current->mm->mmap_sem); 281 error = vm_brk(text_addr & PAGE_MASK, map_size);
284 error = do_brk(text_addr & PAGE_MASK, map_size);
285 up_write(&current->mm->mmap_sem);
286 if (error != (text_addr & PAGE_MASK)) { 282 if (error != (text_addr & PAGE_MASK)) {
287 send_sig(SIGKILL, current, 0); 283 send_sig(SIGKILL, current, 0);
288 return error; 284 return error;
@@ -313,9 +309,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
313 309
314 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { 310 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
315 loff_t pos = fd_offset; 311 loff_t pos = fd_offset;
316 down_write(&current->mm->mmap_sem); 312 vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
317 do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
318 up_write(&current->mm->mmap_sem);
319 bprm->file->f_op->read(bprm->file, 313 bprm->file->f_op->read(bprm->file,
320 (char __user *)N_TXTADDR(ex), 314 (char __user *)N_TXTADDR(ex),
321 ex.a_text+ex.a_data, &pos); 315 ex.a_text+ex.a_data, &pos);
@@ -325,24 +319,20 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
325 goto beyond_if; 319 goto beyond_if;
326 } 320 }
327 321
328 down_write(&current->mm->mmap_sem); 322 error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
329 error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
330 PROT_READ | PROT_EXEC, 323 PROT_READ | PROT_EXEC,
331 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, 324 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
332 fd_offset); 325 fd_offset);
333 up_write(&current->mm->mmap_sem);
334 326
335 if (error != N_TXTADDR(ex)) { 327 if (error != N_TXTADDR(ex)) {
336 send_sig(SIGKILL, current, 0); 328 send_sig(SIGKILL, current, 0);
337 return error; 329 return error;
338 } 330 }
339 331
340 down_write(&current->mm->mmap_sem); 332 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
341 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
342 PROT_READ | PROT_WRITE | PROT_EXEC, 333 PROT_READ | PROT_WRITE | PROT_EXEC,
343 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, 334 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
344 fd_offset + ex.a_text); 335 fd_offset + ex.a_text);
345 up_write(&current->mm->mmap_sem);
346 if (error != N_DATADDR(ex)) { 336 if (error != N_DATADDR(ex)) {
347 send_sig(SIGKILL, current, 0); 337 send_sig(SIGKILL, current, 0);
348 return error; 338 return error;
@@ -412,9 +402,7 @@ static int load_aout_library(struct file *file)
412 "N_TXTOFF is not page aligned. Please convert library: %s\n", 402 "N_TXTOFF is not page aligned. Please convert library: %s\n",
413 file->f_path.dentry->d_name.name); 403 file->f_path.dentry->d_name.name);
414 } 404 }
415 down_write(&current->mm->mmap_sem); 405 vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
416 do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
417 up_write(&current->mm->mmap_sem);
418 406
419 file->f_op->read(file, (char __user *)start_addr, 407 file->f_op->read(file, (char __user *)start_addr,
420 ex.a_text + ex.a_data, &pos); 408 ex.a_text + ex.a_data, &pos);
@@ -425,12 +413,10 @@ static int load_aout_library(struct file *file)
425 goto out; 413 goto out;
426 } 414 }
427 /* Now use mmap to map the library into memory. */ 415 /* Now use mmap to map the library into memory. */
428 down_write(&current->mm->mmap_sem); 416 error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
429 error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
430 PROT_READ | PROT_WRITE | PROT_EXEC, 417 PROT_READ | PROT_WRITE | PROT_EXEC,
431 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, 418 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
432 N_TXTOFF(ex)); 419 N_TXTOFF(ex));
433 up_write(&current->mm->mmap_sem);
434 retval = error; 420 retval = error;
435 if (error != start_addr) 421 if (error != start_addr)
436 goto out; 422 goto out;
@@ -438,9 +424,7 @@ static int load_aout_library(struct file *file)
438 len = PAGE_ALIGN(ex.a_text + ex.a_data); 424 len = PAGE_ALIGN(ex.a_text + ex.a_data);
439 bss = ex.a_text + ex.a_data + ex.a_bss; 425 bss = ex.a_text + ex.a_data + ex.a_bss;
440 if (bss > len) { 426 if (bss > len) {
441 down_write(&current->mm->mmap_sem); 427 error = vm_brk(start_addr + len, bss - len);
442 error = do_brk(start_addr + len, bss - len);
443 up_write(&current->mm->mmap_sem);
444 retval = error; 428 retval = error;
445 if (error != start_addr + len) 429 if (error != start_addr + len)
446 goto out; 430 goto out;
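
[Editor's note: this binfmt_aout conversion, and the binfmt_elf, elf_fdpic, flat and som hunks that follow, replace every down_write(&mm->mmap_sem); do_mmap()/do_brk(); up_write() triplet with the new vm_mmap()/vm_brk() helpers, which take the semaphore themselves and always act on current->mm. Approximate prototypes and a usage sketch; see <linux/mm.h> for the real declarations, and addr/len/offset stand for values the loader already computed:

    /* unsigned long vm_mmap(struct file *file, unsigned long addr,
     *                       unsigned long len, unsigned long prot,
     *                       unsigned long flags, unsigned long offset);
     * unsigned long vm_brk(unsigned long addr, unsigned long len);
     */
    unsigned long map;

    map = vm_mmap(file, addr, len, PROT_READ | PROT_EXEC,
                  MAP_FIXED | MAP_PRIVATE, offset);
    if (IS_ERR_VALUE(map))          /* no mmap_sem juggling, no unlock */
            return (int)map;
]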
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 48ffb3dc610a..16f735417072 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -82,9 +82,7 @@ static int set_brk(unsigned long start, unsigned long end)
82 end = ELF_PAGEALIGN(end); 82 end = ELF_PAGEALIGN(end);
83 if (end > start) { 83 if (end > start) {
84 unsigned long addr; 84 unsigned long addr;
85 down_write(&current->mm->mmap_sem); 85 addr = vm_brk(start, end - start);
86 addr = do_brk(start, end - start);
87 up_write(&current->mm->mmap_sem);
88 if (BAD_ADDR(addr)) 86 if (BAD_ADDR(addr))
89 return addr; 87 return addr;
90 } 88 }
@@ -514,9 +512,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
514 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); 512 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
515 513
516 /* Map the last of the bss segment */ 514 /* Map the last of the bss segment */
517 down_write(&current->mm->mmap_sem); 515 error = vm_brk(elf_bss, last_bss - elf_bss);
518 error = do_brk(elf_bss, last_bss - elf_bss);
519 up_write(&current->mm->mmap_sem);
520 if (BAD_ADDR(error)) 516 if (BAD_ADDR(error))
521 goto out_close; 517 goto out_close;
522 } 518 }
@@ -962,10 +958,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
962 and some applications "depend" upon this behavior. 958 and some applications "depend" upon this behavior.
963 Since we do not have the power to recompile these, we 959 Since we do not have the power to recompile these, we
964 emulate the SVr4 behavior. Sigh. */ 960 emulate the SVr4 behavior. Sigh. */
965 down_write(&current->mm->mmap_sem); 961 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
966 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
967 MAP_FIXED | MAP_PRIVATE, 0); 962 MAP_FIXED | MAP_PRIVATE, 0);
968 up_write(&current->mm->mmap_sem);
969 } 963 }
970 964
971#ifdef ELF_PLAT_INIT 965#ifdef ELF_PLAT_INIT
@@ -1050,8 +1044,7 @@ static int load_elf_library(struct file *file)
1050 eppnt++; 1044 eppnt++;
1051 1045
1052 /* Now use mmap to map the library into memory. */ 1046 /* Now use mmap to map the library into memory. */
1053 down_write(&current->mm->mmap_sem); 1047 error = vm_mmap(file,
1054 error = do_mmap(file,
1055 ELF_PAGESTART(eppnt->p_vaddr), 1048 ELF_PAGESTART(eppnt->p_vaddr),
1056 (eppnt->p_filesz + 1049 (eppnt->p_filesz +
1057 ELF_PAGEOFFSET(eppnt->p_vaddr)), 1050 ELF_PAGEOFFSET(eppnt->p_vaddr)),
@@ -1059,7 +1052,6 @@ static int load_elf_library(struct file *file)
1059 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, 1052 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1060 (eppnt->p_offset - 1053 (eppnt->p_offset -
1061 ELF_PAGEOFFSET(eppnt->p_vaddr))); 1054 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1062 up_write(&current->mm->mmap_sem);
1063 if (error != ELF_PAGESTART(eppnt->p_vaddr)) 1055 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1064 goto out_free_ph; 1056 goto out_free_ph;
1065 1057
@@ -1072,11 +1064,8 @@ static int load_elf_library(struct file *file)
1072 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + 1064 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1073 ELF_MIN_ALIGN - 1); 1065 ELF_MIN_ALIGN - 1);
1074 bss = eppnt->p_memsz + eppnt->p_vaddr; 1066 bss = eppnt->p_memsz + eppnt->p_vaddr;
1075 if (bss > len) { 1067 if (bss > len)
1076 down_write(&current->mm->mmap_sem); 1068 vm_brk(len, bss - len);
1077 do_brk(len, bss - len);
1078 up_write(&current->mm->mmap_sem);
1079 }
1080 error = 0; 1069 error = 0;
1081 1070
1082out_free_ph: 1071out_free_ph:
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 9bd5612a8224..d390a0fffc65 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -390,21 +390,17 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
390 (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC)) 390 (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
391 stack_prot |= PROT_EXEC; 391 stack_prot |= PROT_EXEC;
392 392
393 down_write(&current->mm->mmap_sem); 393 current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot,
394 current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
395 MAP_PRIVATE | MAP_ANONYMOUS | 394 MAP_PRIVATE | MAP_ANONYMOUS |
396 MAP_UNINITIALIZED | MAP_GROWSDOWN, 395 MAP_UNINITIALIZED | MAP_GROWSDOWN,
397 0); 396 0);
398 397
399 if (IS_ERR_VALUE(current->mm->start_brk)) { 398 if (IS_ERR_VALUE(current->mm->start_brk)) {
400 up_write(&current->mm->mmap_sem);
401 retval = current->mm->start_brk; 399 retval = current->mm->start_brk;
402 current->mm->start_brk = 0; 400 current->mm->start_brk = 0;
403 goto error_kill; 401 goto error_kill;
404 } 402 }
405 403
406 up_write(&current->mm->mmap_sem);
407
408 current->mm->brk = current->mm->start_brk; 404 current->mm->brk = current->mm->start_brk;
409 current->mm->context.end_brk = current->mm->start_brk; 405 current->mm->context.end_brk = current->mm->start_brk;
410 current->mm->context.end_brk += 406 current->mm->context.end_brk +=
@@ -955,10 +951,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
955 if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE) 951 if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
956 mflags |= MAP_EXECUTABLE; 952 mflags |= MAP_EXECUTABLE;
957 953
958 down_write(&mm->mmap_sem); 954 maddr = vm_mmap(NULL, load_addr, top - base,
959 maddr = do_mmap(NULL, load_addr, top - base,
960 PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0); 955 PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
961 up_write(&mm->mmap_sem);
962 if (IS_ERR_VALUE(maddr)) 956 if (IS_ERR_VALUE(maddr))
963 return (int) maddr; 957 return (int) maddr;
964 958
@@ -1096,10 +1090,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
1096 1090
1097 /* create the mapping */ 1091 /* create the mapping */
1098 disp = phdr->p_vaddr & ~PAGE_MASK; 1092 disp = phdr->p_vaddr & ~PAGE_MASK;
1099 down_write(&mm->mmap_sem); 1093 maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
1100 maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
1101 phdr->p_offset - disp); 1094 phdr->p_offset - disp);
1102 up_write(&mm->mmap_sem);
1103 1095
1104 kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx", 1096 kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
1105 loop, phdr->p_memsz + disp, prot, flags, 1097 loop, phdr->p_memsz + disp, prot, flags,
@@ -1143,10 +1135,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
1143 unsigned long xmaddr; 1135 unsigned long xmaddr;
1144 1136
1145 flags |= MAP_FIXED | MAP_ANONYMOUS; 1137 flags |= MAP_FIXED | MAP_ANONYMOUS;
1146 down_write(&mm->mmap_sem); 1138 xmaddr = vm_mmap(NULL, xaddr, excess - excess1,
1147 xmaddr = do_mmap(NULL, xaddr, excess - excess1,
1148 prot, flags, 0); 1139 prot, flags, 0);
1149 up_write(&mm->mmap_sem);
1150 1140
1151 kdebug("mmap[%d] <anon>" 1141 kdebug("mmap[%d] <anon>"
1152 " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx", 1142 " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 024d20ee3ca3..6b2daf99fab8 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -542,10 +542,8 @@ static int load_flat_file(struct linux_binprm * bprm,
542 */ 542 */
543 DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); 543 DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
544 544
545 down_write(&current->mm->mmap_sem); 545 textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
546 textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
547 MAP_PRIVATE|MAP_EXECUTABLE, 0); 546 MAP_PRIVATE|MAP_EXECUTABLE, 0);
548 up_write(&current->mm->mmap_sem);
549 if (!textpos || IS_ERR_VALUE(textpos)) { 547 if (!textpos || IS_ERR_VALUE(textpos)) {
550 if (!textpos) 548 if (!textpos)
551 textpos = (unsigned long) -ENOMEM; 549 textpos = (unsigned long) -ENOMEM;
@@ -556,10 +554,8 @@ static int load_flat_file(struct linux_binprm * bprm,
556 554
557 len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); 555 len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
558 len = PAGE_ALIGN(len); 556 len = PAGE_ALIGN(len);
559 down_write(&current->mm->mmap_sem); 557 realdatastart = vm_mmap(0, 0, len,
560 realdatastart = do_mmap(0, 0, len,
561 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); 558 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
562 up_write(&current->mm->mmap_sem);
563 559
564 if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) { 560 if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
565 if (!realdatastart) 561 if (!realdatastart)
@@ -603,10 +599,8 @@ static int load_flat_file(struct linux_binprm * bprm,
603 599
604 len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); 600 len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
605 len = PAGE_ALIGN(len); 601 len = PAGE_ALIGN(len);
606 down_write(&current->mm->mmap_sem); 602 textpos = vm_mmap(0, 0, len,
607 textpos = do_mmap(0, 0, len,
608 PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); 603 PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
609 up_write(&current->mm->mmap_sem);
610 604
611 if (!textpos || IS_ERR_VALUE(textpos)) { 605 if (!textpos || IS_ERR_VALUE(textpos)) {
612 if (!textpos) 606 if (!textpos)
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index e4fc746629a7..4517aaff61b4 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -147,10 +147,8 @@ static int map_som_binary(struct file *file,
147 code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize); 147 code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize);
148 current->mm->start_code = code_start; 148 current->mm->start_code = code_start;
149 current->mm->end_code = code_start + code_size; 149 current->mm->end_code = code_start + code_size;
150 down_write(&current->mm->mmap_sem); 150 retval = vm_mmap(file, code_start, code_size, prot,
151 retval = do_mmap(file, code_start, code_size, prot,
152 flags, SOM_PAGESTART(hpuxhdr->exec_tfile)); 151 flags, SOM_PAGESTART(hpuxhdr->exec_tfile));
153 up_write(&current->mm->mmap_sem);
154 if (retval < 0 && retval > -1024) 152 if (retval < 0 && retval > -1024)
155 goto out; 153 goto out;
156 154
@@ -158,20 +156,16 @@ static int map_som_binary(struct file *file,
158 data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize); 156 data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize);
159 current->mm->start_data = data_start; 157 current->mm->start_data = data_start;
160 current->mm->end_data = bss_start = data_start + data_size; 158 current->mm->end_data = bss_start = data_start + data_size;
161 down_write(&current->mm->mmap_sem); 159 retval = vm_mmap(file, data_start, data_size,
162 retval = do_mmap(file, data_start, data_size,
163 prot | PROT_WRITE, flags, 160 prot | PROT_WRITE, flags,
164 SOM_PAGESTART(hpuxhdr->exec_dfile)); 161 SOM_PAGESTART(hpuxhdr->exec_dfile));
165 up_write(&current->mm->mmap_sem);
166 if (retval < 0 && retval > -1024) 162 if (retval < 0 && retval > -1024)
167 goto out; 163 goto out;
168 164
169 som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize); 165 som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize);
170 current->mm->start_brk = current->mm->brk = som_brk; 166 current->mm->start_brk = current->mm->brk = som_brk;
171 down_write(&current->mm->mmap_sem); 167 retval = vm_mmap(NULL, bss_start, som_brk - bss_start,
172 retval = do_mmap(NULL, bss_start, som_brk - bss_start,
173 prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0); 168 prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
174 up_write(&current->mm->mmap_sem);
175 if (retval > 0 || retval < -1024) 169 if (retval > 0 || retval < -1024)
176 retval = 0; 170 retval = 0;
177out: 171out:
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f4e90748940a..bcec06750232 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -22,6 +22,7 @@
22#include "ulist.h" 22#include "ulist.h"
23#include "transaction.h" 23#include "transaction.h"
24#include "delayed-ref.h" 24#include "delayed-ref.h"
25#include "locking.h"
25 26
26/* 27/*
27 * this structure records all encountered refs on the way up to the root 28 * this structure records all encountered refs on the way up to the root
@@ -893,18 +894,22 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
893 s64 bytes_left = size - 1; 894 s64 bytes_left = size - 1;
894 struct extent_buffer *eb = eb_in; 895 struct extent_buffer *eb = eb_in;
895 struct btrfs_key found_key; 896 struct btrfs_key found_key;
897 int leave_spinning = path->leave_spinning;
896 898
897 if (bytes_left >= 0) 899 if (bytes_left >= 0)
898 dest[bytes_left] = '\0'; 900 dest[bytes_left] = '\0';
899 901
902 path->leave_spinning = 1;
900 while (1) { 903 while (1) {
901 len = btrfs_inode_ref_name_len(eb, iref); 904 len = btrfs_inode_ref_name_len(eb, iref);
902 bytes_left -= len; 905 bytes_left -= len;
903 if (bytes_left >= 0) 906 if (bytes_left >= 0)
904 read_extent_buffer(eb, dest + bytes_left, 907 read_extent_buffer(eb, dest + bytes_left,
905 (unsigned long)(iref + 1), len); 908 (unsigned long)(iref + 1), len);
906 if (eb != eb_in) 909 if (eb != eb_in) {
910 btrfs_tree_read_unlock_blocking(eb);
907 free_extent_buffer(eb); 911 free_extent_buffer(eb);
912 }
908 ret = inode_ref_info(parent, 0, fs_root, path, &found_key); 913 ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
909 if (ret > 0) 914 if (ret > 0)
910 ret = -ENOENT; 915 ret = -ENOENT;
@@ -919,8 +924,11 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
919 slot = path->slots[0]; 924 slot = path->slots[0];
920 eb = path->nodes[0]; 925 eb = path->nodes[0];
921 /* make sure we can use eb after releasing the path */ 926 /* make sure we can use eb after releasing the path */
922 if (eb != eb_in) 927 if (eb != eb_in) {
923 atomic_inc(&eb->refs); 928 atomic_inc(&eb->refs);
929 btrfs_tree_read_lock(eb);
930 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
931 }
924 btrfs_release_path(path); 932 btrfs_release_path(path);
925 933
926 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); 934 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -931,6 +939,7 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
931 } 939 }
932 940
933 btrfs_release_path(path); 941 btrfs_release_path(path);
942 path->leave_spinning = leave_spinning;
934 943
935 if (ret) 944 if (ret)
936 return ERR_PTR(ret); 945 return ERR_PTR(ret);
@@ -1247,7 +1256,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1247 struct btrfs_path *path, 1256 struct btrfs_path *path,
1248 iterate_irefs_t *iterate, void *ctx) 1257 iterate_irefs_t *iterate, void *ctx)
1249{ 1258{
1250 int ret; 1259 int ret = 0;
1251 int slot; 1260 int slot;
1252 u32 cur; 1261 u32 cur;
1253 u32 len; 1262 u32 len;
@@ -1259,7 +1268,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1259 struct btrfs_inode_ref *iref; 1268 struct btrfs_inode_ref *iref;
1260 struct btrfs_key found_key; 1269 struct btrfs_key found_key;
1261 1270
1262 while (1) { 1271 while (!ret) {
1272 path->leave_spinning = 1;
1263 ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, 1273 ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
1264 &found_key); 1274 &found_key);
1265 if (ret < 0) 1275 if (ret < 0)
@@ -1275,6 +1285,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1275 eb = path->nodes[0]; 1285 eb = path->nodes[0];
1276 /* make sure we can use eb after releasing the path */ 1286 /* make sure we can use eb after releasing the path */
1277 atomic_inc(&eb->refs); 1287 atomic_inc(&eb->refs);
1288 btrfs_tree_read_lock(eb);
1289 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1278 btrfs_release_path(path); 1290 btrfs_release_path(path);
1279 1291
1280 item = btrfs_item_nr(eb, slot); 1292 item = btrfs_item_nr(eb, slot);
@@ -1288,13 +1300,12 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1288 (unsigned long long)found_key.objectid, 1300 (unsigned long long)found_key.objectid,
1289 (unsigned long long)fs_root->objectid); 1301 (unsigned long long)fs_root->objectid);
1290 ret = iterate(parent, iref, eb, ctx); 1302 ret = iterate(parent, iref, eb, ctx);
1291 if (ret) { 1303 if (ret)
1292 free_extent_buffer(eb);
1293 break; 1304 break;
1294 }
1295 len = sizeof(*iref) + name_len; 1305 len = sizeof(*iref) + name_len;
1296 iref = (struct btrfs_inode_ref *)((char *)iref + len); 1306 iref = (struct btrfs_inode_ref *)((char *)iref + len);
1297 } 1307 }
1308 btrfs_tree_read_unlock_blocking(eb);
1298 free_extent_buffer(eb); 1309 free_extent_buffer(eb);
1299 } 1310 }
1300 1311
@@ -1414,6 +1425,8 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1414 1425
1415void free_ipath(struct inode_fs_paths *ipath) 1426void free_ipath(struct inode_fs_paths *ipath)
1416{ 1427{
1428 if (!ipath)
1429 return;
1417 kfree(ipath->fspath); 1430 kfree(ipath->fspath);
1418 kfree(ipath); 1431 kfree(ipath);
1419} 1432}
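
A note on the last hunk above: besides the read-locking changes, free_ipath() now follows the same convention as kfree(), i.e. a destructor that tolerates NULL so error paths can call it unconditionally. A minimal userspace sketch of that convention; struct ipath_like and free_ipath_like() are invented names, not the btrfs code:

#include <stdlib.h>

struct ipath_like {
        char *fspath;
};

/* Destructor that is safe to call with NULL, mirroring kfree() semantics. */
static void free_ipath_like(struct ipath_like *ip)
{
        if (!ip)
                return;         /* callers may pass NULL on early error paths */
        free(ip->fspath);       /* free(NULL) is a no-op, like kfree(NULL) */
        free(ip);
}

int main(void)
{
        free_ipath_like(NULL);  /* must not crash */
        return 0;
}
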
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index e801f226d7e0..4106264fbc65 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -220,10 +220,12 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
220 */ 220 */
221static void add_root_to_dirty_list(struct btrfs_root *root) 221static void add_root_to_dirty_list(struct btrfs_root *root)
222{ 222{
223 spin_lock(&root->fs_info->trans_lock);
223 if (root->track_dirty && list_empty(&root->dirty_list)) { 224 if (root->track_dirty && list_empty(&root->dirty_list)) {
224 list_add(&root->dirty_list, 225 list_add(&root->dirty_list,
225 &root->fs_info->dirty_cowonly_roots); 226 &root->fs_info->dirty_cowonly_roots);
226 } 227 }
228 spin_unlock(&root->fs_info->trans_lock);
227} 229}
228 230
229/* 231/*
@@ -723,7 +725,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
723 725
724 cur = btrfs_find_tree_block(root, blocknr, blocksize); 726 cur = btrfs_find_tree_block(root, blocknr, blocksize);
725 if (cur) 727 if (cur)
726 uptodate = btrfs_buffer_uptodate(cur, gen); 728 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
727 else 729 else
728 uptodate = 0; 730 uptodate = 0;
729 if (!cur || !uptodate) { 731 if (!cur || !uptodate) {
@@ -1358,7 +1360,12 @@ static noinline int reada_for_balance(struct btrfs_root *root,
1358 block1 = btrfs_node_blockptr(parent, slot - 1); 1360 block1 = btrfs_node_blockptr(parent, slot - 1);
1359 gen = btrfs_node_ptr_generation(parent, slot - 1); 1361 gen = btrfs_node_ptr_generation(parent, slot - 1);
1360 eb = btrfs_find_tree_block(root, block1, blocksize); 1362 eb = btrfs_find_tree_block(root, block1, blocksize);
1361 if (eb && btrfs_buffer_uptodate(eb, gen)) 1363 /*
1364 * if we get -eagain from btrfs_buffer_uptodate, we
1365 * don't want to return eagain here. That will loop
1366 * forever
1367 */
1368 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
1362 block1 = 0; 1369 block1 = 0;
1363 free_extent_buffer(eb); 1370 free_extent_buffer(eb);
1364 } 1371 }
@@ -1366,7 +1373,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
1366 block2 = btrfs_node_blockptr(parent, slot + 1); 1373 block2 = btrfs_node_blockptr(parent, slot + 1);
1367 gen = btrfs_node_ptr_generation(parent, slot + 1); 1374 gen = btrfs_node_ptr_generation(parent, slot + 1);
1368 eb = btrfs_find_tree_block(root, block2, blocksize); 1375 eb = btrfs_find_tree_block(root, block2, blocksize);
1369 if (eb && btrfs_buffer_uptodate(eb, gen)) 1376 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
1370 block2 = 0; 1377 block2 = 0;
1371 free_extent_buffer(eb); 1378 free_extent_buffer(eb);
1372 } 1379 }
@@ -1504,8 +1511,9 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1504 1511
1505 tmp = btrfs_find_tree_block(root, blocknr, blocksize); 1512 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1506 if (tmp) { 1513 if (tmp) {
1507 if (btrfs_buffer_uptodate(tmp, 0)) { 1514 /* first we do an atomic uptodate check */
1508 if (btrfs_buffer_uptodate(tmp, gen)) { 1515 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
1516 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1509 /* 1517 /*
1510 * we found an up to date block without 1518 * we found an up to date block without
1511 * sleeping, return 1519 * sleeping, return
@@ -1523,8 +1531,9 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1523 free_extent_buffer(tmp); 1531 free_extent_buffer(tmp);
1524 btrfs_set_path_blocking(p); 1532 btrfs_set_path_blocking(p);
1525 1533
1534 /* now we're allowed to do a blocking uptodate check */
1526 tmp = read_tree_block(root, blocknr, blocksize, gen); 1535 tmp = read_tree_block(root, blocknr, blocksize, gen);
1527 if (tmp && btrfs_buffer_uptodate(tmp, gen)) { 1536 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
1528 *eb_ret = tmp; 1537 *eb_ret = tmp;
1529 return 0; 1538 return 0;
1530 } 1539 }
@@ -1559,7 +1568,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1559 * and give up so that our caller doesn't loop forever 1568 * and give up so that our caller doesn't loop forever
1560 * on our EAGAINs. 1569 * on our EAGAINs.
1561 */ 1570 */
1562 if (!btrfs_buffer_uptodate(tmp, 0)) 1571 if (!btrfs_buffer_uptodate(tmp, 0, 0))
1563 ret = -EIO; 1572 ret = -EIO;
1564 free_extent_buffer(tmp); 1573 free_extent_buffer(tmp);
1565 } 1574 }
@@ -4043,7 +4052,7 @@ again:
4043 tmp = btrfs_find_tree_block(root, blockptr, 4052 tmp = btrfs_find_tree_block(root, blockptr,
4044 btrfs_level_size(root, level - 1)); 4053 btrfs_level_size(root, level - 1));
4045 4054
4046 if (tmp && btrfs_buffer_uptodate(tmp, gen)) { 4055 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4047 free_extent_buffer(tmp); 4056 free_extent_buffer(tmp);
4048 break; 4057 break;
4049 } 4058 }
@@ -4166,7 +4175,8 @@ next:
4166 struct extent_buffer *cur; 4175 struct extent_buffer *cur;
4167 cur = btrfs_find_tree_block(root, blockptr, 4176 cur = btrfs_find_tree_block(root, blockptr,
4168 btrfs_level_size(root, level - 1)); 4177 btrfs_level_size(root, level - 1));
4169 if (!cur || !btrfs_buffer_uptodate(cur, gen)) { 4178 if (!cur ||
4179 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
4170 slot++; 4180 slot++;
4171 if (cur) 4181 if (cur)
4172 free_extent_buffer(cur); 4182 free_extent_buffer(cur);
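
Apart from serializing add_root_to_dirty_list() under trans_lock, the ctree.c hunks above thread an extra 'atomic' argument through the up-to-date checks: callers that must not sleep get a quick answer or -EAGAIN, and only blocking callers fall back to the full parent-transid verification. A hedged userspace sketch of that two-phase shape; cached_block, block_uptodate_atomic() and block_uptodate_blocking() are invented stand-ins, not the btrfs helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Pretend cache entry; 'generation' plays the role of parent_transid. */
struct cached_block {
        unsigned long generation;
        bool uptodate;
};

/* Non-sleeping check: may refuse to answer by returning -EAGAIN. */
static int block_uptodate_atomic(const struct cached_block *b, unsigned long gen)
{
        if (!b->uptodate)
                return 0;
        if (b->generation != gen)
                return -EAGAIN;         /* verifying this would mean sleeping */
        return 1;
}

/* Blocking check: always gives a definite yes/no answer. */
static int block_uptodate_blocking(const struct cached_block *b, unsigned long gen)
{
        /* a real implementation could wait for I/O here */
        return b->uptodate && b->generation == gen;
}

static int use_block(const struct cached_block *b, unsigned long gen)
{
        if (block_uptodate_atomic(b, gen) > 0)
                return 0;                       /* fast path, no sleeping */
        if (block_uptodate_blocking(b, gen))
                return 0;                       /* slow path confirmed it */
        return -EIO;
}

int main(void)
{
        struct cached_block b = { .generation = 7, .uptodate = true };

        printf("%d\n", use_block(&b, 7));       /* 0: fast path suffices */
        printf("%d\n", use_block(&b, 8));       /* -EIO after the blocking check */
        return 0;
}
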
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3f65a812e282..8fd72331d600 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1078,7 +1078,7 @@ struct btrfs_fs_info {
1078 * is required instead of the faster short fsync log commits 1078 * is required instead of the faster short fsync log commits
1079 */ 1079 */
1080 u64 last_trans_log_full_commit; 1080 u64 last_trans_log_full_commit;
1081 unsigned long mount_opt:21; 1081 unsigned long mount_opt;
1082 unsigned long compress_type:4; 1082 unsigned long compress_type:4;
1083 u64 max_inline; 1083 u64 max_inline;
1084 u64 alloc_start; 1084 u64 alloc_start;
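
The ctree.h hunk drops the 21-bit width on mount_opt so the option mask occupies a full unsigned long. The diff itself does not state the motivation; a plain word leaves room for more option bits and, unlike a C bitfield, has an address that word-based bit helpers can take, so treat the sketch below as illustration only (the bit numbers and the opt_set()/opt_test() helpers are invented):

#include <stdio.h>

struct opts {
        unsigned long mount_opt;        /* full word: its address can be taken */
};

#define OPT_NODATACOW   0               /* bit numbers are illustrative only */
#define OPT_COMPRESS    1

static void opt_set(unsigned long *word, int bit)
{
        *word |= 1UL << bit;            /* a bitfield could not be passed here */
}

static int opt_test(const unsigned long *word, int bit)
{
        return (*word >> bit) & 1UL;
}

int main(void)
{
        struct opts o = { 0 };

        opt_set(&o.mount_opt, OPT_COMPRESS);
        printf("compress=%d nodatacow=%d\n",
               opt_test(&o.mount_opt, OPT_COMPRESS),
               opt_test(&o.mount_opt, OPT_NODATACOW));
        return 0;
}
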
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 20196f411206..a7ffc88a7dbe 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -323,7 +323,8 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
323 * in the wrong place. 323 * in the wrong place.
324 */ 324 */
325static int verify_parent_transid(struct extent_io_tree *io_tree, 325static int verify_parent_transid(struct extent_io_tree *io_tree,
326 struct extent_buffer *eb, u64 parent_transid) 326 struct extent_buffer *eb, u64 parent_transid,
327 int atomic)
327{ 328{
328 struct extent_state *cached_state = NULL; 329 struct extent_state *cached_state = NULL;
329 int ret; 330 int ret;
@@ -331,6 +332,9 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
331 if (!parent_transid || btrfs_header_generation(eb) == parent_transid) 332 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
332 return 0; 333 return 0;
333 334
335 if (atomic)
336 return -EAGAIN;
337
334 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 338 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335 0, &cached_state); 339 0, &cached_state);
336 if (extent_buffer_uptodate(eb) && 340 if (extent_buffer_uptodate(eb) &&
@@ -372,7 +376,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
372 ret = read_extent_buffer_pages(io_tree, eb, start, 376 ret = read_extent_buffer_pages(io_tree, eb, start,
373 WAIT_COMPLETE, 377 WAIT_COMPLETE,
374 btree_get_extent, mirror_num); 378 btree_get_extent, mirror_num);
375 if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) 379 if (!ret && !verify_parent_transid(io_tree, eb,
380 parent_transid, 0))
376 break; 381 break;
377 382
378 /* 383 /*
@@ -383,17 +388,16 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
383 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) 388 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
384 break; 389 break;
385 390
386 if (!failed_mirror) {
387 failed = 1;
388 printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
389 failed_mirror = eb->failed_mirror;
390 }
391
392 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, 391 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
393 eb->start, eb->len); 392 eb->start, eb->len);
394 if (num_copies == 1) 393 if (num_copies == 1)
395 break; 394 break;
396 395
396 if (!failed_mirror) {
397 failed = 1;
398 failed_mirror = eb->read_mirror;
399 }
400
397 mirror_num++; 401 mirror_num++;
398 if (mirror_num == failed_mirror) 402 if (mirror_num == failed_mirror)
399 mirror_num++; 403 mirror_num++;
@@ -564,7 +568,7 @@ struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
564} 568}
565 569
566static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, 570static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
567 struct extent_state *state) 571 struct extent_state *state, int mirror)
568{ 572{
569 struct extent_io_tree *tree; 573 struct extent_io_tree *tree;
570 u64 found_start; 574 u64 found_start;
@@ -589,6 +593,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
589 if (!reads_done) 593 if (!reads_done)
590 goto err; 594 goto err;
591 595
596 eb->read_mirror = mirror;
592 if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { 597 if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
593 ret = -EIO; 598 ret = -EIO;
594 goto err; 599 goto err;
@@ -652,7 +657,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
652 657
653 eb = (struct extent_buffer *)page->private; 658 eb = (struct extent_buffer *)page->private;
654 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 659 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
655 eb->failed_mirror = failed_mirror; 660 eb->read_mirror = failed_mirror;
656 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) 661 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
657 btree_readahead_hook(root, eb, eb->start, -EIO); 662 btree_readahead_hook(root, eb, eb->start, -EIO);
658 return -EIO; /* we fixed nothing */ 663 return -EIO; /* we fixed nothing */
@@ -1202,7 +1207,7 @@ static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1202 root->commit_root = NULL; 1207 root->commit_root = NULL;
1203 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), 1208 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1204 blocksize, generation); 1209 blocksize, generation);
1205 if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) { 1210 if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1206 free_extent_buffer(root->node); 1211 free_extent_buffer(root->node);
1207 root->node = NULL; 1212 root->node = NULL;
1208 return -EIO; 1213 return -EIO;
@@ -2254,9 +2259,9 @@ int open_ctree(struct super_block *sb,
2254 goto fail_sb_buffer; 2259 goto fail_sb_buffer;
2255 } 2260 }
2256 2261
2257 if (sectorsize < PAGE_SIZE) { 2262 if (sectorsize != PAGE_SIZE) {
2258 printk(KERN_WARNING "btrfs: Incompatible sector size " 2263 printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
2259 "found on %s\n", sb->s_id); 2264 "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2260 goto fail_sb_buffer; 2265 goto fail_sb_buffer;
2261 } 2266 }
2262 2267
@@ -3143,7 +3148,8 @@ int close_ctree(struct btrfs_root *root)
3143 return 0; 3148 return 0;
3144} 3149}
3145 3150
3146int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) 3151int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3152 int atomic)
3147{ 3153{
3148 int ret; 3154 int ret;
3149 struct inode *btree_inode = buf->pages[0]->mapping->host; 3155 struct inode *btree_inode = buf->pages[0]->mapping->host;
@@ -3153,7 +3159,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
3153 return ret; 3159 return ret;
3154 3160
3155 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, 3161 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3156 parent_transid); 3162 parent_transid, atomic);
3163 if (ret == -EAGAIN)
3164 return ret;
3157 return !ret; 3165 return !ret;
3158} 3166}
3159 3167
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index a7ace1a2dd12..ab1830aaf0ed 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -66,7 +66,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
67void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); 67void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
68void btrfs_mark_buffer_dirty(struct extent_buffer *buf); 68void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); 69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
70 int atomic);
70int btrfs_set_buffer_uptodate(struct extent_buffer *buf); 71int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
71int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); 72int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
72u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); 73u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
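
btrfs_buffer_uptodate() above keeps its old boolean-style contract (nonzero means up to date) while letting the new -EAGAIN from verify_parent_transid() pass through, so non-sleeping callers can tell "don't know yet" apart from "stale". A small sketch of that return-value shaping; verify_generation() and buffer_uptodate() below are invented names, not the kernel functions:

#include <errno.h>
#include <stdio.h>

/* Returns 0 on success, -EAGAIN if answering would require sleeping,
 * or another negative errno on hard failure. */
static int verify_generation(int generation, int expected, int atomic)
{
        if (generation == expected)
                return 0;
        if (atomic)
                return -EAGAIN;         /* caller may not sleep; punt */
        return -EIO;                    /* blocking path: definitive failure */
}

/* Boolean-style wrapper: >0 up to date, 0 not, -EAGAIN passed through. */
static int buffer_uptodate(int generation, int expected, int atomic)
{
        int ret = verify_generation(generation, expected, atomic);

        if (ret == -EAGAIN)
                return ret;
        return !ret;
}

int main(void)
{
        printf("%d\n", buffer_uptodate(5, 5, 1));       /* 1 */
        printf("%d\n", buffer_uptodate(5, 6, 1));       /* -EAGAIN */
        printf("%d\n", buffer_uptodate(5, 6, 0));       /* 0 */
        return 0;
}
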
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2b35f8d14bb9..49fd7b66d57b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2301,6 +2301,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2301 2301
2302 if (ret) { 2302 if (ret) {
2303 printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret); 2303 printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2304 spin_lock(&delayed_refs->lock);
2304 return ret; 2305 return ret;
2305 } 2306 }
2306 2307
@@ -2331,6 +2332,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2331 2332
2332 if (ret) { 2333 if (ret) {
2333 printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret); 2334 printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2335 spin_lock(&delayed_refs->lock);
2334 return ret; 2336 return ret;
2335 } 2337 }
2336 2338
@@ -3769,13 +3771,10 @@ again:
3769 */ 3771 */
3770 if (current->journal_info) 3772 if (current->journal_info)
3771 return -EAGAIN; 3773 return -EAGAIN;
3772 ret = wait_event_interruptible(space_info->wait, 3774 ret = wait_event_killable(space_info->wait, !space_info->flush);
3773 !space_info->flush); 3775 /* Must have been killed, return */
3774 /* Must have been interrupted, return */ 3776 if (ret)
3775 if (ret) {
3776 printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
3777 return -EINTR; 3777 return -EINTR;
3778 }
3779 3778
3780 spin_lock(&space_info->lock); 3779 spin_lock(&space_info->lock);
3781 } 3780 }
@@ -4215,8 +4214,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4215 4214
4216 num_bytes = calc_global_metadata_size(fs_info); 4215 num_bytes = calc_global_metadata_size(fs_info);
4217 4216
4218 spin_lock(&block_rsv->lock);
4219 spin_lock(&sinfo->lock); 4217 spin_lock(&sinfo->lock);
4218 spin_lock(&block_rsv->lock);
4220 4219
4221 block_rsv->size = num_bytes; 4220 block_rsv->size = num_bytes;
4222 4221
@@ -4242,8 +4241,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4242 block_rsv->full = 1; 4241 block_rsv->full = 1;
4243 } 4242 }
4244 4243
4245 spin_unlock(&sinfo->lock);
4246 spin_unlock(&block_rsv->lock); 4244 spin_unlock(&block_rsv->lock);
4245 spin_unlock(&sinfo->lock);
4247} 4246}
4248 4247
4249static void init_global_block_rsv(struct btrfs_fs_info *fs_info) 4248static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -6569,7 +6568,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6569 goto skip; 6568 goto skip;
6570 } 6569 }
6571 6570
6572 if (!btrfs_buffer_uptodate(next, generation)) { 6571 if (!btrfs_buffer_uptodate(next, generation, 0)) {
6573 btrfs_tree_unlock(next); 6572 btrfs_tree_unlock(next);
6574 free_extent_buffer(next); 6573 free_extent_buffer(next);
6575 next = NULL; 6574 next = NULL;
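
Besides re-taking delayed_refs->lock on the error returns, the extent-tree.c hunks swap the nesting in update_global_block_rsv() so sinfo->lock is taken before block_rsv->lock and released after it, presumably to keep one consistent order between the two; taking two spinlocks in opposite orders on different paths is the classic ABBA deadlock. A tiny pthread sketch of the rule, with illustrative lock and function names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sinfo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rsv_lock   = PTHREAD_MUTEX_INITIALIZER;

/* Every path that needs both locks takes them in the same order:
 * sinfo_lock first, then rsv_lock, and releases the inner one first. */
static void update_reservation(long *size, long delta)
{
        pthread_mutex_lock(&sinfo_lock);
        pthread_mutex_lock(&rsv_lock);

        *size += delta;

        pthread_mutex_unlock(&rsv_lock);
        pthread_mutex_unlock(&sinfo_lock);
}

int main(void)
{
        long size = 0;

        update_reservation(&size, 4096);
        printf("size=%ld\n", size);
        return 0;
}
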
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cd4b5e400221..c9018a05036e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -402,20 +402,28 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
402 return 0; 402 return 0;
403} 403}
404 404
405static struct extent_state *next_state(struct extent_state *state)
406{
407 struct rb_node *next = rb_next(&state->rb_node);
408 if (next)
409 return rb_entry(next, struct extent_state, rb_node);
410 else
411 return NULL;
412}
413
405/* 414/*
406 * utility function to clear some bits in an extent state struct. 415 * utility function to clear some bits in an extent state struct.
407 * it will optionally wake up any one waiting on this state (wake == 1), or 416 * it will optionally wake up any one waiting on this state (wake == 1)
408 * forcibly remove the state from the tree (delete == 1).
409 * 417 *
410 * If no bits are set on the state struct after clearing things, the 418 * If no bits are set on the state struct after clearing things, the
411 * struct is freed and removed from the tree 419 * struct is freed and removed from the tree
412 */ 420 */
413static int clear_state_bit(struct extent_io_tree *tree, 421static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
414 struct extent_state *state, 422 struct extent_state *state,
415 int *bits, int wake) 423 int *bits, int wake)
416{ 424{
425 struct extent_state *next;
417 int bits_to_clear = *bits & ~EXTENT_CTLBITS; 426 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
418 int ret = state->state & bits_to_clear;
419 427
420 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 428 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
421 u64 range = state->end - state->start + 1; 429 u64 range = state->end - state->start + 1;
@@ -427,6 +435,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
427 if (wake) 435 if (wake)
428 wake_up(&state->wq); 436 wake_up(&state->wq);
429 if (state->state == 0) { 437 if (state->state == 0) {
438 next = next_state(state);
430 if (state->tree) { 439 if (state->tree) {
431 rb_erase(&state->rb_node, &tree->state); 440 rb_erase(&state->rb_node, &tree->state);
432 state->tree = NULL; 441 state->tree = NULL;
@@ -436,8 +445,9 @@ static int clear_state_bit(struct extent_io_tree *tree,
436 } 445 }
437 } else { 446 } else {
438 merge_state(tree, state); 447 merge_state(tree, state);
448 next = next_state(state);
439 } 449 }
440 return ret; 450 return next;
441} 451}
442 452
443static struct extent_state * 453static struct extent_state *
@@ -476,7 +486,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
476 struct extent_state *state; 486 struct extent_state *state;
477 struct extent_state *cached; 487 struct extent_state *cached;
478 struct extent_state *prealloc = NULL; 488 struct extent_state *prealloc = NULL;
479 struct rb_node *next_node;
480 struct rb_node *node; 489 struct rb_node *node;
481 u64 last_end; 490 u64 last_end;
482 int err; 491 int err;
@@ -528,14 +537,11 @@ hit_next:
528 WARN_ON(state->end < start); 537 WARN_ON(state->end < start);
529 last_end = state->end; 538 last_end = state->end;
530 539
531 if (state->end < end && !need_resched())
532 next_node = rb_next(&state->rb_node);
533 else
534 next_node = NULL;
535
536 /* the state doesn't have the wanted bits, go ahead */ 540 /* the state doesn't have the wanted bits, go ahead */
537 if (!(state->state & bits)) 541 if (!(state->state & bits)) {
542 state = next_state(state);
538 goto next; 543 goto next;
544 }
539 545
540 /* 546 /*
541 * | ---- desired range ---- | 547 * | ---- desired range ---- |
@@ -593,16 +599,13 @@ hit_next:
593 goto out; 599 goto out;
594 } 600 }
595 601
596 clear_state_bit(tree, state, &bits, wake); 602 state = clear_state_bit(tree, state, &bits, wake);
597next: 603next:
598 if (last_end == (u64)-1) 604 if (last_end == (u64)-1)
599 goto out; 605 goto out;
600 start = last_end + 1; 606 start = last_end + 1;
601 if (start <= end && next_node) { 607 if (start <= end && state && !need_resched())
602 state = rb_entry(next_node, struct extent_state,
603 rb_node);
604 goto hit_next; 608 goto hit_next;
605 }
606 goto search_again; 609 goto search_again;
607 610
608out: 611out:
@@ -2301,7 +2304,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2301 u64 start; 2304 u64 start;
2302 u64 end; 2305 u64 end;
2303 int whole_page; 2306 int whole_page;
2304 int failed_mirror; 2307 int mirror;
2305 int ret; 2308 int ret;
2306 2309
2307 if (err) 2310 if (err)
@@ -2340,20 +2343,18 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2340 } 2343 }
2341 spin_unlock(&tree->lock); 2344 spin_unlock(&tree->lock);
2342 2345
2346 mirror = (int)(unsigned long)bio->bi_bdev;
2343 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2347 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2344 ret = tree->ops->readpage_end_io_hook(page, start, end, 2348 ret = tree->ops->readpage_end_io_hook(page, start, end,
2345 state); 2349 state, mirror);
2346 if (ret) 2350 if (ret)
2347 uptodate = 0; 2351 uptodate = 0;
2348 else 2352 else
2349 clean_io_failure(start, page); 2353 clean_io_failure(start, page);
2350 } 2354 }
2351 2355
2352 if (!uptodate)
2353 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2354
2355 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { 2356 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2356 ret = tree->ops->readpage_io_failed_hook(page, failed_mirror); 2357 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2357 if (!ret && !err && 2358 if (!ret && !err &&
2358 test_bit(BIO_UPTODATE, &bio->bi_flags)) 2359 test_bit(BIO_UPTODATE, &bio->bi_flags))
2359 uptodate = 1; 2360 uptodate = 1;
@@ -2368,8 +2369,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2368 * can't handle the error it will return -EIO and we 2369 * can't handle the error it will return -EIO and we
2369 * remain responsible for that page. 2370 * remain responsible for that page.
2370 */ 2371 */
2371 ret = bio_readpage_error(bio, page, start, end, 2372 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2372 failed_mirror, NULL);
2373 if (ret == 0) { 2373 if (ret == 0) {
2374 uptodate = 2374 uptodate =
2375 test_bit(BIO_UPTODATE, &bio->bi_flags); 2375 test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -4120,6 +4120,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4120 if (atomic_inc_not_zero(&exists->refs)) { 4120 if (atomic_inc_not_zero(&exists->refs)) {
4121 spin_unlock(&mapping->private_lock); 4121 spin_unlock(&mapping->private_lock);
4122 unlock_page(p); 4122 unlock_page(p);
4123 page_cache_release(p);
4123 mark_extent_buffer_accessed(exists); 4124 mark_extent_buffer_accessed(exists);
4124 goto free_eb; 4125 goto free_eb;
4125 } 4126 }
@@ -4199,8 +4200,7 @@ free_eb:
4199 unlock_page(eb->pages[i]); 4200 unlock_page(eb->pages[i]);
4200 } 4201 }
4201 4202
4202 if (!atomic_dec_and_test(&eb->refs)) 4203 WARN_ON(!atomic_dec_and_test(&eb->refs));
4203 return exists;
4204 btrfs_release_extent_buffer(eb); 4204 btrfs_release_extent_buffer(eb);
4205 return exists; 4205 return exists;
4206} 4206}
@@ -4462,7 +4462,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4462 } 4462 }
4463 4463
4464 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 4464 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4465 eb->failed_mirror = 0; 4465 eb->read_mirror = 0;
4466 atomic_set(&eb->io_pages, num_reads); 4466 atomic_set(&eb->io_pages, num_reads);
4467 for (i = start_i; i < num_pages; i++) { 4467 for (i = start_i; i < num_pages; i++) {
4468 page = extent_buffer_page(eb, i); 4468 page = extent_buffer_page(eb, i);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index faf10eb57f75..b516c3b8dec6 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -79,7 +79,7 @@ struct extent_io_ops {
79 u64 start, u64 end, 79 u64 start, u64 end,
80 struct extent_state *state); 80 struct extent_state *state);
81 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, 81 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
82 struct extent_state *state); 82 struct extent_state *state, int mirror);
83 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, 83 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
84 struct extent_state *state, int uptodate); 84 struct extent_state *state, int uptodate);
85 void (*set_bit_hook)(struct inode *inode, struct extent_state *state, 85 void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
@@ -135,7 +135,7 @@ struct extent_buffer {
135 spinlock_t refs_lock; 135 spinlock_t refs_lock;
136 atomic_t refs; 136 atomic_t refs;
137 atomic_t io_pages; 137 atomic_t io_pages;
138 int failed_mirror; 138 int read_mirror;
139 struct list_head leak_list; 139 struct list_head leak_list;
140 struct rcu_head rcu_head; 140 struct rcu_head rcu_head;
141 pid_t lock_owner; 141 pid_t lock_owner;
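
The clear_state_bit() rework above returns the next extent_state instead of the cleared bit mask, so clear_extent_bit() can keep walking even when the current state is merged or freed; the successor has to be captured before the node can disappear. A simplified, list-based sketch of that iteration idiom; struct state, clear_bits() and the singly linked list are invented for illustration and stand in for the extent state rbtree:

#include <stdio.h>
#include <stdlib.h>

struct state {
        int bits;
        struct state *next;
};

/* Clear 'bits' in *pos; if nothing is left, unlink and free it.
 * Always return the successor so the caller can keep iterating. */
static struct state *clear_bits(struct state **head, struct state *pos, int bits)
{
        struct state *next = pos->next;         /* grab the successor first */

        pos->bits &= ~bits;
        if (pos->bits == 0) {
                struct state **pp = head;

                while (*pp != pos)
                        pp = &(*pp)->next;
                *pp = next;                     /* unlink ... */
                free(pos);                      /* ... then free */
        }
        return next;
}

int main(void)
{
        struct state *head = NULL, *s;
        int i;

        for (i = 0; i < 3; i++) {
                s = malloc(sizeof(*s));
                s->bits = i + 1;                /* the bits == 1 node will empty */
                s->next = head;
                head = s;
        }

        for (s = head; s; )
                s = clear_bits(&head, s, 0x1);  /* continue via the return value */

        for (s = head; s; s = s->next)
                printf("bits=%d\n", s->bits);

        while (head)                            /* tidy up what is left */
                head = clear_bits(&head, head, ~0);
        return 0;
}
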
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d83260d7498f..53bf2d764bbc 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -567,6 +567,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
567 int extent_type; 567 int extent_type;
568 int recow; 568 int recow;
569 int ret; 569 int ret;
570 int modify_tree = -1;
570 571
571 if (drop_cache) 572 if (drop_cache)
572 btrfs_drop_extent_cache(inode, start, end - 1, 0); 573 btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -575,10 +576,13 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
575 if (!path) 576 if (!path)
576 return -ENOMEM; 577 return -ENOMEM;
577 578
579 if (start >= BTRFS_I(inode)->disk_i_size)
580 modify_tree = 0;
581
578 while (1) { 582 while (1) {
579 recow = 0; 583 recow = 0;
580 ret = btrfs_lookup_file_extent(trans, root, path, ino, 584 ret = btrfs_lookup_file_extent(trans, root, path, ino,
581 search_start, -1); 585 search_start, modify_tree);
582 if (ret < 0) 586 if (ret < 0)
583 break; 587 break;
584 if (ret > 0 && path->slots[0] > 0 && search_start == start) { 588 if (ret > 0 && path->slots[0] > 0 && search_start == start) {
@@ -634,7 +638,8 @@ next_slot:
634 } 638 }
635 639
636 search_start = max(key.offset, start); 640 search_start = max(key.offset, start);
637 if (recow) { 641 if (recow || !modify_tree) {
642 modify_tree = -1;
638 btrfs_release_path(path); 643 btrfs_release_path(path);
639 continue; 644 continue;
640 } 645 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 115bc05e42b0..61b16c641ce0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1947,7 +1947,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1947 * extent_io.c will try to find good copies for us. 1947 * extent_io.c will try to find good copies for us.
1948 */ 1948 */
1949static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, 1949static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1950 struct extent_state *state) 1950 struct extent_state *state, int mirror)
1951{ 1951{
1952 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); 1952 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1953 struct inode *inode = page->mapping->host; 1953 struct inode *inode = page->mapping->host;
@@ -4069,7 +4069,7 @@ static struct inode *new_simple_dir(struct super_block *s,
4069 BTRFS_I(inode)->dummy_inode = 1; 4069 BTRFS_I(inode)->dummy_inode = 1;
4070 4070
4071 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 4071 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4072 inode->i_op = &simple_dir_inode_operations; 4072 inode->i_op = &btrfs_dir_ro_inode_operations;
4073 inode->i_fop = &simple_dir_operations; 4073 inode->i_fop = &simple_dir_operations;
4074 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 4074 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4075 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4075 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -4140,14 +4140,18 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4140static int btrfs_dentry_delete(const struct dentry *dentry) 4140static int btrfs_dentry_delete(const struct dentry *dentry)
4141{ 4141{
4142 struct btrfs_root *root; 4142 struct btrfs_root *root;
4143 struct inode *inode = dentry->d_inode;
4143 4144
4144 if (!dentry->d_inode && !IS_ROOT(dentry)) 4145 if (!inode && !IS_ROOT(dentry))
4145 dentry = dentry->d_parent; 4146 inode = dentry->d_parent->d_inode;
4146 4147
4147 if (dentry->d_inode) { 4148 if (inode) {
4148 root = BTRFS_I(dentry->d_inode)->root; 4149 root = BTRFS_I(inode)->root;
4149 if (btrfs_root_refs(&root->root_item) == 0) 4150 if (btrfs_root_refs(&root->root_item) == 0)
4150 return 1; 4151 return 1;
4152
4153 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4154 return 1;
4151 } 4155 }
4152 return 0; 4156 return 0;
4153} 4157}
@@ -4188,7 +4192,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4188 struct btrfs_path *path; 4192 struct btrfs_path *path;
4189 struct list_head ins_list; 4193 struct list_head ins_list;
4190 struct list_head del_list; 4194 struct list_head del_list;
4191 struct qstr q;
4192 int ret; 4195 int ret;
4193 struct extent_buffer *leaf; 4196 struct extent_buffer *leaf;
4194 int slot; 4197 int slot;
@@ -4279,7 +4282,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4279 4282
4280 while (di_cur < di_total) { 4283 while (di_cur < di_total) {
4281 struct btrfs_key location; 4284 struct btrfs_key location;
4282 struct dentry *tmp;
4283 4285
4284 if (verify_dir_item(root, leaf, di)) 4286 if (verify_dir_item(root, leaf, di))
4285 break; 4287 break;
@@ -4300,35 +4302,15 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4300 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 4302 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4301 btrfs_dir_item_key_to_cpu(leaf, di, &location); 4303 btrfs_dir_item_key_to_cpu(leaf, di, &location);
4302 4304
4303 q.name = name_ptr; 4305
4304 q.len = name_len;
4305 q.hash = full_name_hash(q.name, q.len);
4306 tmp = d_lookup(filp->f_dentry, &q);
4307 if (!tmp) {
4308 struct btrfs_key *newkey;
4309
4310 newkey = kzalloc(sizeof(struct btrfs_key),
4311 GFP_NOFS);
4312 if (!newkey)
4313 goto no_dentry;
4314 tmp = d_alloc(filp->f_dentry, &q);
4315 if (!tmp) {
4316 kfree(newkey);
4317 dput(tmp);
4318 goto no_dentry;
4319 }
4320 memcpy(newkey, &location,
4321 sizeof(struct btrfs_key));
4322 tmp->d_fsdata = newkey;
4323 tmp->d_flags |= DCACHE_NEED_LOOKUP;
4324 d_rehash(tmp);
4325 dput(tmp);
4326 } else {
4327 dput(tmp);
4328 }
4329no_dentry:
4330 /* is this a reference to our own snapshot? If so 4306 /* is this a reference to our own snapshot? If so
4331 * skip it 4307 * skip it.
4308 *
4309 * In contrast to old kernels, we insert the snapshot's
4310 * dir item and dir index after it has been created, so
4311 * we won't find a reference to our own snapshot. We
4312 * still keep the following code for backward
4313 * compatibility.
4332 */ 4314 */
4333 if (location.type == BTRFS_ROOT_ITEM_KEY && 4315 if (location.type == BTRFS_ROOT_ITEM_KEY &&
4334 location.objectid == root->root_key.objectid) { 4316 location.objectid == root->root_key.objectid) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18cc23d164a8..14f8e1faa46e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2262,7 +2262,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2262 di_args->bytes_used = dev->bytes_used; 2262 di_args->bytes_used = dev->bytes_used;
2263 di_args->total_bytes = dev->total_bytes; 2263 di_args->total_bytes = dev->total_bytes;
2264 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); 2264 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2265 strncpy(di_args->path, dev->name, sizeof(di_args->path)); 2265 if (dev->name)
2266 strncpy(di_args->path, dev->name, sizeof(di_args->path));
2267 else
2268 di_args->path[0] = '\0';
2266 2269
2267out: 2270out:
2268 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) 2271 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 4f69028a68c4..086e6bdae1c4 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -252,7 +252,7 @@ struct btrfs_data_container {
252 252
253struct btrfs_ioctl_ino_path_args { 253struct btrfs_ioctl_ino_path_args {
254 __u64 inum; /* in */ 254 __u64 inum; /* in */
255 __u32 size; /* in */ 255 __u64 size; /* in */
256 __u64 reserved[4]; 256 __u64 reserved[4];
257 /* struct btrfs_data_container *fspath; out */ 257 /* struct btrfs_data_container *fspath; out */
258 __u64 fspath; /* out */ 258 __u64 fspath; /* out */
@@ -260,7 +260,7 @@ struct btrfs_ioctl_ino_path_args {
260 260
261struct btrfs_ioctl_logical_ino_args { 261struct btrfs_ioctl_logical_ino_args {
262 __u64 logical; /* in */ 262 __u64 logical; /* in */
263 __u32 size; /* in */ 263 __u64 size; /* in */
264 __u64 reserved[4]; 264 __u64 reserved[4];
265 /* struct btrfs_data_container *inodes; out */ 265 /* struct btrfs_data_container *inodes; out */
266 __u64 inodes; 266 __u64 inodes;
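
The dev-info ioctl hunk checks for a missing device name before copying it into the fixed-size reply and reports an empty path instead of handing NULL to strncpy(). A small sketch of that guard; dev_info_reply and fill_path() are made-up names, and the explicit NUL termination here is an extra belt-and-braces step, not something the diff itself adds:

#include <stdio.h>
#include <string.h>

struct dev_info_reply {
        char path[64];
};

static void fill_path(struct dev_info_reply *reply, const char *name)
{
        if (name) {
                strncpy(reply->path, name, sizeof(reply->path) - 1);
                reply->path[sizeof(reply->path) - 1] = '\0';
        } else {
                reply->path[0] = '\0';  /* missing device: report empty path */
        }
}

int main(void)
{
        struct dev_info_reply r;

        fill_path(&r, "/dev/sdb");
        printf("'%s'\n", r.path);
        fill_path(&r, NULL);
        printf("'%s'\n", r.path);
        return 0;
}
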
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index dc5d33146fdb..ac5d01085884 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -250,14 +250,12 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
250 struct btrfs_bio *bbio) 250 struct btrfs_bio *bbio)
251{ 251{
252 int ret; 252 int ret;
253 int looped = 0;
254 struct reada_zone *zone; 253 struct reada_zone *zone;
255 struct btrfs_block_group_cache *cache = NULL; 254 struct btrfs_block_group_cache *cache = NULL;
256 u64 start; 255 u64 start;
257 u64 end; 256 u64 end;
258 int i; 257 int i;
259 258
260again:
261 zone = NULL; 259 zone = NULL;
262 spin_lock(&fs_info->reada_lock); 260 spin_lock(&fs_info->reada_lock);
263 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 261 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
@@ -274,9 +272,6 @@ again:
274 spin_unlock(&fs_info->reada_lock); 272 spin_unlock(&fs_info->reada_lock);
275 } 273 }
276 274
277 if (looped)
278 return NULL;
279
280 cache = btrfs_lookup_block_group(fs_info, logical); 275 cache = btrfs_lookup_block_group(fs_info, logical);
281 if (!cache) 276 if (!cache)
282 return NULL; 277 return NULL;
@@ -307,13 +302,15 @@ again:
307 ret = radix_tree_insert(&dev->reada_zones, 302 ret = radix_tree_insert(&dev->reada_zones,
308 (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), 303 (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
309 zone); 304 zone);
310 spin_unlock(&fs_info->reada_lock);
311 305
312 if (ret) { 306 if (ret == -EEXIST) {
313 kfree(zone); 307 kfree(zone);
314 looped = 1; 308 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
315 goto again; 309 logical >> PAGE_CACHE_SHIFT, 1);
310 if (ret == 1)
311 kref_get(&zone->refcnt);
316 } 312 }
313 spin_unlock(&fs_info->reada_lock);
317 314
318 return zone; 315 return zone;
319} 316}
@@ -323,26 +320,26 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
323 struct btrfs_key *top, int level) 320 struct btrfs_key *top, int level)
324{ 321{
325 int ret; 322 int ret;
326 int looped = 0;
327 struct reada_extent *re = NULL; 323 struct reada_extent *re = NULL;
324 struct reada_extent *re_exist = NULL;
328 struct btrfs_fs_info *fs_info = root->fs_info; 325 struct btrfs_fs_info *fs_info = root->fs_info;
329 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 326 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
330 struct btrfs_bio *bbio = NULL; 327 struct btrfs_bio *bbio = NULL;
331 struct btrfs_device *dev; 328 struct btrfs_device *dev;
329 struct btrfs_device *prev_dev;
332 u32 blocksize; 330 u32 blocksize;
333 u64 length; 331 u64 length;
334 int nzones = 0; 332 int nzones = 0;
335 int i; 333 int i;
336 unsigned long index = logical >> PAGE_CACHE_SHIFT; 334 unsigned long index = logical >> PAGE_CACHE_SHIFT;
337 335
338again:
339 spin_lock(&fs_info->reada_lock); 336 spin_lock(&fs_info->reada_lock);
340 re = radix_tree_lookup(&fs_info->reada_tree, index); 337 re = radix_tree_lookup(&fs_info->reada_tree, index);
341 if (re) 338 if (re)
342 kref_get(&re->refcnt); 339 kref_get(&re->refcnt);
343 spin_unlock(&fs_info->reada_lock); 340 spin_unlock(&fs_info->reada_lock);
344 341
345 if (re || looped) 342 if (re)
346 return re; 343 return re;
347 344
348 re = kzalloc(sizeof(*re), GFP_NOFS); 345 re = kzalloc(sizeof(*re), GFP_NOFS);
@@ -398,16 +395,31 @@ again:
398 /* insert extent in reada_tree + all per-device trees, all or nothing */ 395 /* insert extent in reada_tree + all per-device trees, all or nothing */
399 spin_lock(&fs_info->reada_lock); 396 spin_lock(&fs_info->reada_lock);
400 ret = radix_tree_insert(&fs_info->reada_tree, index, re); 397 ret = radix_tree_insert(&fs_info->reada_tree, index, re);
398 if (ret == -EEXIST) {
399 re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
400 BUG_ON(!re_exist);
401 kref_get(&re_exist->refcnt);
402 spin_unlock(&fs_info->reada_lock);
403 goto error;
404 }
401 if (ret) { 405 if (ret) {
402 spin_unlock(&fs_info->reada_lock); 406 spin_unlock(&fs_info->reada_lock);
403 if (ret != -ENOMEM) {
404 /* someone inserted the extent in the meantime */
405 looped = 1;
406 }
407 goto error; 407 goto error;
408 } 408 }
409 prev_dev = NULL;
409 for (i = 0; i < nzones; ++i) { 410 for (i = 0; i < nzones; ++i) {
410 dev = bbio->stripes[i].dev; 411 dev = bbio->stripes[i].dev;
412 if (dev == prev_dev) {
413 /*
414 * in case of DUP, just add the first zone. As both
415 * are on the same device, there's nothing to gain
416 * from adding both.
417 * Also, it wouldn't work, as the tree is per device
418 * and adding would fail with EEXIST
419 */
420 continue;
421 }
422 prev_dev = dev;
411 ret = radix_tree_insert(&dev->reada_extents, index, re); 423 ret = radix_tree_insert(&dev->reada_extents, index, re);
412 if (ret) { 424 if (ret) {
413 while (--i >= 0) { 425 while (--i >= 0) {
@@ -450,9 +462,7 @@ error:
450 } 462 }
451 kfree(bbio); 463 kfree(bbio);
452 kfree(re); 464 kfree(re);
453 if (looped) 465 return re_exist;
454 goto again;
455 return NULL;
456} 466}
457 467
458static void reada_kref_dummy(struct kref *kr) 468static void reada_kref_dummy(struct kref *kr)
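
Both reada.c hunks replace the old "unlock, set looped, retry from the top" dance with a direct insert-or-lookup inside one critical section: if radix_tree_insert() reports -EEXIST, the entry that won the race is looked up and referenced while the lock is still held. A compact sketch of that insert-or-get shape; the fixed-size table, entry struct and refcount below are stand-ins for the radix tree and kref, not the kernel API:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NSLOTS 16

struct entry {
        unsigned long key;
        int refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table[NSLOTS];

static int table_insert(unsigned long key, struct entry *e)
{
        unsigned long slot = key % NSLOTS;

        if (table[slot])
                return -EEXIST;
        table[slot] = e;
        return 0;
}

static struct entry *table_lookup(unsigned long key)
{
        return table[key % NSLOTS];
}

/* Insert 'e' or, if someone beat us to it, return the existing entry with
 * an extra reference -- all inside one critical section, no retry loop. */
static struct entry *insert_or_get(unsigned long key, struct entry *e)
{
        struct entry *winner = e;

        pthread_mutex_lock(&table_lock);
        if (table_insert(key, e) == -EEXIST) {
                winner = table_lookup(key);
                winner->refs++;                 /* take a ref on the winner */
        }
        pthread_mutex_unlock(&table_lock);
        return winner;
}

int main(void)
{
        struct entry a = { .key = 3, .refs = 1 };
        struct entry b = { .key = 3, .refs = 1 };

        insert_or_get(3, &a);
        printf("winner refs=%d\n", insert_or_get(3, &b)->refs);  /* a, refs=2 */
        return 0;
}
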
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 017281dbb2a7..646ee21bb035 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1279,7 +1279,9 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1279 if (rb_node) 1279 if (rb_node)
1280 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1280 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1281 } else { 1281 } else {
1282 spin_lock(&root->fs_info->trans_lock);
1282 list_del_init(&root->root_list); 1283 list_del_init(&root->root_list);
1284 spin_unlock(&root->fs_info->trans_lock);
1283 kfree(node); 1285 kfree(node);
1284 } 1286 }
1285 return 0; 1287 return 0;
@@ -3811,7 +3813,7 @@ restart:
3811 3813
3812 ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5); 3814 ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
3813 if (ret < 0) { 3815 if (ret < 0) {
3814 if (ret != -EAGAIN) { 3816 if (ret != -ENOSPC) {
3815 err = ret; 3817 err = ret;
3816 WARN_ON(1); 3818 WARN_ON(1);
3817 break; 3819 break;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index bc015f77f3ea..2f3d6f917fb3 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -998,6 +998,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
998 page = sblock->pagev + page_index; 998 page = sblock->pagev + page_index;
999 page->logical = logical; 999 page->logical = logical;
1000 page->physical = bbio->stripes[mirror_index].physical; 1000 page->physical = bbio->stripes[mirror_index].physical;
1001 /* for missing devices, bdev is NULL */
1001 page->bdev = bbio->stripes[mirror_index].dev->bdev; 1002 page->bdev = bbio->stripes[mirror_index].dev->bdev;
1002 page->mirror_num = mirror_index + 1; 1003 page->mirror_num = mirror_index + 1;
1003 page->page = alloc_page(GFP_NOFS); 1004 page->page = alloc_page(GFP_NOFS);
@@ -1042,6 +1043,12 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1042 struct scrub_page *page = sblock->pagev + page_num; 1043 struct scrub_page *page = sblock->pagev + page_num;
1043 DECLARE_COMPLETION_ONSTACK(complete); 1044 DECLARE_COMPLETION_ONSTACK(complete);
1044 1045
1046 if (page->bdev == NULL) {
1047 page->io_error = 1;
1048 sblock->no_io_error_seen = 0;
1049 continue;
1050 }
1051
1045 BUG_ON(!page->page); 1052 BUG_ON(!page->page);
1046 bio = bio_alloc(GFP_NOFS, 1); 1053 bio = bio_alloc(GFP_NOFS, 1);
1047 if (!bio) 1054 if (!bio)
@@ -1257,12 +1264,6 @@ static int scrub_checksum_data(struct scrub_block *sblock)
1257 if (memcmp(csum, on_disk_csum, sdev->csum_size)) 1264 if (memcmp(csum, on_disk_csum, sdev->csum_size))
1258 fail = 1; 1265 fail = 1;
1259 1266
1260 if (fail) {
1261 spin_lock(&sdev->stat_lock);
1262 ++sdev->stat.csum_errors;
1263 spin_unlock(&sdev->stat_lock);
1264 }
1265
1266 return fail; 1267 return fail;
1267} 1268}
1268 1269
@@ -1335,15 +1336,6 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
1335 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size)) 1336 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1336 ++crc_fail; 1337 ++crc_fail;
1337 1338
1338 if (crc_fail || fail) {
1339 spin_lock(&sdev->stat_lock);
1340 if (crc_fail)
1341 ++sdev->stat.csum_errors;
1342 if (fail)
1343 ++sdev->stat.verify_errors;
1344 spin_unlock(&sdev->stat_lock);
1345 }
1346
1347 return fail || crc_fail; 1339 return fail || crc_fail;
1348} 1340}
1349 1341
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8d5d380f7bdb..c5f8fca4195f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -815,7 +815,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
815 return 0; 815 return 0;
816 } 816 }
817 817
818 btrfs_start_delalloc_inodes(root, 0);
819 btrfs_wait_ordered_extents(root, 0, 0); 818 btrfs_wait_ordered_extents(root, 0, 0);
820 819
821 trans = btrfs_start_transaction(root, 0); 820 trans = btrfs_start_transaction(root, 0);
@@ -1148,13 +1147,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1148 if (ret) 1147 if (ret)
1149 goto restore; 1148 goto restore;
1150 } else { 1149 } else {
1151 if (fs_info->fs_devices->rw_devices == 0) 1150 if (fs_info->fs_devices->rw_devices == 0) {
1152 ret = -EACCES; 1151 ret = -EACCES;
1153 goto restore; 1152 goto restore;
1153 }
1154 1154
1155 if (btrfs_super_log_root(fs_info->super_copy) != 0) 1155 if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1156 ret = -EINVAL; 1156 ret = -EINVAL;
1157 goto restore; 1157 goto restore;
1158 }
1158 1159
1159 ret = btrfs_cleanup_fs_roots(fs_info); 1160 ret = btrfs_cleanup_fs_roots(fs_info);
1160 if (ret) 1161 if (ret)
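
The btrfs_remount() hunk adds the braces the indentation always implied: without them the 'goto restore' after each check ran unconditionally, not only when rw_devices was zero or a log root was present. A tiny demonstration of the hazard, with -13 standing in for -EACCES and the function names invented:

#include <stdio.h>

/* Buggy shape: the indentation suggests both statements are guarded,
 * but only the first one is, so 'goto out' always runs. */
static int remount_buggy(int rw_devices)
{
        int ret = 0;

        if (rw_devices == 0)
                ret = -13;              /* -EACCES */
                goto out;               /* unconditional despite the indentation */

        ret = 1;                        /* never reached */
out:
        return ret;
}

/* Fixed shape: braces make the guarded block explicit. */
static int remount_fixed(int rw_devices)
{
        int ret = 0;

        if (rw_devices == 0) {
                ret = -13;              /* -EACCES */
                goto out;
        }

        ret = 1;
out:
        return ret;
}

int main(void)
{
        printf("buggy(2)=%d fixed(2)=%d\n", remount_buggy(2), remount_fixed(2));
        printf("buggy(0)=%d fixed(0)=%d\n", remount_buggy(0), remount_fixed(0));
        return 0;
}
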
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 11b77a59db62..36422254ef67 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -73,8 +73,10 @@ loop:
73 73
74 cur_trans = root->fs_info->running_transaction; 74 cur_trans = root->fs_info->running_transaction;
75 if (cur_trans) { 75 if (cur_trans) {
76 if (cur_trans->aborted) 76 if (cur_trans->aborted) {
77 spin_unlock(&root->fs_info->trans_lock);
77 return cur_trans->aborted; 78 return cur_trans->aborted;
79 }
78 atomic_inc(&cur_trans->use_count); 80 atomic_inc(&cur_trans->use_count);
79 atomic_inc(&cur_trans->num_writers); 81 atomic_inc(&cur_trans->num_writers);
80 cur_trans->num_joined++; 82 cur_trans->num_joined++;
@@ -1400,6 +1402,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1400 ret = commit_fs_roots(trans, root); 1402 ret = commit_fs_roots(trans, root);
1401 if (ret) { 1403 if (ret) {
1402 mutex_unlock(&root->fs_info->tree_log_mutex); 1404 mutex_unlock(&root->fs_info->tree_log_mutex);
1405 mutex_unlock(&root->fs_info->reloc_mutex);
1403 goto cleanup_transaction; 1406 goto cleanup_transaction;
1404 } 1407 }
1405 1408
@@ -1411,6 +1414,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1411 ret = commit_cowonly_roots(trans, root); 1414 ret = commit_cowonly_roots(trans, root);
1412 if (ret) { 1415 if (ret) {
1413 mutex_unlock(&root->fs_info->tree_log_mutex); 1416 mutex_unlock(&root->fs_info->tree_log_mutex);
1417 mutex_unlock(&root->fs_info->reloc_mutex);
1414 goto cleanup_transaction; 1418 goto cleanup_transaction;
1415 } 1419 }
1416 1420
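
Both transaction.c hunks fix early error exits that left a lock held: join_transaction() returned with trans_lock still taken when the transaction was aborted, and the commit error paths jumped to cleanup without dropping reloc_mutex. The general rule is that every exit from a critical section releases exactly what it acquired; a small pthread sketch of that shape, where join_transaction() and trans_lock are illustrative userspace stand-ins:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static int transaction_aborted;         /* set elsewhere when a commit fails */

static int join_transaction(void)
{
        pthread_mutex_lock(&trans_lock);

        if (transaction_aborted) {
                /* the bug being fixed: returning here without unlocking */
                pthread_mutex_unlock(&trans_lock);
                return -EROFS;
        }

        /* ... join the running transaction ... */
        pthread_mutex_unlock(&trans_lock);
        return 0;
}

int main(void)
{
        transaction_aborted = 1;
        printf("%d\n", join_transaction());     /* -EROFS, lock released */
        printf("%d\n", join_transaction());     /* still returns: no deadlock */
        return 0;
}
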
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d017283ae6f5..eb1ae908582c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -279,7 +279,7 @@ static int process_one_buffer(struct btrfs_root *log,
279 log->fs_info->extent_root, 279 log->fs_info->extent_root,
280 eb->start, eb->len); 280 eb->start, eb->len);
281 281
282 if (btrfs_buffer_uptodate(eb, gen)) { 282 if (btrfs_buffer_uptodate(eb, gen, 0)) {
283 if (wc->write) 283 if (wc->write)
284 btrfs_write_tree_block(eb); 284 btrfs_write_tree_block(eb);
285 if (wc->wait) 285 if (wc->wait)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 759d02486d7c..1411b99555a4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3324,12 +3324,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3324 stripe_size = devices_info[ndevs-1].max_avail; 3324 stripe_size = devices_info[ndevs-1].max_avail;
3325 num_stripes = ndevs * dev_stripes; 3325 num_stripes = ndevs * dev_stripes;
3326 3326
3327 if (stripe_size * num_stripes > max_chunk_size * ncopies) { 3327 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3328 stripe_size = max_chunk_size * ncopies; 3328 stripe_size = max_chunk_size * ncopies;
3329 do_div(stripe_size, num_stripes); 3329 do_div(stripe_size, ndevs);
3330 } 3330 }
3331 3331
3332 do_div(stripe_size, dev_stripes); 3332 do_div(stripe_size, dev_stripes);
3333
3334 /* align to BTRFS_STRIPE_LEN */
3333 do_div(stripe_size, BTRFS_STRIPE_LEN); 3335 do_div(stripe_size, BTRFS_STRIPE_LEN);
3334 stripe_size *= BTRFS_STRIPE_LEN; 3336 stripe_size *= BTRFS_STRIPE_LEN;
3335 3337
@@ -3805,10 +3807,11 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3805 else if (mirror_num) 3807 else if (mirror_num)
3806 stripe_index += mirror_num - 1; 3808 stripe_index += mirror_num - 1;
3807 else { 3809 else {
3810 int old_stripe_index = stripe_index;
3808 stripe_index = find_live_mirror(map, stripe_index, 3811 stripe_index = find_live_mirror(map, stripe_index,
3809 map->sub_stripes, stripe_index + 3812 map->sub_stripes, stripe_index +
3810 current->pid % map->sub_stripes); 3813 current->pid % map->sub_stripes);
3811 mirror_num = stripe_index + 1; 3814 mirror_num = stripe_index - old_stripe_index + 1;
3812 } 3815 }
3813 } else { 3816 } else {
3814 /* 3817 /*
@@ -4350,8 +4353,10 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4350 4353
4351 ret = __btrfs_open_devices(fs_devices, FMODE_READ, 4354 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4352 root->fs_info->bdev_holder); 4355 root->fs_info->bdev_holder);
4353 if (ret) 4356 if (ret) {
4357 free_fs_devices(fs_devices);
4354 goto out; 4358 goto out;
4359 }
4355 4360
4356 if (!fs_devices->seeding) { 4361 if (!fs_devices->seeding) {
4357 __btrfs_close_devices(fs_devices); 4362 __btrfs_close_devices(fs_devices);
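
In the __btrfs_map_block() hunk above, the stripe index is saved before find_live_mirror() adjusts it, so mirror_num ends up relative to the first stripe of the sub-stripe group rather than being the absolute stripe index plus one. A minimal arithmetic sketch of the difference; pick_live() is an invented stand-in for find_live_mirror():

#include <stdio.h>

/* Stand-in for find_live_mirror(): pick some live stripe in [base, base + nr). */
static int pick_live(int base, int nr, int preferred)
{
        return base + (preferred % nr);
}

int main(void)
{
        int sub_stripes = 2;
        int old_stripe_index = 4;       /* first stripe of this group */
        int stripe_index = pick_live(old_stripe_index, sub_stripes, 1);
        int mirror_num = stripe_index - old_stripe_index + 1;

        /* the old form, stripe_index + 1, would have reported 6 here */
        printf("stripe=%d mirror_num=%d\n", stripe_index, mirror_num);
        return 0;
}
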
diff --git a/fs/buffer.c b/fs/buffer.c
index 36d66653b931..351e18ea2e53 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -985,7 +985,6 @@ grow_dev_page(struct block_device *bdev, sector_t block,
985 return page; 985 return page;
986 986
987failed: 987failed:
988 BUG();
989 unlock_page(page); 988 unlock_page(page);
990 page_cache_release(page); 989 page_cache_release(page);
991 return NULL; 990 return NULL;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d34212822444..541ef81f6ae8 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -370,13 +370,13 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
370 (int)(srcaddr->sa_family)); 370 (int)(srcaddr->sa_family));
371 } 371 }
372 372
373 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); 373 seq_printf(s, ",uid=%u", cifs_sb->mnt_uid);
374 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 374 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
375 seq_printf(s, ",forceuid"); 375 seq_printf(s, ",forceuid");
376 else 376 else
377 seq_printf(s, ",noforceuid"); 377 seq_printf(s, ",noforceuid");
378 378
379 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); 379 seq_printf(s, ",gid=%u", cifs_sb->mnt_gid);
380 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 380 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
381 seq_printf(s, ",forcegid"); 381 seq_printf(s, ",forcegid");
382 else 382 else
@@ -434,11 +434,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
434 seq_printf(s, ",noperm"); 434 seq_printf(s, ",noperm");
435 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 435 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
436 seq_printf(s, ",strictcache"); 436 seq_printf(s, ",strictcache");
437 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
438 seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
439 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
440 seq_printf(s, ",backupgid=%u", cifs_sb->mnt_backupgid);
437 441
438 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 442 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
439 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 443 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
440 /* convert actimeo and display it in seconds */ 444 /* convert actimeo and display it in seconds */
441 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); 445 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
442 446
443 return 0; 447 return 0;
444} 448}
@@ -695,7 +699,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
695 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate 699 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
696 * the cached file length 700 * the cached file length
697 */ 701 */
698 if (origin != SEEK_SET || origin != SEEK_CUR) { 702 if (origin != SEEK_SET && origin != SEEK_CUR) {
699 int rc; 703 int rc;
700 struct inode *inode = file->f_path.dentry->d_inode; 704 struct inode *inode = file->f_path.dentry->d_inode;
701 705
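
The cifs_llseek() hunk fixes a boolean slip: the test 'origin != SEEK_SET || origin != SEEK_CUR' is true for every origin, so the revalidation path ran even for SEEK_SET and SEEK_CUR; De Morgan gives the intended '&&' form. A two-function demonstration:

#include <stdio.h>

static int needs_revalidate_buggy(int origin)
{
        return origin != SEEK_SET || origin != SEEK_CUR;        /* always true */
}

static int needs_revalidate_fixed(int origin)
{
        return origin != SEEK_SET && origin != SEEK_CUR;        /* SEEK_END etc. only */
}

int main(void)
{
        int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };
        unsigned int i;

        for (i = 0; i < sizeof(origins) / sizeof(origins[0]); i++)
                printf("origin=%d buggy=%d fixed=%d\n", origins[i],
                       needs_revalidate_buggy(origins[i]),
                       needs_revalidate_fixed(origins[i]));
        return 0;
}
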
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d1389bb33ceb..65365358c976 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
125extern const struct export_operations cifs_export_ops; 125extern const struct export_operations cifs_export_ops;
126#endif /* CONFIG_CIFS_NFSD_EXPORT */ 126#endif /* CONFIG_CIFS_NFSD_EXPORT */
127 127
128#define CIFS_VERSION "1.77" 128#define CIFS_VERSION "1.78"
129#endif /* _CIFSFS_H */ 129#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index f52c5ab78f9d..da2f5446fa7a 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4844,8 +4844,12 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
4844 max_len = data_end - temp; 4844 max_len = data_end - temp;
4845 node->node_name = cifs_strndup_from_utf16(temp, max_len, 4845 node->node_name = cifs_strndup_from_utf16(temp, max_len,
4846 is_unicode, nls_codepage); 4846 is_unicode, nls_codepage);
4847 if (!node->node_name) 4847 if (!node->node_name) {
4848 rc = -ENOMEM; 4848 rc = -ENOMEM;
4849 goto parse_DFS_referrals_exit;
4850 }
4851
4852 ref++;
4849 } 4853 }
4850 4854
4851parse_DFS_referrals_exit: 4855parse_DFS_referrals_exit:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f31dc9ac37b7..5dcc55197fb3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -215,6 +215,8 @@ static const match_table_t cifs_mount_option_tokens = {
215 215
216 { Opt_ignore, "cred" }, 216 { Opt_ignore, "cred" },
217 { Opt_ignore, "credentials" }, 217 { Opt_ignore, "credentials" },
218 { Opt_ignore, "cred=%s" },
219 { Opt_ignore, "credentials=%s" },
218 { Opt_ignore, "guest" }, 220 { Opt_ignore, "guest" },
219 { Opt_ignore, "rw" }, 221 { Opt_ignore, "rw" },
220 { Opt_ignore, "ro" }, 222 { Opt_ignore, "ro" },
@@ -2183,6 +2185,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
2183 tcp_ses->session_estab = false; 2185 tcp_ses->session_estab = false;
2184 tcp_ses->sequence_number = 0; 2186 tcp_ses->sequence_number = 0;
2185 tcp_ses->lstrp = jiffies; 2187 tcp_ses->lstrp = jiffies;
2188 spin_lock_init(&tcp_ses->req_lock);
2186 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); 2189 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
2187 INIT_LIST_HEAD(&tcp_ses->smb_ses_list); 2190 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
2188 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); 2191 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -3228,10 +3231,6 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3228 3231
3229 cifs_sb->mnt_uid = pvolume_info->linux_uid; 3232 cifs_sb->mnt_uid = pvolume_info->linux_uid;
3230 cifs_sb->mnt_gid = pvolume_info->linux_gid; 3233 cifs_sb->mnt_gid = pvolume_info->linux_gid;
3231 if (pvolume_info->backupuid_specified)
3232 cifs_sb->mnt_backupuid = pvolume_info->backupuid;
3233 if (pvolume_info->backupgid_specified)
3234 cifs_sb->mnt_backupgid = pvolume_info->backupgid;
3235 cifs_sb->mnt_file_mode = pvolume_info->file_mode; 3234 cifs_sb->mnt_file_mode = pvolume_info->file_mode;
3236 cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; 3235 cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
3237 cFYI(1, "file mode: 0x%hx dir mode: 0x%hx", 3236 cFYI(1, "file mode: 0x%hx dir mode: 0x%hx",
@@ -3262,10 +3261,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3262 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; 3261 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
3263 if (pvolume_info->cifs_acl) 3262 if (pvolume_info->cifs_acl)
3264 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 3263 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
3265 if (pvolume_info->backupuid_specified) 3264 if (pvolume_info->backupuid_specified) {
3266 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; 3265 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
3267 if (pvolume_info->backupgid_specified) 3266 cifs_sb->mnt_backupuid = pvolume_info->backupuid;
3267 }
3268 if (pvolume_info->backupgid_specified) {
3268 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; 3269 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
3270 cifs_sb->mnt_backupgid = pvolume_info->backupgid;
3271 }
3269 if (pvolume_info->override_uid) 3272 if (pvolume_info->override_uid)
3270 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; 3273 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
3271 if (pvolume_info->override_gid) 3274 if (pvolume_info->override_gid)
@@ -3614,22 +3617,6 @@ cifs_get_volume_info(char *mount_data, const char *devname)
3614 return volume_info; 3617 return volume_info;
3615} 3618}
3616 3619
3617/* make sure ra_pages is a multiple of rsize */
3618static inline unsigned int
3619cifs_ra_pages(struct cifs_sb_info *cifs_sb)
3620{
3621 unsigned int reads;
3622 unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
3623
3624 if (rsize_pages >= default_backing_dev_info.ra_pages)
3625 return default_backing_dev_info.ra_pages;
3626 else if (rsize_pages == 0)
3627 return rsize_pages;
3628
3629 reads = default_backing_dev_info.ra_pages / rsize_pages;
3630 return reads * rsize_pages;
3631}
3632
3633int 3620int
3634cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) 3621cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
3635{ 3622{
@@ -3717,7 +3704,7 @@ try_mount_again:
3717 cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info); 3704 cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
3718 3705
3719 /* tune readahead according to rsize */ 3706 /* tune readahead according to rsize */
3720 cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb); 3707 cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
3721 3708
3722remote_path_check: 3709remote_path_check:
3723#ifdef CONFIG_CIFS_DFS_UPCALL 3710#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d172c8ed9017..ec4e9a2a12f8 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -668,12 +668,19 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
668 return 0; 668 return 0;
669 else { 669 else {
670 /* 670 /*
671 * Forcibly invalidate automounting directory inodes 671 * If the inode wasn't known to be a dfs entry when
672 * (remote DFS directories) so to have them 672 * the dentry was instantiated, such as when created
673 * instantiated again for automount 673 * via ->readdir(), it needs to be set now since the
674 * attributes will have been updated by
675 * cifs_revalidate_dentry().
674 */ 676 */
675 if (IS_AUTOMOUNT(direntry->d_inode)) 677 if (IS_AUTOMOUNT(direntry->d_inode) &&
676 return 0; 678 !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
679 spin_lock(&direntry->d_lock);
680 direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
681 spin_unlock(&direntry->d_lock);
682 }
683
677 return 1; 684 return 1;
678 } 685 }
679 } 686 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fae765dac934..81725e9286e9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2178,7 +2178,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2178 unsigned long nr_pages, i; 2178 unsigned long nr_pages, i;
2179 size_t copied, len, cur_len; 2179 size_t copied, len, cur_len;
2180 ssize_t total_written = 0; 2180 ssize_t total_written = 0;
2181 loff_t offset = *poffset; 2181 loff_t offset;
2182 struct iov_iter it; 2182 struct iov_iter it;
2183 struct cifsFileInfo *open_file; 2183 struct cifsFileInfo *open_file;
2184 struct cifs_tcon *tcon; 2184 struct cifs_tcon *tcon;
@@ -2200,6 +2200,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2200 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2200 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2201 open_file = file->private_data; 2201 open_file = file->private_data;
2202 tcon = tlink_tcon(open_file->tlink); 2202 tcon = tlink_tcon(open_file->tlink);
2203 offset = *poffset;
2203 2204
2204 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 2205 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2205 pid = open_file->pid; 2206 pid = open_file->pid;
diff --git a/fs/dcache.c b/fs/dcache.c
index b60ddc41d783..b80531c91779 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -141,18 +141,29 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
141 * Compare 2 name strings, return 0 if they match, otherwise non-zero. 141 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
142 * The strings are both count bytes long, and count is non-zero. 142 * The strings are both count bytes long, and count is non-zero.
143 */ 143 */
144#ifdef CONFIG_DCACHE_WORD_ACCESS
145
146#include <asm/word-at-a-time.h>
147/*
148 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
149 * aligned allocation for this particular component. We don't
150 * strictly need the load_unaligned_zeropad() safety, but it
151 * doesn't hurt either.
152 *
153 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
154 * need the careful unaligned handling.
155 */
144static inline int dentry_cmp(const unsigned char *cs, size_t scount, 156static inline int dentry_cmp(const unsigned char *cs, size_t scount,
145 const unsigned char *ct, size_t tcount) 157 const unsigned char *ct, size_t tcount)
146{ 158{
147#ifdef CONFIG_DCACHE_WORD_ACCESS
148 unsigned long a,b,mask; 159 unsigned long a,b,mask;
149 160
150 if (unlikely(scount != tcount)) 161 if (unlikely(scount != tcount))
151 return 1; 162 return 1;
152 163
153 for (;;) { 164 for (;;) {
154 a = *(unsigned long *)cs; 165 a = load_unaligned_zeropad(cs);
155 b = *(unsigned long *)ct; 166 b = load_unaligned_zeropad(ct);
156 if (tcount < sizeof(unsigned long)) 167 if (tcount < sizeof(unsigned long))
157 break; 168 break;
158 if (unlikely(a != b)) 169 if (unlikely(a != b))
@@ -165,7 +176,13 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
165 } 176 }
166 mask = ~(~0ul << tcount*8); 177 mask = ~(~0ul << tcount*8);
167 return unlikely(!!((a ^ b) & mask)); 178 return unlikely(!!((a ^ b) & mask));
179}
180
168#else 181#else
182
183static inline int dentry_cmp(const unsigned char *cs, size_t scount,
184 const unsigned char *ct, size_t tcount)
185{
169 if (scount != tcount) 186 if (scount != tcount)
170 return 1; 187 return 1;
171 188
@@ -177,9 +194,10 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
177 tcount--; 194 tcount--;
178 } while (tcount); 195 } while (tcount);
179 return 0; 196 return 0;
180#endif
181} 197}
182 198
199#endif
200
183static void __d_free(struct rcu_head *head) 201static void __d_free(struct rcu_head *head)
184{ 202{
185 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 203 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
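The dentry_cmp() hunk above switches to word-at-a-time loads with a masked compare of the final, partial word. A minimal user-space sketch of that masked-compare idea follows; it is illustrative only, substitutes plain memcpy() loads for the kernel's load_unaligned_zeropad(), and assumes both buffers stay readable for a full word past the compared bytes.

#include <stddef.h>
#include <string.h>

/* Model of the masked word-at-a-time compare; not the kernel code. */
static int word_cmp(const unsigned char *cs, const unsigned char *ct,
                    size_t tcount)
{
    unsigned long a, b, mask;

    for (;;) {
        memcpy(&a, cs, sizeof(a));   /* stand-in for load_unaligned_zeropad() */
        memcpy(&b, ct, sizeof(b));
        if (tcount < sizeof(unsigned long))
            break;
        if (a != b)
            return 1;
        cs += sizeof(unsigned long);
        ct += sizeof(unsigned long);
        tcount -= sizeof(unsigned long);
        if (!tcount)
            return 0;
    }
    /* Only the low 'tcount' bytes of the last word are significant. */
    mask = ~(~0ul << tcount * 8);
    return !!((a ^ b) & mask);
}

The mask keeps only the bytes belonging to the shorter-than-a-word tail, so trailing garbage beyond the name never affects the result.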
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fa5c07d51dcc..4c58d4a3adc4 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1737,6 +1737,18 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1737 return 1; 1737 return 1;
1738 1738
1739 /* 1739 /*
1740 * Even if the convert is compatible with all granted locks,
1741 * QUECVT forces it behind other locks on the convert queue.
1742 */
1743
1744 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
1745 if (list_empty(&r->res_convertqueue))
1746 return 1;
1747 else
1748 goto out;
1749 }
1750
1751 /*
1740 * The NOORDER flag is set to avoid the standard vms rules on grant 1752 * The NOORDER flag is set to avoid the standard vms rules on grant
1741 * order. 1753 * order.
1742 */ 1754 */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 739b0985b398..c0b3c70ee87a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1663,8 +1663,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1663 if (op == EPOLL_CTL_ADD) { 1663 if (op == EPOLL_CTL_ADD) {
1664 if (is_file_epoll(tfile)) { 1664 if (is_file_epoll(tfile)) {
1665 error = -ELOOP; 1665 error = -ELOOP;
1666 if (ep_loop_check(ep, tfile) != 0) 1666 if (ep_loop_check(ep, tfile) != 0) {
1667 clear_tfile_check_list();
1667 goto error_tgt_fput; 1668 goto error_tgt_fput;
1669 }
1668 } else 1670 } else
1669 list_add(&tfile->f_tfile_llink, &tfile_check_list); 1671 list_add(&tfile->f_tfile_llink, &tfile_check_list);
1670 } 1672 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6da193564e43..e1fb1d5de58e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1597,7 +1597,9 @@ static int parse_options(char *options, struct super_block *sb,
1597 unsigned int *journal_ioprio, 1597 unsigned int *journal_ioprio,
1598 int is_remount) 1598 int is_remount)
1599{ 1599{
1600#ifdef CONFIG_QUOTA
1600 struct ext4_sb_info *sbi = EXT4_SB(sb); 1601 struct ext4_sb_info *sbi = EXT4_SB(sb);
1602#endif
1601 char *p; 1603 char *p;
1602 substring_t args[MAX_OPT_ARGS]; 1604 substring_t args[MAX_OPT_ARGS];
1603 int token; 1605 int token;
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index f8411bd1b805..5f5e70e047dc 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -200,10 +200,11 @@ static int make_mode(const unsigned int lmstate)
200 return -1; 200 return -1;
201} 201}
202 202
203static u32 make_flags(const u32 lkid, const unsigned int gfs_flags, 203static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
204 const int req) 204 const int req)
205{ 205{
206 u32 lkf = DLM_LKF_VALBLK; 206 u32 lkf = DLM_LKF_VALBLK;
207 u32 lkid = gl->gl_lksb.sb_lkid;
207 208
208 if (gfs_flags & LM_FLAG_TRY) 209 if (gfs_flags & LM_FLAG_TRY)
209 lkf |= DLM_LKF_NOQUEUE; 210 lkf |= DLM_LKF_NOQUEUE;
@@ -227,8 +228,11 @@ static u32 make_flags(const u32 lkid, const unsigned int gfs_flags,
227 BUG(); 228 BUG();
228 } 229 }
229 230
230 if (lkid != 0) 231 if (lkid != 0) {
231 lkf |= DLM_LKF_CONVERT; 232 lkf |= DLM_LKF_CONVERT;
233 if (test_bit(GLF_BLOCKING, &gl->gl_flags))
234 lkf |= DLM_LKF_QUECVT;
235 }
232 236
233 return lkf; 237 return lkf;
234} 238}
@@ -250,7 +254,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
250 char strname[GDLM_STRNAME_BYTES] = ""; 254 char strname[GDLM_STRNAME_BYTES] = "";
251 255
252 req = make_mode(req_state); 256 req = make_mode(req_state);
253 lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req); 257 lkf = make_flags(gl, flags, req);
254 gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); 258 gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
255 gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); 259 gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
256 if (gl->gl_lksb.sb_lkid) { 260 if (gl->gl_lksb.sb_lkid) {
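The make_flags() change above sets DLM_LKF_QUECVT on lock conversions only when the glock is marked blocking, which is what the QUECVT branch added to fs/dlm/lock.c earlier in this diff then honours. A compressed model of that flag construction, with invented flag values standing in for the DLM_LKF_* constants:

#define LKF_VALBLK  0x1   /* stand-ins for the DLM_LKF_* flags */
#define LKF_CONVERT 0x2
#define LKF_QUECVT  0x4

static unsigned int build_flags(unsigned int lkid, int blocking)
{
    unsigned int lkf = LKF_VALBLK;

    if (lkid != 0) {            /* lock already exists: this is a convert */
        lkf |= LKF_CONVERT;
        if (blocking)           /* queue behind other pending converts */
            lkf |= LKF_QUECVT;
    }
    return lkf;
}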
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 4dfbfec357e8..ec2a9c23f0c9 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid,
366 err = hfs_brec_find(&src_fd); 366 err = hfs_brec_find(&src_fd);
367 if (err) 367 if (err)
368 goto out; 368 goto out;
369 if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
370 err = -EIO;
371 goto out;
372 }
369 373
370 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, 374 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
371 src_fd.entrylength); 375 src_fd.entrylength);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 88e155f895c6..26b53fb09f68 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
150 filp->f_pos++; 150 filp->f_pos++;
151 /* fall through */ 151 /* fall through */
152 case 1: 152 case 1:
153 if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
154 err = -EIO;
155 goto out;
156 }
157
153 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, 158 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
154 fd.entrylength); 159 fd.entrylength);
155 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { 160 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
@@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
181 err = -EIO; 186 err = -EIO;
182 goto out; 187 goto out;
183 } 188 }
189
190 if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
191 err = -EIO;
192 goto out;
193 }
194
184 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, 195 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
185 fd.entrylength); 196 fd.entrylength);
186 type = be16_to_cpu(entry.type); 197 type = be16_to_cpu(entry.type);
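Both hfsplus hunks above add the same defence: the on-disk entrylength is validated before hfs_bnode_read() copies it into a fixed-size entry on the stack. A minimal sketch of that validate-before-copy pattern, with invented names and an assumed record size:

#include <errno.h>
#include <string.h>

struct catalog_entry { unsigned char raw[520]; };   /* assumed record size */

static int read_entry(struct catalog_entry *out, const void *node,
                      size_t offset, long entrylength)
{
    if (entrylength < 0 || (size_t)entrylength > sizeof(*out))
        return -EIO;            /* corrupt length field: refuse to copy */
    memcpy(out, (const char *)node + offset, (size_t)entrylength);
    return 0;
}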
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 28cf06e4ec84..001ef01d2fe2 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -485,6 +485,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
485 inode->i_fop = &simple_dir_operations; 485 inode->i_fop = &simple_dir_operations;
486 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 486 /* directory inodes start off with i_nlink == 2 (for "." entry) */
487 inc_nlink(inode); 487 inc_nlink(inode);
488 lockdep_annotate_inode_mutex_key(inode);
488 } 489 }
489 return inode; 490 return inode;
490} 491}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 806525a7269c..840f70f50792 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -723,7 +723,7 @@ start_journal_io:
723 if (commit_transaction->t_need_data_flush && 723 if (commit_transaction->t_need_data_flush &&
724 (journal->j_fs_dev != journal->j_dev) && 724 (journal->j_fs_dev != journal->j_dev) &&
725 (journal->j_flags & JBD2_BARRIER)) 725 (journal->j_flags & JBD2_BARRIER))
726 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); 726 blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
727 727
728 /* Done it all: now write the commit record asynchronously. */ 728 /* Done it all: now write the commit record asynchronously. */
729 if (JBD2_HAS_INCOMPAT_FEATURE(journal, 729 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -859,7 +859,7 @@ wait_for_iobuf:
859 if (JBD2_HAS_INCOMPAT_FEATURE(journal, 859 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
860 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && 860 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
861 journal->j_flags & JBD2_BARRIER) { 861 journal->j_flags & JBD2_BARRIER) {
862 blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL); 862 blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
863 } 863 }
864 864
865 if (err) 865 if (err)
diff --git a/fs/namei.c b/fs/namei.c
index 0062dd17eb55..c42791914f82 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1429,7 +1429,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1429 unsigned long hash = 0; 1429 unsigned long hash = 0;
1430 1430
1431 for (;;) { 1431 for (;;) {
1432 a = *(unsigned long *)name; 1432 a = load_unaligned_zeropad(name);
1433 if (len < sizeof(unsigned long)) 1433 if (len < sizeof(unsigned long))
1434 break; 1434 break;
1435 hash += a; 1435 hash += a;
@@ -1459,7 +1459,7 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1459 do { 1459 do {
1460 hash = (hash + a) * 9; 1460 hash = (hash + a) * 9;
1461 len += sizeof(unsigned long); 1461 len += sizeof(unsigned long);
1462 a = *(unsigned long *)(name+len); 1462 a = load_unaligned_zeropad(name+len);
1463 /* Do we have any NUL or '/' bytes in this word? */ 1463 /* Do we have any NUL or '/' bytes in this word? */
1464 mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/')); 1464 mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
1465 } while (!mask); 1465 } while (!mask);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 9c94297bb70e..7f6a23f0244e 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -38,6 +38,8 @@
38#include <linux/buffer_head.h> /* various write calls */ 38#include <linux/buffer_head.h> /* various write calls */
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40 40
41#include "../pnfs.h"
42#include "../internal.h"
41#include "blocklayout.h" 43#include "blocklayout.h"
42 44
43#define NFSDBG_FACILITY NFSDBG_PNFS_LD 45#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -868,7 +870,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
868 * GETDEVICEINFO's maxcount 870 * GETDEVICEINFO's maxcount
869 */ 871 */
870 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 872 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
871 max_pages = max_resp_sz >> PAGE_SHIFT; 873 max_pages = nfs_page_array_len(0, max_resp_sz);
872 dprintk("%s max_resp_sz %u max_pages %d\n", 874 dprintk("%s max_resp_sz %u max_pages %d\n",
873 __func__, max_resp_sz, max_pages); 875 __func__, max_resp_sz, max_pages);
874 876
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index da7b5e4ff9ec..60f7e4ec842c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1729,7 +1729,8 @@ error:
1729 */ 1729 */
1730struct nfs_server *nfs_clone_server(struct nfs_server *source, 1730struct nfs_server *nfs_clone_server(struct nfs_server *source,
1731 struct nfs_fh *fh, 1731 struct nfs_fh *fh,
1732 struct nfs_fattr *fattr) 1732 struct nfs_fattr *fattr,
1733 rpc_authflavor_t flavor)
1733{ 1734{
1734 struct nfs_server *server; 1735 struct nfs_server *server;
1735 struct nfs_fattr *fattr_fsinfo; 1736 struct nfs_fattr *fattr_fsinfo;
@@ -1758,7 +1759,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
1758 1759
1759 error = nfs_init_server_rpcclient(server, 1760 error = nfs_init_server_rpcclient(server,
1760 source->client->cl_timeout, 1761 source->client->cl_timeout,
1761 source->client->cl_auth->au_flavor); 1762 flavor);
1762 if (error < 0) 1763 if (error < 0)
1763 goto out_free_server; 1764 goto out_free_server;
1764 if (!IS_ERR(source->client_acl)) 1765 if (!IS_ERR(source->client_acl))
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4aaf0316d76a..8789210c6905 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1429,7 +1429,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1429 } 1429 }
1430 1430
1431 open_flags = nd->intent.open.flags; 1431 open_flags = nd->intent.open.flags;
1432 attr.ia_valid = 0; 1432 attr.ia_valid = ATTR_OPEN;
1433 1433
1434 ctx = create_nfs_open_context(dentry, open_flags); 1434 ctx = create_nfs_open_context(dentry, open_flags);
1435 res = ERR_CAST(ctx); 1435 res = ERR_CAST(ctx);
@@ -1536,7 +1536,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
1536 if (IS_ERR(ctx)) 1536 if (IS_ERR(ctx))
1537 goto out; 1537 goto out;
1538 1538
1539 attr.ia_valid = 0; 1539 attr.ia_valid = ATTR_OPEN;
1540 if (openflags & O_TRUNC) { 1540 if (openflags & O_TRUNC) {
1541 attr.ia_valid |= ATTR_SIZE; 1541 attr.ia_valid |= ATTR_SIZE;
1542 attr.ia_size = 0; 1542 attr.ia_size = 0;
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b7f348bb618b..ba3019f5934c 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -554,12 +554,16 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
554 struct nfs_client *clp; 554 struct nfs_client *clp;
555 int error = 0; 555 int error = 0;
556 556
557 if (!try_module_get(THIS_MODULE))
558 return 0;
559
557 while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) { 560 while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) {
558 error = __rpc_pipefs_event(clp, event, sb); 561 error = __rpc_pipefs_event(clp, event, sb);
559 nfs_put_client(clp); 562 nfs_put_client(clp);
560 if (error) 563 if (error)
561 break; 564 break;
562 } 565 }
566 module_put(THIS_MODULE);
563 return error; 567 return error;
564} 568}
565 569
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 2476dc69365f..b777bdaba4c5 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -165,7 +165,8 @@ extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
165extern void nfs_free_server(struct nfs_server *server); 165extern void nfs_free_server(struct nfs_server *server);
166extern struct nfs_server *nfs_clone_server(struct nfs_server *, 166extern struct nfs_server *nfs_clone_server(struct nfs_server *,
167 struct nfs_fh *, 167 struct nfs_fh *,
168 struct nfs_fattr *); 168 struct nfs_fattr *,
169 rpc_authflavor_t);
169extern void nfs_mark_client_ready(struct nfs_client *clp, int state); 170extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
170extern int nfs4_check_client_ready(struct nfs_client *clp); 171extern int nfs4_check_client_ready(struct nfs_client *clp);
171extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, 172extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
@@ -186,10 +187,10 @@ static inline void nfs_fs_proc_exit(void)
186 187
187/* nfs4namespace.c */ 188/* nfs4namespace.c */
188#ifdef CONFIG_NFS_V4 189#ifdef CONFIG_NFS_V4
189extern struct vfsmount *nfs_do_refmount(struct dentry *dentry); 190extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
190#else 191#else
191static inline 192static inline
192struct vfsmount *nfs_do_refmount(struct dentry *dentry) 193struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
193{ 194{
194 return ERR_PTR(-ENOENT); 195 return ERR_PTR(-ENOENT);
195} 196}
@@ -234,7 +235,6 @@ extern const u32 nfs41_maxwrite_overhead;
234/* nfs4proc.c */ 235/* nfs4proc.c */
235#ifdef CONFIG_NFS_V4 236#ifdef CONFIG_NFS_V4
236extern struct rpc_procinfo nfs4_procedures[]; 237extern struct rpc_procinfo nfs4_procedures[];
237void nfs_fixup_secinfo_attributes(struct nfs_fattr *, struct nfs_fh *);
238#endif 238#endif
239 239
240extern int nfs4_init_ds_session(struct nfs_client *clp); 240extern int nfs4_init_ds_session(struct nfs_client *clp);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 1807866bb3ab..d51868e5683c 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -148,66 +148,31 @@ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
148 return pseudoflavor; 148 return pseudoflavor;
149} 149}
150 150
151static int nfs_negotiate_security(const struct dentry *parent, 151static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
152 const struct dentry *dentry, 152 struct qstr *name,
153 rpc_authflavor_t *flavor) 153 struct nfs_fh *fh,
154 struct nfs_fattr *fattr)
154{ 155{
155 struct page *page;
156 struct nfs4_secinfo_flavors *flavors;
157 int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
158 int ret = -EPERM;
159
160 secinfo = NFS_PROTO(parent->d_inode)->secinfo;
161 if (secinfo != NULL) {
162 page = alloc_page(GFP_KERNEL);
163 if (!page) {
164 ret = -ENOMEM;
165 goto out;
166 }
167 flavors = page_address(page);
168 ret = secinfo(parent->d_inode, &dentry->d_name, flavors);
169 *flavor = nfs_find_best_sec(flavors);
170 put_page(page);
171 }
172
173out:
174 return ret;
175}
176
177static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
178 struct dentry *dentry, struct path *path,
179 struct nfs_fh *fh, struct nfs_fattr *fattr,
180 rpc_authflavor_t *flavor)
181{
182 struct rpc_clnt *clone;
183 struct rpc_auth *auth;
184 int err; 156 int err;
185 157
186 err = nfs_negotiate_security(parent, path->dentry, flavor); 158 if (NFS_PROTO(dir)->version == 4)
187 if (err < 0) 159 return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
188 goto out; 160
189 clone = rpc_clone_client(server->client); 161 err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
190 auth = rpcauth_create(*flavor, clone); 162 if (err)
191 if (!auth) { 163 return ERR_PTR(err);
192 err = -EIO; 164 return rpc_clone_client(NFS_SERVER(dir)->client);
193 goto out_shutdown;
194 }
195 err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
196 &path->dentry->d_name,
197 fh, fattr);
198out_shutdown:
199 rpc_shutdown_client(clone);
200out:
201 return err;
202} 165}
203#else /* CONFIG_NFS_V4 */ 166#else /* CONFIG_NFS_V4 */
204static inline int nfs_lookup_with_sec(struct nfs_server *server, 167static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
205 struct dentry *parent, struct dentry *dentry, 168 struct qstr *name,
206 struct path *path, struct nfs_fh *fh, 169 struct nfs_fh *fh,
207 struct nfs_fattr *fattr, 170 struct nfs_fattr *fattr)
208 rpc_authflavor_t *flavor)
209{ 171{
210 return -EPERM; 172 int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
173 if (err)
174 return ERR_PTR(err);
175 return rpc_clone_client(NFS_SERVER(dir)->client);
211} 176}
212#endif /* CONFIG_NFS_V4 */ 177#endif /* CONFIG_NFS_V4 */
213 178
@@ -226,12 +191,10 @@ static inline int nfs_lookup_with_sec(struct nfs_server *server,
226struct vfsmount *nfs_d_automount(struct path *path) 191struct vfsmount *nfs_d_automount(struct path *path)
227{ 192{
228 struct vfsmount *mnt; 193 struct vfsmount *mnt;
229 struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
230 struct dentry *parent; 194 struct dentry *parent;
231 struct nfs_fh *fh = NULL; 195 struct nfs_fh *fh = NULL;
232 struct nfs_fattr *fattr = NULL; 196 struct nfs_fattr *fattr = NULL;
233 int err; 197 struct rpc_clnt *client;
234 rpc_authflavor_t flavor = RPC_AUTH_UNIX;
235 198
236 dprintk("--> nfs_d_automount()\n"); 199 dprintk("--> nfs_d_automount()\n");
237 200
@@ -249,21 +212,19 @@ struct vfsmount *nfs_d_automount(struct path *path)
249 212
250 /* Look it up again to get its attributes */ 213 /* Look it up again to get its attributes */
251 parent = dget_parent(path->dentry); 214 parent = dget_parent(path->dentry);
252 err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode, 215 client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
253 &path->dentry->d_name,
254 fh, fattr);
255 if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL)
256 err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor);
257 dput(parent); 216 dput(parent);
258 if (err != 0) { 217 if (IS_ERR(client)) {
259 mnt = ERR_PTR(err); 218 mnt = ERR_CAST(client);
260 goto out; 219 goto out;
261 } 220 }
262 221
263 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) 222 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
264 mnt = nfs_do_refmount(path->dentry); 223 mnt = nfs_do_refmount(client, path->dentry);
265 else 224 else
266 mnt = nfs_do_submount(path->dentry, fh, fattr, flavor); 225 mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
226 rpc_shutdown_client(client);
227
267 if (IS_ERR(mnt)) 228 if (IS_ERR(mnt))
268 goto out; 229 goto out;
269 230
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 97ecc863dd76..8d75021020b3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -59,6 +59,7 @@ struct nfs_unique_id {
59 59
60#define NFS_SEQID_CONFIRMED 1 60#define NFS_SEQID_CONFIRMED 1
61struct nfs_seqid_counter { 61struct nfs_seqid_counter {
62 ktime_t create_time;
62 int owner_id; 63 int owner_id;
63 int flags; 64 int flags;
64 u32 counter; 65 u32 counter;
@@ -204,6 +205,9 @@ struct nfs4_state_maintenance_ops {
204extern const struct dentry_operations nfs4_dentry_operations; 205extern const struct dentry_operations nfs4_dentry_operations;
205extern const struct inode_operations nfs4_dir_inode_operations; 206extern const struct inode_operations nfs4_dir_inode_operations;
206 207
208/* nfs4namespace.c */
209struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
210
207/* nfs4proc.c */ 211/* nfs4proc.c */
208extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 212extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
209extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 213extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -212,8 +216,11 @@ extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
212extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 216extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
213extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); 217extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
214extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); 218extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
215extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, 219extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
216 struct nfs4_fs_locations *fs_locations, struct page *page); 220 struct nfs4_fs_locations *, struct page *);
221extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
222 struct nfs_fh *, struct nfs_fattr *);
223extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
217extern int nfs4_release_lockowner(struct nfs4_lock_state *); 224extern int nfs4_release_lockowner(struct nfs4_lock_state *);
218extern const struct xattr_handler *nfs4_xattr_handlers[]; 225extern const struct xattr_handler *nfs4_xattr_handlers[];
219 226
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index a866bbd2890a..c9cff9adb2d3 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -699,7 +699,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
699 * GETDEVICEINFO's maxcount 699 * GETDEVICEINFO's maxcount
700 */ 700 */
701 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 701 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
702 max_pages = max_resp_sz >> PAGE_SHIFT; 702 max_pages = nfs_page_array_len(0, max_resp_sz);
703 dprintk("%s inode %p max_resp_sz %u max_pages %d\n", 703 dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
704 __func__, inode, max_resp_sz, max_pages); 704 __func__, inode, max_resp_sz, max_pages);
705 705
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 9c8eca315f43..a7f3dedc4ec7 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -52,6 +52,30 @@ Elong:
52} 52}
53 53
54/* 54/*
55 * return the path component of "<server>:<path>"
56 * nfspath - the "<server>:<path>" string
57 * end - one past the last char that could contain "<server>:"
58 * returns NULL on failure
59 */
60static char *nfs_path_component(const char *nfspath, const char *end)
61{
62 char *p;
63
64 if (*nfspath == '[') {
65 /* parse [] escaped IPv6 addrs */
66 p = strchr(nfspath, ']');
67 if (p != NULL && ++p < end && *p == ':')
68 return p + 1;
69 } else {
70 /* otherwise split on first colon */
71 p = strchr(nfspath, ':');
72 if (p != NULL && p < end)
73 return p + 1;
74 }
75 return NULL;
76}
77
78/*
55 * Determine the mount path as a string 79 * Determine the mount path as a string
56 */ 80 */
57static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen) 81static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
@@ -59,9 +83,9 @@ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
59 char *limit; 83 char *limit;
60 char *path = nfs_path(&limit, dentry, buffer, buflen); 84 char *path = nfs_path(&limit, dentry, buffer, buflen);
61 if (!IS_ERR(path)) { 85 if (!IS_ERR(path)) {
62 char *colon = strchr(path, ':'); 86 char *path_component = nfs_path_component(path, limit);
63 if (colon && colon < limit) 87 if (path_component)
64 path = colon + 1; 88 return path_component;
65 } 89 }
66 return path; 90 return path;
67} 91}
@@ -108,6 +132,58 @@ static size_t nfs_parse_server_name(char *string, size_t len,
108 return ret; 132 return ret;
109} 133}
110 134
135static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
136{
137 struct page *page;
138 struct nfs4_secinfo_flavors *flavors;
139 rpc_authflavor_t flavor;
140 int err;
141
142 page = alloc_page(GFP_KERNEL);
143 if (!page)
144 return -ENOMEM;
145 flavors = page_address(page);
146
147 err = nfs4_proc_secinfo(inode, name, flavors);
148 if (err < 0) {
149 flavor = err;
150 goto out;
151 }
152
153 flavor = nfs_find_best_sec(flavors);
154
155out:
156 put_page(page);
157 return flavor;
158}
159
160/*
161 * Please call rpc_shutdown_client() when you are done with this client.
162 */
163struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
164 struct qstr *name)
165{
166 struct rpc_clnt *clone;
167 struct rpc_auth *auth;
168 rpc_authflavor_t flavor;
169
170 flavor = nfs4_negotiate_security(inode, name);
171 if (flavor < 0)
172 return ERR_PTR(flavor);
173
174 clone = rpc_clone_client(clnt);
175 if (IS_ERR(clone))
176 return clone;
177
178 auth = rpcauth_create(flavor, clone);
179 if (!auth) {
180 rpc_shutdown_client(clone);
181 clone = ERR_PTR(-EIO);
182 }
183
184 return clone;
185}
186
111static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, 187static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
112 char *page, char *page2, 188 char *page, char *page2,
113 const struct nfs4_fs_location *location) 189 const struct nfs4_fs_location *location)
@@ -224,7 +300,7 @@ out:
224 * @dentry - dentry of referral 300 * @dentry - dentry of referral
225 * 301 *
226 */ 302 */
227struct vfsmount *nfs_do_refmount(struct dentry *dentry) 303struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
228{ 304{
229 struct vfsmount *mnt = ERR_PTR(-ENOMEM); 305 struct vfsmount *mnt = ERR_PTR(-ENOMEM);
230 struct dentry *parent; 306 struct dentry *parent;
@@ -250,7 +326,7 @@ struct vfsmount *nfs_do_refmount(struct dentry *dentry)
250 dprintk("%s: getting locations for %s/%s\n", 326 dprintk("%s: getting locations for %s/%s\n",
251 __func__, parent->d_name.name, dentry->d_name.name); 327 __func__, parent->d_name.name, dentry->d_name.name);
252 328
253 err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page); 329 err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
254 dput(parent); 330 dput(parent);
255 if (err != 0 || 331 if (err != 0 ||
256 fs_locations->nlocations <= 0 || 332 fs_locations->nlocations <= 0 ||
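For reference, here is a small user-space harness exercising the same splitting logic as the nfs_path_component() helper added above, including the []-escaped IPv6 case. It is illustrative only and simply uses the end of the string as the 'end' bound.

#include <stdio.h>
#include <string.h>

static const char *path_component(const char *nfspath, const char *end)
{
    const char *p;

    if (*nfspath == '[') {                  /* "[addr]:<path>" form */
        p = strchr(nfspath, ']');
        if (p != NULL && ++p < end && *p == ':')
            return p + 1;
    } else {                                /* split on the first ':' */
        p = strchr(nfspath, ':');
        if (p != NULL && p < end)
            return p + 1;
    }
    return NULL;
}

int main(void)
{
    const char *a = "server.example.com:/export/home";
    const char *b = "[fe80::1]:/export/home";

    printf("%s\n", path_component(a, a + strlen(a)));   /* /export/home */
    printf("%s\n", path_component(b, b + strlen(b)));   /* /export/home */
    return 0;
}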
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f82bde005a82..99650aaf8937 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -838,7 +838,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
838 p->o_arg.open_flags = flags; 838 p->o_arg.open_flags = flags;
839 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 839 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
840 p->o_arg.clientid = server->nfs_client->cl_clientid; 840 p->o_arg.clientid = server->nfs_client->cl_clientid;
841 p->o_arg.id = sp->so_seqid.owner_id; 841 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
842 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
842 p->o_arg.name = &dentry->d_name; 843 p->o_arg.name = &dentry->d_name;
843 p->o_arg.server = server; 844 p->o_arg.server = server;
844 p->o_arg.bitmask = server->attr_bitmask; 845 p->o_arg.bitmask = server->attr_bitmask;
@@ -1466,8 +1467,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1466 goto unlock_no_action; 1467 goto unlock_no_action;
1467 rcu_read_unlock(); 1468 rcu_read_unlock();
1468 } 1469 }
1469 /* Update sequence id. */ 1470 /* Update client id. */
1470 data->o_arg.id = sp->so_seqid.owner_id;
1471 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; 1471 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1472 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { 1472 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1473 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1473 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
@@ -1954,10 +1954,19 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1954 }; 1954 };
1955 int err; 1955 int err;
1956 do { 1956 do {
1957 err = nfs4_handle_exception(server, 1957 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
1958 _nfs4_do_setattr(inode, cred, fattr, sattr, state), 1958 switch (err) {
1959 &exception); 1959 case -NFS4ERR_OPENMODE:
1960 if (state && !(state->state & FMODE_WRITE)) {
1961 err = -EBADF;
1962 if (sattr->ia_valid & ATTR_OPEN)
1963 err = -EACCES;
1964 goto out;
1965 }
1966 }
1967 err = nfs4_handle_exception(server, err, &exception);
1960 } while (exception.retry); 1968 } while (exception.retry);
1969out:
1961 return err; 1970 return err;
1962} 1971}
1963 1972
@@ -2368,8 +2377,9 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2368 * Note that we'll actually follow the referral later when 2377 * Note that we'll actually follow the referral later when
2369 * we detect fsid mismatch in inode revalidation 2378 * we detect fsid mismatch in inode revalidation
2370 */ 2379 */
2371static int nfs4_get_referral(struct inode *dir, const struct qstr *name, 2380static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2372 struct nfs_fattr *fattr, struct nfs_fh *fhandle) 2381 const struct qstr *name, struct nfs_fattr *fattr,
2382 struct nfs_fh *fhandle)
2373{ 2383{
2374 int status = -ENOMEM; 2384 int status = -ENOMEM;
2375 struct page *page = NULL; 2385 struct page *page = NULL;
@@ -2382,7 +2392,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
2382 if (locations == NULL) 2392 if (locations == NULL)
2383 goto out; 2393 goto out;
2384 2394
2385 status = nfs4_proc_fs_locations(dir, name, locations, page); 2395 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2386 if (status != 0) 2396 if (status != 0)
2387 goto out; 2397 goto out;
2388 /* Make sure server returned a different fsid for the referral */ 2398 /* Make sure server returned a different fsid for the referral */
@@ -2519,39 +2529,84 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2519 return status; 2529 return status;
2520} 2530}
2521 2531
2522void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) 2532static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2523{ 2533{
2524 memset(fh, 0, sizeof(struct nfs_fh));
2525 fattr->fsid.major = 1;
2526 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2534 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2527 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; 2535 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2528 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2536 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2529 fattr->nlink = 2; 2537 fattr->nlink = 2;
2530} 2538}
2531 2539
2532static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, 2540static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2533 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2541 struct qstr *name, struct nfs_fh *fhandle,
2542 struct nfs_fattr *fattr)
2534{ 2543{
2535 struct nfs4_exception exception = { }; 2544 struct nfs4_exception exception = { };
2545 struct rpc_clnt *client = *clnt;
2536 int err; 2546 int err;
2537 do { 2547 do {
2538 int status; 2548 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2539 2549 switch (err) {
2540 status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
2541 switch (status) {
2542 case -NFS4ERR_BADNAME: 2550 case -NFS4ERR_BADNAME:
2543 return -ENOENT; 2551 err = -ENOENT;
2552 goto out;
2544 case -NFS4ERR_MOVED: 2553 case -NFS4ERR_MOVED:
2545 return nfs4_get_referral(dir, name, fattr, fhandle); 2554 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2555 goto out;
2546 case -NFS4ERR_WRONGSEC: 2556 case -NFS4ERR_WRONGSEC:
2547 nfs_fixup_secinfo_attributes(fattr, fhandle); 2557 err = -EPERM;
2558 if (client != *clnt)
2559 goto out;
2560
2561 client = nfs4_create_sec_client(client, dir, name);
2562 if (IS_ERR(client))
2563 return PTR_ERR(client);
2564
2565 exception.retry = 1;
2566 break;
2567 default:
2568 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2548 } 2569 }
2549 err = nfs4_handle_exception(NFS_SERVER(dir),
2550 status, &exception);
2551 } while (exception.retry); 2570 } while (exception.retry);
2571
2572out:
2573 if (err == 0)
2574 *clnt = client;
2575 else if (client != *clnt)
2576 rpc_shutdown_client(client);
2577
2552 return err; 2578 return err;
2553} 2579}
2554 2580
2581static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
2582 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2583{
2584 int status;
2585 struct rpc_clnt *client = NFS_CLIENT(dir);
2586
2587 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2588 if (client != NFS_CLIENT(dir)) {
2589 rpc_shutdown_client(client);
2590 nfs_fixup_secinfo_attributes(fattr);
2591 }
2592 return status;
2593}
2594
2595struct rpc_clnt *
2596nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2597 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2598{
2599 int status;
2600 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2601
2602 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2603 if (status < 0) {
2604 rpc_shutdown_client(client);
2605 return ERR_PTR(status);
2606 }
2607 return client;
2608}
2609
2555static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2610static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2556{ 2611{
2557 struct nfs_server *server = NFS_SERVER(inode); 2612 struct nfs_server *server = NFS_SERVER(inode);
@@ -3619,16 +3674,16 @@ out:
3619 return ret; 3674 return ret;
3620} 3675}
3621 3676
3622static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len) 3677static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3623{ 3678{
3624 struct nfs4_cached_acl *acl; 3679 struct nfs4_cached_acl *acl;
3625 3680
3626 if (buf && acl_len <= PAGE_SIZE) { 3681 if (pages && acl_len <= PAGE_SIZE) {
3627 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); 3682 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3628 if (acl == NULL) 3683 if (acl == NULL)
3629 goto out; 3684 goto out;
3630 acl->cached = 1; 3685 acl->cached = 1;
3631 memcpy(acl->data, buf, acl_len); 3686 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3632 } else { 3687 } else {
3633 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3688 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3634 if (acl == NULL) 3689 if (acl == NULL)
@@ -3661,7 +3716,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3661 struct nfs_getaclres res = { 3716 struct nfs_getaclres res = {
3662 .acl_len = buflen, 3717 .acl_len = buflen,
3663 }; 3718 };
3664 void *resp_buf;
3665 struct rpc_message msg = { 3719 struct rpc_message msg = {
3666 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 3720 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3667 .rpc_argp = &args, 3721 .rpc_argp = &args,
@@ -3675,24 +3729,27 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3675 if (npages == 0) 3729 if (npages == 0)
3676 npages = 1; 3730 npages = 1;
3677 3731
3732 /* Add an extra page to handle the bitmap returned */
3733 npages++;
3734
3678 for (i = 0; i < npages; i++) { 3735 for (i = 0; i < npages; i++) {
3679 pages[i] = alloc_page(GFP_KERNEL); 3736 pages[i] = alloc_page(GFP_KERNEL);
3680 if (!pages[i]) 3737 if (!pages[i])
3681 goto out_free; 3738 goto out_free;
3682 } 3739 }
3683 if (npages > 1) { 3740
3684 /* for decoding across pages */ 3741 /* for decoding across pages */
3685 res.acl_scratch = alloc_page(GFP_KERNEL); 3742 res.acl_scratch = alloc_page(GFP_KERNEL);
3686 if (!res.acl_scratch) 3743 if (!res.acl_scratch)
3687 goto out_free; 3744 goto out_free;
3688 } 3745
3689 args.acl_len = npages * PAGE_SIZE; 3746 args.acl_len = npages * PAGE_SIZE;
3690 args.acl_pgbase = 0; 3747 args.acl_pgbase = 0;
3748
3691 /* Let decode_getfacl know not to fail if the ACL data is larger than 3749 /* Let decode_getfacl know not to fail if the ACL data is larger than
3692 * the page we send as a guess */ 3750 * the page we send as a guess */
3693 if (buf == NULL) 3751 if (buf == NULL)
3694 res.acl_flags |= NFS4_ACL_LEN_REQUEST; 3752 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3695 resp_buf = page_address(pages[0]);
3696 3753
3697 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3754 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3698 __func__, buf, buflen, npages, args.acl_len); 3755 __func__, buf, buflen, npages, args.acl_len);
@@ -3703,9 +3760,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3703 3760
3704 acl_len = res.acl_len - res.acl_data_offset; 3761 acl_len = res.acl_len - res.acl_data_offset;
3705 if (acl_len > args.acl_len) 3762 if (acl_len > args.acl_len)
3706 nfs4_write_cached_acl(inode, NULL, acl_len); 3763 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3707 else 3764 else
3708 nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset, 3765 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3709 acl_len); 3766 acl_len);
3710 if (buf) { 3767 if (buf) {
3711 ret = -ERANGE; 3768 ret = -ERANGE;
@@ -4558,7 +4615,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
4558static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4615static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4559{ 4616{
4560 struct nfs_server *server = NFS_SERVER(state->inode); 4617 struct nfs_server *server = NFS_SERVER(state->inode);
4561 struct nfs4_exception exception = { }; 4618 struct nfs4_exception exception = {
4619 .inode = state->inode,
4620 };
4562 int err; 4621 int err;
4563 4622
4564 do { 4623 do {
@@ -4576,7 +4635,9 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
4576static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 4635static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4577{ 4636{
4578 struct nfs_server *server = NFS_SERVER(state->inode); 4637 struct nfs_server *server = NFS_SERVER(state->inode);
4579 struct nfs4_exception exception = { }; 4638 struct nfs4_exception exception = {
4639 .inode = state->inode,
4640 };
4580 int err; 4641 int err;
4581 4642
4582 err = nfs4_set_lock_state(state, request); 4643 err = nfs4_set_lock_state(state, request);
@@ -4676,6 +4737,7 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
4676{ 4737{
4677 struct nfs4_exception exception = { 4738 struct nfs4_exception exception = {
4678 .state = state, 4739 .state = state,
4740 .inode = state->inode,
4679 }; 4741 };
4680 int err; 4742 int err;
4681 4743
@@ -4721,6 +4783,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4721 4783
4722 if (state == NULL) 4784 if (state == NULL)
4723 return -ENOLCK; 4785 return -ENOLCK;
4786 /*
4787 * Don't rely on the VFS having checked the file open mode,
4788 * since it won't do this for flock() locks.
4789 */
4790 switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4791 case F_RDLCK:
4792 if (!(filp->f_mode & FMODE_READ))
4793 return -EBADF;
4794 break;
4795 case F_WRLCK:
4796 if (!(filp->f_mode & FMODE_WRITE))
4797 return -EBADF;
4798 }
4799
4724 do { 4800 do {
4725 status = nfs4_proc_setlk(state, cmd, request); 4801 status = nfs4_proc_setlk(state, cmd, request);
4726 if ((status != -EAGAIN) || IS_SETLK(cmd)) 4802 if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4891,8 +4967,10 @@ static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4891 fattr->nlink = 2; 4967 fattr->nlink = 2;
4892} 4968}
4893 4969
4894int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, 4970static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
4895 struct nfs4_fs_locations *fs_locations, struct page *page) 4971 const struct qstr *name,
4972 struct nfs4_fs_locations *fs_locations,
4973 struct page *page)
4896{ 4974{
4897 struct nfs_server *server = NFS_SERVER(dir); 4975 struct nfs_server *server = NFS_SERVER(dir);
4898 u32 bitmask[2] = { 4976 u32 bitmask[2] = {
@@ -4926,11 +5004,26 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4926 nfs_fattr_init(&fs_locations->fattr); 5004 nfs_fattr_init(&fs_locations->fattr);
4927 fs_locations->server = server; 5005 fs_locations->server = server;
4928 fs_locations->nlocations = 0; 5006 fs_locations->nlocations = 0;
4929 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5007 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
4930 dprintk("%s: returned status = %d\n", __func__, status); 5008 dprintk("%s: returned status = %d\n", __func__, status);
4931 return status; 5009 return status;
4932} 5010}
4933 5011
5012int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5013 const struct qstr *name,
5014 struct nfs4_fs_locations *fs_locations,
5015 struct page *page)
5016{
5017 struct nfs4_exception exception = { };
5018 int err;
5019 do {
5020 err = nfs4_handle_exception(NFS_SERVER(dir),
5021 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5022 &exception);
5023 } while (exception.retry);
5024 return err;
5025}
5026
4934static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5027static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
4935{ 5028{
4936 int status; 5029 int status;
@@ -4953,8 +5046,8 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
4953 return status; 5046 return status;
4954} 5047}
4955 5048
4956static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5049int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
4957 struct nfs4_secinfo_flavors *flavors) 5050 struct nfs4_secinfo_flavors *flavors)
4958{ 5051{
4959 struct nfs4_exception exception = { }; 5052 struct nfs4_exception exception = { };
4960 int err; 5053 int err;
@@ -5029,10 +5122,9 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5029 nfs4_construct_boot_verifier(clp, &verifier); 5122 nfs4_construct_boot_verifier(clp, &verifier);
5030 5123
5031 args.id_len = scnprintf(args.id, sizeof(args.id), 5124 args.id_len = scnprintf(args.id, sizeof(args.id),
5032 "%s/%s.%s/%u", 5125 "%s/%s/%u",
5033 clp->cl_ipaddr, 5126 clp->cl_ipaddr,
5034 init_utsname()->nodename, 5127 clp->cl_rpcclient->cl_nodename,
5035 init_utsname()->domainname,
5036 clp->cl_rpcclient->cl_auth->au_flavor); 5128 clp->cl_rpcclient->cl_auth->au_flavor);
5037 5129
5038 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); 5130 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
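Among the nfs4proc.c changes above, nfs4_proc_lock() now checks the file's open mode itself because the VFS skips that check for flock()-style locks. A stripped-down model of the check, with the FMODE_* bits replaced by invented stand-ins:

#include <errno.h>
#include <fcntl.h>

#define MODE_READ  0x1      /* stand-in for FMODE_READ */
#define MODE_WRITE 0x2      /* stand-in for FMODE_WRITE */

static int check_lock_mode(unsigned int f_mode, int fl_type)
{
    switch (fl_type) {
    case F_RDLCK:
        return (f_mode & MODE_READ) ? 0 : -EBADF;
    case F_WRLCK:
        return (f_mode & MODE_WRITE) ? 0 : -EBADF;
    default:                /* F_UNLCK is always permitted */
        return 0;
    }
}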
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0f43414eb25a..7f0fcfc1fe9d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -393,6 +393,7 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
393static void 393static void
394nfs4_init_seqid_counter(struct nfs_seqid_counter *sc) 394nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
395{ 395{
396 sc->create_time = ktime_get();
396 sc->flags = 0; 397 sc->flags = 0;
397 sc->counter = 0; 398 sc->counter = 0;
398 spin_lock_init(&sc->lock); 399 spin_lock_init(&sc->lock);
@@ -434,13 +435,17 @@ nfs4_alloc_state_owner(struct nfs_server *server,
434static void 435static void
435nfs4_drop_state_owner(struct nfs4_state_owner *sp) 436nfs4_drop_state_owner(struct nfs4_state_owner *sp)
436{ 437{
437 if (!RB_EMPTY_NODE(&sp->so_server_node)) { 438 struct rb_node *rb_node = &sp->so_server_node;
439
440 if (!RB_EMPTY_NODE(rb_node)) {
438 struct nfs_server *server = sp->so_server; 441 struct nfs_server *server = sp->so_server;
439 struct nfs_client *clp = server->nfs_client; 442 struct nfs_client *clp = server->nfs_client;
440 443
441 spin_lock(&clp->cl_lock); 444 spin_lock(&clp->cl_lock);
442 rb_erase(&sp->so_server_node, &server->state_owners); 445 if (!RB_EMPTY_NODE(rb_node)) {
443 RB_CLEAR_NODE(&sp->so_server_node); 446 rb_erase(rb_node, &server->state_owners);
447 RB_CLEAR_NODE(rb_node);
448 }
444 spin_unlock(&clp->cl_lock); 449 spin_unlock(&clp->cl_lock);
445 } 450 }
446} 451}
@@ -516,6 +521,14 @@ out:
516/** 521/**
517 * nfs4_put_state_owner - Release a nfs4_state_owner 522 * nfs4_put_state_owner - Release a nfs4_state_owner
518 * @sp: state owner data to release 523 * @sp: state owner data to release
524 *
525 * Note that we keep released state owners on an LRU
526 * list.
527 * This caches valid state owners so that they can be
528 * reused, to avoid the OPEN_CONFIRM on minor version 0.
529 * It also pins the uniquifier of dropped state owners for
530 * a while, to ensure that those state owner names are
531 * never reused.
519 */ 532 */
520void nfs4_put_state_owner(struct nfs4_state_owner *sp) 533void nfs4_put_state_owner(struct nfs4_state_owner *sp)
521{ 534{
@@ -525,15 +538,9 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
525 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 538 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
526 return; 539 return;
527 540
528 if (!RB_EMPTY_NODE(&sp->so_server_node)) { 541 sp->so_expires = jiffies;
529 sp->so_expires = jiffies; 542 list_add_tail(&sp->so_lru, &server->state_owners_lru);
530 list_add_tail(&sp->so_lru, &server->state_owners_lru); 543 spin_unlock(&clp->cl_lock);
531 spin_unlock(&clp->cl_lock);
532 } else {
533 nfs4_remove_state_owner_locked(sp);
534 spin_unlock(&clp->cl_lock);
535 nfs4_free_state_owner(sp);
536 }
537} 544}
538 545
539/** 546/**
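The nfs4_put_state_owner() hunk above stops freeing a state owner the moment its refcount drops; every release now timestamps the owner and parks it on the per-server LRU so that owner names are not recycled immediately. A rough user-space model of that release path, with invented types and a plain singly-linked tail append:

#include <stddef.h>
#include <time.h>

struct owner {
    struct owner *lru_next;
    time_t        expires;      /* plays the role of so_expires/jiffies */
    int           refcount;
};

struct server {
    struct owner *lru_head;
    struct owner *lru_tail;
};

static void put_owner(struct server *srv, struct owner *sp)
{
    if (--sp->refcount > 0)
        return;

    sp->expires = time(NULL);
    sp->lru_next = NULL;        /* list_add_tail(&sp->so_lru, ...) analogue */
    if (srv->lru_tail)
        srv->lru_tail->lru_next = sp;
    else
        srv->lru_head = sp;
    srv->lru_tail = sp;
}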
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c74fdb114b48..c54aae364bee 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -74,7 +74,7 @@ static int nfs4_stat_to_errno(int);
74/* lock,open owner id: 74/* lock,open owner id:
75 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 75 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
76 */ 76 */
77#define open_owner_id_maxsz (1 + 1 + 4) 77#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
78#define lock_owner_id_maxsz (1 + 1 + 4) 78#define lock_owner_id_maxsz (1 + 1 + 4)
79#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 79#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
80#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 80#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
@@ -1340,12 +1340,13 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
1340 */ 1340 */
1341 encode_nfs4_seqid(xdr, arg->seqid); 1341 encode_nfs4_seqid(xdr, arg->seqid);
1342 encode_share_access(xdr, arg->fmode); 1342 encode_share_access(xdr, arg->fmode);
1343 p = reserve_space(xdr, 32); 1343 p = reserve_space(xdr, 36);
1344 p = xdr_encode_hyper(p, arg->clientid); 1344 p = xdr_encode_hyper(p, arg->clientid);
1345 *p++ = cpu_to_be32(20); 1345 *p++ = cpu_to_be32(24);
1346 p = xdr_encode_opaque_fixed(p, "open id:", 8); 1346 p = xdr_encode_opaque_fixed(p, "open id:", 8);
1347 *p++ = cpu_to_be32(arg->server->s_dev); 1347 *p++ = cpu_to_be32(arg->server->s_dev);
1348 xdr_encode_hyper(p, arg->id); 1348 *p++ = cpu_to_be32(arg->id.uniquifier);
1349 xdr_encode_hyper(p, arg->id.create_time);
1349} 1350}
1350 1351
1351static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) 1352static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
@@ -4257,8 +4258,6 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4257 status = decode_attr_error(xdr, bitmap, &err); 4258 status = decode_attr_error(xdr, bitmap, &err);
4258 if (status < 0) 4259 if (status < 0)
4259 goto xdr_error; 4260 goto xdr_error;
4260 if (err == -NFS4ERR_WRONGSEC)
4261 nfs_fixup_secinfo_attributes(fattr, fh);
4262 4261
4263 status = decode_attr_filehandle(xdr, bitmap, fh); 4262 status = decode_attr_filehandle(xdr, bitmap, fh);
4264 if (status < 0) 4263 if (status < 0)
@@ -4901,11 +4900,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
4901 bitmap[3] = {0}; 4900 bitmap[3] = {0};
4902 struct kvec *iov = req->rq_rcv_buf.head; 4901 struct kvec *iov = req->rq_rcv_buf.head;
4903 int status; 4902 int status;
4903 size_t page_len = xdr->buf->page_len;
4904 4904
4905 res->acl_len = 0; 4905 res->acl_len = 0;
4906 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) 4906 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
4907 goto out; 4907 goto out;
4908
4908 bm_p = xdr->p; 4909 bm_p = xdr->p;
4910 res->acl_data_offset = be32_to_cpup(bm_p) + 2;
4911 res->acl_data_offset <<= 2;
4912 /* Check if the acl data starts beyond the allocated buffer */
4913 if (res->acl_data_offset > page_len)
4914 return -ERANGE;
4915
4909 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) 4916 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
4910 goto out; 4917 goto out;
4911 if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) 4918 if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4915,28 +4922,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
4915 return -EIO; 4922 return -EIO;
4916 if (likely(bitmap[0] & FATTR4_WORD0_ACL)) { 4923 if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
4917 size_t hdrlen; 4924 size_t hdrlen;
4918 u32 recvd;
4919 4925
4920 /* The bitmap (xdr len + bitmaps) and the attr xdr len words 4926 /* The bitmap (xdr len + bitmaps) and the attr xdr len words
4921 * are stored with the acl data to handle the problem of 4927 * are stored with the acl data to handle the problem of
4922 * variable length bitmaps.*/ 4928 * variable length bitmaps.*/
4923 xdr->p = bm_p; 4929 xdr->p = bm_p;
4924 res->acl_data_offset = be32_to_cpup(bm_p) + 2;
4925 res->acl_data_offset <<= 2;
4926 4930
4927 /* We ignore &savep and don't do consistency checks on 4931 /* We ignore &savep and don't do consistency checks on
4928 * the attr length. Let userspace figure it out.... */ 4932 * the attr length. Let userspace figure it out.... */
4929 hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base; 4933 hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
4930 attrlen += res->acl_data_offset; 4934 attrlen += res->acl_data_offset;
4931 recvd = req->rq_rcv_buf.len - hdrlen; 4935 if (attrlen > page_len) {
4932 if (attrlen > recvd) {
4933 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { 4936 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
4934 /* getxattr interface called with a NULL buf */ 4937 /* getxattr interface called with a NULL buf */
4935 res->acl_len = attrlen; 4938 res->acl_len = attrlen;
4936 goto out; 4939 goto out;
4937 } 4940 }
4938 dprintk("NFS: acl reply: attrlen %u > recvd %u\n", 4941 dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
4939 attrlen, recvd); 4942 attrlen, page_len);
4940 return -EINVAL; 4943 return -EINVAL;
4941 } 4944 }
4942 xdr_read_pages(xdr, attrlen); 4945 xdr_read_pages(xdr, attrlen);
@@ -5089,16 +5092,13 @@ out_err:
5089 return -EINVAL; 5092 return -EINVAL;
5090} 5093}
5091 5094
5092static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) 5095static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
5093{ 5096{
5094 struct nfs4_secinfo_flavor *sec_flavor; 5097 struct nfs4_secinfo_flavor *sec_flavor;
5095 int status; 5098 int status;
5096 __be32 *p; 5099 __be32 *p;
5097 int i, num_flavors; 5100 int i, num_flavors;
5098 5101
5099 status = decode_op_hdr(xdr, OP_SECINFO);
5100 if (status)
5101 goto out;
5102 p = xdr_inline_decode(xdr, 4); 5102 p = xdr_inline_decode(xdr, 4);
5103 if (unlikely(!p)) 5103 if (unlikely(!p))
5104 goto out_overflow; 5104 goto out_overflow;
@@ -5124,6 +5124,7 @@ static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
5124 res->flavors->num_flavors++; 5124 res->flavors->num_flavors++;
5125 } 5125 }
5126 5126
5127 status = 0;
5127out: 5128out:
5128 return status; 5129 return status;
5129out_overflow: 5130out_overflow:
@@ -5131,7 +5132,23 @@ out_overflow:
5131 return -EIO; 5132 return -EIO;
5132} 5133}
5133 5134
5135static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
5136{
5137 int status = decode_op_hdr(xdr, OP_SECINFO);
5138 if (status)
5139 return status;
5140 return decode_secinfo_common(xdr, res);
5141}
5142
5134#if defined(CONFIG_NFS_V4_1) 5143#if defined(CONFIG_NFS_V4_1)
5144static int decode_secinfo_no_name(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
5145{
5146 int status = decode_op_hdr(xdr, OP_SECINFO_NO_NAME);
5147 if (status)
5148 return status;
5149 return decode_secinfo_common(xdr, res);
5150}
5151
5135static int decode_exchange_id(struct xdr_stream *xdr, 5152static int decode_exchange_id(struct xdr_stream *xdr,
5136 struct nfs41_exchange_id_res *res) 5153 struct nfs41_exchange_id_res *res)
5137{ 5154{
@@ -6816,7 +6833,7 @@ static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
6816 status = decode_putrootfh(xdr); 6833 status = decode_putrootfh(xdr);
6817 if (status) 6834 if (status)
6818 goto out; 6835 goto out;
6819 status = decode_secinfo(xdr, res); 6836 status = decode_secinfo_no_name(xdr, res);
6820out: 6837out:
6821 return status; 6838 return status;
6822} 6839}
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 8d45f1c318ce..595c5fc21a19 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -604,7 +604,6 @@ int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
604{ 604{
605 struct objlayout_deviceinfo *odi; 605 struct objlayout_deviceinfo *odi;
606 struct pnfs_device pd; 606 struct pnfs_device pd;
607 struct super_block *sb;
608 struct page *page, **pages; 607 struct page *page, **pages;
609 u32 *p; 608 u32 *p;
610 int err; 609 int err;
@@ -623,7 +622,6 @@ int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
623 pd.pglen = PAGE_SIZE; 622 pd.pglen = PAGE_SIZE;
624 pd.mincount = 0; 623 pd.mincount = 0;
625 624
626 sb = pnfslay->plh_inode->i_sb;
627 err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd); 625 err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
628 dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err); 626 dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
629 if (err) 627 if (err)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b5d451586943..38512bcd2e98 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -587,7 +587,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
587 587
588 /* allocate pages for xdr post processing */ 588 /* allocate pages for xdr post processing */
589 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 589 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
590 max_pages = max_resp_sz >> PAGE_SHIFT; 590 max_pages = nfs_page_array_len(0, max_resp_sz);
591 591
592 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); 592 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
593 if (!pages) 593 if (!pages)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 9a0e8ef4a409..0a4be28c2ea3 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -322,7 +322,7 @@ out_bad:
322 while (!list_empty(res)) { 322 while (!list_empty(res)) {
323 data = list_entry(res->next, struct nfs_read_data, list); 323 data = list_entry(res->next, struct nfs_read_data, list);
324 list_del(&data->list); 324 list_del(&data->list);
325 nfs_readdata_free(data); 325 nfs_readdata_release(data);
326 } 326 }
327 nfs_readpage_release(req); 327 nfs_readpage_release(req);
328 return -ENOMEM; 328 return -ENOMEM;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 37412f706b32..4ac7fca7e4bf 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2428,7 +2428,7 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
2428 dprintk("--> nfs_xdev_mount()\n"); 2428 dprintk("--> nfs_xdev_mount()\n");
2429 2429
2430 /* create a new volume representation */ 2430 /* create a new volume representation */
2431 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr); 2431 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
2432 if (IS_ERR(server)) { 2432 if (IS_ERR(server)) {
2433 error = PTR_ERR(server); 2433 error = PTR_ERR(server);
2434 goto out_err_noserver; 2434 goto out_err_noserver;
@@ -2767,11 +2767,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
2767 char *root_devname; 2767 char *root_devname;
2768 size_t len; 2768 size_t len;
2769 2769
2770 len = strlen(hostname) + 3; 2770 len = strlen(hostname) + 5;
2771 root_devname = kmalloc(len, GFP_KERNEL); 2771 root_devname = kmalloc(len, GFP_KERNEL);
2772 if (root_devname == NULL) 2772 if (root_devname == NULL)
2773 return ERR_PTR(-ENOMEM); 2773 return ERR_PTR(-ENOMEM);
2774 snprintf(root_devname, len, "%s:/", hostname); 2774 /* Does hostname needs to be enclosed in brackets? */
2775 if (strchr(hostname, ':'))
2776 snprintf(root_devname, len, "[%s]:/", hostname);
2777 else
2778 snprintf(root_devname, len, "%s:/", hostname);
2775 root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data); 2779 root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
2776 kfree(root_devname); 2780 kfree(root_devname);
2777 return root_mnt; 2781 return root_mnt;
@@ -2951,7 +2955,7 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
2951 dprintk("--> nfs4_xdev_mount()\n"); 2955 dprintk("--> nfs4_xdev_mount()\n");
2952 2956
2953 /* create a new volume representation */ 2957 /* create a new volume representation */
2954 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr); 2958 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
2955 if (IS_ERR(server)) { 2959 if (IS_ERR(server)) {
2956 error = PTR_ERR(server); 2960 error = PTR_ERR(server);
2957 goto out_err_noserver; 2961 goto out_err_noserver;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 2c68818f68ac..c07462320f6b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -682,7 +682,8 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
682 req->wb_bytes = rqend - req->wb_offset; 682 req->wb_bytes = rqend - req->wb_offset;
683out_unlock: 683out_unlock:
684 spin_unlock(&inode->i_lock); 684 spin_unlock(&inode->i_lock);
685 nfs_clear_request_commit(req); 685 if (req)
686 nfs_clear_request_commit(req);
686 return req; 687 return req;
687out_flushme: 688out_flushme:
688 spin_unlock(&inode->i_lock); 689 spin_unlock(&inode->i_lock);
@@ -1018,7 +1019,7 @@ out_bad:
1018 while (!list_empty(res)) { 1019 while (!list_empty(res)) {
1019 data = list_entry(res->next, struct nfs_write_data, list); 1020 data = list_entry(res->next, struct nfs_write_data, list);
1020 list_del(&data->list); 1021 list_del(&data->list);
1021 nfs_writedata_free(data); 1022 nfs_writedata_release(data);
1022 } 1023 }
1023 nfs_redirty_request(req); 1024 nfs_redirty_request(req);
1024 return -ENOMEM; 1025 return -ENOMEM;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 4767429264a2..ed3f9206a0ee 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -577,7 +577,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
577 struct cld_net *cn = nn->cld_net; 577 struct cld_net *cn = nn->cld_net;
578 578
579 if (mlen != sizeof(*cmsg)) { 579 if (mlen != sizeof(*cmsg)) {
580 dprintk("%s: got %lu bytes, expected %lu\n", __func__, mlen, 580 dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen,
581 sizeof(*cmsg)); 581 sizeof(*cmsg));
582 return -EINVAL; 582 return -EINVAL;
583 } 583 }
diff --git a/fs/pipe.c b/fs/pipe.c
index 25feaa3faac0..fec5e4ad071a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -346,6 +346,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
346 .get = generic_pipe_buf_get, 346 .get = generic_pipe_buf_get,
347}; 347};
348 348
349static const struct pipe_buf_operations packet_pipe_buf_ops = {
350 .can_merge = 0,
351 .map = generic_pipe_buf_map,
352 .unmap = generic_pipe_buf_unmap,
353 .confirm = generic_pipe_buf_confirm,
354 .release = anon_pipe_buf_release,
355 .steal = generic_pipe_buf_steal,
356 .get = generic_pipe_buf_get,
357};
358
349static ssize_t 359static ssize_t
350pipe_read(struct kiocb *iocb, const struct iovec *_iov, 360pipe_read(struct kiocb *iocb, const struct iovec *_iov,
351 unsigned long nr_segs, loff_t pos) 361 unsigned long nr_segs, loff_t pos)
@@ -407,6 +417,13 @@ redo:
407 ret += chars; 417 ret += chars;
408 buf->offset += chars; 418 buf->offset += chars;
409 buf->len -= chars; 419 buf->len -= chars;
420
421 /* Was it a packet buffer? Clean up and exit */
422 if (buf->flags & PIPE_BUF_FLAG_PACKET) {
423 total_len = chars;
424 buf->len = 0;
425 }
426
410 if (!buf->len) { 427 if (!buf->len) {
411 buf->ops = NULL; 428 buf->ops = NULL;
412 ops->release(pipe, buf); 429 ops->release(pipe, buf);
@@ -459,6 +476,11 @@ redo:
459 return ret; 476 return ret;
460} 477}
461 478
479static inline int is_packetized(struct file *file)
480{
481 return (file->f_flags & O_DIRECT) != 0;
482}
483
462static ssize_t 484static ssize_t
463pipe_write(struct kiocb *iocb, const struct iovec *_iov, 485pipe_write(struct kiocb *iocb, const struct iovec *_iov,
464 unsigned long nr_segs, loff_t ppos) 486 unsigned long nr_segs, loff_t ppos)
@@ -593,6 +615,11 @@ redo2:
593 buf->ops = &anon_pipe_buf_ops; 615 buf->ops = &anon_pipe_buf_ops;
594 buf->offset = 0; 616 buf->offset = 0;
595 buf->len = chars; 617 buf->len = chars;
618 buf->flags = 0;
619 if (is_packetized(filp)) {
620 buf->ops = &packet_pipe_buf_ops;
621 buf->flags = PIPE_BUF_FLAG_PACKET;
622 }
596 pipe->nrbufs = ++bufs; 623 pipe->nrbufs = ++bufs;
597 pipe->tmp_page = NULL; 624 pipe->tmp_page = NULL;
598 625
@@ -1013,7 +1040,7 @@ struct file *create_write_pipe(int flags)
1013 goto err_dentry; 1040 goto err_dentry;
1014 f->f_mapping = inode->i_mapping; 1041 f->f_mapping = inode->i_mapping;
1015 1042
1016 f->f_flags = O_WRONLY | (flags & O_NONBLOCK); 1043 f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
1017 f->f_version = 0; 1044 f->f_version = 0;
1018 1045
1019 return f; 1046 return f;
@@ -1057,7 +1084,7 @@ int do_pipe_flags(int *fd, int flags)
1057 int error; 1084 int error;
1058 int fdw, fdr; 1085 int fdw, fdr;
1059 1086
1060 if (flags & ~(O_CLOEXEC | O_NONBLOCK)) 1087 if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
1061 return -EINVAL; 1088 return -EINVAL;
1062 1089
1063 fw = create_write_pipe(flags); 1090 fw = create_write_pipe(flags);
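
The fs/pipe.c hunks above (together with the PIPE_BUF_FLAG_PACKET flag added to pipe_fs_i.h further down) give pipes an optional packetized mode selected by passing O_DIRECT to pipe2(): each write() becomes one packet, and a read() consumes at most one packet regardless of the buffer size handed in. A small userspace sketch, assuming a kernel with this change and a libc that exposes pipe2():

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[64];
	ssize_t n;

	/* O_DIRECT selects the packet_pipe_buf_ops added above. */
	if (pipe2(fds, O_DIRECT) < 0) {
		perror("pipe2");
		return 1;
	}

	write(fds[1], "abc", 3);	/* packet 1 */
	write(fds[1], "defgh", 5);	/* packet 2 */

	n = read(fds[0], buf, sizeof(buf));
	printf("first read:  %zd bytes\n", n);	/* 3, not 8 */
	n = read(fds[0], buf, sizeof(buf));
	printf("second read: %zd bytes\n", n);	/* 5 */
	return 0;
}
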
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2b9a7607cbd5..1030a716d155 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -597,9 +597,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
597 if (!page) 597 if (!page)
598 continue; 598 continue;
599 599
600 if (PageReserved(page))
601 continue;
602
603 /* Clear accessed and referenced bits. */ 600 /* Clear accessed and referenced bits. */
604 ptep_test_and_clear_young(vma, addr, pte); 601 ptep_test_and_clear_young(vma, addr, pte);
605 ClearPageReferenced(page); 602 ClearPageReferenced(page);
@@ -750,6 +747,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
750 else if (pte_present(pte)) 747 else if (pte_present(pte))
751 *pme = make_pme(PM_PFRAME(pte_pfn(pte)) 748 *pme = make_pme(PM_PFRAME(pte_pfn(pte))
752 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 749 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
750 else
751 *pme = make_pme(PM_NOT_PRESENT);
753} 752}
754 753
755#ifdef CONFIG_TRANSPARENT_HUGEPAGE 754#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -764,6 +763,8 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
764 if (pmd_present(pmd)) 763 if (pmd_present(pmd))
765 *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) 764 *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
766 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 765 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
766 else
767 *pme = make_pme(PM_NOT_PRESENT);
767} 768}
768#else 769#else
769static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, 770static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
@@ -804,8 +805,10 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
804 805
805 /* check to see if we've left 'vma' behind 806 /* check to see if we've left 'vma' behind
806 * and need a new, higher one */ 807 * and need a new, higher one */
807 if (vma && (addr >= vma->vm_end)) 808 if (vma && (addr >= vma->vm_end)) {
808 vma = find_vma(walk->mm, addr); 809 vma = find_vma(walk->mm, addr);
810 pme = make_pme(PM_NOT_PRESENT);
811 }
809 812
810 /* check that 'vma' actually covers this address, 813 /* check that 'vma' actually covers this address,
811 * and that it isn't a huge page vma */ 814 * and that it isn't a huge page vma */
@@ -833,6 +836,8 @@ static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
833 if (pte_present(pte)) 836 if (pte_present(pte))
834 *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) 837 *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
835 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 838 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
839 else
840 *pme = make_pme(PM_NOT_PRESENT);
836} 841}
837 842
838/* This function walks within one hugetlb entry in the single call */ 843/* This function walks within one hugetlb entry in the single call */
@@ -842,7 +847,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
842{ 847{
843 struct pagemapread *pm = walk->private; 848 struct pagemapread *pm = walk->private;
844 int err = 0; 849 int err = 0;
845 pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); 850 pagemap_entry_t pme;
846 851
847 for (; addr != end; addr += PAGE_SIZE) { 852 for (; addr != end; addr += PAGE_SIZE) {
848 int offset = (addr & ~hmask) >> PAGE_SHIFT; 853 int offset = (addr & ~hmask) >> PAGE_SHIFT;
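
The task_mmu.c changes above ensure every slot the pagemap walker emits is written explicitly, so a hole or non-present page reports PM_NOT_PRESENT instead of whatever the previous iteration left behind. For reference, this is how the resulting /proc/<pid>/pagemap entries are consumed from userspace (a sketch following the documented layout: bit 63 is the present bit, bits 0-54 the page frame number):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fetch the 64-bit pagemap entry for the page containing vaddr. */
static int pagemap_entry(uintptr_t vaddr, uint64_t *entry)
{
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = (off_t)(vaddr / psize) * sizeof(*entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, entry, sizeof(*entry), off) != sizeof(*entry)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	int x = 42;	/* touched, so its page should be present */
	uint64_t e;

	if (pagemap_entry((uintptr_t)&x, &e) == 0)
		printf("present=%d pfn=0x%llx\n", (int)(e >> 63),
		       (unsigned long long)(e & ((1ULL << 55) - 1)));
	return 0;
}
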
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index eba66043cf1b..e8bcc4742e0e 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -499,9 +499,10 @@ typedef u64 acpi_integer;
499#define ACPI_STATE_D0 (u8) 0 499#define ACPI_STATE_D0 (u8) 0
500#define ACPI_STATE_D1 (u8) 1 500#define ACPI_STATE_D1 (u8) 1
501#define ACPI_STATE_D2 (u8) 2 501#define ACPI_STATE_D2 (u8) 2
502#define ACPI_STATE_D3 (u8) 3 502#define ACPI_STATE_D3_HOT (u8) 3
503#define ACPI_STATE_D3_COLD (u8) 4 503#define ACPI_STATE_D3 (u8) 4
504#define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD 504#define ACPI_STATE_D3_COLD ACPI_STATE_D3
505#define ACPI_D_STATES_MAX ACPI_STATE_D3
505#define ACPI_D_STATE_COUNT 5 506#define ACPI_D_STATE_COUNT 5
506 507
507#define ACPI_STATE_C0 (u8) 0 508#define ACPI_STATE_C0 (u8) 0
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 0dd4e87f6fba..5e5e3865f1ed 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -35,6 +35,14 @@ typedef union sigval {
35#define __ARCH_SI_BAND_T long 35#define __ARCH_SI_BAND_T long
36#endif 36#endif
37 37
38#ifndef __ARCH_SI_CLOCK_T
39#define __ARCH_SI_CLOCK_T __kernel_clock_t
40#endif
41
42#ifndef __ARCH_SI_ATTRIBUTES
43#define __ARCH_SI_ATTRIBUTES
44#endif
45
38#ifndef HAVE_ARCH_SIGINFO_T 46#ifndef HAVE_ARCH_SIGINFO_T
39 47
40typedef struct siginfo { 48typedef struct siginfo {
@@ -72,8 +80,8 @@ typedef struct siginfo {
72 __kernel_pid_t _pid; /* which child */ 80 __kernel_pid_t _pid; /* which child */
73 __ARCH_SI_UID_T _uid; /* sender's uid */ 81 __ARCH_SI_UID_T _uid; /* sender's uid */
74 int _status; /* exit code */ 82 int _status; /* exit code */
75 __kernel_clock_t _utime; 83 __ARCH_SI_CLOCK_T _utime;
76 __kernel_clock_t _stime; 84 __ARCH_SI_CLOCK_T _stime;
77 } _sigchld; 85 } _sigchld;
78 86
79 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ 87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
@@ -91,7 +99,7 @@ typedef struct siginfo {
91 int _fd; 99 int _fd;
92 } _sigpoll; 100 } _sigpoll;
93 } _sifields; 101 } _sifields;
94} siginfo_t; 102} __ARCH_SI_ATTRIBUTES siginfo_t;
95 103
96#endif 104#endif
97 105
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 0fd28e028de1..c749af9c0983 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t;
15 * with a 10' pole. 15 * with a 10' pole.
16 */ 16 */
17#ifndef __statfs_word 17#ifndef __statfs_word
18#if BITS_PER_LONG == 64 18#if __BITS_PER_LONG == 64
19#define __statfs_word long 19#define __statfs_word long
20#else 20#else
21#define __statfs_word __u32 21#define __statfs_word __u32
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 88ec80670d5f..ec45ccd8708a 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -554,7 +554,18 @@ extern int __init efi_setup_pcdp_console(char *);
554#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001 554#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
555#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002 555#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
556#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004 556#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
557 557#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
558#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
559#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
560#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
561
562#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
563 EFI_VARIABLE_BOOTSERVICE_ACCESS | \
564 EFI_VARIABLE_RUNTIME_ACCESS | \
565 EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
566 EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
567 EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
568 EFI_VARIABLE_APPEND_WRITE)
558/* 569/*
559 * The type of search to perform when calling boottime->locate_handle 570 * The type of search to perform when calling boottime->locate_handle
560 */ 571 */
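
The new EFI_VARIABLE_MASK above collects every attribute bit the kernel recognises; the obvious consumer (shown here only as a sketch, not the exact in-tree caller) is a sanity check that rejects variable writes requesting unknown attributes:

#include <linux/efi.h>
#include <linux/errno.h>

/* Sketch: refuse attribute bits outside the set defined above. */
static int check_var_attributes(u64 attributes)
{
	if (attributes & ~EFI_VARIABLE_MASK)
		return -EINVAL;
	return 0;
}
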
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 8a1835855faa..fe5136d81454 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -159,7 +159,8 @@ static inline void eth_hw_addr_random(struct net_device *dev)
159 * @addr1: Pointer to a six-byte array containing the Ethernet address 159 * @addr1: Pointer to a six-byte array containing the Ethernet address
160 * @addr2: Pointer other six-byte array containing the Ethernet address 160 * @addr2: Pointer other six-byte array containing the Ethernet address
161 * 161 *
162 * Compare two ethernet addresses, returns 0 if equal 162 * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
163 * Unlike memcmp(), it doesn't return a value suitable for sorting.
163 */ 164 */
164static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) 165static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
165{ 166{
@@ -184,10 +185,10 @@ static inline unsigned long zap_last_2bytes(unsigned long value)
184 * @addr1: Pointer to an array of 8 bytes 185 * @addr1: Pointer to an array of 8 bytes
185 * @addr2: Pointer to an other array of 8 bytes 186 * @addr2: Pointer to an other array of 8 bytes
186 * 187 *
187 * Compare two ethernet addresses, returns 0 if equal. 188 * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
188 * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional 189 * Unlike memcmp(), it doesn't return a value suitable for sorting.
189 * branches, and possibly long word memory accesses on CPU allowing cheap 190 * The function doesn't need any conditional branches and possibly uses
190 * unaligned memory reads. 191 * word memory accesses on CPU allowing cheap unaligned memory reads.
191 * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2} 192 * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2}
192 * 193 *
193 * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits. 194 * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits.
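
Both comment fixes above stress the same point: compare_ether_addr() only answers "equal or not", so callers treat it as a boolean rather than a memcmp()-style ordering. A short usage sketch (mac_matches() is a hypothetical helper):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Sketch: equality test only; do not use the result for sorting. */
static bool mac_matches(const struct net_device *dev, const u8 *target)
{
	return compare_ether_addr(dev->dev_addr, target) == 0;
}
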
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
index 05071ee34c3f..d755b28ba635 100644
--- a/include/linux/gpio-pxa.h
+++ b/include/linux/gpio-pxa.h
@@ -13,4 +13,8 @@ extern int pxa_last_gpio;
13 13
14extern int pxa_irq_to_gpio(int irq); 14extern int pxa_irq_to_gpio(int irq);
15 15
16struct pxa_gpio_platform_data {
17 int (*gpio_set_wake)(unsigned int gpio, unsigned int on);
18};
19
16#endif /* __GPIO_PXA_H */ 20#endif /* __GPIO_PXA_H */
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 4b178067f405..56fae865e272 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -26,9 +26,9 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/spinlock.h>
30#include <linux/list.h> 29#include <linux/list.h>
31#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/notifier.h>
32 32
33/* HSI message ttype */ 33/* HSI message ttype */
34#define HSI_MSG_READ 0 34#define HSI_MSG_READ 0
@@ -121,18 +121,18 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
121 * @device: Driver model representation of the device 121 * @device: Driver model representation of the device
122 * @tx_cfg: HSI TX configuration 122 * @tx_cfg: HSI TX configuration
123 * @rx_cfg: HSI RX configuration 123 * @rx_cfg: HSI RX configuration
124 * @hsi_start_rx: Called after incoming wake line goes high 124 * @e_handler: Callback for handling port events (RX Wake High/Low)
125 * @hsi_stop_rx: Called after incoming wake line goes low 125 * @pclaimed: Keeps tracks if the clients claimed its associated HSI port
126 * @nb: Notifier block for port events
126 */ 127 */
127struct hsi_client { 128struct hsi_client {
128 struct device device; 129 struct device device;
129 struct hsi_config tx_cfg; 130 struct hsi_config tx_cfg;
130 struct hsi_config rx_cfg; 131 struct hsi_config rx_cfg;
131 void (*hsi_start_rx)(struct hsi_client *cl);
132 void (*hsi_stop_rx)(struct hsi_client *cl);
133 /* private: */ 132 /* private: */
133 void (*ehandler)(struct hsi_client *, unsigned long);
134 unsigned int pclaimed:1; 134 unsigned int pclaimed:1;
135 struct list_head link; 135 struct notifier_block nb;
136}; 136};
137 137
138#define to_hsi_client(dev) container_of(dev, struct hsi_client, device) 138#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
@@ -147,6 +147,10 @@ static inline void *hsi_client_drvdata(struct hsi_client *cl)
147 return dev_get_drvdata(&cl->device); 147 return dev_get_drvdata(&cl->device);
148} 148}
149 149
150int hsi_register_port_event(struct hsi_client *cl,
151 void (*handler)(struct hsi_client *, unsigned long));
152int hsi_unregister_port_event(struct hsi_client *cl);
153
150/** 154/**
151 * struct hsi_client_driver - Driver associated to an HSI client 155 * struct hsi_client_driver - Driver associated to an HSI client
152 * @driver: Driver model representation of the driver 156 * @driver: Driver model representation of the driver
@@ -214,8 +218,7 @@ void hsi_free_msg(struct hsi_msg *msg);
214 * @start_tx: Callback to inform that a client wants to TX data 218 * @start_tx: Callback to inform that a client wants to TX data
215 * @stop_tx: Callback to inform that a client no longer wishes to TX data 219 * @stop_tx: Callback to inform that a client no longer wishes to TX data
216 * @release: Callback to inform that a client no longer uses the port 220 * @release: Callback to inform that a client no longer uses the port
217 * @clients: List of hsi_clients using the port. 221 * @n_head: Notifier chain for signaling port events to the clients.
218 * @clock: Lock to serialize access to the clients list.
219 */ 222 */
220struct hsi_port { 223struct hsi_port {
221 struct device device; 224 struct device device;
@@ -231,14 +234,14 @@ struct hsi_port {
231 int (*start_tx)(struct hsi_client *cl); 234 int (*start_tx)(struct hsi_client *cl);
232 int (*stop_tx)(struct hsi_client *cl); 235 int (*stop_tx)(struct hsi_client *cl);
233 int (*release)(struct hsi_client *cl); 236 int (*release)(struct hsi_client *cl);
234 struct list_head clients; 237 /* private */
235 spinlock_t clock; 238 struct atomic_notifier_head n_head;
236}; 239};
237 240
238#define to_hsi_port(dev) container_of(dev, struct hsi_port, device) 241#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
239#define hsi_get_port(cl) to_hsi_port((cl)->device.parent) 242#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
240 243
241void hsi_event(struct hsi_port *port, unsigned int event); 244int hsi_event(struct hsi_port *port, unsigned long event);
242int hsi_claim_port(struct hsi_client *cl, unsigned int share); 245int hsi_claim_port(struct hsi_client *cl, unsigned int share);
243void hsi_release_port(struct hsi_client *cl); 246void hsi_release_port(struct hsi_client *cl);
244 247
@@ -270,13 +273,13 @@ struct hsi_controller {
270 struct module *owner; 273 struct module *owner;
271 unsigned int id; 274 unsigned int id;
272 unsigned int num_ports; 275 unsigned int num_ports;
273 struct hsi_port *port; 276 struct hsi_port **port;
274}; 277};
275 278
276#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) 279#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
277 280
278struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); 281struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
279void hsi_free_controller(struct hsi_controller *hsi); 282void hsi_put_controller(struct hsi_controller *hsi);
280int hsi_register_controller(struct hsi_controller *hsi); 283int hsi_register_controller(struct hsi_controller *hsi);
281void hsi_unregister_controller(struct hsi_controller *hsi); 284void hsi_unregister_controller(struct hsi_controller *hsi);
282 285
@@ -294,7 +297,7 @@ static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
294static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, 297static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
295 unsigned int num) 298 unsigned int num)
296{ 299{
297 return (num < hsi->num_ports) ? &hsi->port[num] : NULL; 300 return (num < hsi->num_ports) ? hsi->port[num] : NULL;
298} 301}
299 302
300/* 303/*
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 2463b6100333..1f90de0cfdbe 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -666,23 +666,11 @@ struct twl4030_codec_data {
666 unsigned int check_defaults:1; 666 unsigned int check_defaults:1;
667 unsigned int reset_registers:1; 667 unsigned int reset_registers:1;
668 unsigned int hs_extmute:1; 668 unsigned int hs_extmute:1;
669 u16 hs_left_step;
670 u16 hs_right_step;
671 u16 hf_left_step;
672 u16 hf_right_step;
673 void (*set_hs_extmute)(int mute); 669 void (*set_hs_extmute)(int mute);
674}; 670};
675 671
676struct twl4030_vibra_data { 672struct twl4030_vibra_data {
677 unsigned int coexist; 673 unsigned int coexist;
678
679 /* twl6040 */
680 unsigned int vibldrv_res; /* left driver resistance */
681 unsigned int vibrdrv_res; /* right driver resistance */
682 unsigned int viblmotor_res; /* left motor resistance */
683 unsigned int vibrmotor_res; /* right motor resistance */
684 int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
685 int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
686}; 674};
687 675
688struct twl4030_audio_data { 676struct twl4030_audio_data {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 7810406f3d80..b27cfcfd3a59 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -49,6 +49,12 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
49 * IRQ_TYPE_LEVEL_LOW - low level triggered 49 * IRQ_TYPE_LEVEL_LOW - low level triggered
50 * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits 50 * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
51 * IRQ_TYPE_SENSE_MASK - Mask for all the above bits 51 * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
52 * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type
53 * to setup the HW to a sane default (used
54 * by irqdomain map() callbacks to synchronize
55 * the HW state and SW flags for a newly
56 * allocated descriptor).
57 *
52 * IRQ_TYPE_PROBE - Special flag for probing in progress 58 * IRQ_TYPE_PROBE - Special flag for probing in progress
53 * 59 *
54 * Bits which can be modified via irq_set/clear/modify_status_flags() 60 * Bits which can be modified via irq_set/clear/modify_status_flags()
@@ -77,6 +83,7 @@ enum {
77 IRQ_TYPE_LEVEL_LOW = 0x00000008, 83 IRQ_TYPE_LEVEL_LOW = 0x00000008,
78 IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), 84 IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
79 IRQ_TYPE_SENSE_MASK = 0x0000000f, 85 IRQ_TYPE_SENSE_MASK = 0x0000000f,
86 IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,
80 87
81 IRQ_TYPE_PROBE = 0x00000010, 88 IRQ_TYPE_PROBE = 0x00000010,
82 89
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 42378d637ffb..e926df7b54c9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -996,7 +996,8 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
996extern void ata_sas_port_destroy(struct ata_port *); 996extern void ata_sas_port_destroy(struct ata_port *);
997extern struct ata_port *ata_sas_port_alloc(struct ata_host *, 997extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
998 struct ata_port_info *, struct Scsi_Host *); 998 struct ata_port_info *, struct Scsi_Host *);
999extern int ata_sas_async_port_init(struct ata_port *); 999extern void ata_sas_async_probe(struct ata_port *ap);
1000extern int ata_sas_sync_probe(struct ata_port *ap);
1000extern int ata_sas_port_init(struct ata_port *); 1001extern int ata_sas_port_init(struct ata_port *);
1001extern int ata_sas_port_start(struct ata_port *ap); 1002extern int ata_sas_port_start(struct ata_port *ap);
1002extern void ata_sas_port_stop(struct ata_port *ap); 1003extern void ata_sas_port_stop(struct ata_port *ap);
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
index 9890687f582d..5a049dfaf153 100644
--- a/include/linux/mfd/db5500-prcmu.h
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -8,41 +8,14 @@
8#ifndef __MFD_DB5500_PRCMU_H 8#ifndef __MFD_DB5500_PRCMU_H
9#define __MFD_DB5500_PRCMU_H 9#define __MFD_DB5500_PRCMU_H
10 10
11#ifdef CONFIG_MFD_DB5500_PRCMU 11static inline int prcmu_resetout(u8 resoutn, u8 state)
12
13void db5500_prcmu_early_init(void);
14int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state);
15int db5500_prcmu_set_display_clocks(void);
16int db5500_prcmu_disable_dsipll(void);
17int db5500_prcmu_enable_dsipll(void);
18int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
19int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
20void db5500_prcmu_enable_wakeups(u32 wakeups);
21int db5500_prcmu_request_clock(u8 clock, bool enable);
22void db5500_prcmu_config_abb_event_readout(u32 abb_events);
23void db5500_prcmu_get_abb_event_buffer(void __iomem **buf);
24int prcmu_resetout(u8 resoutn, u8 state);
25int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
26 bool keep_ap_pll);
27int db5500_prcmu_config_esram0_deep_sleep(u8 state);
28void db5500_prcmu_system_reset(u16 reset_code);
29u16 db5500_prcmu_get_reset_code(void);
30bool db5500_prcmu_is_ac_wake_requested(void);
31int db5500_prcmu_set_arm_opp(u8 opp);
32int db5500_prcmu_get_arm_opp(void);
33
34#else /* !CONFIG_UX500_SOC_DB5500 */
35
36static inline void db5500_prcmu_early_init(void) {}
37
38static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
39{ 12{
40 return -ENOSYS; 13 return 0;
41} 14}
42 15
43static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) 16static inline int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state)
44{ 17{
45 return -ENOSYS; 18 return 0;
46} 19}
47 20
48static inline int db5500_prcmu_request_clock(u8 clock, bool enable) 21static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
@@ -50,69 +23,82 @@ static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
50 return 0; 23 return 0;
51} 24}
52 25
53static inline int db5500_prcmu_set_display_clocks(void) 26static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
27 bool keep_ap_pll)
54{ 28{
55 return 0; 29 return 0;
56} 30}
57 31
58static inline int db5500_prcmu_disable_dsipll(void) 32static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state)
59{ 33{
60 return 0; 34 return 0;
61} 35}
62 36
63static inline int db5500_prcmu_enable_dsipll(void) 37static inline u16 db5500_prcmu_get_reset_code(void)
64{ 38{
65 return 0; 39 return 0;
66} 40}
67 41
68static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) 42static inline bool db5500_prcmu_is_ac_wake_requested(void)
69{ 43{
70 return 0; 44 return 0;
71} 45}
72 46
73static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} 47static inline int db5500_prcmu_set_arm_opp(u8 opp)
74
75static inline int prcmu_resetout(u8 resoutn, u8 state)
76{ 48{
77 return 0; 49 return 0;
78} 50}
79 51
80static inline int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state) 52static inline int db5500_prcmu_get_arm_opp(void)
81{ 53{
82 return 0; 54 return 0;
83} 55}
84 56
85static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {}
86static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} 57static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {}
87 58
88static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, 59static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {}
89 bool keep_ap_pll)
90{
91 return 0;
92}
93 60
94static inline void db5500_prcmu_system_reset(u16 reset_code) {} 61static inline void db5500_prcmu_system_reset(u16 reset_code) {}
95 62
96static inline u16 db5500_prcmu_get_reset_code(void) 63static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {}
64
65#ifdef CONFIG_MFD_DB5500_PRCMU
66
67void db5500_prcmu_early_init(void);
68int db5500_prcmu_set_display_clocks(void);
69int db5500_prcmu_disable_dsipll(void);
70int db5500_prcmu_enable_dsipll(void);
71int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
72int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
73
74#else /* !CONFIG_UX500_SOC_DB5500 */
75
76static inline void db5500_prcmu_early_init(void) {}
77
78static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
97{ 79{
98 return 0; 80 return -ENOSYS;
99} 81}
100 82
101static inline bool db5500_prcmu_is_ac_wake_requested(void) 83static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
102{ 84{
103 return 0; 85 return -ENOSYS;
104} 86}
105 87
106static inline int db5500_prcmu_set_arm_opp(u8 opp) 88static inline int db5500_prcmu_set_display_clocks(void)
107{ 89{
108 return 0; 90 return 0;
109} 91}
110 92
111static inline int db5500_prcmu_get_arm_opp(void) 93static inline int db5500_prcmu_disable_dsipll(void)
112{ 94{
113 return 0; 95 return 0;
114} 96}
115 97
98static inline int db5500_prcmu_enable_dsipll(void)
99{
100 return 0;
101}
116 102
117#endif /* CONFIG_MFD_DB5500_PRCMU */ 103#endif /* CONFIG_MFD_DB5500_PRCMU */
118 104
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index a2c61609d21d..0b64b19d81ab 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -26,6 +26,7 @@
26 26
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/regmap.h>
29 30
30#define RC5T583_MAX_REGS 0xF8 31#define RC5T583_MAX_REGS 0xF8
31 32
@@ -279,14 +280,44 @@ struct rc5t583_platform_data {
279 bool enable_shutdown; 280 bool enable_shutdown;
280}; 281};
281 282
282int rc5t583_write(struct device *dev, u8 reg, uint8_t val); 283static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
283int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val); 284{
284int rc5t583_set_bits(struct device *dev, unsigned int reg, 285 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
285 unsigned int bit_mask); 286 return regmap_write(rc5t583->regmap, reg, val);
286int rc5t583_clear_bits(struct device *dev, unsigned int reg, 287}
287 unsigned int bit_mask); 288
288int rc5t583_update(struct device *dev, unsigned int reg, 289static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
289 unsigned int val, unsigned int mask); 290{
291 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
292 unsigned int ival;
293 int ret;
294 ret = regmap_read(rc5t583->regmap, reg, &ival);
295 if (!ret)
296 *val = (uint8_t)ival;
297 return ret;
298}
299
300static inline int rc5t583_set_bits(struct device *dev, unsigned int reg,
301 unsigned int bit_mask)
302{
303 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
304 return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
305}
306
307static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg,
308 unsigned int bit_mask)
309{
310 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
311 return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
312}
313
314static inline int rc5t583_update(struct device *dev, unsigned int reg,
315 unsigned int val, unsigned int mask)
316{
317 struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
318 return regmap_update_bits(rc5t583->regmap, reg, mask, val);
319}
320
290int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id, 321int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id,
291 int ext_pwr_req, int deepsleep_slot_nr); 322 int ext_pwr_req, int deepsleep_slot_nr);
292int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base); 323int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base);
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 9bc9ac651dad..b15b5f03f5c4 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -174,8 +174,35 @@
174#define TWL6040_SYSCLK_SEL_LPPLL 0 174#define TWL6040_SYSCLK_SEL_LPPLL 0
175#define TWL6040_SYSCLK_SEL_HPPLL 1 175#define TWL6040_SYSCLK_SEL_HPPLL 1
176 176
177struct twl6040_codec_data {
178 u16 hs_left_step;
179 u16 hs_right_step;
180 u16 hf_left_step;
181 u16 hf_right_step;
182};
183
184struct twl6040_vibra_data {
185 unsigned int vibldrv_res; /* left driver resistance */
186 unsigned int vibrdrv_res; /* right driver resistance */
187 unsigned int viblmotor_res; /* left motor resistance */
188 unsigned int vibrmotor_res; /* right motor resistance */
189 int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
190 int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
191};
192
193struct twl6040_platform_data {
194 int audpwron_gpio; /* audio power-on gpio */
195 unsigned int irq_base;
196
197 struct twl6040_codec_data *codec;
198 struct twl6040_vibra_data *vibra;
199};
200
201struct regmap;
202
177struct twl6040 { 203struct twl6040 {
178 struct device *dev; 204 struct device *dev;
205 struct regmap *regmap;
179 struct mutex mutex; 206 struct mutex mutex;
180 struct mutex io_mutex; 207 struct mutex io_mutex;
181 struct mutex irq_mutex; 208 struct mutex irq_mutex;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d8738a464b94..74aa71bea1e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,29 +1393,20 @@ extern int install_special_mapping(struct mm_struct *mm,
1393 1393
1394extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1394extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1395 1395
1396extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1397 unsigned long len, unsigned long prot,
1398 unsigned long flag, unsigned long pgoff);
1399extern unsigned long mmap_region(struct file *file, unsigned long addr, 1396extern unsigned long mmap_region(struct file *file, unsigned long addr,
1400 unsigned long len, unsigned long flags, 1397 unsigned long len, unsigned long flags,
1401 vm_flags_t vm_flags, unsigned long pgoff); 1398 vm_flags_t vm_flags, unsigned long pgoff);
1402 1399extern unsigned long do_mmap(struct file *, unsigned long,
1403static inline unsigned long do_mmap(struct file *file, unsigned long addr, 1400 unsigned long, unsigned long,
1404 unsigned long len, unsigned long prot, 1401 unsigned long, unsigned long);
1405 unsigned long flag, unsigned long offset)
1406{
1407 unsigned long ret = -EINVAL;
1408 if ((offset + PAGE_ALIGN(len)) < offset)
1409 goto out;
1410 if (!(offset & ~PAGE_MASK))
1411 ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1412out:
1413 return ret;
1414}
1415
1416extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1402extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1417 1403
1418extern unsigned long do_brk(unsigned long, unsigned long); 1404/* These take the mm semaphore themselves */
1405extern unsigned long vm_brk(unsigned long, unsigned long);
1406extern int vm_munmap(unsigned long, size_t);
1407extern unsigned long vm_mmap(struct file *, unsigned long,
1408 unsigned long, unsigned long,
1409 unsigned long, unsigned long);
1419 1410
1420/* truncate.c */ 1411/* truncate.c */
1421extern void truncate_inode_pages(struct address_space *, loff_t); 1412extern void truncate_inode_pages(struct address_space *, loff_t);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 01beae78f079..629b823f8836 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -481,7 +481,7 @@ struct mmc_driver {
481 struct device_driver drv; 481 struct device_driver drv;
482 int (*probe)(struct mmc_card *); 482 int (*probe)(struct mmc_card *);
483 void (*remove)(struct mmc_card *); 483 void (*remove)(struct mmc_card *);
484 int (*suspend)(struct mmc_card *, pm_message_t); 484 int (*suspend)(struct mmc_card *);
485 int (*resume)(struct mmc_card *); 485 int (*resume)(struct mmc_card *);
486}; 486};
487 487
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbaa20f1659..33900a53c990 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1403,15 +1403,6 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1403 return 0; 1403 return 0;
1404} 1404}
1405 1405
1406#ifndef CONFIG_NET_NS
1407static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1408{
1409 skb->dev = dev;
1410}
1411#else /* CONFIG_NET_NS */
1412void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
1413#endif
1414
1415static inline bool netdev_uses_trailer_tags(struct net_device *dev) 1406static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1416{ 1407{
1417#ifdef CONFIG_NET_DSA_TAG_TRAILER 1408#ifdef CONFIG_NET_DSA_TAG_TRAILER
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 0ddd161f3b06..31d2844e6572 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -104,9 +104,18 @@ struct bridge_skb_cb {
104 } daddr; 104 } daddr;
105}; 105};
106 106
107static inline void br_drop_fake_rtable(struct sk_buff *skb)
108{
109 struct dst_entry *dst = skb_dst(skb);
110
111 if (dst && (dst->flags & DST_FAKE_RTABLE))
112 skb_dst_drop(skb);
113}
114
107#else 115#else
108#define nf_bridge_maybe_copy_header(skb) (0) 116#define nf_bridge_maybe_copy_header(skb) (0)
109#define nf_bridge_pad(skb) (0) 117#define nf_bridge_pad(skb) (0)
118#define br_drop_fake_rtable(skb) do { } while (0)
110#endif /* CONFIG_BRIDGE_NETFILTER */ 119#endif /* CONFIG_BRIDGE_NETFILTER */
111 120
112#endif /* __KERNEL__ */ 121#endif /* __KERNEL__ */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bfd0d1bf6707..7ba3551a0414 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -312,6 +312,11 @@ struct nfs4_layoutreturn {
312 int rpc_status; 312 int rpc_status;
313}; 313};
314 314
315struct stateowner_id {
316 __u64 create_time;
317 __u32 uniquifier;
318};
319
315/* 320/*
316 * Arguments to the open call. 321 * Arguments to the open call.
317 */ 322 */
@@ -321,7 +326,7 @@ struct nfs_openargs {
321 int open_flags; 326 int open_flags;
322 fmode_t fmode; 327 fmode_t fmode;
323 __u64 clientid; 328 __u64 clientid;
324 __u64 id; 329 struct stateowner_id id;
325 union { 330 union {
326 struct { 331 struct {
327 struct iattr * attrs; /* UNCHECKED, GUARDED */ 332 struct iattr * attrs; /* UNCHECKED, GUARDED */
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index fee4349364f7..e4d1de742502 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -12,6 +12,8 @@
12#ifndef __LINUX_PINCTRL_MACHINE_H 12#ifndef __LINUX_PINCTRL_MACHINE_H
13#define __LINUX_PINCTRL_MACHINE_H 13#define __LINUX_PINCTRL_MACHINE_H
14 14
15#include <linux/bug.h>
16
15#include "pinctrl-state.h" 17#include "pinctrl-state.h"
16 18
17enum pinctrl_map_type { 19enum pinctrl_map_type {
@@ -148,7 +150,7 @@ struct pinctrl_map {
148#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ 150#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
149 PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) 151 PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
150 152
151#ifdef CONFIG_PINMUX 153#ifdef CONFIG_PINCTRL
152 154
153extern int pinctrl_register_mappings(struct pinctrl_map const *map, 155extern int pinctrl_register_mappings(struct pinctrl_map const *map,
154 unsigned num_maps); 156 unsigned num_maps);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 6d626ff0cfd0..e1ac1ce16fb0 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -6,6 +6,7 @@
6#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ 6#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
7#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ 7#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
8#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ 8#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
9#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
9 10
10/** 11/**
11 * struct pipe_buffer - a linux kernel pipe buffer 12 * struct pipe_buffer - a linux kernel pipe buffer
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c6db9fb33c44..600060e25ec6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
141 unsigned ret; 141 unsigned ret;
142 142
143repeat: 143repeat:
144 ret = s->sequence; 144 ret = ACCESS_ONCE(s->sequence);
145 if (unlikely(ret & 1)) { 145 if (unlikely(ret & 1)) {
146 cpu_relax(); 146 cpu_relax();
147 goto repeat; 147 goto repeat;
@@ -166,6 +166,27 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
166} 166}
167 167
168/** 168/**
169 * raw_seqcount_begin - begin a seq-read critical section
170 * @s: pointer to seqcount_t
171 * Returns: count to be passed to read_seqcount_retry
172 *
173 * raw_seqcount_begin opens a read critical section of the given seqcount.
174 * Validity of the critical section is tested by checking read_seqcount_retry
175 * function.
176 *
177 * Unlike read_seqcount_begin(), this function will not wait for the count
178 * to stabilize. If a writer is active when we begin, we will fail the
179 * read_seqcount_retry() instead of stabilizing at the beginning of the
180 * critical section.
181 */
182static inline unsigned raw_seqcount_begin(const seqcount_t *s)
183{
184 unsigned ret = ACCESS_ONCE(s->sequence);
185 smp_rmb();
186 return ret & ~1;
187}
188
189/**
169 * __read_seqcount_retry - end a seq-read critical section (without barrier) 190 * __read_seqcount_retry - end a seq-read critical section (without barrier)
170 * @s: pointer to seqcount_t 191 * @s: pointer to seqcount_t
171 * @start: count, from read_seqcount_begin 192 * @start: count, from read_seqcount_begin
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 70a3f8d49118..111f26b6e28b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -238,11 +238,12 @@ enum {
238/* 238/*
239 * The callback notifies userspace to release buffers when skb DMA is done in 239 * The callback notifies userspace to release buffers when skb DMA is done in
240 * lower device, the skb last reference should be 0 when calling this. 240 * lower device, the skb last reference should be 0 when calling this.
241 * The desc is used to track userspace buffer index. 241 * The ctx field is used to track device context.
242 * The desc field is used to track userspace buffer index.
242 */ 243 */
243struct ubuf_info { 244struct ubuf_info {
244 void (*callback)(void *); 245 void (*callback)(struct ubuf_info *);
245 void *arg; 246 void *ctx;
246 unsigned long desc; 247 unsigned long desc;
247}; 248};
248 249
@@ -1019,7 +1020,7 @@ static inline void skb_queue_splice(const struct sk_buff_head *list,
1019} 1020}
1020 1021
1021/** 1022/**
1022 * skb_queue_splice - join two skb lists and reinitialise the emptied list 1023 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1023 * @list: the new list to add 1024 * @list: the new list to add
1024 * @head: the place to add it in the first list 1025 * @head: the place to add it in the first list
1025 * 1026 *
@@ -1050,7 +1051,7 @@ static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1050} 1051}
1051 1052
1052/** 1053/**
1053 * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list 1054 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1054 * @list: the new list to add 1055 * @list: the new list to add
1055 * @head: the place to add it in the first list 1056 * @head: the place to add it in the first list
1056 * 1057 *
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 98679b061b63..fa702aeb5038 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -254,7 +254,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
254 * driver is finished with this message, it must call 254 * driver is finished with this message, it must call
255 * spi_finalize_current_message() so the subsystem can issue the next 255 * spi_finalize_current_message() so the subsystem can issue the next
256 * transfer 256 * transfer
257 * @prepare_transfer_hardware: there are currently no more messages on the 257 * @unprepare_transfer_hardware: there are currently no more messages on the
258 * queue so the subsystem notifies the driver that it may relax the 258 * queue so the subsystem notifies the driver that it may relax the
259 * hardware by issuing this call 259 * hardware by issuing this call
260 * 260 *
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 5de415707c23..d28cc78a38e4 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -126,6 +126,8 @@ struct usb_hcd {
126 unsigned wireless:1; /* Wireless USB HCD */ 126 unsigned wireless:1; /* Wireless USB HCD */
127 unsigned authorized_default:1; 127 unsigned authorized_default:1;
128 unsigned has_tt:1; /* Integrated TT in root hub */ 128 unsigned has_tt:1; /* Integrated TT in root hub */
129 unsigned broken_pci_sleep:1; /* Don't put the
130 controller in PCI-D3 for system sleep */
129 131
130 unsigned int irq; /* irq allocated */ 132 unsigned int irq; /* irq allocated */
131 void __iomem *regs; /* device memory/io */ 133 void __iomem *regs; /* device memory/io */
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index f67810f8f21b..38ab3f46346f 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -94,6 +94,7 @@ struct usb_phy {
94 94
95 struct usb_otg *otg; 95 struct usb_otg *otg;
96 96
97 struct device *io_dev;
97 struct usb_phy_io_ops *io_ops; 98 struct usb_phy_io_ops *io_ops;
98 void __iomem *io_priv; 99 void __iomem *io_priv;
99 100
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 03b90cdc1921..06f8e3858251 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -26,13 +26,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
26 PGFREE, PGACTIVATE, PGDEACTIVATE, 26 PGFREE, PGACTIVATE, PGDEACTIVATE,
27 PGFAULT, PGMAJFAULT, 27 PGFAULT, PGMAJFAULT,
28 FOR_ALL_ZONES(PGREFILL), 28 FOR_ALL_ZONES(PGREFILL),
29 FOR_ALL_ZONES(PGSTEAL), 29 FOR_ALL_ZONES(PGSTEAL_KSWAPD),
30 FOR_ALL_ZONES(PGSTEAL_DIRECT),
30 FOR_ALL_ZONES(PGSCAN_KSWAPD), 31 FOR_ALL_ZONES(PGSCAN_KSWAPD),
31 FOR_ALL_ZONES(PGSCAN_DIRECT), 32 FOR_ALL_ZONES(PGSCAN_DIRECT),
32#ifdef CONFIG_NUMA 33#ifdef CONFIG_NUMA
33 PGSCAN_ZONE_RECLAIM_FAILED, 34 PGSCAN_ZONE_RECLAIM_FAILED,
34#endif 35#endif
35 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 36 PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
36 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, 37 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
37 KSWAPD_SKIP_CONGESTION_WAIT, 38 KSWAPD_SKIP_CONGESTION_WAIT,
38 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 39 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6822d2595aff..db1c5df45224 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -314,6 +314,7 @@ struct hci_conn {
314 314
315 __u8 remote_cap; 315 __u8 remote_cap;
316 __u8 remote_auth; 316 __u8 remote_auth;
317 bool flush_key;
317 318
318 unsigned int sent; 319 unsigned int sent;
319 320
@@ -980,7 +981,7 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
980int mgmt_connectable(struct hci_dev *hdev, u8 connectable); 981int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
981int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); 982int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
982int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 983int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
983 u8 persistent); 984 bool persistent);
984int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 985int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
985 u8 addr_type, u32 flags, u8 *name, u8 name_len, 986 u8 addr_type, u32 flags, u8 *name, u8 name_len,
986 u8 *dev_class); 987 u8 *dev_class);
diff --git a/include/net/dst.h b/include/net/dst.h
index 59c5d18cc385..bed833d9796a 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -36,7 +36,11 @@ struct dst_entry {
36 struct net_device *dev; 36 struct net_device *dev;
37 struct dst_ops *ops; 37 struct dst_ops *ops;
38 unsigned long _metrics; 38 unsigned long _metrics;
39 unsigned long expires; 39 union {
40 unsigned long expires;
41 /* point to where the dst_entry copied from */
42 struct dst_entry *from;
43 };
40 struct dst_entry *path; 44 struct dst_entry *path;
41 struct neighbour __rcu *_neighbour; 45 struct neighbour __rcu *_neighbour;
42#ifdef CONFIG_XFRM 46#ifdef CONFIG_XFRM
@@ -55,6 +59,7 @@ struct dst_entry {
55#define DST_NOCACHE 0x0010 59#define DST_NOCACHE 0x0010
56#define DST_NOCOUNT 0x0020 60#define DST_NOCOUNT 0x0020
57#define DST_NOPEER 0x0040 61#define DST_NOPEER 0x0040
62#define DST_FAKE_RTABLE 0x0080
58 63
59 short error; 64 short error;
60 short obsolete; 65 short obsolete;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index b26bb8101981..0ae759a6c76e 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -123,6 +123,54 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
123 return ((struct rt6_info *)dst)->rt6i_idev; 123 return ((struct rt6_info *)dst)->rt6i_idev;
124} 124}
125 125
126static inline void rt6_clean_expires(struct rt6_info *rt)
127{
128 if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
129 dst_release(rt->dst.from);
130
131 rt->rt6i_flags &= ~RTF_EXPIRES;
132 rt->dst.from = NULL;
133}
134
135static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
136{
137 if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
138 dst_release(rt->dst.from);
139
140 rt->rt6i_flags |= RTF_EXPIRES;
141 rt->dst.expires = expires;
142}
143
144static inline void rt6_update_expires(struct rt6_info *rt, int timeout)
145{
146 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
147 if (rt->dst.from)
148 dst_release(rt->dst.from);
149 /* dst_set_expires relies on expires == 0
150 * if it has not been set previously.
151 */
152 rt->dst.expires = 0;
153 }
154
155 dst_set_expires(&rt->dst, timeout);
156 rt->rt6i_flags |= RTF_EXPIRES;
157}
158
159static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
160{
161 struct dst_entry *new = (struct dst_entry *) from;
162
163 if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) {
164 if (new == rt->dst.from)
165 return;
166 dst_release(rt->dst.from);
167 }
168
169 rt->rt6i_flags &= ~RTF_EXPIRES;
170 rt->dst.from = new;
171 dst_hold(new);
172}
173
126struct fib6_walker_t { 174struct fib6_walker_t {
127 struct list_head lh; 175 struct list_head lh;
128 struct fib6_node *root, *node; 176 struct fib6_node *root, *node;
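
For context (not part of the patch above): these helpers exist because dst.expires and dst.from now share a union, so whichever field is currently in use has to be retired before the other is written. Below is a minimal user-space model of that pattern; the struct layout, the flag value, and set_expires() are simplified stand-ins, not the kernel definitions.

    /* Simplified model of the expires/from union added to dst_entry.
     * Layout and flag value are illustrative, not the kernel's. */
    #include <stdio.h>

    #define RTF_EXPIRES 0x00400000      /* stand-in for the real flag */

    struct fake_dst {
        union {
            unsigned long expires;      /* absolute expiry time */
            struct fake_dst *from;      /* entry this one was copied from */
        };
    };

    struct fake_rt6 {
        struct fake_dst dst;
        unsigned int rt6i_flags;
    };

    /* Mirrors the rt6_set_expires() pattern: if the union slot still holds a
     * "from" pointer, drop it before reusing the storage as an expiry time. */
    static void set_expires(struct fake_rt6 *rt, unsigned long expires)
    {
        if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
            rt->dst.from = NULL;        /* the kernel calls dst_release() here */

        rt->rt6i_flags |= RTF_EXPIRES;
        rt->dst.expires = expires;
    }

    int main(void)
    {
        struct fake_rt6 parent = { .rt6i_flags = 0 };
        struct fake_rt6 clone = { .rt6i_flags = 0 };

        clone.dst.from = &parent.dst;   /* clone tracks its origin */
        set_expires(&clone, 1000);      /* now it tracks an expiry instead */
        printf("expires=%lu flags=%#x\n", clone.dst.expires, clone.rt6i_flags);
        return 0;
    }
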
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 2bdee51ba30d..72522f087375 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -393,7 +393,7 @@ struct ip_vs_protocol {
393 393
394 void (*exit)(struct ip_vs_protocol *pp); 394 void (*exit)(struct ip_vs_protocol *pp);
395 395
396 void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); 396 int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
397 397
398 void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); 398 void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
399 399
@@ -1203,6 +1203,8 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
1203 1203
1204extern int ip_vs_use_count_inc(void); 1204extern int ip_vs_use_count_inc(void);
1205extern void ip_vs_use_count_dec(void); 1205extern void ip_vs_use_count_dec(void);
1206extern int ip_vs_register_nl_ioctl(void);
1207extern void ip_vs_unregister_nl_ioctl(void);
1206extern int ip_vs_control_init(void); 1208extern int ip_vs_control_init(void);
1207extern void ip_vs_control_cleanup(void); 1209extern void ip_vs_control_cleanup(void);
1208extern struct ip_vs_dest * 1210extern struct ip_vs_dest *
diff --git a/include/net/red.h b/include/net/red.h
index 77d4c3745cb5..ef46058d35bf 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -245,7 +245,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
245 * 245 *
246 * dummy packets as a burst after idle time, i.e. 246 * dummy packets as a burst after idle time, i.e.
247 * 247 *
248 * p->qavg *= (1-W)^m 248 * v->qavg *= (1-W)^m
249 * 249 *
250 * This is an apparently overcomplicated solution (f.e. we have to 250 * This is an apparently overcomplicated solution (f.e. we have to
251 * precompute a table to make this calculation in reasonable time) 251 * precompute a table to make this calculation in reasonable time)
@@ -279,7 +279,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
279 unsigned int backlog) 279 unsigned int backlog)
280{ 280{
281 /* 281 /*
282 * NOTE: p->qavg is fixed point number with point at Wlog. 282 * NOTE: v->qavg is fixed point number with point at Wlog.
283 * The formula below is equvalent to floating point 283 * The formula below is equvalent to floating point
284 * version: 284 * version:
285 * 285 *
@@ -390,7 +390,7 @@ static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
390 if (red_is_idling(v)) 390 if (red_is_idling(v))
391 qavg = red_calc_qavg_from_idle_time(p, v); 391 qavg = red_calc_qavg_from_idle_time(p, v);
392 392
393 /* p->qavg is fixed point number with point at Wlog */ 393 /* v->qavg is fixed point number with point at Wlog */
394 qavg >>= p->Wlog; 394 qavg >>= p->Wlog;
395 395
396 if (qavg > p->target_max && p->max_P <= MAX_P_MAX) 396 if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
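
For context (not part of the patch above): the comment fixes in red.h concern qavg being stored as a fixed-point value with the binary point at Wlog bits, which is why the code shifts it right by p->Wlog before comparing it with packet counts. A quick worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int Wlog = 8;          /* scaling exponent (example value) */
        unsigned long qavg = 2560;      /* fixed-point average, point at Wlog */

        /* 2560 / 2^8 = 10: the stored value represents 10 packets. */
        printf("average queue length = %lu packets\n", qavg >> Wlog);
        return 0;
    }
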
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 6ee44b24864a..a2ef81466b00 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -704,4 +704,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
704 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); 704 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
705} 705}
706 706
707/* The cookie is always 0 since this is how it's used in the
708 * pmtu code.
709 */
710static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
711{
712 if (t->dst && !dst_check(t->dst, 0)) {
713 dst_release(t->dst);
714 t->dst = NULL;
715 }
716
717 return t->dst;
718}
719
707#endif /* __net_sctp_h__ */ 720#endif /* __net_sctp_h__ */
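
For context (not part of the patch above): sctp_transport_dst_check() follows a cache-and-revalidate pattern: keep the cached route while it is still valid, otherwise release it and return NULL so the caller re-routes. A user-space sketch of the same shape; the types and the validity test are stand-ins, not the kernel's dst_check()/dst_release() API.

    #include <stdio.h>
    #include <stdlib.h>

    static int current_generation = 1;  /* bumped when "routing" changes */

    struct route {
        int generation;
    };

    struct transport {
        struct route *cached;
    };

    static struct route *transport_route_check(struct transport *t)
    {
        if (t->cached && t->cached->generation != current_generation) {
            free(t->cached);            /* kernel code calls dst_release() */
            t->cached = NULL;           /* force the caller to re-route */
        }
        return t->cached;
    }

    int main(void)
    {
        struct transport t = { .cached = malloc(sizeof(struct route)) };

        if (!t.cached)
            return 1;
        t.cached->generation = 1;
        printf("still cached: %s\n", transport_route_check(&t) ? "yes" : "no");

        current_generation = 2;         /* routing state changed */
        printf("still cached: %s\n", transport_route_check(&t) ? "yes" : "no");
        return 0;
    }
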
diff --git a/include/net/sock.h b/include/net/sock.h
index a6ba1f8871fd..5a0a58ac4126 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -246,6 +246,7 @@ struct cg_proto;
246 * @sk_user_data: RPC layer private data 246 * @sk_user_data: RPC layer private data
247 * @sk_sndmsg_page: cached page for sendmsg 247 * @sk_sndmsg_page: cached page for sendmsg
248 * @sk_sndmsg_off: cached offset for sendmsg 248 * @sk_sndmsg_off: cached offset for sendmsg
249 * @sk_peek_off: current peek_offset value
249 * @sk_send_head: front of stuff to transmit 250 * @sk_send_head: front of stuff to transmit
250 * @sk_security: used by security modules 251 * @sk_security: used by security modules
251 * @sk_mark: generic packet mark 252 * @sk_mark: generic packet mark
@@ -1128,9 +1129,9 @@ sk_sockets_allocated_read_positive(struct sock *sk)
1128 struct proto *prot = sk->sk_prot; 1129 struct proto *prot = sk->sk_prot;
1129 1130
1130 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1131 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1131 return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated); 1132 return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
1132 1133
1133 return percpu_counter_sum_positive(prot->sockets_allocated); 1134 return percpu_counter_read_positive(prot->sockets_allocated);
1134} 1135}
1135 1136
1136static inline int 1137static inline int
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 5f5ed1b8b41b..f4f1c96dca72 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -217,11 +217,29 @@ struct domain_device {
217 struct kref kref; 217 struct kref kref;
218}; 218};
219 219
220struct sas_discovery_event { 220struct sas_work {
221 struct list_head drain_node;
221 struct work_struct work; 222 struct work_struct work;
223};
224
225static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
226{
227 INIT_WORK(&sw->work, fn);
228 INIT_LIST_HEAD(&sw->drain_node);
229}
230
231struct sas_discovery_event {
232 struct sas_work work;
222 struct asd_sas_port *port; 233 struct asd_sas_port *port;
223}; 234};
224 235
236static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work)
237{
238 struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work);
239
240 return ev;
241}
242
225struct sas_discovery { 243struct sas_discovery {
226 struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; 244 struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
227 unsigned long pending; 245 unsigned long pending;
@@ -244,7 +262,7 @@ struct asd_sas_port {
244 struct list_head destroy_list; 262 struct list_head destroy_list;
245 enum sas_linkrate linkrate; 263 enum sas_linkrate linkrate;
246 264
247 struct work_struct work; 265 struct sas_work work;
248 266
249/* public: */ 267/* public: */
250 int id; 268 int id;
@@ -270,10 +288,17 @@ struct asd_sas_port {
270}; 288};
271 289
272struct asd_sas_event { 290struct asd_sas_event {
273 struct work_struct work; 291 struct sas_work work;
274 struct asd_sas_phy *phy; 292 struct asd_sas_phy *phy;
275}; 293};
276 294
295static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
296{
297 struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work);
298
299 return ev;
300}
301
277/* The phy pretty much is controlled by the LLDD. 302/* The phy pretty much is controlled by the LLDD.
278 * The class only reads those fields. 303 * The class only reads those fields.
279 */ 304 */
@@ -333,10 +358,17 @@ struct scsi_core {
333}; 358};
334 359
335struct sas_ha_event { 360struct sas_ha_event {
336 struct work_struct work; 361 struct sas_work work;
337 struct sas_ha_struct *ha; 362 struct sas_ha_struct *ha;
338}; 363};
339 364
365static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
366{
367 struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work);
368
369 return ev;
370}
371
340enum sas_ha_state { 372enum sas_ha_state {
341 SAS_HA_REGISTERED, 373 SAS_HA_REGISTERED,
342 SAS_HA_DRAINING, 374 SAS_HA_DRAINING,
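
For context (not part of the patch above): the to_sas_ha_event()/to_asd_sas_event() style helpers recover the containing event from the work_struct the workqueue passes to a handler, now one level deeper because the work_struct is wrapped in a sas_work. The sketch below re-derives container_of so it builds in user space; the structs are simplified stand-ins for the libsas ones.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct {
        void (*fn)(struct work_struct *w);
    };

    struct sas_work {
        int drain_node;                 /* stands in for the list_head */
        struct work_struct work;
    };

    struct ha_event {
        struct sas_work work;
        const char *desc;
    };

    static void handler(struct work_struct *w)
    {
        /* Recover the event from the embedded work, two levels deep. */
        struct ha_event *ev = container_of(w, struct ha_event, work.work);

        printf("handling: %s\n", ev->desc);
    }

    int main(void)
    {
        struct ha_event ev = { .work.work.fn = handler, .desc = "example event" };

        ev.work.work.fn(&ev.work.work); /* what the workqueue would do */
        return 0;
    }
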
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a577a833603d..be3eb0bf1ac0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -103,6 +103,7 @@ enum sas_dev_type {
103}; 103};
104 104
105enum sas_protocol { 105enum sas_protocol {
106 SAS_PROTOCOL_NONE = 0,
106 SAS_PROTOCOL_SATA = 0x01, 107 SAS_PROTOCOL_SATA = 0x01,
107 SAS_PROTOCOL_SMP = 0x02, 108 SAS_PROTOCOL_SMP = 0x02,
108 SAS_PROTOCOL_STP = 0x04, 109 SAS_PROTOCOL_STP = 0x04,
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index cdccd2eb7b6c..77670e823ed8 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -37,7 +37,7 @@ static inline int dev_is_sata(struct domain_device *dev)
37} 37}
38 38
39int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); 39int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
40int sas_ata_init_host_and_port(struct domain_device *found_dev); 40int sas_ata_init(struct domain_device *dev);
41void sas_ata_task_abort(struct sas_task *task); 41void sas_ata_task_abort(struct sas_task *task);
42void sas_ata_strategy_handler(struct Scsi_Host *shost); 42void sas_ata_strategy_handler(struct Scsi_Host *shost);
43void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, 43void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
@@ -52,7 +52,7 @@ static inline int dev_is_sata(struct domain_device *dev)
52{ 52{
53 return 0; 53 return 0;
54} 54}
55static inline int sas_ata_init_host_and_port(struct domain_device *found_dev) 55static inline int sas_ata_init(struct domain_device *dev)
56{ 56{
57 return 0; 57 return 0;
58} 58}
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 0e93f92a0345..42b0707c3481 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -472,7 +472,7 @@ void __init change_floppy(char *fmt, ...)
472void __init mount_root(void) 472void __init mount_root(void)
473{ 473{
474#ifdef CONFIG_ROOT_NFS 474#ifdef CONFIG_ROOT_NFS
475 if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) { 475 if (ROOT_DEV == Root_NFS) {
476 if (mount_nfs_root()) 476 if (mount_nfs_root())
477 return; 477 return;
478 478
diff --git a/init/main.c b/init/main.c
index 9d454f09f3b1..44b2433334c7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -225,13 +225,9 @@ static int __init loglevel(char *str)
225 225
226early_param("loglevel", loglevel); 226early_param("loglevel", loglevel);
227 227
228/* 228/* Change NUL term back to "=", to make "param" the whole string. */
229 * Unknown boot options get handed to init, unless they look like 229static int __init repair_env_string(char *param, char *val)
230 * unused parameters (modprobe will find them in /proc/cmdline).
231 */
232static int __init unknown_bootoption(char *param, char *val)
233{ 230{
234 /* Change NUL term back to "=", to make "param" the whole string. */
235 if (val) { 231 if (val) {
236 /* param=val or param="val"? */ 232 /* param=val or param="val"? */
237 if (val == param+strlen(param)+1) 233 if (val == param+strlen(param)+1)
@@ -243,6 +239,16 @@ static int __init unknown_bootoption(char *param, char *val)
243 } else 239 } else
244 BUG(); 240 BUG();
245 } 241 }
242 return 0;
243}
244
245/*
246 * Unknown boot options get handed to init, unless they look like
247 * unused parameters (modprobe will find them in /proc/cmdline).
248 */
249static int __init unknown_bootoption(char *param, char *val)
250{
251 repair_env_string(param, val);
246 252
247 /* Handle obsolete-style parameters */ 253 /* Handle obsolete-style parameters */
248 if (obsolete_checksetup(param)) 254 if (obsolete_checksetup(param))
@@ -732,11 +738,6 @@ static char *initcall_level_names[] __initdata = {
732 "late parameters", 738 "late parameters",
733}; 739};
734 740
735static int __init ignore_unknown_bootoption(char *param, char *val)
736{
737 return 0;
738}
739
740static void __init do_initcall_level(int level) 741static void __init do_initcall_level(int level)
741{ 742{
742 extern const struct kernel_param __start___param[], __stop___param[]; 743 extern const struct kernel_param __start___param[], __stop___param[];
@@ -747,7 +748,7 @@ static void __init do_initcall_level(int level)
747 static_command_line, __start___param, 748 static_command_line, __start___param,
748 __stop___param - __start___param, 749 __stop___param - __start___param,
749 level, level, 750 level, level,
750 ignore_unknown_bootoption); 751 repair_env_string);
751 752
752 for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) 753 for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
753 do_one_initcall(*fn); 754 do_one_initcall(*fn);
diff --git a/kernel/compat.c b/kernel/compat.c
index 74ff8498809a..d2c67aa49ae6 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -372,25 +372,54 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
372 372
373#ifdef __ARCH_WANT_SYS_SIGPROCMASK 373#ifdef __ARCH_WANT_SYS_SIGPROCMASK
374 374
375asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, 375/*
376 compat_old_sigset_t __user *oset) 376 * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
377 * blocked set of signals to the supplied signal set
378 */
379static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
377{ 380{
378 old_sigset_t s; 381 memcpy(blocked->sig, &set, sizeof(set));
379 long ret; 382}
380 mm_segment_t old_fs;
381 383
382 if (set && get_user(s, set)) 384asmlinkage long compat_sys_sigprocmask(int how,
383 return -EFAULT; 385 compat_old_sigset_t __user *nset,
384 old_fs = get_fs(); 386 compat_old_sigset_t __user *oset)
385 set_fs(KERNEL_DS); 387{
386 ret = sys_sigprocmask(how, 388 old_sigset_t old_set, new_set;
387 set ? (old_sigset_t __user *) &s : NULL, 389 sigset_t new_blocked;
388 oset ? (old_sigset_t __user *) &s : NULL); 390
389 set_fs(old_fs); 391 old_set = current->blocked.sig[0];
390 if (ret == 0) 392
391 if (oset) 393 if (nset) {
392 ret = put_user(s, oset); 394 if (get_user(new_set, nset))
393 return ret; 395 return -EFAULT;
396 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
397
398 new_blocked = current->blocked;
399
400 switch (how) {
401 case SIG_BLOCK:
402 sigaddsetmask(&new_blocked, new_set);
403 break;
404 case SIG_UNBLOCK:
405 sigdelsetmask(&new_blocked, new_set);
406 break;
407 case SIG_SETMASK:
408 compat_sig_setmask(&new_blocked, new_set);
409 break;
410 default:
411 return -EINVAL;
412 }
413
414 set_current_blocked(&new_blocked);
415 }
416
417 if (oset) {
418 if (put_user(old_set, oset))
419 return -EFAULT;
420 }
421
422 return 0;
394} 423}
395 424
396#endif 425#endif
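
For context (not part of the patch above): the rewritten handler applies the usual sigprocmask() semantics (SIG_BLOCK adds to the blocked set, SIG_UNBLOCK removes from it, SIG_SETMASK replaces the first compat word outright) without the old set_fs(KERNEL_DS) round trip through sys_sigprocmask(). The same three cases from user space, as a refresher:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);

        sigprocmask(SIG_BLOCK, &set, &old);   /* add SIGUSR1 to the blocked set */
        sigprocmask(SIG_UNBLOCK, &set, NULL); /* remove it again */
        sigprocmask(SIG_SETMASK, &old, NULL); /* replace the whole mask */

        printf("mask restored\n");
        return 0;
    }
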
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4cd8f5..fd126f82b57c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3183,7 +3183,7 @@ static void perf_event_for_each(struct perf_event *event,
3183 perf_event_for_each_child(event, func); 3183 perf_event_for_each_child(event, func);
3184 func(event); 3184 func(event);
3185 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3185 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3186 perf_event_for_each_child(event, func); 3186 perf_event_for_each_child(sibling, func);
3187 mutex_unlock(&ctx->mutex); 3187 mutex_unlock(&ctx->mutex);
3188} 3188}
3189 3189
diff --git a/kernel/fork.c b/kernel/fork.c
index b9372a0bff18..687a15d56243 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
47#include <linux/audit.h> 47#include <linux/audit.h>
48#include <linux/memcontrol.h> 48#include <linux/memcontrol.h>
49#include <linux/ftrace.h> 49#include <linux/ftrace.h>
50#include <linux/proc_fs.h>
50#include <linux/profile.h> 51#include <linux/profile.h>
51#include <linux/rmap.h> 52#include <linux/rmap.h>
52#include <linux/ksm.h> 53#include <linux/ksm.h>
@@ -1464,6 +1465,8 @@ bad_fork_cleanup_io:
1464 if (p->io_context) 1465 if (p->io_context)
1465 exit_io_context(p); 1466 exit_io_context(p);
1466bad_fork_cleanup_namespaces: 1467bad_fork_cleanup_namespaces:
1468 if (unlikely(clone_flags & CLONE_NEWPID))
1469 pid_ns_release_proc(p->nsproxy->pid_ns);
1467 exit_task_namespaces(p); 1470 exit_task_namespaces(p);
1468bad_fork_cleanup_mm: 1471bad_fork_cleanup_mm:
1469 if (p->mm) 1472 if (p->mm)
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfadc88a..e75e29e4434a 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
4 4
5#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6 6
7#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) 7#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
8#define PS(f) if (desc->istate & f) printk("%14s set\n", #f) 8#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
9/* FIXME */ 9/* FIXME */
10#define PD(f) do { } while (0) 10#define ___PD(f) do { } while (0)
11 11
12static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 12static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
13{ 13{
@@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
23 print_symbol("%s\n", (unsigned long)desc->action->handler); 23 print_symbol("%s\n", (unsigned long)desc->action->handler);
24 } 24 }
25 25
26 P(IRQ_LEVEL); 26 ___P(IRQ_LEVEL);
27 P(IRQ_PER_CPU); 27 ___P(IRQ_PER_CPU);
28 P(IRQ_NOPROBE); 28 ___P(IRQ_NOPROBE);
29 P(IRQ_NOREQUEST); 29 ___P(IRQ_NOREQUEST);
30 P(IRQ_NOTHREAD); 30 ___P(IRQ_NOTHREAD);
31 P(IRQ_NOAUTOEN); 31 ___P(IRQ_NOAUTOEN);
32 32
33 PS(IRQS_AUTODETECT); 33 ___PS(IRQS_AUTODETECT);
34 PS(IRQS_REPLAY); 34 ___PS(IRQS_REPLAY);
35 PS(IRQS_WAITING); 35 ___PS(IRQS_WAITING);
36 PS(IRQS_PENDING); 36 ___PS(IRQS_PENDING);
37 37
38 PD(IRQS_INPROGRESS); 38 ___PD(IRQS_INPROGRESS);
39 PD(IRQS_DISABLED); 39 ___PD(IRQS_DISABLED);
40 PD(IRQS_MASKED); 40 ___PD(IRQS_MASKED);
41} 41}
42 42
43#undef P 43#undef ___P
44#undef PS 44#undef ___PS
45#undef PD 45#undef ___PD
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a94..eef311a58a64 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
51 51
52#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) 52#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
53 53
54/*
55 * Number of free pages that are not high.
56 */
57static inline unsigned long low_free_pages(void)
58{
59 return nr_free_pages() - nr_free_highpages();
60}
61
62/*
63 * Number of pages required to be kept free while writing the image. Always
64 * half of all available low pages before the writing starts.
65 */
66static inline unsigned long reqd_free_pages(void)
67{
68 return low_free_pages() / 2;
69}
70
54struct swap_map_page { 71struct swap_map_page {
55 sector_t entries[MAP_PAGE_ENTRIES]; 72 sector_t entries[MAP_PAGE_ENTRIES];
56 sector_t next_swap; 73 sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
72 sector_t cur_swap; 89 sector_t cur_swap;
73 sector_t first_sector; 90 sector_t first_sector;
74 unsigned int k; 91 unsigned int k;
75 unsigned long nr_free_pages, written; 92 unsigned long reqd_free_pages;
76 u32 crc32; 93 u32 crc32;
77}; 94};
78 95
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
316 goto err_rel; 333 goto err_rel;
317 } 334 }
318 handle->k = 0; 335 handle->k = 0;
319 handle->nr_free_pages = nr_free_pages() >> 1; 336 handle->reqd_free_pages = reqd_free_pages();
320 handle->written = 0;
321 handle->first_sector = handle->cur_swap; 337 handle->first_sector = handle->cur_swap;
322 return 0; 338 return 0;
323err_rel: 339err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
352 handle->cur_swap = offset; 368 handle->cur_swap = offset;
353 handle->k = 0; 369 handle->k = 0;
354 } 370 }
355 if (bio_chain && ++handle->written > handle->nr_free_pages) { 371 if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
356 error = hib_wait_on_bio_chain(bio_chain); 372 error = hib_wait_on_bio_chain(bio_chain);
357 if (error) 373 if (error)
358 goto out; 374 goto out;
359 handle->written = 0; 375 handle->reqd_free_pages = reqd_free_pages();
360 } 376 }
361 out: 377 out:
362 return error; 378 return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
618 * Adjust number of free pages after all allocations have been done. 634 * Adjust number of free pages after all allocations have been done.
619 * We don't want to run out of pages when writing. 635 * We don't want to run out of pages when writing.
620 */ 636 */
621 handle->nr_free_pages = nr_free_pages() >> 1; 637 handle->reqd_free_pages = reqd_free_pages();
622 638
623 /* 639 /*
624 * Start the CRC32 thread. 640 * Start the CRC32 thread.
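
For context (not part of the patch above): the change replaces a "pages written so far" counter with a live check against reqd_free_pages(), so the writer waits for outstanding I/O whenever free low-memory pages fall to the reserve level, roughly half of the low pages available when the reserve was last computed. The arithmetic, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_free = 200000;      /* total free pages (example) */
        unsigned long nr_free_high = 50000;  /* free highmem pages (example) */
        unsigned long low_free = nr_free - nr_free_high;

        printf("low free pages: %lu, reserve to keep: %lu\n",
               low_free, low_free / 2);
        return 0;
    }
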
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..d0c5baf1ab18 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1820 * a quiescent state betweentimes. 1820 * a quiescent state betweentimes.
1821 */ 1821 */
1822 local_irq_save(flags); 1822 local_irq_save(flags);
1823 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1824 rdp = this_cpu_ptr(rsp->rda); 1823 rdp = this_cpu_ptr(rsp->rda);
1825 1824
1826 /* Add the callback to our list. */ 1825 /* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..0533a688ce22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
6405 struct sd_data *sdd = &tl->data; 6405 struct sd_data *sdd = &tl->data;
6406 6406
6407 for_each_cpu(j, cpu_map) { 6407 for_each_cpu(j, cpu_map) {
6408 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); 6408 struct sched_domain *sd;
6409 if (sd && (sd->flags & SD_OVERLAP)) 6409
6410 free_sched_groups(sd->groups, 0); 6410 if (sdd->sd) {
6411 kfree(*per_cpu_ptr(sdd->sd, j)); 6411 sd = *per_cpu_ptr(sdd->sd, j);
6412 kfree(*per_cpu_ptr(sdd->sg, j)); 6412 if (sd && (sd->flags & SD_OVERLAP))
6413 kfree(*per_cpu_ptr(sdd->sgp, j)); 6413 free_sched_groups(sd->groups, 0);
6414 kfree(*per_cpu_ptr(sdd->sd, j));
6415 }
6416
6417 if (sdd->sg)
6418 kfree(*per_cpu_ptr(sdd->sg, j));
6419 if (sdd->sgp)
6420 kfree(*per_cpu_ptr(sdd->sgp, j));
6414 } 6421 }
6415 free_percpu(sdd->sd); 6422 free_percpu(sdd->sd);
6423 sdd->sd = NULL;
6416 free_percpu(sdd->sg); 6424 free_percpu(sdd->sg);
6425 sdd->sg = NULL;
6417 free_percpu(sdd->sgp); 6426 free_percpu(sdd->sgp);
6427 sdd->sgp = NULL;
6418 } 6428 }
6419} 6429}
6420 6430
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f0..e9553640c1c3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
784 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); 784 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
785#ifdef CONFIG_SMP 785#ifdef CONFIG_SMP
786 if (entity_is_task(se)) 786 if (entity_is_task(se))
787 list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); 787 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
788#endif 788#endif
789 cfs_rq->nr_running++; 789 cfs_rq->nr_running++;
790} 790}
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
3215 3215
3216static unsigned long task_h_load(struct task_struct *p); 3216static unsigned long task_h_load(struct task_struct *p);
3217 3217
3218static const unsigned int sched_nr_migrate_break = 32;
3219
3218/* 3220/*
3219 * move_tasks tries to move up to load_move weighted load from busiest to 3221 * move_tasks tries to move up to load_move weighted load from busiest to
3220 * this_rq, as part of a balancing operation within domain "sd". 3222 * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
3242 3244
3243 /* take a breather every nr_migrate tasks */ 3245 /* take a breather every nr_migrate tasks */
3244 if (env->loop > env->loop_break) { 3246 if (env->loop > env->loop_break) {
3245 env->loop_break += sysctl_sched_nr_migrate; 3247 env->loop_break += sched_nr_migrate_break;
3246 env->flags |= LBF_NEED_BREAK; 3248 env->flags |= LBF_NEED_BREAK;
3247 break; 3249 break;
3248 } 3250 }
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
3252 3254
3253 load = task_h_load(p); 3255 load = task_h_load(p);
3254 3256
3255 if (load < 16 && !env->sd->nr_balance_failed) 3257 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
3256 goto next; 3258 goto next;
3257 3259
3258 if ((load / 2) > env->load_move) 3260 if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4407 .dst_cpu = this_cpu, 4409 .dst_cpu = this_cpu,
4408 .dst_rq = this_rq, 4410 .dst_rq = this_rq,
4409 .idle = idle, 4411 .idle = idle,
4410 .loop_break = sysctl_sched_nr_migrate, 4412 .loop_break = sched_nr_migrate_break,
4411 }; 4413 };
4412 4414
4413 cpumask_copy(cpus, cpu_active_mask); 4415 cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
4445 * correctly treated as an imbalance. 4447 * correctly treated as an imbalance.
4446 */ 4448 */
4447 env.flags |= LBF_ALL_PINNED; 4449 env.flags |= LBF_ALL_PINNED;
4448 env.load_move = imbalance; 4450 env.load_move = imbalance;
4449 env.src_cpu = busiest->cpu; 4451 env.src_cpu = busiest->cpu;
4450 env.src_rq = busiest; 4452 env.src_rq = busiest;
4451 env.loop_max = busiest->nr_running; 4453 env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
4452 4454
4453more_balance: 4455more_balance:
4454 local_irq_save(flags); 4456 local_irq_save(flags);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..de00a486c5c6 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
68 68
69SCHED_FEAT(FORCE_SD_OVERLAP, false) 69SCHED_FEAT(FORCE_SD_OVERLAP, false)
70SCHED_FEAT(RT_RUNTIME_SHARE, true) 70SCHED_FEAT(RT_RUNTIME_SHARE, true)
71SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index bf57abdc7bd0..f113755695e2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@ int tick_resume_broadcast(void)
346 tick_get_broadcast_mask()); 346 tick_get_broadcast_mask());
347 break; 347 break;
348 case TICKDEV_MODE_ONESHOT: 348 case TICKDEV_MODE_ONESHOT:
349 broadcast = tick_resume_broadcast_oneshot(bc); 349 if (!cpumask_empty(tick_get_broadcast_mask()))
350 broadcast = tick_resume_broadcast_oneshot(bc);
350 break; 351 break;
351 } 352 }
352 } 353 }
@@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
373{ 374{
374 struct clock_event_device *bc = tick_broadcast_device.evtdev; 375 struct clock_event_device *bc = tick_broadcast_device.evtdev;
375 376
377 if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
378 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
379
376 return clockevents_program_event(bc, expires, force); 380 return clockevents_program_event(bc, expires, force);
377} 381}
378 382
@@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
531 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 535 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
532 536
533 bc->event_handler = tick_handle_oneshot_broadcast; 537 bc->event_handler = tick_handle_oneshot_broadcast;
534 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
535 538
536 /* Take the do_timer update */ 539 /* Take the do_timer update */
537 tick_do_timer_cpu = cpu; 540 tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
549 to_cpumask(tmpmask)); 552 to_cpumask(tmpmask));
550 553
551 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { 554 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
555 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
552 tick_broadcast_init_next_event(to_cpumask(tmpmask), 556 tick_broadcast_init_next_event(to_cpumask(tmpmask),
553 tick_next_period); 557 tick_next_period);
554 tick_broadcast_set_event(tick_next_period, 1); 558 tick_broadcast_set_event(tick_next_period, 1);
@@ -577,15 +581,10 @@ void tick_broadcast_switch_to_oneshot(void)
577 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 581 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
578 582
579 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; 583 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
580
581 if (cpumask_empty(tick_get_broadcast_mask()))
582 goto end;
583
584 bc = tick_broadcast_device.evtdev; 584 bc = tick_broadcast_device.evtdev;
585 if (bc) 585 if (bc)
586 tick_broadcast_setup_oneshot(bc); 586 tick_broadcast_setup_oneshot(bc);
587 587
588end:
589 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 588 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
590} 589}
591 590
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..2a22255c1010 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4629,7 +4629,8 @@ static ssize_t
4629rb_simple_read(struct file *filp, char __user *ubuf, 4629rb_simple_read(struct file *filp, char __user *ubuf,
4630 size_t cnt, loff_t *ppos) 4630 size_t cnt, loff_t *ppos)
4631{ 4631{
4632 struct ring_buffer *buffer = filp->private_data; 4632 struct trace_array *tr = filp->private_data;
4633 struct ring_buffer *buffer = tr->buffer;
4633 char buf[64]; 4634 char buf[64];
4634 int r; 4635 int r;
4635 4636
@@ -4647,7 +4648,8 @@ static ssize_t
4647rb_simple_write(struct file *filp, const char __user *ubuf, 4648rb_simple_write(struct file *filp, const char __user *ubuf,
4648 size_t cnt, loff_t *ppos) 4649 size_t cnt, loff_t *ppos)
4649{ 4650{
4650 struct ring_buffer *buffer = filp->private_data; 4651 struct trace_array *tr = filp->private_data;
4652 struct ring_buffer *buffer = tr->buffer;
4651 unsigned long val; 4653 unsigned long val;
4652 int ret; 4654 int ret;
4653 4655
@@ -4734,7 +4736,7 @@ static __init int tracer_init_debugfs(void)
4734 &trace_clock_fops); 4736 &trace_clock_fops);
4735 4737
4736 trace_create_file("tracing_on", 0644, d_tracer, 4738 trace_create_file("tracing_on", 0644, d_tracer,
4737 global_trace.buffer, &rb_simple_fops); 4739 &global_trace, &rb_simple_fops);
4738 4740
4739#ifdef CONFIG_DYNAMIC_FTRACE 4741#ifdef CONFIG_DYNAMIC_FTRACE
4740 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 4742 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a24..f95d65da6db8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -836,11 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
836 filter) 836 filter)
837#include "trace_entries.h" 837#include "trace_entries.h"
838 838
839#ifdef CONFIG_FUNCTION_TRACER 839#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
840int perf_ftrace_event_register(struct ftrace_event_call *call, 840int perf_ftrace_event_register(struct ftrace_event_call *call,
841 enum trace_reg type, void *data); 841 enum trace_reg type, void *data);
842#else 842#else
843#define perf_ftrace_event_register NULL 843#define perf_ftrace_event_register NULL
844#endif /* CONFIG_FUNCTION_TRACER */ 844#endif
845 845
846#endif /* _LINUX_KERNEL_TRACE_H */ 846#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b1825..df611a0e76c5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
652{ 652{
653 u64 next_ts; 653 u64 next_ts;
654 int ret; 654 int ret;
655 /* trace_find_next_entry will reset ent_size */
656 int ent_size = iter->ent_size;
655 struct trace_seq *s = &iter->seq; 657 struct trace_seq *s = &iter->seq;
656 struct trace_entry *entry = iter->ent, 658 struct trace_entry *entry = iter->ent,
657 *next_entry = trace_find_next_entry(iter, NULL, 659 *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
660 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); 662 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
661 unsigned long rel_usecs; 663 unsigned long rel_usecs;
662 664
665 /* Restore the original ent_size */
666 iter->ent_size = ent_size;
667
663 if (!next_entry) 668 if (!next_entry)
664 next_ts = iter->ts; 669 next_ts = iter->ts;
665 rel_usecs = ns2usecs(next_ts - iter->ts); 670 rel_usecs = ns2usecs(next_ts - iter->ts);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd65cb19c941..ae8f708e3d75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -532,7 +532,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
532 struct vm_area_struct *vma, 532 struct vm_area_struct *vma,
533 unsigned long address, int avoid_reserve) 533 unsigned long address, int avoid_reserve)
534{ 534{
535 struct page *page; 535 struct page *page = NULL;
536 struct mempolicy *mpol; 536 struct mempolicy *mpol;
537 nodemask_t *nodemask; 537 nodemask_t *nodemask;
538 struct zonelist *zonelist; 538 struct zonelist *zonelist;
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
2498 if (outside_reserve) { 2498 if (outside_reserve) {
2499 BUG_ON(huge_pte_none(pte)); 2499 BUG_ON(huge_pte_none(pte));
2500 if (unmap_ref_private(mm, vma, old_page, address)) { 2500 if (unmap_ref_private(mm, vma, old_page, address)) {
2501 BUG_ON(page_count(old_page) != 1);
2502 BUG_ON(huge_pte_none(pte)); 2501 BUG_ON(huge_pte_none(pte));
2503 spin_lock(&mm->page_table_lock); 2502 spin_lock(&mm->page_table_lock);
2504 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2503 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
diff --git a/mm/memblock.c b/mm/memblock.c
index 99f285599501..a44eab3157f8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -330,6 +330,9 @@ static int __init_memblock memblock_add_region(struct memblock_type *type,
330 phys_addr_t end = base + memblock_cap_size(base, &size); 330 phys_addr_t end = base + memblock_cap_size(base, &size);
331 int i, nr_new; 331 int i, nr_new;
332 332
333 if (!size)
334 return 0;
335
333 /* special case for empty array */ 336 /* special case for empty array */
334 if (type->regions[0].size == 0) { 337 if (type->regions[0].size == 0) {
335 WARN_ON(type->cnt != 1 || type->total_size); 338 WARN_ON(type->cnt != 1 || type->total_size);
@@ -430,6 +433,9 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
430 433
431 *start_rgn = *end_rgn = 0; 434 *start_rgn = *end_rgn = 0;
432 435
436 if (!size)
437 return 0;
438
433 /* we'll create at most two more regions */ 439 /* we'll create at most two more regions */
434 while (type->cnt + 2 > type->max) 440 while (type->cnt + 2 > type->max)
435 if (memblock_double_array(type) < 0) 441 if (memblock_double_array(type) < 0)
@@ -514,7 +520,6 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
514 (unsigned long long)base, 520 (unsigned long long)base,
515 (unsigned long long)base + size, 521 (unsigned long long)base + size,
516 (void *)_RET_IP_); 522 (void *)_RET_IP_);
517 BUG_ON(0 == size);
518 523
519 return memblock_add_region(_rgn, base, size, MAX_NUMNODES); 524 return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
520} 525}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b868def9bcc1..b659260c56ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2476,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2476static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2476static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2477 struct page *page, 2477 struct page *page,
2478 unsigned int nr_pages, 2478 unsigned int nr_pages,
2479 struct page_cgroup *pc,
2480 enum charge_type ctype, 2479 enum charge_type ctype,
2481 bool lrucare) 2480 bool lrucare)
2482{ 2481{
2482 struct page_cgroup *pc = lookup_page_cgroup(page);
2483 struct zone *uninitialized_var(zone); 2483 struct zone *uninitialized_var(zone);
2484 bool was_on_lru = false; 2484 bool was_on_lru = false;
2485 bool anon; 2485 bool anon;
@@ -2716,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2716{ 2716{
2717 struct mem_cgroup *memcg = NULL; 2717 struct mem_cgroup *memcg = NULL;
2718 unsigned int nr_pages = 1; 2718 unsigned int nr_pages = 1;
2719 struct page_cgroup *pc;
2720 bool oom = true; 2719 bool oom = true;
2721 int ret; 2720 int ret;
2722 2721
@@ -2730,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2730 oom = false; 2729 oom = false;
2731 } 2730 }
2732 2731
2733 pc = lookup_page_cgroup(page);
2734 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 2732 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2735 if (ret == -ENOMEM) 2733 if (ret == -ENOMEM)
2736 return ret; 2734 return ret;
2737 __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false); 2735 __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
2738 return 0; 2736 return 0;
2739} 2737}
2740 2738
@@ -2831,16 +2829,13 @@ static void
2831__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 2829__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
2832 enum charge_type ctype) 2830 enum charge_type ctype)
2833{ 2831{
2834 struct page_cgroup *pc;
2835
2836 if (mem_cgroup_disabled()) 2832 if (mem_cgroup_disabled())
2837 return; 2833 return;
2838 if (!memcg) 2834 if (!memcg)
2839 return; 2835 return;
2840 cgroup_exclude_rmdir(&memcg->css); 2836 cgroup_exclude_rmdir(&memcg->css);
2841 2837
2842 pc = lookup_page_cgroup(page); 2838 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
2843 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
2844 /* 2839 /*
2845 * Now swap is on-memory. This means this page may be 2840 * Now swap is on-memory. This means this page may be
2846 * counted both as mem and swap....double count. 2841 * counted both as mem and swap....double count.
@@ -3298,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page,
3298 * page. In the case new page is migrated but not remapped, new page's 3293 * page. In the case new page is migrated but not remapped, new page's
3299 * mapcount will be finally 0 and we call uncharge in end_migration(). 3294 * mapcount will be finally 0 and we call uncharge in end_migration().
3300 */ 3295 */
3301 pc = lookup_page_cgroup(newpage);
3302 if (PageAnon(page)) 3296 if (PageAnon(page))
3303 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3297 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3304 else if (page_is_file_cache(page)) 3298 else if (page_is_file_cache(page))
3305 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3299 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3306 else 3300 else
3307 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3301 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3308 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false); 3302 __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
3309 return ret; 3303 return ret;
3310} 3304}
3311 3305
@@ -3392,8 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
3392 * the newpage may be on LRU(or pagevec for LRU) already. We lock 3386 * the newpage may be on LRU(or pagevec for LRU) already. We lock
3393 * LRU while we overwrite pc->mem_cgroup. 3387 * LRU while we overwrite pc->mem_cgroup.
3394 */ 3388 */
3395 pc = lookup_page_cgroup(newpage); 3389 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
3396 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
3397} 3390}
3398 3391
3399#ifdef CONFIG_DEBUG_VM 3392#ifdef CONFIG_DEBUG_VM
@@ -4514,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4514swap_buffers: 4507swap_buffers:
4515 /* Swap primary and spare array */ 4508 /* Swap primary and spare array */
4516 thresholds->spare = thresholds->primary; 4509 thresholds->spare = thresholds->primary;
4510 /* If all events are unregistered, free the spare array */
4511 if (!new) {
4512 kfree(thresholds->spare);
4513 thresholds->spare = NULL;
4514 }
4515
4517 rcu_assign_pointer(thresholds->primary, new); 4516 rcu_assign_pointer(thresholds->primary, new);
4518 4517
4519 /* To be sure that nobody uses thresholds */ 4518 /* To be sure that nobody uses thresholds */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfb6c8678754..b19569137529 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1361,11 +1361,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1361 1361
1362 mm = get_task_mm(task); 1362 mm = get_task_mm(task);
1363 put_task_struct(task); 1363 put_task_struct(task);
1364 if (mm) 1364
1365 err = do_migrate_pages(mm, old, new, 1365 if (!mm) {
1366 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1367 else
1368 err = -EINVAL; 1366 err = -EINVAL;
1367 goto out;
1368 }
1369
1370 err = do_migrate_pages(mm, old, new,
1371 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1369 1372
1370 mmput(mm); 1373 mmput(mm);
1371out: 1374out:
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c08a0c6f68..11072383ae12 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1388,14 +1388,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1388 mm = get_task_mm(task); 1388 mm = get_task_mm(task);
1389 put_task_struct(task); 1389 put_task_struct(task);
1390 1390
1391 if (mm) { 1391 if (!mm)
1392 if (nodes) 1392 return -EINVAL;
1393 err = do_pages_move(mm, task_nodes, nr_pages, pages, 1393
1394 nodes, status, flags); 1394 if (nodes)
1395 else 1395 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1396 err = do_pages_stat(mm, nr_pages, pages, status); 1396 nodes, status, flags);
1397 } else 1397 else
1398 err = -EINVAL; 1398 err = do_pages_stat(mm, nr_pages, pages, status);
1399 1399
1400 mmput(mm); 1400 mmput(mm);
1401 return err; 1401 return err;
diff --git a/mm/mmap.c b/mm/mmap.c
index a7bf6a31c9f6..848ef52d9603 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -240,6 +240,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
240 return next; 240 return next;
241} 241}
242 242
243static unsigned long do_brk(unsigned long addr, unsigned long len);
244
243SYSCALL_DEFINE1(brk, unsigned long, brk) 245SYSCALL_DEFINE1(brk, unsigned long, brk)
244{ 246{
245 unsigned long rlim, retval; 247 unsigned long rlim, retval;
@@ -951,7 +953,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
951 * The caller must hold down_write(&current->mm->mmap_sem). 953 * The caller must hold down_write(&current->mm->mmap_sem).
952 */ 954 */
953 955
954unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 956static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
955 unsigned long len, unsigned long prot, 957 unsigned long len, unsigned long prot,
956 unsigned long flags, unsigned long pgoff) 958 unsigned long flags, unsigned long pgoff)
957{ 959{
@@ -1087,7 +1089,32 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1087 1089
1088 return mmap_region(file, addr, len, flags, vm_flags, pgoff); 1090 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1089} 1091}
1090EXPORT_SYMBOL(do_mmap_pgoff); 1092
1093unsigned long do_mmap(struct file *file, unsigned long addr,
1094 unsigned long len, unsigned long prot,
1095 unsigned long flag, unsigned long offset)
1096{
1097 if (unlikely(offset + PAGE_ALIGN(len) < offset))
1098 return -EINVAL;
1099 if (unlikely(offset & ~PAGE_MASK))
1100 return -EINVAL;
1101 return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1102}
1103EXPORT_SYMBOL(do_mmap);
1104
1105unsigned long vm_mmap(struct file *file, unsigned long addr,
1106 unsigned long len, unsigned long prot,
1107 unsigned long flag, unsigned long offset)
1108{
1109 unsigned long ret;
1110 struct mm_struct *mm = current->mm;
1111
1112 down_write(&mm->mmap_sem);
1113 ret = do_mmap(file, addr, len, prot, flag, offset);
1114 up_write(&mm->mmap_sem);
1115 return ret;
1116}
1117EXPORT_SYMBOL(vm_mmap);
1091 1118
1092SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1119SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1093 unsigned long, prot, unsigned long, flags, 1120 unsigned long, prot, unsigned long, flags,
@@ -2105,21 +2132,25 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2105 2132
2106 return 0; 2133 return 0;
2107} 2134}
2108
2109EXPORT_SYMBOL(do_munmap); 2135EXPORT_SYMBOL(do_munmap);
2110 2136
2111SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 2137int vm_munmap(unsigned long start, size_t len)
2112{ 2138{
2113 int ret; 2139 int ret;
2114 struct mm_struct *mm = current->mm; 2140 struct mm_struct *mm = current->mm;
2115 2141
2116 profile_munmap(addr);
2117
2118 down_write(&mm->mmap_sem); 2142 down_write(&mm->mmap_sem);
2119 ret = do_munmap(mm, addr, len); 2143 ret = do_munmap(mm, start, len);
2120 up_write(&mm->mmap_sem); 2144 up_write(&mm->mmap_sem);
2121 return ret; 2145 return ret;
2122} 2146}
2147EXPORT_SYMBOL(vm_munmap);
2148
2149SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2150{
2151 profile_munmap(addr);
2152 return vm_munmap(addr, len);
2153}
2123 2154
2124static inline void verify_mm_writelocked(struct mm_struct *mm) 2155static inline void verify_mm_writelocked(struct mm_struct *mm)
2125{ 2156{
@@ -2136,7 +2167,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
2136 * anonymous maps. eventually we may be able to do some 2167 * anonymous maps. eventually we may be able to do some
2137 * brk-specific accounting here. 2168 * brk-specific accounting here.
2138 */ 2169 */
2139unsigned long do_brk(unsigned long addr, unsigned long len) 2170static unsigned long do_brk(unsigned long addr, unsigned long len)
2140{ 2171{
2141 struct mm_struct * mm = current->mm; 2172 struct mm_struct * mm = current->mm;
2142 struct vm_area_struct * vma, * prev; 2173 struct vm_area_struct * vma, * prev;
@@ -2232,7 +2263,17 @@ out:
2232 return addr; 2263 return addr;
2233} 2264}
2234 2265
2235EXPORT_SYMBOL(do_brk); 2266unsigned long vm_brk(unsigned long addr, unsigned long len)
2267{
2268 struct mm_struct *mm = current->mm;
2269 unsigned long ret;
2270
2271 down_write(&mm->mmap_sem);
2272 ret = do_brk(addr, len);
2273 up_write(&mm->mmap_sem);
2274 return ret;
2275}
2276EXPORT_SYMBOL(vm_brk);
2236 2277
2237/* Release all mmaps. */ 2278/* Release all mmaps. */
2238void exit_mmap(struct mm_struct *mm) 2279void exit_mmap(struct mm_struct *mm)
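
For context (not part of the patch above): the new vm_mmap()/vm_munmap()/vm_brk() wrappers take mmap_sem internally, which lets do_mmap_pgoff() and do_brk() become static. A user-space model of the wrapper shape, with a pthread rwlock standing in for mmap_sem and a stub in place of the real unmap:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

    static int do_unmap(unsigned long start, size_t len)   /* stub */
    {
        printf("unmap [%#lx, +%zu)\n", start, len);
        return 0;
    }

    /* Shape of vm_munmap(): callers no longer touch the lock directly. */
    static int vm_unmap(unsigned long start, size_t len)
    {
        int ret;

        pthread_rwlock_wrlock(&mmap_sem);
        ret = do_unmap(start, len);
        pthread_rwlock_unlock(&mmap_sem);
        return ret;
    }

    int main(void)
    {
        return vm_unmap(0x100000, 4096);
    }
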
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 24f0fc1a56d6..1983fb1c7026 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
82 82
83static void __init __free_pages_memory(unsigned long start, unsigned long end) 83static void __init __free_pages_memory(unsigned long start, unsigned long end)
84{ 84{
85 int i; 85 unsigned long i, start_aligned, end_aligned;
86 unsigned long start_aligned, end_aligned;
87 int order = ilog2(BITS_PER_LONG); 86 int order = ilog2(BITS_PER_LONG);
88 87
89 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); 88 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
@@ -298,13 +297,19 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
298 if (WARN_ON_ONCE(slab_is_available())) 297 if (WARN_ON_ONCE(slab_is_available()))
299 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 298 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
300 299
300again:
301 ptr = __alloc_memory_core_early(pgdat->node_id, size, align, 301 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
302 goal, -1ULL); 302 goal, -1ULL);
303 if (ptr) 303 if (ptr)
304 return ptr; 304 return ptr;
305 305
306 return __alloc_memory_core_early(MAX_NUMNODES, size, align, 306 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
307 goal, -1ULL); 307 goal, -1ULL);
308 if (!ptr && goal) {
309 goal = 0;
310 goto again;
311 }
312 return ptr;
308} 313}
309 314
310void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 315void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e170fceb4..bb8f4f004a82 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1233,7 +1233,7 @@ enomem:
1233/* 1233/*
1234 * handle mapping creation for uClinux 1234 * handle mapping creation for uClinux
1235 */ 1235 */
1236unsigned long do_mmap_pgoff(struct file *file, 1236static unsigned long do_mmap_pgoff(struct file *file,
1237 unsigned long addr, 1237 unsigned long addr,
1238 unsigned long len, 1238 unsigned long len,
1239 unsigned long prot, 1239 unsigned long prot,
@@ -1470,7 +1470,32 @@ error_getting_region:
1470 show_free_areas(0); 1470 show_free_areas(0);
1471 return -ENOMEM; 1471 return -ENOMEM;
1472} 1472}
1473EXPORT_SYMBOL(do_mmap_pgoff); 1473
1474unsigned long do_mmap(struct file *file, unsigned long addr,
1475 unsigned long len, unsigned long prot,
1476 unsigned long flag, unsigned long offset)
1477{
1478 if (unlikely(offset + PAGE_ALIGN(len) < offset))
1479 return -EINVAL;
1480 if (unlikely(offset & ~PAGE_MASK))
1481 return -EINVAL;
1482 return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1483}
1484EXPORT_SYMBOL(do_mmap);
1485
1486unsigned long vm_mmap(struct file *file, unsigned long addr,
1487 unsigned long len, unsigned long prot,
1488 unsigned long flag, unsigned long offset)
1489{
1490 unsigned long ret;
1491 struct mm_struct *mm = current->mm;
1492
1493 down_write(&mm->mmap_sem);
1494 ret = do_mmap(file, addr, len, prot, flag, offset);
1495 up_write(&mm->mmap_sem);
1496 return ret;
1497}
1498EXPORT_SYMBOL(vm_mmap);
1474 1499
1475SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1500SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1476 unsigned long, prot, unsigned long, flags, 1501 unsigned long, prot, unsigned long, flags,
@@ -1709,16 +1734,22 @@ erase_whole_vma:
1709} 1734}
1710EXPORT_SYMBOL(do_munmap); 1735EXPORT_SYMBOL(do_munmap);
1711 1736
1712SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1737int vm_munmap(unsigned long addr, size_t len)
1713{ 1738{
1714 int ret;
1715 struct mm_struct *mm = current->mm; 1739 struct mm_struct *mm = current->mm;
1740 int ret;
1716 1741
1717 down_write(&mm->mmap_sem); 1742 down_write(&mm->mmap_sem);
1718 ret = do_munmap(mm, addr, len); 1743 ret = do_munmap(mm, addr, len);
1719 up_write(&mm->mmap_sem); 1744 up_write(&mm->mmap_sem);
1720 return ret; 1745 return ret;
1721} 1746}
1747EXPORT_SYMBOL(vm_munmap);
1748
1749SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1750{
1751 return vm_munmap(addr, len);
1752}
1722 1753
1723/* 1754/*
1724 * release all the mappings made in a process's VM space 1755 * release all the mappings made in a process's VM space
@@ -1744,7 +1775,7 @@ void exit_mmap(struct mm_struct *mm)
1744 kleave(""); 1775 kleave("");
1745} 1776}
1746 1777
1747unsigned long do_brk(unsigned long addr, unsigned long len) 1778unsigned long vm_brk(unsigned long addr, unsigned long len)
1748{ 1779{
1749 return -ENOMEM; 1780 return -ENOMEM;
1750} 1781}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04ce..918330f71dba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5203 int ret; 5203 int ret;
5204 5204
5205 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5205 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5206 if (!write || (ret == -EINVAL)) 5206 if (!write || (ret < 0))
5207 return ret; 5207 return ret;
5208 for_each_populated_zone(zone) { 5208 for_each_populated_zone(zone) {
5209 for_each_possible_cpu(cpu) { 5209 for_each_possible_cpu(cpu) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af9123af7..bb4be7435ce3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
1132 for (alloc_end += gi->nr_units / upa; 1132 for (alloc_end += gi->nr_units / upa;
1133 alloc < alloc_end; alloc++) { 1133 alloc < alloc_end; alloc++) {
1134 if (!(alloc % apl)) { 1134 if (!(alloc % apl)) {
1135 printk("\n"); 1135 printk(KERN_CONT "\n");
1136 printk("%spcpu-alloc: ", lvl); 1136 printk("%spcpu-alloc: ", lvl);
1137 } 1137 }
1138 printk("[%0*d] ", group_width, group); 1138 printk(KERN_CONT "[%0*d] ", group_width, group);
1139 1139
1140 for (unit_end += upa; unit < unit_end; unit++) 1140 for (unit_end += upa; unit < unit_end; unit++)
1141 if (gi->cpu_map[unit] != NR_CPUS) 1141 if (gi->cpu_map[unit] != NR_CPUS)
1142 printk("%0*d ", cpu_width, 1142 printk(KERN_CONT "%0*d ", cpu_width,
1143 gi->cpu_map[unit]); 1143 gi->cpu_map[unit]);
1144 else 1144 else
1145 printk("%s ", empty_str); 1145 printk(KERN_CONT "%s ", empty_str);
1146 } 1146 }
1147 } 1147 }
1148 printk("\n"); 1148 printk(KERN_CONT "\n");
1149} 1149}
1150 1150
1151/** 1151/**
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1650 areas[group] = ptr; 1650 areas[group] = ptr;
1651 1651
1652 base = min(ptr, base); 1652 base = min(ptr, base);
1653 }
1654
1655 /*
1656 * Copy data and free unused parts. This should happen after all
1657 * allocations are complete; otherwise, we may end up with
1658 * overlapping groups.
1659 */
1660 for (group = 0; group < ai->nr_groups; group++) {
1661 struct pcpu_group_info *gi = &ai->groups[group];
1662 void *ptr = areas[group];
1653 1663
1654 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1664 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1655 if (gi->cpu_map[i] == NR_CPUS) { 1665 if (gi->cpu_map[i] == NR_CPUS) {
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
1885 fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 1895 fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1886 if (!ai || !fc) 1896 if (!ai || !fc)
1887 panic("Failed to allocate memory for percpu areas."); 1897 panic("Failed to allocate memory for percpu areas.");
1898 /* kmemleak tracks the percpu allocations separately */
1899 kmemleak_free(fc);
1888 1900
1889 ai->dyn_size = unit_size; 1901 ai->dyn_size = unit_size;
1890 ai->unit_size = unit_size; 1902 ai->unit_size = unit_size;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9d3dd3763cf7..4c5ff7f284d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -26,7 +26,7 @@
26 */ 26 */
27static const struct address_space_operations swap_aops = { 27static const struct address_space_operations swap_aops = {
28 .writepage = swap_writepage, 28 .writepage = swap_writepage,
29 .set_page_dirty = __set_page_dirty_nobuffers, 29 .set_page_dirty = __set_page_dirty_no_writeback,
30 .migratepage = migrate_page, 30 .migratepage = migrate_page,
31}; 31};
32 32
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1a518684a32f..33dc256033b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1568,9 +1568,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1568 reclaim_stat->recent_scanned[0] += nr_anon; 1568 reclaim_stat->recent_scanned[0] += nr_anon;
1569 reclaim_stat->recent_scanned[1] += nr_file; 1569 reclaim_stat->recent_scanned[1] += nr_file;
1570 1570
1571 if (current_is_kswapd()) 1571 if (global_reclaim(sc)) {
1572 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); 1572 if (current_is_kswapd())
1573 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); 1573 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1574 nr_reclaimed);
1575 else
1576 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1577 nr_reclaimed);
1578 }
1574 1579
1575 putback_inactive_pages(mz, &page_list); 1580 putback_inactive_pages(mz, &page_list);
1576 1581
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f600557a7659..7db1b9bab492 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -738,7 +738,8 @@ const char * const vmstat_text[] = {
738 "pgmajfault", 738 "pgmajfault",
739 739
740 TEXTS_FOR_ZONES("pgrefill") 740 TEXTS_FOR_ZONES("pgrefill")
741 TEXTS_FOR_ZONES("pgsteal") 741 TEXTS_FOR_ZONES("pgsteal_kswapd")
742 TEXTS_FOR_ZONES("pgsteal_direct")
742 TEXTS_FOR_ZONES("pgscan_kswapd") 743 TEXTS_FOR_ZONES("pgscan_kswapd")
743 TEXTS_FOR_ZONES("pgscan_direct") 744 TEXTS_FOR_ZONES("pgscan_direct")
744 745
@@ -747,7 +748,6 @@ const char * const vmstat_text[] = {
747#endif 748#endif
748 "pginodesteal", 749 "pginodesteal",
749 "slabs_scanned", 750 "slabs_scanned",
750 "kswapd_steal",
751 "kswapd_inodesteal", 751 "kswapd_inodesteal",
752 "kswapd_low_wmark_hit_quickly", 752 "kswapd_low_wmark_hit_quickly",
753 "kswapd_high_wmark_hit_quickly", 753 "kswapd_high_wmark_hit_quickly",
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb372..9757c193c86b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
158 } 158 }
159 159
160 skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); 160 skb->dev = vlan_dev_priv(dev)->real_dev;
161 len = skb->len; 161 len = skb->len;
162 if (netpoll_tx_running(dev)) 162 if (netpoll_tx_running(dev))
163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); 163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 0906c194a413..9d9a6a3edbd5 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -2011,16 +2011,17 @@ static void __exit ax25_exit(void)
2011 proc_net_remove(&init_net, "ax25_route"); 2011 proc_net_remove(&init_net, "ax25_route");
2012 proc_net_remove(&init_net, "ax25"); 2012 proc_net_remove(&init_net, "ax25");
2013 proc_net_remove(&init_net, "ax25_calls"); 2013 proc_net_remove(&init_net, "ax25_calls");
2014 ax25_rt_free();
2015 ax25_uid_free();
2016 ax25_dev_free();
2017 2014
2018 ax25_unregister_sysctl();
2019 unregister_netdevice_notifier(&ax25_dev_notifier); 2015 unregister_netdevice_notifier(&ax25_dev_notifier);
2016 ax25_unregister_sysctl();
2020 2017
2021 dev_remove_pack(&ax25_packet_type); 2018 dev_remove_pack(&ax25_packet_type);
2022 2019
2023 sock_unregister(PF_AX25); 2020 sock_unregister(PF_AX25);
2024 proto_unregister(&ax25_proto); 2021 proto_unregister(&ax25_proto);
2022
2023 ax25_rt_free();
2024 ax25_uid_free();
2025 ax25_dev_free();
2025} 2026}
2026module_exit(ax25_exit); 2027module_exit(ax25_exit);
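The af_ax25.c reordering above applies a general module-exit rule: shut down every path that can still reach the protocol's data (netdevice notifier, sysctl, packet handler, socket family) before freeing the tables those paths consult. A hedged sketch of that shape with made-up names (the example_* identifiers are not real kernel symbols):

	static void __exit example_exit(void)
	{
		/* 1. Stop new work from arriving. */
		unregister_netdevice_notifier(&example_dev_notifier);
		example_unregister_sysctl();
		dev_remove_pack(&example_packet_type);
		sock_unregister(PF_EXAMPLE);
		proto_unregister(&example_proto);

		/* 2. Only now is it safe to free the shared state. */
		example_rt_free();
		example_dev_free();
	}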
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e3786d..edfd61addcec 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1215 return NULL; 1215 return NULL;
1216} 1216}
1217 1217
1218static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1218static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1219 u8 key_type, u8 old_key_type) 1219 u8 key_type, u8 old_key_type)
1220{ 1220{
1221 /* Legacy key */ 1221 /* Legacy key */
1222 if (key_type < 0x03) 1222 if (key_type < 0x03)
1223 return 1; 1223 return true;
1224 1224
1225 /* Debug keys are insecure so don't store them persistently */ 1225 /* Debug keys are insecure so don't store them persistently */
1226 if (key_type == HCI_LK_DEBUG_COMBINATION) 1226 if (key_type == HCI_LK_DEBUG_COMBINATION)
1227 return 0; 1227 return false;
1228 1228
1229 /* Changed combination key and there's no previous one */ 1229 /* Changed combination key and there's no previous one */
1230 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1230 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1231 return 0; 1231 return false;
1232 1232
1233 /* Security mode 3 case */ 1233 /* Security mode 3 case */
1234 if (!conn) 1234 if (!conn)
1235 return 1; 1235 return true;
1236 1236
1237 /* Neither local nor remote side had no-bonding as requirement */ 1237 /* Neither local nor remote side had no-bonding as requirement */
1238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1239 return 1; 1239 return true;
1240 1240
1241 /* Local side had dedicated bonding as requirement */ 1241 /* Local side had dedicated bonding as requirement */
1242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1243 return 1; 1243 return true;
1244 1244
1245 /* Remote side had dedicated bonding as requirement */ 1245 /* Remote side had dedicated bonding as requirement */
1246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1247 return 1; 1247 return true;
1248 1248
1249 /* If none of the above criteria match, then don't store the key 1249 /* If none of the above criteria match, then don't store the key
1250 * persistently */ 1250 * persistently */
1251 return 0; 1251 return false;
1252} 1252}
1253 1253
1254struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) 1254struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1285 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1285 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1286{ 1286{
1287 struct link_key *key, *old_key; 1287 struct link_key *key, *old_key;
1288 u8 old_key_type, persistent; 1288 u8 old_key_type;
1289 bool persistent;
1289 1290
1290 old_key = hci_find_link_key(hdev, bdaddr); 1291 old_key = hci_find_link_key(hdev, bdaddr);
1291 if (old_key) { 1292 if (old_key) {
@@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1328 1329
1329 mgmt_new_link_key(hdev, key, persistent); 1330 mgmt_new_link_key(hdev, key, persistent);
1330 1331
1331 if (!persistent) { 1332 if (conn)
1332 list_del(&key->list); 1333 conn->flush_key = !persistent;
1333 kfree(key);
1334 }
1335 1334
1336 return 0; 1335 return 0;
1337} 1336}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..6c065254afc0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1901 } 1901 }
1902 1902
1903 if (ev->status == 0) { 1903 if (ev->status == 0) {
1904 if (conn->type == ACL_LINK && conn->flush_key)
1905 hci_remove_link_key(hdev, &conn->dst);
1904 hci_proto_disconn_cfm(conn, ev->reason); 1906 hci_proto_disconn_cfm(conn, ev->reason);
1905 hci_conn_del(conn); 1907 hci_conn_del(conn);
1906 } 1908 }
@@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2311 2313
2312 case HCI_OP_USER_PASSKEY_NEG_REPLY: 2314 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2313 hci_cc_user_passkey_neg_reply(hdev, skb); 2315 hci_cc_user_passkey_neg_reply(hdev, skb);
2316 break;
2314 2317
2315 case HCI_OP_LE_SET_SCAN_PARAM: 2318 case HCI_OP_LE_SET_SCAN_PARAM:
2316 hci_cc_le_set_scan_param(hdev, skb); 2319 hci_cc_le_set_scan_param(hdev, skb);
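The one-line hci_cmd_complete_evt() change is the classic missing-break bug: without it, a completed HCI_OP_USER_PASSKEY_NEG_REPLY command falls through and is also handed to the HCI_OP_LE_SET_SCAN_PARAM handler. A self-contained illustration with toy opcodes (nothing Bluetooth-specific):

	#include <stdio.h>

	enum { OP_A = 1, OP_B = 2 };

	static void handle(int opcode)
	{
		switch (opcode) {
		case OP_A:
			printf("handled A\n");
			/* missing break: execution falls through into OP_B */
		case OP_B:
			printf("handled B\n");
			break;
		}
	}

	int main(void)
	{
		handle(OP_A);	/* prints both lines -- the bug the patch fixes */
		return 0;
	}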
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c69675..4bb03b111122 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2884 return 0; 2884 return 0;
2885} 2885}
2886 2886
2887int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent) 2887int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
2888{ 2888{
2889 struct mgmt_ev_new_link_key ev; 2889 struct mgmt_ev_new_link_key ev;
2890 2890
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..a2098e3de500 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
47 kfree_skb(skb); 47 kfree_skb(skb);
48 } else { 48 } else {
49 skb_push(skb, ETH_HLEN); 49 skb_push(skb, ETH_HLEN);
50 br_drop_fake_rtable(skb);
50 dev_queue_xmit(skb); 51 dev_queue_xmit(skb);
51 } 52 }
52 53
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..d7f49b63ab0f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
156 rt->dst.dev = br->dev; 156 rt->dst.dev = br->dev;
157 rt->dst.path = &rt->dst; 157 rt->dst.path = &rt->dst;
158 dst_init_metrics(&rt->dst, br_dst_default_metrics, true); 158 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
159 rt->dst.flags = DST_NOXFRM | DST_NOPEER; 159 rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
160 rt->dst.ops = &fake_dst_ops; 160 rt->dst.ops = &fake_dst_ops;
161} 161}
162 162
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
694 const struct net_device *out, 694 const struct net_device *out,
695 int (*okfn)(struct sk_buff *)) 695 int (*okfn)(struct sk_buff *))
696{ 696{
697 struct rtable *rt = skb_rtable(skb); 697 br_drop_fake_rtable(skb);
698
699 if (rt && rt == bridge_parent_rtable(in))
700 skb_dst_drop(skb);
701
702 return NF_ACCEPT; 698 return NF_ACCEPT;
703} 699}
704 700
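The two bridge hunks replace an open-coded comparison against bridge_parent_rtable() with a flag test: the fake rtable is tagged DST_FAKE_RTABLE when it is initialised, so any layer (br_dev_queue_push_xmit() as well as br_nf_local_in()) can drop it without bridge-private knowledge. The helper itself is not in these hunks; a plausible shape, offered purely as an assumption about code living elsewhere in the series:

	/* Assumed shape of the helper -- not taken from the hunks above. */
	static inline void br_drop_fake_rtable(struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);

		if (dst && (dst->flags & DST_FAKE_RTABLE))
			skb_dst_drop(skb);
	}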
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 20618dd3088b..d09340e1523f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
103 skb->protocol = htons(ETH_P_IPV6); 103 skb->protocol = htons(ETH_P_IPV6);
104 break; 104 break;
105 default: 105 default:
106 kfree_skb(skb);
106 priv->netdev->stats.rx_errors++; 107 priv->netdev->stats.rx_errors++;
107 return -EINVAL; 108 return -EINVAL;
108 } 109 }
@@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
220 221
221 if (skb->len > priv->netdev->mtu) { 222 if (skb->len > priv->netdev->mtu) {
222 pr_warn("Size of skb exceeded MTU\n"); 223 pr_warn("Size of skb exceeded MTU\n");
224 kfree_skb(skb);
223 dev->stats.tx_errors++; 225 dev->stats.tx_errors++;
224 return -ENOSPC; 226 return NETDEV_TX_OK;
225 } 227 }
226 228
227 if (!priv->flowenabled) { 229 if (!priv->flowenabled) {
228 pr_debug("dropping packets flow off\n"); 230 pr_debug("dropping packets flow off\n");
231 kfree_skb(skb);
229 dev->stats.tx_dropped++; 232 dev->stats.tx_dropped++;
230 return NETDEV_TX_BUSY; 233 return NETDEV_TX_OK;
231 } 234 }
232 235
233 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) 236 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
@@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
242 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 245 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
243 if (result) { 246 if (result) {
244 dev->stats.tx_dropped++; 247 dev->stats.tx_dropped++;
245 return result; 248 return NETDEV_TX_OK;
246 } 249 }
247 250
248 /* Update statistics. */ 251 /* Update statistics. */
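The chnl_net_start_xmit() changes all enforce the same ndo_start_xmit() contract: when the driver decides not to transmit for a non-recoverable reason, it must free the skb itself and still return NETDEV_TX_OK, since negative errno values are not valid return codes and NETDEV_TX_BUSY asks the core to requeue the very same skb. A hedged skeleton (the example_* helpers are placeholders, not CAIF code):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		if (skb->len > dev->mtu || !example_flow_enabled(dev)) {
			kfree_skb(skb);			/* driver owns the skb now */
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;		/* consumed; do not requeue */
		}

		if (example_hw_queue_full(dev))
			return NETDEV_TX_BUSY;		/* rare: core retries the skb */

		example_hw_transmit(dev, skb);
		return NETDEV_TX_OK;
	}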
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453b2803..99e1d759f41e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
1409 * register_netdevice_notifier(). The notifier is unlinked into the 1409 * register_netdevice_notifier(). The notifier is unlinked into the
1410 * kernel structures and may then be reused. A negative errno code 1410 * kernel structures and may then be reused. A negative errno code
1411 * is returned on a failure. 1411 * is returned on a failure.
1412 *
 1413 * After unregistering, NETDEV_UNREGISTER and NETDEV_DOWN events are
 1414 * synthesized for every device on the device list and delivered to the
 1415 * removed notifier, removing the need for special-case cleanup code.
1412 */ 1416 */
1413 1417
1414int unregister_netdevice_notifier(struct notifier_block *nb) 1418int unregister_netdevice_notifier(struct notifier_block *nb)
1415{ 1419{
1420 struct net_device *dev;
1421 struct net *net;
1416 int err; 1422 int err;
1417 1423
1418 rtnl_lock(); 1424 rtnl_lock();
1419 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1425 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1426 if (err)
1427 goto unlock;
1428
1429 for_each_net(net) {
1430 for_each_netdev(net, dev) {
1431 if (dev->flags & IFF_UP) {
1432 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1433 nb->notifier_call(nb, NETDEV_DOWN, dev);
1434 }
1435 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1436 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1437 }
1438 }
1439unlock:
1420 rtnl_unlock(); 1440 rtnl_unlock();
1421 return err; 1441 return err;
1422} 1442}
@@ -1597,10 +1617,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1597 return NET_RX_DROP; 1617 return NET_RX_DROP;
1598 } 1618 }
1599 skb->skb_iif = 0; 1619 skb->skb_iif = 0;
1600 skb_set_dev(skb, dev); 1620 skb->dev = dev;
1621 skb_dst_drop(skb);
1601 skb->tstamp.tv64 = 0; 1622 skb->tstamp.tv64 = 0;
1602 skb->pkt_type = PACKET_HOST; 1623 skb->pkt_type = PACKET_HOST;
1603 skb->protocol = eth_type_trans(skb, dev); 1624 skb->protocol = eth_type_trans(skb, dev);
1625 skb->mark = 0;
1626 secpath_reset(skb);
1627 nf_reset(skb);
1604 return netif_rx(skb); 1628 return netif_rx(skb);
1605} 1629}
1606EXPORT_SYMBOL_GPL(dev_forward_skb); 1630EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1849,36 +1873,6 @@ void netif_device_attach(struct net_device *dev)
1849} 1873}
1850EXPORT_SYMBOL(netif_device_attach); 1874EXPORT_SYMBOL(netif_device_attach);
1851 1875
1852/**
1853 * skb_dev_set -- assign a new device to a buffer
1854 * @skb: buffer for the new device
1855 * @dev: network device
1856 *
1857 * If an skb is owned by a device already, we have to reset
1858 * all data private to the namespace a device belongs to
1859 * before assigning it a new device.
1860 */
1861#ifdef CONFIG_NET_NS
1862void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1863{
1864 skb_dst_drop(skb);
1865 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1866 secpath_reset(skb);
1867 nf_reset(skb);
1868 skb_init_secmark(skb);
1869 skb->mark = 0;
1870 skb->priority = 0;
1871 skb->nf_trace = 0;
1872 skb->ipvs_property = 0;
1873#ifdef CONFIG_NET_SCHED
1874 skb->tc_index = 0;
1875#endif
1876 }
1877 skb->dev = dev;
1878}
1879EXPORT_SYMBOL(skb_set_dev);
1880#endif /* CONFIG_NET_NS */
1881
1882static void skb_warn_bad_offload(const struct sk_buff *skb) 1876static void skb_warn_bad_offload(const struct sk_buff *skb)
1883{ 1877{
1884 static const netdev_features_t null_features = 0; 1878 static const netdev_features_t null_features = 0;
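With the event replay added to unregister_netdevice_notifier() above, a notifier's NETDEV_UNREGISTER handler doubles as its teardown path. An illustrative consumer (example_nb is a placeholder notifier_block, not from the patch):

	static void __exit example_exit(void)
	{
		/*
		 * NETDEV_GOING_DOWN/NETDEV_DOWN/NETDEV_UNREGISTER are now
		 * synthesised for every existing device, so no manual walk
		 * of the device list is needed here.
		 */
		unregister_netdevice_notifier(&example_nb);
	}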
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
42 * netlink alerts 42 * netlink alerts
43 */ 43 */
44static int trace_state = TRACE_OFF; 44static int trace_state = TRACE_OFF;
45static DEFINE_SPINLOCK(trace_state_lock); 45static DEFINE_MUTEX(trace_state_mutex);
46 46
47struct per_cpu_dm_data { 47struct per_cpu_dm_data {
48 struct work_struct dm_alert_work; 48 struct work_struct dm_alert_work;
49 struct sk_buff *skb; 49 struct sk_buff __rcu *skb;
50 atomic_t dm_hit_count; 50 atomic_t dm_hit_count;
51 struct timer_list send_timer; 51 struct timer_list send_timer;
52 int cpu;
52}; 53};
53 54
54struct dm_hw_stat_delta { 55struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
79 size_t al; 80 size_t al;
80 struct net_dm_alert_msg *msg; 81 struct net_dm_alert_msg *msg;
81 struct nlattr *nla; 82 struct nlattr *nla;
83 struct sk_buff *skb;
84 struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
82 85
83 al = sizeof(struct net_dm_alert_msg); 86 al = sizeof(struct net_dm_alert_msg);
84 al += dm_hit_limit * sizeof(struct net_dm_drop_point); 87 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
85 al += sizeof(struct nlattr); 88 al += sizeof(struct nlattr);
86 89
87 data->skb = genlmsg_new(al, GFP_KERNEL); 90 skb = genlmsg_new(al, GFP_KERNEL);
88 genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, 91
89 0, NET_DM_CMD_ALERT); 92 if (skb) {
90 nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); 93 genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
91 msg = nla_data(nla); 94 0, NET_DM_CMD_ALERT);
92 memset(msg, 0, al); 95 nla = nla_reserve(skb, NLA_UNSPEC,
93 atomic_set(&data->dm_hit_count, dm_hit_limit); 96 sizeof(struct net_dm_alert_msg));
97 msg = nla_data(nla);
98 memset(msg, 0, al);
99 } else
100 schedule_work_on(data->cpu, &data->dm_alert_work);
101
102 /*
103 * Don't need to lock this, since we are guaranteed to only
104 * run this on a single cpu at a time.
105 * Note also that we only update data->skb if the old and new skb
106 * pointers don't match. This ensures that we don't continually call
 107 * synchronize_rcu if we repeatedly fail to alloc a new netlink message.
108 */
109 if (skb != oskb) {
110 rcu_assign_pointer(data->skb, skb);
111
112 synchronize_rcu();
113
114 atomic_set(&data->dm_hit_count, dm_hit_limit);
115 }
116
94} 117}
95 118
96static void send_dm_alert(struct work_struct *unused) 119static void send_dm_alert(struct work_struct *unused)
97{ 120{
98 struct sk_buff *skb; 121 struct sk_buff *skb;
99 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 122 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
123
124 WARN_ON_ONCE(data->cpu != smp_processor_id());
100 125
101 /* 126 /*
102 * Grab the skb we're about to send 127 * Grab the skb we're about to send
103 */ 128 */
104 skb = data->skb; 129 skb = rcu_dereference_protected(data->skb, 1);
105 130
106 /* 131 /*
107 * Replace it with a new one 132 * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
111 /* 136 /*
112 * Ship it! 137 * Ship it!
113 */ 138 */
114 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 139 if (skb)
140 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
115 141
142 put_cpu_var(dm_cpu_data);
116} 143}
117 144
118/* 145/*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
123 */ 150 */
124static void sched_send_work(unsigned long unused) 151static void sched_send_work(unsigned long unused)
125{ 152{
126 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 153 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
154
155 schedule_work_on(smp_processor_id(), &data->dm_alert_work);
127 156
128 schedule_work(&data->dm_alert_work); 157 put_cpu_var(dm_cpu_data);
129} 158}
130 159
131static void trace_drop_common(struct sk_buff *skb, void *location) 160static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
134 struct nlmsghdr *nlh; 163 struct nlmsghdr *nlh;
135 struct nlattr *nla; 164 struct nlattr *nla;
136 int i; 165 int i;
137 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 166 struct sk_buff *dskb;
167 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
168
169
170 rcu_read_lock();
171 dskb = rcu_dereference(data->skb);
138 172
173 if (!dskb)
174 goto out;
139 175
140 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { 176 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
141 /* 177 /*
@@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
144 goto out; 180 goto out;
145 } 181 }
146 182
147 nlh = (struct nlmsghdr *)data->skb->data; 183 nlh = (struct nlmsghdr *)dskb->data;
148 nla = genlmsg_data(nlmsg_data(nlh)); 184 nla = genlmsg_data(nlmsg_data(nlh));
149 msg = nla_data(nla); 185 msg = nla_data(nla);
150 for (i = 0; i < msg->entries; i++) { 186 for (i = 0; i < msg->entries; i++) {
151 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 187 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
152 msg->points[i].count++; 188 msg->points[i].count++;
189 atomic_inc(&data->dm_hit_count);
153 goto out; 190 goto out;
154 } 191 }
155 } 192 }
@@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
157 /* 194 /*
158 * We need to create a new entry 195 * We need to create a new entry
159 */ 196 */
160 __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); 197 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
161 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); 198 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
162 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); 199 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
163 msg->points[msg->entries].count = 1; 200 msg->points[msg->entries].count = 1;
@@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
169 } 206 }
170 207
171out: 208out:
209 rcu_read_unlock();
210 put_cpu_var(dm_cpu_data);
172 return; 211 return;
173} 212}
174 213
@@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state)
213 struct dm_hw_stat_delta *new_stat = NULL; 252 struct dm_hw_stat_delta *new_stat = NULL;
214 struct dm_hw_stat_delta *temp; 253 struct dm_hw_stat_delta *temp;
215 254
216 spin_lock(&trace_state_lock); 255 mutex_lock(&trace_state_mutex);
217 256
218 if (state == trace_state) { 257 if (state == trace_state) {
219 rc = -EAGAIN; 258 rc = -EAGAIN;
@@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state)
252 rc = -EINPROGRESS; 291 rc = -EINPROGRESS;
253 292
254out_unlock: 293out_unlock:
255 spin_unlock(&trace_state_lock); 294 mutex_unlock(&trace_state_mutex);
256 295
257 return rc; 296 return rc;
258} 297}
@@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
295 334
296 new_stat->dev = dev; 335 new_stat->dev = dev;
297 new_stat->last_rx = jiffies; 336 new_stat->last_rx = jiffies;
298 spin_lock(&trace_state_lock); 337 mutex_lock(&trace_state_mutex);
299 list_add_rcu(&new_stat->list, &hw_stats_list); 338 list_add_rcu(&new_stat->list, &hw_stats_list);
300 spin_unlock(&trace_state_lock); 339 mutex_unlock(&trace_state_mutex);
301 break; 340 break;
302 case NETDEV_UNREGISTER: 341 case NETDEV_UNREGISTER:
303 spin_lock(&trace_state_lock); 342 mutex_lock(&trace_state_mutex);
304 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { 343 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
305 if (new_stat->dev == dev) { 344 if (new_stat->dev == dev) {
306 new_stat->dev = NULL; 345 new_stat->dev = NULL;
@@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
311 } 350 }
312 } 351 }
313 } 352 }
314 spin_unlock(&trace_state_lock); 353 mutex_unlock(&trace_state_mutex);
315 break; 354 break;
316 } 355 }
317out: 356out:
@@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void)
367 406
368 for_each_present_cpu(cpu) { 407 for_each_present_cpu(cpu) {
369 data = &per_cpu(dm_cpu_data, cpu); 408 data = &per_cpu(dm_cpu_data, cpu);
370 reset_per_cpu_data(data); 409 data->cpu = cpu;
371 INIT_WORK(&data->dm_alert_work, send_dm_alert); 410 INIT_WORK(&data->dm_alert_work, send_dm_alert);
372 init_timer(&data->send_timer); 411 init_timer(&data->send_timer);
373 data->send_timer.data = cpu; 412 data->send_timer.data = cpu;
374 data->send_timer.function = sched_send_work; 413 data->send_timer.function = sched_send_work;
414 reset_per_cpu_data(data);
375 } 415 }
376 416
417
377 goto out; 418 goto out;
378 419
379out_unreg: 420out_unreg:
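The drop_monitor rework above combines two generic patterns: per-CPU data accessed with get_cpu_var()/put_cpu_var() so the work always runs on the CPU that owns it, and an RCU-protected skb pointer so readers in trace_drop_common() never see a half-replaced buffer. A generic sketch of the publish/replace half ("struct buf" and the helpers are illustrative, not from the patch):

	struct buf { char data[64]; };

	static struct buf __rcu *cur_buf;

	static void replace_buf(struct buf *newb)
	{
		struct buf *old = rcu_dereference_protected(cur_buf, 1);

		rcu_assign_pointer(cur_buf, newb);	/* publish the new buffer */
		synchronize_rcu();			/* wait out current readers */
		kfree(old);				/* nobody can still see old */
	}

	static void read_buf(void)
	{
		struct buf *b;

		rcu_read_lock();
		b = rcu_dereference(cur_buf);
		if (b) {
			/* read-side use of b; must not block */
		}
		rcu_read_unlock();
	}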
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..31a5ae51a45c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@ assign:
83 83
84static int ops_init(const struct pernet_operations *ops, struct net *net) 84static int ops_init(const struct pernet_operations *ops, struct net *net)
85{ 85{
86 int err; 86 int err = -ENOMEM;
87 void *data = NULL;
88
87 if (ops->id && ops->size) { 89 if (ops->id && ops->size) {
88 void *data = kzalloc(ops->size, GFP_KERNEL); 90 data = kzalloc(ops->size, GFP_KERNEL);
89 if (!data) 91 if (!data)
90 return -ENOMEM; 92 goto out;
91 93
92 err = net_assign_generic(net, *ops->id, data); 94 err = net_assign_generic(net, *ops->id, data);
93 if (err) { 95 if (err)
94 kfree(data); 96 goto cleanup;
95 return err;
96 }
97 } 97 }
98 err = 0;
98 if (ops->init) 99 if (ops->init)
99 return ops->init(net); 100 err = ops->init(net);
100 return 0; 101 if (!err)
102 return 0;
103
104cleanup:
105 kfree(data);
106
107out:
108 return err;
101} 109}
102 110
103static void ops_free(const struct pernet_operations *ops, struct net *net) 111static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
448static int __register_pernet_operations(struct list_head *list, 456static int __register_pernet_operations(struct list_head *list,
449 struct pernet_operations *ops) 457 struct pernet_operations *ops)
450{ 458{
451 int err = 0; 459 return ops_init(ops, &init_net);
452 err = ops_init(ops, &init_net);
453 if (err)
454 ops_free(ops, &init_net);
455 return err;
456
457} 460}
458 461
459static void __unregister_pernet_operations(struct pernet_operations *ops) 462static void __unregister_pernet_operations(struct pernet_operations *ops)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd503..77a59980b579 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused,
1931{ 1931{
1932 struct net_device *dev = ptr; 1932 struct net_device *dev = ptr;
1933 1933
1934 if (!net_eq(dev_net(dev), &init_net)) 1934 if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
1935 return NOTIFY_DONE; 1935 return NOTIFY_DONE;
1936 1936
1937 /* It is OK that we do not hold the group lock right now, 1937 /* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
3755{ 3755{
3756 struct pktgen_thread *t; 3756 struct pktgen_thread *t;
3757 struct list_head *q, *n; 3757 struct list_head *q, *n;
3758 struct list_head list;
3758 3759
3759 /* Stop all interfaces & threads */ 3760 /* Stop all interfaces & threads */
3760 pktgen_exiting = true; 3761 pktgen_exiting = true;
3761 3762
3762 list_for_each_safe(q, n, &pktgen_threads) { 3763 mutex_lock(&pktgen_thread_lock);
3764 list_splice(&list, &pktgen_threads);
3765 mutex_unlock(&pktgen_thread_lock);
3766
3767 list_for_each_safe(q, n, &list) {
3763 t = list_entry(q, struct pktgen_thread, th_list); 3768 t = list_entry(q, struct pktgen_thread, th_list);
3769 list_del(&t->th_list);
3764 kthread_stop(t->tsk); 3770 kthread_stop(t->tsk);
3765 kfree(t); 3771 kfree(t);
3766 } 3772 }
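The pg_cleanup() change moves thread teardown out from under pktgen_thread_lock by detaching the list first, the usual shape when the per-entry teardown (kthread_stop() here, which can sleep) should not run with the lock held. A generic sketch of that pattern ("struct worker" and stop_worker() are placeholders):

	static void shutdown_all(struct list_head *global, struct mutex *lock)
	{
		struct worker *w, *n;
		LIST_HEAD(local);

		mutex_lock(lock);
		list_splice_init(global, &local);	/* global list is now empty */
		mutex_unlock(lock);

		list_for_each_entry_safe(w, n, &local, node) {
			list_del(&w->node);
			stop_worker(w);			/* may sleep; lock not held */
			kfree(w);
		}
	}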
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 368515885368..840821b90bcd 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev)
1044 free_netdev(dev); 1044 free_netdev(dev);
1045} 1045}
1046 1046
1047static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
1048{
1049 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1050 return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
1051}
1052
1053static u16 lowpan_get_pan_id(const struct net_device *dev)
1054{
1055 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1056 return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
1057}
1058
1059static u16 lowpan_get_short_addr(const struct net_device *dev)
1060{
1061 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1062 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
1063}
1064
1047static struct header_ops lowpan_header_ops = { 1065static struct header_ops lowpan_header_ops = {
1048 .create = lowpan_header_create, 1066 .create = lowpan_header_create,
1049}; 1067};
@@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
1053 .ndo_set_mac_address = eth_mac_addr, 1071 .ndo_set_mac_address = eth_mac_addr,
1054}; 1072};
1055 1073
1074static struct ieee802154_mlme_ops lowpan_mlme = {
1075 .get_pan_id = lowpan_get_pan_id,
1076 .get_phy = lowpan_get_phy,
1077 .get_short_addr = lowpan_get_short_addr,
1078};
1079
1056static void lowpan_setup(struct net_device *dev) 1080static void lowpan_setup(struct net_device *dev)
1057{ 1081{
1058 pr_debug("(%s)\n", __func__); 1082 pr_debug("(%s)\n", __func__);
@@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev)
1070 1094
1071 dev->netdev_ops = &lowpan_netdev_ops; 1095 dev->netdev_ops = &lowpan_netdev_ops;
1072 dev->header_ops = &lowpan_header_ops; 1096 dev->header_ops = &lowpan_header_ops;
1097 dev->ml_priv = &lowpan_mlme;
1073 dev->destructor = lowpan_dev_free; 1098 dev->destructor = lowpan_dev_free;
1074} 1099}
1075 1100
@@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1143 list_add_tail(&entry->list, &lowpan_devices); 1168 list_add_tail(&entry->list, &lowpan_devices);
1144 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); 1169 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
1145 1170
1171 spin_lock_init(&flist_lock);
1172
1146 register_netdevice(dev); 1173 register_netdevice(dev);
1147 1174
1148 return 0; 1175 return 0;
@@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
1152{ 1179{
1153 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); 1180 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
1154 struct net_device *real_dev = lowpan_dev->real_dev; 1181 struct net_device *real_dev = lowpan_dev->real_dev;
1155 struct lowpan_dev_record *entry; 1182 struct lowpan_dev_record *entry, *tmp;
1156 struct lowpan_dev_record *tmp; 1183 struct lowpan_fragment *frame, *tframe;
1157 1184
1158 ASSERT_RTNL(); 1185 ASSERT_RTNL();
1159 1186
1187 spin_lock(&flist_lock);
1188 list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
1189 del_timer(&frame->timer);
1190 list_del(&frame->list);
1191 dev_kfree_skb(frame->skb);
1192 kfree(frame);
1193 }
1194 spin_unlock(&flist_lock);
1195
1160 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1196 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1161 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { 1197 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1162 if (entry->ldev == dev) { 1198 if (entry->ldev == dev) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1a37b4..30b88d7b4bd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1370 1370
1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1372 continue; 1372 continue;
1373 if (fi->fib_dead)
1374 continue;
1373 if (fa->fa_info->fib_scope < flp->flowi4_scope) 1375 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1374 continue; 1376 continue;
1375 fib_alias_accessed(fa); 1377 fib_alias_accessed(fa);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557eb..8f8db724bfaf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
141 goto rtattr_failure; 141 goto rtattr_failure;
142 142
143 if (icsk == NULL) { 143 if (icsk == NULL) {
144 r->idiag_rqueue = r->idiag_wqueue = 0; 144 handler->idiag_get_info(sk, r, NULL);
145 goto out; 145 goto out;
146 } 146 }
147 147
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6adeb62c0..1272a88c2a63 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3243,7 +3243,7 @@ void __init tcp_init(void)
3243{ 3243{
3244 struct sk_buff *skb = NULL; 3244 struct sk_buff *skb = NULL;
3245 unsigned long limit; 3245 unsigned long limit;
3246 int max_share, cnt; 3246 int max_rshare, max_wshare, cnt;
3247 unsigned int i; 3247 unsigned int i;
3248 unsigned long jiffy = jiffies; 3248 unsigned long jiffy = jiffies;
3249 3249
@@ -3303,15 +3303,16 @@ void __init tcp_init(void)
3303 tcp_init_mem(&init_net); 3303 tcp_init_mem(&init_net);
3304 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3304 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3305 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 3305 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3306 max_share = min(4UL*1024*1024, limit); 3306 max_wshare = min(4UL*1024*1024, limit);
3307 max_rshare = min(6UL*1024*1024, limit);
3307 3308
3308 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3309 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3309 sysctl_tcp_wmem[1] = 16*1024; 3310 sysctl_tcp_wmem[1] = 16*1024;
3310 sysctl_tcp_wmem[2] = max(64*1024, max_share); 3311 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3311 3312
3312 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3313 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3313 sysctl_tcp_rmem[1] = 87380; 3314 sysctl_tcp_rmem[1] = 87380;
3314 sysctl_tcp_rmem[2] = max(87380, max_share); 3315 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3315 3316
3316 pr_info("Hash tables configured (established %u bind %u)\n", 3317 pr_info("Hash tables configured (established %u bind %u)\n",
3317 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3318 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
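As a worked example of the new read/write split (assuming 4 KiB pages): limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7) is 1/128 of buffer memory in bytes, so 262,144 free buffer pages (about 1 GiB) give limit = 8 MiB; max_wshare = min(4 MiB, 8 MiB) = 4 MiB and max_rshare = min(6 MiB, 8 MiB) = 6 MiB, making tcp_wmem[2] = 4 MiB and tcp_rmem[2] = 6 MiB instead of a single 4 MiB cap for both.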
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9944c1d9a218..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
85EXPORT_SYMBOL(sysctl_tcp_ecn); 85EXPORT_SYMBOL(sysctl_tcp_ecn);
86int sysctl_tcp_dsack __read_mostly = 1; 86int sysctl_tcp_dsack __read_mostly = 1;
87int sysctl_tcp_app_win __read_mostly = 31; 87int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 2; 88int sysctl_tcp_adv_win_scale __read_mostly = 1;
89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
90 90
91int sysctl_tcp_stdurg __read_mostly; 91int sysctl_tcp_stdurg __read_mostly;
@@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
335 incr = __tcp_grow_window(sk, skb); 335 incr = __tcp_grow_window(sk, skb);
336 336
337 if (incr) { 337 if (incr) {
338 incr = max_t(int, incr, 2 * skb->len);
338 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 339 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
339 tp->window_clamp); 340 tp->window_clamp);
340 inet_csk(sk)->icsk_ack.quick |= 1; 341 inet_csk(sk)->icsk_ack.quick |= 1;
@@ -494,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
494 goto new_measure; 495 goto new_measure;
495 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 496 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
496 return; 497 return;
497 tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); 498 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
498 499
499new_measure: 500new_measure:
500 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 501 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2867,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
2867 2868
2868 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2869 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2869 if (tp->undo_marker) { 2870 if (tp->undo_marker) {
2870 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) 2871 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
2871 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2872 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2872 else /* PRR */ 2873 tp->snd_cwnd_stamp = tcp_time_stamp;
2874 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
2875 /* PRR algorithm. */
2873 tp->snd_cwnd = tp->snd_ssthresh; 2876 tp->snd_cwnd = tp->snd_ssthresh;
2874 tp->snd_cwnd_stamp = tcp_time_stamp; 2877 tp->snd_cwnd_stamp = tcp_time_stamp;
2878 }
2875 } 2879 }
2876 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2880 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2877} 2881}
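The sysctl_tcp_adv_win_scale change pairs with the tcp_rmem[2] bump above: tcp_win_from_space() advertises space - (space >> tcp_adv_win_scale) of the receive buffer when the scale is positive, so moving the default from 2 to 1 drops the advertised share from 3/4 to 1/2 while the maximum buffer grows from 4 MiB to 6 MiB, leaving the peak advertised window at roughly 3 MiB but reserving more headroom for skb overhead.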
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 376b2cfbb685..7ac6423117ad 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1096 eat = min_t(int, len, skb_headlen(skb)); 1096 eat = min_t(int, len, skb_headlen(skb));
1097 if (eat) { 1097 if (eat) {
1098 __skb_pull(skb, eat); 1098 __skb_pull(skb, eat);
1099 skb->avail_size -= eat;
1099 len -= eat; 1100 len -= eat;
1100 if (!len) 1101 if (!len)
1101 return; 1102 return;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
146 return udp_dump_one(&udp_table, in_skb, nlh, req); 146 return udp_dump_one(&udp_table, in_skb, nlh, req);
147} 147}
148 148
149static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
150 void *info)
151{
152 r->idiag_rqueue = sk_rmem_alloc_get(sk);
153 r->idiag_wqueue = sk_wmem_alloc_get(sk);
154}
155
149static const struct inet_diag_handler udp_diag_handler = { 156static const struct inet_diag_handler udp_diag_handler = {
150 .dump = udp_diag_dump, 157 .dump = udp_diag_dump,
151 .dump_one = udp_diag_dump_one, 158 .dump_one = udp_diag_dump_one,
159 .idiag_get_info = udp_diag_get_info,
152 .idiag_type = IPPROTO_UDP, 160 .idiag_type = IPPROTO_UDP,
153}; 161};
154 162
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
167static const struct inet_diag_handler udplite_diag_handler = { 175static const struct inet_diag_handler udplite_diag_handler = {
168 .dump = udplite_diag_dump, 176 .dump = udplite_diag_dump,
169 .dump_one = udplite_diag_dump_one, 177 .dump_one = udplite_diag_dump_one,
178 .idiag_get_info = udp_diag_get_info,
170 .idiag_type = IPPROTO_UDPLITE, 179 .idiag_type = IPPROTO_UDPLITE,
171}; 180};
172 181
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a3bb6077e19..7d5cb975cc6f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
803 ip6_del_rt(rt); 803 ip6_del_rt(rt);
804 rt = NULL; 804 rt = NULL;
805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { 805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
806 rt->dst.expires = expires; 806 rt6_set_expires(rt, expires);
807 rt->rt6i_flags |= RTF_EXPIRES;
808 } 807 }
809 } 808 }
810 dst_release(&rt->dst); 809 dst_release(&rt->dst);
@@ -1887,11 +1886,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1887 rt = NULL; 1886 rt = NULL;
1888 } else if (addrconf_finite_timeout(rt_expires)) { 1887 } else if (addrconf_finite_timeout(rt_expires)) {
1889 /* not infinity */ 1888 /* not infinity */
1890 rt->dst.expires = jiffies + rt_expires; 1889 rt6_set_expires(rt, jiffies + rt_expires);
1891 rt->rt6i_flags |= RTF_EXPIRES;
1892 } else { 1890 } else {
1893 rt->rt6i_flags &= ~RTF_EXPIRES; 1891 rt6_clean_expires(rt);
1894 rt->dst.expires = 0;
1895 } 1892 }
1896 } else if (valid_lft) { 1893 } else if (valid_lft) {
1897 clock_t expires = 0; 1894 clock_t expires = 0;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5b27fbcae346..93717435013e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
673 &rt->rt6i_gateway)) { 673 &rt->rt6i_gateway)) {
674 if (!(iter->rt6i_flags & RTF_EXPIRES)) 674 if (!(iter->rt6i_flags & RTF_EXPIRES))
675 return -EEXIST; 675 return -EEXIST;
676 iter->dst.expires = rt->dst.expires; 676 if (!(rt->rt6i_flags & RTF_EXPIRES))
677 if (!(rt->rt6i_flags & RTF_EXPIRES)) { 677 rt6_clean_expires(iter);
678 iter->rt6i_flags &= ~RTF_EXPIRES; 678 else
679 iter->dst.expires = 0; 679 rt6_set_expires(iter, rt->dst.expires);
680 }
681 return -EEXIST; 680 return -EEXIST;
682 } 681 }
683 } 682 }
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3dcdb81ec3e8..176b469322ac 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1264,8 +1264,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1264 } 1264 }
1265 1265
1266 if (rt) 1266 if (rt)
1267 rt->dst.expires = jiffies + (HZ * lifetime); 1267 rt6_set_expires(rt, jiffies + (HZ * lifetime));
1268
1269 if (ra_msg->icmph.icmp6_hop_limit) { 1268 if (ra_msg->icmph.icmp6_hop_limit) {
1270 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1269 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1271 if (rt) 1270 if (rt)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3992e26a6039..bc4888d902b2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,7 +62,7 @@
62#include <linux/sysctl.h> 62#include <linux/sysctl.h>
63#endif 63#endif
64 64
65static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 65static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
66 const struct in6_addr *dest); 66 const struct in6_addr *dest);
67static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 67static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
68static unsigned int ip6_default_advmss(const struct dst_entry *dst); 68static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
285 rt->rt6i_idev = NULL; 285 rt->rt6i_idev = NULL;
286 in6_dev_put(idev); 286 in6_dev_put(idev);
287 } 287 }
288
289 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
290 dst_release(dst->from);
291
288 if (peer) { 292 if (peer) {
289 rt->rt6i_peer = NULL; 293 rt->rt6i_peer = NULL;
290 inet_putpeer(peer); 294 inet_putpeer(peer);
@@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
329 333
330static __inline__ int rt6_check_expired(const struct rt6_info *rt) 334static __inline__ int rt6_check_expired(const struct rt6_info *rt)
331{ 335{
332 return (rt->rt6i_flags & RTF_EXPIRES) && 336 struct rt6_info *ort = NULL;
333 time_after(jiffies, rt->dst.expires); 337
338 if (rt->rt6i_flags & RTF_EXPIRES) {
339 if (time_after(jiffies, rt->dst.expires))
340 return 1;
341 } else if (rt->dst.from) {
342 ort = (struct rt6_info *) rt->dst.from;
343 return (ort->rt6i_flags & RTF_EXPIRES) &&
344 time_after(jiffies, ort->dst.expires);
345 }
346 return 0;
334} 347}
335 348
336static inline int rt6_need_strict(const struct in6_addr *daddr) 349static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
620 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 633 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
621 634
622 if (rt) { 635 if (rt) {
623 if (!addrconf_finite_timeout(lifetime)) { 636 if (!addrconf_finite_timeout(lifetime))
624 rt->rt6i_flags &= ~RTF_EXPIRES; 637 rt6_clean_expires(rt);
625 } else { 638 else
626 rt->dst.expires = jiffies + HZ * lifetime; 639 rt6_set_expires(rt, jiffies + HZ * lifetime);
627 rt->rt6i_flags |= RTF_EXPIRES; 640
628 }
629 dst_release(&rt->dst); 641 dst_release(&rt->dst);
630 } 642 }
631 return 0; 643 return 0;
@@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt)
730 return __ip6_ins_rt(rt, &info); 742 return __ip6_ins_rt(rt, &info);
731} 743}
732 744
733static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, 745static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
734 const struct in6_addr *daddr, 746 const struct in6_addr *daddr,
735 const struct in6_addr *saddr) 747 const struct in6_addr *saddr)
736{ 748{
@@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
954 rt->rt6i_idev = ort->rt6i_idev; 966 rt->rt6i_idev = ort->rt6i_idev;
955 if (rt->rt6i_idev) 967 if (rt->rt6i_idev)
956 in6_dev_hold(rt->rt6i_idev); 968 in6_dev_hold(rt->rt6i_idev);
957 rt->dst.expires = 0;
958 969
959 rt->rt6i_gateway = ort->rt6i_gateway; 970 rt->rt6i_gateway = ort->rt6i_gateway;
960 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 971 rt->rt6i_flags = ort->rt6i_flags;
972 rt6_clean_expires(rt);
961 rt->rt6i_metric = 0; 973 rt->rt6i_metric = 0;
962 974
963 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 975 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
1019 1031
1020 rt = (struct rt6_info *) skb_dst(skb); 1032 rt = (struct rt6_info *) skb_dst(skb);
1021 if (rt) { 1033 if (rt) {
1022 if (rt->rt6i_flags & RTF_CACHE) { 1034 if (rt->rt6i_flags & RTF_CACHE)
1023 dst_set_expires(&rt->dst, 0); 1035 rt6_update_expires(rt, 0);
1024 rt->rt6i_flags |= RTF_EXPIRES; 1036 else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1025 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1026 rt->rt6i_node->fn_sernum = -1; 1037 rt->rt6i_node->fn_sernum = -1;
1027 } 1038 }
1028} 1039}
@@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
1289 } 1300 }
1290 1301
1291 rt->dst.obsolete = -1; 1302 rt->dst.obsolete = -1;
1292 rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? 1303
1293 jiffies + clock_t_to_jiffies(cfg->fc_expires) : 1304 if (cfg->fc_flags & RTF_EXPIRES)
1294 0; 1305 rt6_set_expires(rt, jiffies +
1306 clock_t_to_jiffies(cfg->fc_expires));
1307 else
1308 rt6_clean_expires(rt);
1295 1309
1296 if (cfg->fc_protocol == RTPROT_UNSPEC) 1310 if (cfg->fc_protocol == RTPROT_UNSPEC)
1297 cfg->fc_protocol = RTPROT_BOOT; 1311 cfg->fc_protocol = RTPROT_BOOT;
@@ -1736,8 +1750,8 @@ again:
1736 features |= RTAX_FEATURE_ALLFRAG; 1750 features |= RTAX_FEATURE_ALLFRAG;
1737 dst_metric_set(&rt->dst, RTAX_FEATURES, features); 1751 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1738 } 1752 }
1739 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1753 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1740 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1754 rt->rt6i_flags |= RTF_MODIFIED;
1741 goto out; 1755 goto out;
1742 } 1756 }
1743 1757
@@ -1765,9 +1779,8 @@ again:
1765 * which is 10 mins. After 10 mins the decreased pmtu is expired 1779 * which is 10 mins. After 10 mins the decreased pmtu is expired
1766 * and detecting PMTU increase will be automatically happened. 1780 * and detecting PMTU increase will be automatically happened.
1767 */ 1781 */
1768 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1782 rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1769 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1783 nrt->rt6i_flags |= RTF_DYNAMIC;
1770
1771 ip6_ins_rt(nrt); 1784 ip6_ins_rt(nrt);
1772 } 1785 }
1773out: 1786out:
@@ -1799,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
1799 * Misc support functions 1812 * Misc support functions
1800 */ 1813 */
1801 1814
1802static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 1815static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1803 const struct in6_addr *dest) 1816 const struct in6_addr *dest)
1804{ 1817{
1805 struct net *net = dev_net(ort->dst.dev); 1818 struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1819 if (rt->rt6i_idev) 1832 if (rt->rt6i_idev)
1820 in6_dev_hold(rt->rt6i_idev); 1833 in6_dev_hold(rt->rt6i_idev);
1821 rt->dst.lastuse = jiffies; 1834 rt->dst.lastuse = jiffies;
1822 rt->dst.expires = 0;
1823 1835
1824 rt->rt6i_gateway = ort->rt6i_gateway; 1836 rt->rt6i_gateway = ort->rt6i_gateway;
1825 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 1837 rt->rt6i_flags = ort->rt6i_flags;
1838 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1839 (RTF_DEFAULT | RTF_ADDRCONF))
1840 rt6_set_from(rt, ort);
1841 else
1842 rt6_clean_expires(rt);
1826 rt->rt6i_metric = 0; 1843 rt->rt6i_metric = 0;
1827 1844
1828#ifdef CONFIG_IPV6_SUBTREES 1845#ifdef CONFIG_IPV6_SUBTREES
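The rt6_set_expires()/rt6_clean_expires()/rt6_set_from() helpers used throughout the IPv6 hunks are defined in a header that is not part of this diff. Based only on the open-coded pairs they replace and the new dst.from handling visible in ip6_dst_destroy() and rt6_check_expired(), their likely shape is roughly the following reconstruction (an assumption, not the real definitions):

	static inline void rt6_set_expires(struct rt6_info *rt,
					   unsigned long expires)
	{
		rt->dst.expires = expires;
		rt->rt6i_flags |= RTF_EXPIRES;
	}

	static inline void rt6_clean_expires(struct rt6_info *rt)
	{
		rt->rt6i_flags &= ~RTF_EXPIRES;
		rt->dst.expires = 0;
	}

	static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
	{
		/* clones track the parent's expiry via dst.from instead */
		rt->rt6i_flags &= ~RTF_EXPIRES;
		dst_hold(&from->dst);
		rt->dst.from = &from->dst;
	}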
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 86cfe6005f40..98256cf72f9d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1383,6 +1383,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1383 tcp_mtup_init(newsk); 1383 tcp_mtup_init(newsk);
1384 tcp_sync_mss(newsk, dst_mtu(dst)); 1384 tcp_sync_mss(newsk, dst_mtu(dst));
1385 newtp->advmss = dst_metric_advmss(dst); 1385 newtp->advmss = dst_metric_advmss(dst);
1386 if (tcp_sk(sk)->rx_opt.user_mss &&
1387 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1388 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1389
1386 tcp_initialize_rcv_mss(newsk); 1390 tcp_initialize_rcv_mss(newsk);
1387 if (tcp_rsk(req)->snt_synack) 1391 if (tcp_rsk(req)->snt_synack)
1388 tcp_valid_rtt_meas(newsk, 1392 tcp_valid_rtt_meas(newsk,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11dbb2255ccb..7e5d927b576f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3480 3480
3481 /* Addresses to be used by KM for negotiation, if ext is available */ 3481 /* Addresses to be used by KM for negotiation, if ext is available */
3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) 3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
3483 return -EINVAL; 3483 goto err;
3484 3484
3485 /* selector src */ 3485 /* selector src */
3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); 3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 55670ec3cd0f..6274f0be82b0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -232,7 +232,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
232{ 232{
233 write_lock_bh(&l2tp_ip_lock); 233 write_lock_bh(&l2tp_ip_lock);
234 hlist_del_init(&sk->sk_bind_node); 234 hlist_del_init(&sk->sk_bind_node);
235 hlist_del_init(&sk->sk_node); 235 sk_del_node_init(sk);
236 write_unlock_bh(&l2tp_ip_lock); 236 write_unlock_bh(&l2tp_ip_lock);
237 sk_common_release(sk); 237 sk_common_release(sk);
238} 238}
@@ -271,7 +271,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
272 goto out; 272 goto out;
273 273
274 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 274 if (addr->l2tp_addr.s_addr)
275 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
275 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 276 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
276 inet->inet_saddr = 0; /* Use device */ 277 inet->inet_saddr = 0; /* Use device */
277 sk_dst_reset(sk); 278 sk_dst_reset(sk);
@@ -441,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
441 442
442 daddr = lip->l2tp_addr.s_addr; 443 daddr = lip->l2tp_addr.s_addr;
443 } else { 444 } else {
445 rc = -EDESTADDRREQ;
444 if (sk->sk_state != TCP_ESTABLISHED) 446 if (sk->sk_state != TCP_ESTABLISHED)
445 return -EDESTADDRREQ; 447 goto out;
446 448
447 daddr = inet->inet_daddr; 449 daddr = inet->inet_daddr;
448 connected = 1; 450 connected = 1;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33fd8d9f714e..cef7c29214a8 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -457,8 +457,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
457 * fall back to HT20 if we don't use or use 457 * fall back to HT20 if we don't use or use
458 * the other extension channel 458 * the other extension channel
459 */ 459 */
460 if ((channel_type == NL80211_CHAN_HT40MINUS || 460 if (!(channel_type == NL80211_CHAN_HT40MINUS ||
461 channel_type == NL80211_CHAN_HT40PLUS) && 461 channel_type == NL80211_CHAN_HT40PLUS) ||
462 channel_type != sdata->u.ibss.channel_type) 462 channel_type != sdata->u.ibss.channel_type)
463 sta_ht_cap_new.cap &= 463 sta_ht_cap_new.cap &=
464 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 464 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f20..db8fae51714c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1210 struct sk_buff *skb); 1210 struct sk_buff *skb);
1211void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1211void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1212void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1212void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1213void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata); 1213void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1214 1214
1215/* IBSS code */ 1215/* IBSS code */
1216void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1216void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731e..c20051b7ffcd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
486 /* free all potentially still buffered bcast frames */ 486 /* free all potentially still buffered bcast frames */
487 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); 487 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
488 skb_queue_purge(&sdata->u.ap.ps_bc_buf); 488 skb_queue_purge(&sdata->u.ap.ps_bc_buf);
489 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
490 ieee80211_mgd_stop(sdata);
489 } 491 }
490 492
491 if (going_down) 493 if (going_down)
@@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
644 646
645 if (ieee80211_vif_is_mesh(&sdata->vif)) 647 if (ieee80211_vif_is_mesh(&sdata->vif))
646 mesh_rmc_free(sdata); 648 mesh_rmc_free(sdata);
647 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
648 ieee80211_mgd_teardown(sdata);
649 649
650 flushed = sta_info_flush(local, sdata); 650 flushed = sta_info_flush(local, sdata);
651 WARN_ON(flushed); 651 WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76da5b3f5c5..20c680bfc3ae 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3497,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
3497 return 0; 3497 return 0;
3498} 3498}
3499 3499
3500void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata) 3500void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
3501{ 3501{
3502 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3502 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3503 3503
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bcfe8c77c839..d64e285400aa 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -103,7 +103,7 @@ static void
103ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 103ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
104 struct sk_buff *skb, 104 struct sk_buff *skb,
105 struct ieee80211_rate *rate, 105 struct ieee80211_rate *rate,
106 int rtap_len) 106 int rtap_len, bool has_fcs)
107{ 107{
108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
109 struct ieee80211_radiotap_header *rthdr; 109 struct ieee80211_radiotap_header *rthdr;
@@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
134 } 134 }
135 135
136 /* IEEE80211_RADIOTAP_FLAGS */ 136 /* IEEE80211_RADIOTAP_FLAGS */
137 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 137 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
138 *pos |= IEEE80211_RADIOTAP_F_FCS; 138 *pos |= IEEE80211_RADIOTAP_F_FCS;
139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
140 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
294 } 294 }
295 295
296 /* prepend radiotap information */ 296 /* prepend radiotap information */
297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
298 true);
298 299
299 skb_reset_mac_header(skb); 300 skb_reset_mac_header(skb);
300 skb->ip_summed = CHECKSUM_UNNECESSARY; 301 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2571,7 +2572,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2571 goto out_free_skb; 2572 goto out_free_skb;
2572 2573
2573 /* prepend radiotap information */ 2574 /* prepend radiotap information */
2574 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 2575 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2576 false);
2575 2577
2576 skb_set_mac_header(skb, 0); 2578 skb_set_mac_header(skb, 0);
2577 skb->ip_summed = CHECKSUM_UNNECESSARY; 2579 skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df4..e76facc69e95 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1158 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1158 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1159 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1159 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1160 return TX_DROP; 1160 return TX_DROP;
1161 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { 1161 } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
1162 tx->sdata->control_port_protocol == tx->skb->protocol) {
1162 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1163 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1163 } 1164 }
1164 if (!tx->sta) 1165 if (!tx->sta)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..00bdb1d9d690 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
1924control_fail: 1924control_fail:
1925 ip_vs_estimator_net_cleanup(net); 1925 ip_vs_estimator_net_cleanup(net);
1926estimator_fail: 1926estimator_fail:
1927 net->ipvs = NULL;
1927 return -ENOMEM; 1928 return -ENOMEM;
1928} 1929}
1929 1930
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
1936 ip_vs_control_net_cleanup(net); 1937 ip_vs_control_net_cleanup(net);
1937 ip_vs_estimator_net_cleanup(net); 1938 ip_vs_estimator_net_cleanup(net);
1938 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); 1939 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1940 net->ipvs = NULL;
1939} 1941}
1940 1942
1941static void __net_exit __ip_vs_dev_cleanup(struct net *net) 1943static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
1993 goto cleanup_dev; 1995 goto cleanup_dev;
1994 } 1996 }
1995 1997
1998 ret = ip_vs_register_nl_ioctl();
1999 if (ret < 0) {
2000 pr_err("can't register netlink/ioctl.\n");
2001 goto cleanup_hooks;
2002 }
2003
1996 pr_info("ipvs loaded.\n"); 2004 pr_info("ipvs loaded.\n");
1997 2005
1998 return ret; 2006 return ret;
1999 2007
2008cleanup_hooks:
2009 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2000cleanup_dev: 2010cleanup_dev:
2001 unregister_pernet_device(&ipvs_core_dev_ops); 2011 unregister_pernet_device(&ipvs_core_dev_ops);
2002cleanup_sub: 2012cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
2012 2022
2013static void __exit ip_vs_cleanup(void) 2023static void __exit ip_vs_cleanup(void)
2014{ 2024{
2025 ip_vs_unregister_nl_ioctl();
2015 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2026 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2016 unregister_pernet_device(&ipvs_core_dev_ops); 2027 unregister_pernet_device(&ipvs_core_dev_ops);
2017 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2028 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
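
(Illustrative sketch, not part of the merged patches.) The ip_vs_init() hunk above follows the kernel's usual goto-unwind idiom: each registration that succeeds gains a matching cleanup label, and a later failure jumps to the label that tears down everything registered so far, in reverse order, while ip_vs_cleanup() mirrors the same ordering. A minimal stand-alone C sketch of that shape, using made-up step names rather than the real IPVS calls:

#include <stdio.h>

/* Stand-in init/cleanup pairs; in ip_vs_init() these are the pernet,
 * netfilter-hook and netlink/ioctl registrations shown above. */
static int  register_subsys(void)    { puts("subsys registered");    return 0; }
static void unregister_subsys(void)  { puts("subsys unregistered");  }
static int  register_hooks(void)     { puts("hooks registered");     return 0; }
static void unregister_hooks(void)   { puts("hooks unregistered");   }
static int  register_nl_ioctl(void)  { puts("netlink/ioctl failed"); return -1; }

static int demo_init(void)
{
	int ret;

	ret = register_subsys();
	if (ret < 0)
		goto out;

	ret = register_hooks();
	if (ret < 0)
		goto cleanup_subsys;

	ret = register_nl_ioctl();	/* the step the patch adds last... */
	if (ret < 0)
		goto cleanup_hooks;	/* ...with its matching new label */

	return 0;

cleanup_hooks:
	unregister_hooks();
cleanup_subsys:
	unregister_subsys();
out:
	return ret;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}

The key property is that the labels run in the exact reverse of the registration order, so adding ip_vs_register_nl_ioctl() at the end only required one new label (cleanup_hooks) above the existing ones.
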
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..f5589987fc80 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3680 return 0; 3680 return 0;
3681} 3681}
3682 3682
3683void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) 3683void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3684{ 3684{
3685 struct netns_ipvs *ipvs = net_ipvs(net); 3685 struct netns_ipvs *ipvs = net_ipvs(net);
3686 3686
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
3692#else 3692#else
3693 3693
3694int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } 3694int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
3695void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { } 3695void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
3696 3696
3697#endif 3697#endif
3698 3698
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3750 free_percpu(ipvs->tot_stats.cpustats); 3750 free_percpu(ipvs->tot_stats.cpustats);
3751} 3751}
3752 3752
3753int __init ip_vs_control_init(void) 3753int __init ip_vs_register_nl_ioctl(void)
3754{ 3754{
3755 int idx;
3756 int ret; 3755 int ret;
3757 3756
3758 EnterFunction(2);
3759
3760 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3761 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3762 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3763 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3764 }
3765
3766 smp_wmb(); /* Do we really need it now ? */
3767
3768 ret = nf_register_sockopt(&ip_vs_sockopts); 3757 ret = nf_register_sockopt(&ip_vs_sockopts);
3769 if (ret) { 3758 if (ret) {
3770 pr_err("cannot register sockopt.\n"); 3759 pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
3776 pr_err("cannot register Generic Netlink interface.\n"); 3765 pr_err("cannot register Generic Netlink interface.\n");
3777 goto err_genl; 3766 goto err_genl;
3778 } 3767 }
3779
3780 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3781 if (ret < 0)
3782 goto err_notf;
3783
3784 LeaveFunction(2);
3785 return 0; 3768 return 0;
3786 3769
3787err_notf:
3788 ip_vs_genl_unregister();
3789err_genl: 3770err_genl:
3790 nf_unregister_sockopt(&ip_vs_sockopts); 3771 nf_unregister_sockopt(&ip_vs_sockopts);
3791err_sock: 3772err_sock:
3792 return ret; 3773 return ret;
3793} 3774}
3794 3775
3776void ip_vs_unregister_nl_ioctl(void)
3777{
3778 ip_vs_genl_unregister();
3779 nf_unregister_sockopt(&ip_vs_sockopts);
3780}
3781
3782int __init ip_vs_control_init(void)
3783{
3784 int idx;
3785 int ret;
3786
3787 EnterFunction(2);
3788
3789 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3790 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3791 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3792 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3793 }
3794
3795 smp_wmb(); /* Do we really need it now ? */
3796
3797 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3798 if (ret < 0)
3799 return ret;
3800
3801 LeaveFunction(2);
3802 return 0;
3803}
3804
3795 3805
3796void ip_vs_control_cleanup(void) 3806void ip_vs_control_cleanup(void)
3797{ 3807{
3798 EnterFunction(2); 3808 EnterFunction(2);
3799 unregister_netdevice_notifier(&ip_vs_dst_notifier); 3809 unregister_netdevice_notifier(&ip_vs_dst_notifier);
3800 ip_vs_genl_unregister();
3801 nf_unregister_sockopt(&ip_vs_sockopts);
3802 LeaveFunction(2); 3810 LeaveFunction(2);
3803} 3811}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..e39f693dd3e4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
439 struct ip_vs_app *app; 439 struct ip_vs_app *app;
440 struct netns_ipvs *ipvs = net_ipvs(net); 440 struct netns_ipvs *ipvs = net_ipvs(net);
441 441
442 if (!ipvs)
443 return -ENOENT;
442 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); 444 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
443 if (!app) 445 if (!app)
444 return -ENOMEM; 446 return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..caa43704e55e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
551{ 551{
552 struct netns_ipvs *ipvs = net_ipvs(net); 552 struct netns_ipvs *ipvs = net_ipvs(net);
553 553
554 if (!ipvs)
555 return -ENOENT;
556
554 if (!net_eq(net, &init_net)) { 557 if (!net_eq(net, &init_net)) {
555 ipvs->lblc_ctl_table = kmemdup(vs_vars_table, 558 ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
556 sizeof(vs_vars_table), 559 sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..548bf37aa29e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
745{ 745{
746 struct netns_ipvs *ipvs = net_ipvs(net); 746 struct netns_ipvs *ipvs = net_ipvs(net);
747 747
748 if (!ipvs)
749 return -ENOENT;
750
748 if (!net_eq(net, &init_net)) { 751 if (!net_eq(net, &init_net)) {
749 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, 752 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
750 sizeof(vs_vars_table), 753 sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a8833250..ed835e67a07e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
59 return 0; 59 return 0;
60} 60}
61 61
62#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
63 defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
64 defined(CONFIG_IP_VS_PROTO_ESP)
65/* 62/*
66 * register an ipvs protocols netns related data 63 * register an ipvs protocols netns related data
67 */ 64 */
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
81 ipvs->proto_data_table[hash] = pd; 78 ipvs->proto_data_table[hash] = pd;
82 atomic_set(&pd->appcnt, 0); /* Init app counter */ 79 atomic_set(&pd->appcnt, 0); /* Init app counter */
83 80
84 if (pp->init_netns != NULL) 81 if (pp->init_netns != NULL) {
85 pp->init_netns(net, pd); 82 int ret = pp->init_netns(net, pd);
83 if (ret) {
 84 /* unlink and free proto data */
85 ipvs->proto_data_table[hash] = pd->next;
86 kfree(pd);
87 return ret;
88 }
89 }
86 90
87 return 0; 91 return 0;
88} 92}
89#endif
90 93
91/* 94/*
92 * unregister an ipvs protocol 95 * unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
316 */ 319 */
317int __net_init ip_vs_protocol_net_init(struct net *net) 320int __net_init ip_vs_protocol_net_init(struct net *net)
318{ 321{
322 int i, ret;
323 static struct ip_vs_protocol *protos[] = {
319#ifdef CONFIG_IP_VS_PROTO_TCP 324#ifdef CONFIG_IP_VS_PROTO_TCP
320 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); 325 &ip_vs_protocol_tcp,
321#endif 326#endif
322#ifdef CONFIG_IP_VS_PROTO_UDP 327#ifdef CONFIG_IP_VS_PROTO_UDP
323 register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); 328 &ip_vs_protocol_udp,
324#endif 329#endif
325#ifdef CONFIG_IP_VS_PROTO_SCTP 330#ifdef CONFIG_IP_VS_PROTO_SCTP
326 register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); 331 &ip_vs_protocol_sctp,
327#endif 332#endif
328#ifdef CONFIG_IP_VS_PROTO_AH 333#ifdef CONFIG_IP_VS_PROTO_AH
329 register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); 334 &ip_vs_protocol_ah,
330#endif 335#endif
331#ifdef CONFIG_IP_VS_PROTO_ESP 336#ifdef CONFIG_IP_VS_PROTO_ESP
332 register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); 337 &ip_vs_protocol_esp,
333#endif 338#endif
339 };
340
341 for (i = 0; i < ARRAY_SIZE(protos); i++) {
342 ret = register_ip_vs_proto_netns(net, protos[i]);
343 if (ret < 0)
344 goto cleanup;
345 }
334 return 0; 346 return 0;
347
348cleanup:
349 ip_vs_protocol_net_cleanup(net);
350 return ret;
335} 351}
336 352
337void __net_exit ip_vs_protocol_net_cleanup(struct net *net) 353void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
1090 * timeouts is netns related now. 1090 * timeouts is netns related now.
1091 * --------------------------------------------- 1091 * ---------------------------------------------
1092 */ 1092 */
1093static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) 1093static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1094{ 1094{
1095 struct netns_ipvs *ipvs = net_ipvs(net); 1095 struct netns_ipvs *ipvs = net_ipvs(net);
1096 1096
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1098 spin_lock_init(&ipvs->sctp_app_lock); 1098 spin_lock_init(&ipvs->sctp_app_lock);
1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, 1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
1100 sizeof(sctp_timeouts)); 1100 sizeof(sctp_timeouts));
1101 if (!pd->timeout_table)
1102 return -ENOMEM;
1103 return 0;
1101} 1104}
1102 1105
1103static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) 1106static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
677 * timeouts is netns related now. 677 * timeouts is netns related now.
678 * --------------------------------------------- 678 * ---------------------------------------------
679 */ 679 */
680static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) 680static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
681{ 681{
682 struct netns_ipvs *ipvs = net_ipvs(net); 682 struct netns_ipvs *ipvs = net_ipvs(net);
683 683
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
685 spin_lock_init(&ipvs->tcp_app_lock); 685 spin_lock_init(&ipvs->tcp_app_lock);
686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, 686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
687 sizeof(tcp_timeouts)); 687 sizeof(tcp_timeouts));
688 if (!pd->timeout_table)
689 return -ENOMEM;
688 pd->tcp_state_table = tcp_states; 690 pd->tcp_state_table = tcp_states;
691 return 0;
689} 692}
690 693
691static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) 694static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; 467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
468} 468}
469 469
470static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) 470static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
471{ 471{
472 struct netns_ipvs *ipvs = net_ipvs(net); 472 struct netns_ipvs *ipvs = net_ipvs(net);
473 473
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
475 spin_lock_init(&ipvs->udp_app_lock); 475 spin_lock_init(&ipvs->udp_app_lock);
476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, 476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
477 sizeof(udp_timeouts)); 477 sizeof(udp_timeouts));
478 if (!pd->timeout_table)
479 return -ENOMEM;
480 return 0;
478} 481}
479 482
480static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) 483static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..3746d8b9a478 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
227 } 227 }
228 228
229#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 229#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
230 if (info->timeout) { 230 if (info->timeout[0]) {
231 typeof(nf_ct_timeout_find_get_hook) timeout_find_get; 231 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
232 struct nf_conn_timeout *timeout_ext; 232 struct nf_conn_timeout *timeout_ext;
233 233
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631ea952..777716bc80f7 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
421 return validate_actions(actions, key, depth + 1); 421 return validate_actions(actions, key, depth + 1);
422} 422}
423 423
424static int validate_tp_port(const struct sw_flow_key *flow_key)
425{
426 if (flow_key->eth.type == htons(ETH_P_IP)) {
427 if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
428 return 0;
429 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
430 if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
431 return 0;
432 }
433
434 return -EINVAL;
435}
436
424static int validate_set(const struct nlattr *a, 437static int validate_set(const struct nlattr *a,
425 const struct sw_flow_key *flow_key) 438 const struct sw_flow_key *flow_key)
426{ 439{
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
462 if (flow_key->ip.proto != IPPROTO_TCP) 475 if (flow_key->ip.proto != IPPROTO_TCP)
463 return -EINVAL; 476 return -EINVAL;
464 477
465 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 478 return validate_tp_port(flow_key);
466 return -EINVAL;
467
468 break;
469 479
470 case OVS_KEY_ATTR_UDP: 480 case OVS_KEY_ATTR_UDP:
471 if (flow_key->ip.proto != IPPROTO_UDP) 481 if (flow_key->ip.proto != IPPROTO_UDP)
472 return -EINVAL; 482 return -EINVAL;
473 483
474 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 484 return validate_tp_port(flow_key);
475 return -EINVAL;
476 break;
477 485
478 default: 486 default:
479 return -EINVAL; 487 return -EINVAL;
@@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1641 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1649 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1642 OVS_VPORT_CMD_NEW); 1650 OVS_VPORT_CMD_NEW);
1643 if (IS_ERR(reply)) { 1651 if (IS_ERR(reply)) {
1644 err = PTR_ERR(reply);
1645 netlink_set_err(init_net.genl_sock, 0, 1652 netlink_set_err(init_net.genl_sock, 0,
1646 ovs_dp_vport_multicast_group.id, err); 1653 ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
1647 return 0; 1654 goto exit_unlock;
1648 } 1655 }
1649 1656
1650 genl_notify(reply, genl_info_net(info), info->snd_pid, 1657 genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef1..2a11ec2383ee 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
183 u8 tcp_flags = 0; 183 u8 tcp_flags = 0;
184 184
185 if (flow->key.eth.type == htons(ETH_P_IP) && 185 if (flow->key.eth.type == htons(ETH_P_IP) &&
186 flow->key.ip.proto == IPPROTO_TCP) { 186 flow->key.ip.proto == IPPROTO_TCP &&
187 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
187 u8 *tcp = (u8 *)tcp_hdr(skb); 188 u8 *tcp = (u8 *)tcp_hdr(skb);
188 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; 189 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
189 } 190 }
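
(Illustrative sketch, not part of the merged patches.) The ovs_flow_used() hunk above adds a length check before the TCP flags byte is read through tcp_hdr(), so a runt or truncated frame can no longer cause an out-of-bounds read. The same guard on a toy buffer, with a hypothetical header layout instead of struct sk_buff:

#include <stdint.h>
#include <stdio.h>

#define TCP_FLAGS_BYTE 13	/* offset of the flags byte inside a TCP header */
#define TCP_HDR_MIN    20	/* minimal TCP header length */

/* Return the TCP flags byte, or 0 if the buffer is too short to hold a
 * full header at transport_off -- the same check the hunk above adds. */
static uint8_t read_tcp_flags(const uint8_t *pkt, size_t pkt_len,
			      size_t transport_off)
{
	if (pkt_len < transport_off + TCP_HDR_MIN)
		return 0;

	return pkt[transport_off + TCP_FLAGS_BYTE];
}

int main(void)
{
	uint8_t pkt[64] = { 0 };

	pkt[20 + TCP_FLAGS_BYTE] = 0x12;	/* fake SYN|ACK at offset 20 */

	printf("full frame:      flags=0x%02x\n",
	       (unsigned)read_tcp_flags(pkt, sizeof(pkt), 20));
	printf("truncated frame: flags=0x%02x\n",
	       (unsigned)read_tcp_flags(pkt, 25, 20));
	return 0;
}
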
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b9a85ecc4c7..bf5cf69c820a 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net)
331 331
332static void __net_exit phonet_exit_net(struct net *net) 332static void __net_exit phonet_exit_net(struct net *net)
333{ 333{
334 struct phonet_net *pnn = phonet_pernet(net);
335 struct net_device *dev;
336 unsigned i;
337
338 rtnl_lock();
339 for_each_netdev(net, dev)
340 phonet_device_destroy(dev);
341
342 for (i = 0; i < 64; i++) {
343 dev = pnn->routes.table[i];
344 if (dev) {
345 rtm_phonet_notify(RTM_DELROUTE, dev, i);
346 dev_put(dev);
347 }
348 }
349 rtnl_unlock();
350
351 proc_net_remove(net, "phonet"); 334 proc_net_remove(net, "phonet");
352} 335}
353 336
@@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = {
361/* Initialize Phonet devices list */ 344/* Initialize Phonet devices list */
362int __init phonet_device_init(void) 345int __init phonet_device_init(void)
363{ 346{
364 int err = register_pernet_device(&phonet_net_ops); 347 int err = register_pernet_subsys(&phonet_net_ops);
365 if (err) 348 if (err)
366 return err; 349 return err;
367 350
@@ -377,7 +360,7 @@ void phonet_device_exit(void)
377{ 360{
378 rtnl_unregister_all(PF_PHONET); 361 rtnl_unregister_all(PF_PHONET);
379 unregister_netdevice_notifier(&phonet_device_notifier); 362 unregister_netdevice_notifier(&phonet_device_notifier);
380 unregister_pernet_device(&phonet_net_ops); 363 unregister_pernet_subsys(&phonet_net_ops);
381 proc_net_remove(&init_net, "pnresource"); 364 proc_net_remove(&init_net, "pnresource");
382} 365}
383 366
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0b15236be7b6..8179494c269a 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -565,11 +565,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
565 opt.packets = q->packetsin; 565 opt.packets = q->packetsin;
566 opt.bytesin = q->bytesin; 566 opt.bytesin = q->bytesin;
567 567
568 if (gred_wred_mode(table)) { 568 if (gred_wred_mode(table))
569 q->vars.qidlestart = 569 gred_load_wred_set(table, q);
570 table->tab[table->def]->vars.qidlestart;
571 q->vars.qavg = table->tab[table->def]->vars.qavg;
572 }
573 570
574 opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); 571 opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
575 572
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..ebd22966f748 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
408 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 408 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
409 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 409 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
410 (skb->ip_summed == CHECKSUM_PARTIAL && 410 (skb->ip_summed == CHECKSUM_PARTIAL &&
411 skb_checksum_help(skb))) { 411 skb_checksum_help(skb)))
412 sch->qstats.drops++; 412 return qdisc_drop(skb, sch);
413 return NET_XMIT_DROP;
414 }
415 413
416 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 414 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
417 } 415 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f41..8fc4dcd294ab 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
377 */ 377 */
378 skb_set_owner_w(nskb, sk); 378 skb_set_owner_w(nskb, sk);
379 379
380 /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ 380 if (!sctp_transport_dst_check(tp)) {
381 if (!dst || (dst->obsolete > 1)) {
382 dst_release(dst);
383 sctp_transport_route(tp, NULL, sctp_sk(sk)); 381 sctp_transport_route(tp, NULL, sctp_sk(sk));
384 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { 382 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
385 sctp_assoc_sync_pmtu(asoc); 383 sctp_assoc_sync_pmtu(asoc);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b04..b026ba0c6992 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
227} 227}
228 228
229/* this is a complete rip-off from __sk_dst_check
230 * the cookie is always 0 since this is how it's used in the
231 * pmtu code
232 */
233static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
234{
235 struct dst_entry *dst = t->dst;
236
237 if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
238 dst_release(t->dst);
239 t->dst = NULL;
240 return NULL;
241 }
242
243 return dst;
244}
245
246void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 229void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
247{ 230{
248 struct dst_entry *dst; 231 struct dst_entry *dst;
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c7..782bfe1b6465 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
242int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) 242int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
243{ 243{
244 struct gss_api_mech *pos = NULL; 244 struct gss_api_mech *pos = NULL;
245 int i = 0; 245 int j, i = 0;
246 246
247 spin_lock(&registered_mechs_lock); 247 spin_lock(&registered_mechs_lock);
248 list_for_each_entry(pos, &registered_mechs, gm_list) { 248 list_for_each_entry(pos, &registered_mechs, gm_list) {
249 array_ptr[i] = pos->gm_pfs->pseudoflavor; 249 for (j=0; j < pos->gm_pf_num; j++) {
250 i++; 250 array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
251 }
251 } 252 }
252 spin_unlock(&registered_mechs_lock); 253 spin_unlock(&registered_mechs_lock);
253 return i; 254 return i;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 67972462a543..adf2990acebf 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -176,16 +176,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
176 return 0; 176 return 0;
177} 177}
178 178
179static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, 179static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
180 struct super_block *sb) 180{
181 if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
182 ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
183 return 1;
184 return 0;
185}
186
187static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
188 struct super_block *sb)
181{ 189{
182 struct dentry *dentry; 190 struct dentry *dentry;
183 int err = 0; 191 int err = 0;
184 192
185 switch (event) { 193 switch (event) {
186 case RPC_PIPEFS_MOUNT: 194 case RPC_PIPEFS_MOUNT:
187 if (clnt->cl_program->pipe_dir_name == NULL)
188 break;
189 dentry = rpc_setup_pipedir_sb(sb, clnt, 195 dentry = rpc_setup_pipedir_sb(sb, clnt,
190 clnt->cl_program->pipe_dir_name); 196 clnt->cl_program->pipe_dir_name);
191 BUG_ON(dentry == NULL); 197 BUG_ON(dentry == NULL);
@@ -208,6 +214,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
208 return err; 214 return err;
209} 215}
210 216
217static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
218 struct super_block *sb)
219{
220 int error = 0;
221
222 for (;; clnt = clnt->cl_parent) {
223 if (!rpc_clnt_skip_event(clnt, event))
224 error = __rpc_clnt_handle_event(clnt, event, sb);
225 if (error || clnt == clnt->cl_parent)
226 break;
227 }
228 return error;
229}
230
211static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) 231static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
212{ 232{
213 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 233 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
@@ -215,10 +235,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
215 235
216 spin_lock(&sn->rpc_client_lock); 236 spin_lock(&sn->rpc_client_lock);
217 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 237 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
218 if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || 238 if (clnt->cl_program->pipe_dir_name == NULL)
219 ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) 239 break;
240 if (rpc_clnt_skip_event(clnt, event))
241 continue;
242 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
220 continue; 243 continue;
221 atomic_inc(&clnt->cl_count);
222 spin_unlock(&sn->rpc_client_lock); 244 spin_unlock(&sn->rpc_client_lock);
223 return clnt; 245 return clnt;
224 } 246 }
@@ -257,6 +279,14 @@ void rpc_clients_notifier_unregister(void)
257 return rpc_pipefs_notifier_unregister(&rpc_clients_block); 279 return rpc_pipefs_notifier_unregister(&rpc_clients_block);
258} 280}
259 281
282static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
283{
284 clnt->cl_nodelen = strlen(nodename);
285 if (clnt->cl_nodelen > UNX_MAXNODENAME)
286 clnt->cl_nodelen = UNX_MAXNODENAME;
287 memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
288}
289
260static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) 290static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
261{ 291{
262 const struct rpc_program *program = args->program; 292 const struct rpc_program *program = args->program;
@@ -337,10 +367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
337 } 367 }
338 368
339 /* save the nodename */ 369 /* save the nodename */
340 clnt->cl_nodelen = strlen(init_utsname()->nodename); 370 rpc_clnt_set_nodename(clnt, utsname()->nodename);
341 if (clnt->cl_nodelen > UNX_MAXNODENAME)
342 clnt->cl_nodelen = UNX_MAXNODENAME;
343 memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
344 rpc_register_client(clnt); 371 rpc_register_client(clnt);
345 return clnt; 372 return clnt;
346 373
@@ -499,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
499 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 526 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
500 if (err != 0) 527 if (err != 0)
501 goto out_no_path; 528 goto out_no_path;
529 rpc_clnt_set_nodename(new, utsname()->nodename);
502 if (new->cl_auth) 530 if (new->cl_auth)
503 atomic_inc(&new->cl_auth->au_count); 531 atomic_inc(&new->cl_auth->au_count);
504 atomic_inc(&clnt->cl_count); 532 atomic_inc(&clnt->cl_count);
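
(Illustrative sketch, not part of the merged patches.) The new __rpc_pipefs_event() above walks a client and its ancestors, relying on the sunrpc convention that a cloned client's cl_parent points at its parent while a top-level client's cl_parent points back at itself, which is what terminates the loop. The same loop shape on a toy structure (hypothetical names, and omitting the skip-event check):

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;	/* the root points to itself, like cl_parent */
};

/* Apply handle() to the node and each ancestor, stopping on the first
 * error or once the self-referencing root has been visited. */
static int for_each_ancestor(struct node *n, int (*handle)(struct node *))
{
	int err = 0;

	for (;; n = n->parent) {
		err = handle(n);
		if (err || n == n->parent)
			break;
	}
	return err;
}

static int print_node(struct node *n)
{
	printf("visiting %s\n", n->name);
	return 0;
}

int main(void)
{
	struct node root = { "root", &root };
	struct node mid  = { "mid",  &root };
	struct node leaf = { "leaf", &mid  };

	return for_each_ancestor(&leaf, print_node);
}
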
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0af37fc46818..3b62cf288031 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1126,19 +1126,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1126 return -ENOMEM; 1126 return -ENOMEM;
1127 dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, 1127 dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
1128 NET_NAME(net)); 1128 NET_NAME(net));
1129 sn->pipefs_sb = sb;
1129 err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, 1130 err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1130 RPC_PIPEFS_MOUNT, 1131 RPC_PIPEFS_MOUNT,
1131 sb); 1132 sb);
1132 if (err) 1133 if (err)
1133 goto err_depopulate; 1134 goto err_depopulate;
1134 sb->s_fs_info = get_net(net); 1135 sb->s_fs_info = get_net(net);
1135 sn->pipefs_sb = sb;
1136 return 0; 1136 return 0;
1137 1137
1138err_depopulate: 1138err_depopulate:
1139 blocking_notifier_call_chain(&rpc_pipefs_notifier_list, 1139 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1140 RPC_PIPEFS_UMOUNT, 1140 RPC_PIPEFS_UMOUNT,
1141 sb); 1141 sb);
1142 sn->pipefs_sb = NULL;
1142 __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF); 1143 __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
1143 return err; 1144 return err;
1144} 1145}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8adfc88e793a..3d6498af9adc 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -75,20 +75,21 @@ static struct pernet_operations sunrpc_net_ops = {
75static int __init 75static int __init
76init_sunrpc(void) 76init_sunrpc(void)
77{ 77{
78 int err = register_rpc_pipefs(); 78 int err = rpc_init_mempool();
79 if (err) 79 if (err)
80 goto out; 80 goto out;
81 err = rpc_init_mempool();
82 if (err)
83 goto out2;
84 err = rpcauth_init_module(); 81 err = rpcauth_init_module();
85 if (err) 82 if (err)
86 goto out3; 83 goto out2;
87 84
88 cache_initialize(); 85 cache_initialize();
89 86
90 err = register_pernet_subsys(&sunrpc_net_ops); 87 err = register_pernet_subsys(&sunrpc_net_ops);
91 if (err) 88 if (err)
89 goto out3;
90
91 err = register_rpc_pipefs();
92 if (err)
92 goto out4; 93 goto out4;
93#ifdef RPC_DEBUG 94#ifdef RPC_DEBUG
94 rpc_register_sysctl(); 95 rpc_register_sysctl();
@@ -98,11 +99,11 @@ init_sunrpc(void)
98 return 0; 99 return 0;
99 100
100out4: 101out4:
101 rpcauth_remove_module(); 102 unregister_pernet_subsys(&sunrpc_net_ops);
102out3: 103out3:
103 rpc_destroy_mempool(); 104 rpcauth_remove_module();
104out2: 105out2:
105 unregister_rpc_pipefs(); 106 rpc_destroy_mempool();
106out: 107out:
107 return err; 108 return err;
108} 109}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1b7a08df933c..957f25621617 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -989,7 +989,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
989 if (rdev->wiphy.software_iftypes & BIT(iftype)) 989 if (rdev->wiphy.software_iftypes & BIT(iftype))
990 continue; 990 continue;
991 for (j = 0; j < c->n_limits; j++) { 991 for (j = 0; j < c->n_limits; j++) {
992 if (!(limits[j].types & iftype)) 992 if (!(limits[j].types & BIT(iftype)))
993 continue; 993 continue;
994 if (limits[j].max < num[iftype]) 994 if (limits[j].max < num[iftype])
995 goto cont; 995 goto cont;
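
(Illustrative sketch, not part of the merged patches.) The one-character util.c fix above matters because limits[j].types is a bitmask of interface types, so membership has to be tested with BIT(iftype), not with the raw enum value; the raw test can both miss the allowed type and falsely match a disallowed one. A tiny stand-alone demonstration with made-up type numbers (not the real NL80211_IFTYPE_* values):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Hypothetical interface-type numbers, purely for illustration. */
enum iftype { IFTYPE_STATION = 2, IFTYPE_P2P_CLIENT = 6 };

int main(void)
{
	/* An interface-combination limit meant to cover stations only. */
	unsigned int types = BIT(IFTYPE_STATION);	/* 0b0100 */

	/* Buggy test (pre-fix): types & iftype */
	printf("buggy: station matches?    %d\n",
	       !!(types & IFTYPE_STATION));		/* 0: false negative */
	printf("buggy: p2p-client matches? %d\n",
	       !!(types & IFTYPE_P2P_CLIENT));		/* 1: false positive */

	/* Fixed test: types & BIT(iftype) */
	printf("fixed: station matches?    %d\n",
	       !!(types & BIT(IFTYPE_STATION)));	/* 1 */
	printf("fixed: p2p-client matches? %d\n",
	       !!(types & BIT(IFTYPE_P2P_CLIENT)));	/* 0 */
	return 0;
}
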
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 8e730ccc3f2b..44ddaa542db6 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1100,6 +1100,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
1100 if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections) 1100 if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
1101 return; 1101 return;
1102 1102
1103 /* We're looking for an object */
1104 if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
1105 return;
1106
1103 /* All our symbols are of form <prefix>__mod_XXX_device_table. */ 1107 /* All our symbols are of form <prefix>__mod_XXX_device_table. */
1104 name = strstr(symname, "__mod_"); 1108 name = strstr(symname, "__mod_");
1105 if (!name) 1109 if (!name)
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 14a286a7bf2b..857586135d18 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -419,6 +419,7 @@ EXPORT_SYMBOL(snd_ctl_make_virtual_master);
419 * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control 419 * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control
420 * @kcontrol: vmaster kctl element 420 * @kcontrol: vmaster kctl element
421 * @hook: the hook function 421 * @hook: the hook function
422 * @private_data: the private_data pointer to be saved
422 * 423 *
423 * Adds the given hook to the vmaster control element so that it's called 424 * Adds the given hook to the vmaster control element so that it's called
424 * at each time when the value is changed. 425 * at each time when the value is changed.
diff --git a/sound/last.c b/sound/last.c
index bdd0857b8871..7ffc182e0844 100644
--- a/sound/last.c
+++ b/sound/last.c
@@ -38,4 +38,4 @@ static int __init alsa_sound_last_init(void)
38 return 0; 38 return 0;
39} 39}
40 40
41__initcall(alsa_sound_last_init); 41late_initcall_sync(alsa_sound_last_init);
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 64417a733220..d8c670c9d62c 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -475,7 +475,7 @@ static int load_firmware(struct echoaudio *chip)
475 const struct firmware *fw; 475 const struct firmware *fw;
476 int box_type, err; 476 int box_type, err;
477 477
478 if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page)) 478 if (snd_BUG_ON(!chip->comm_page))
479 return -EPERM; 479 return -EPERM;
480 480
481 /* See if the ASIC is present and working - only if the DSP is already loaded */ 481 /* See if the ASIC is present and working - only if the DSP is already loaded */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 7a8fcc4c15f8..841475cc13b6 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -5444,10 +5444,6 @@ int snd_hda_suspend(struct hda_bus *bus)
5444 list_for_each_entry(codec, &bus->codec_list, list) { 5444 list_for_each_entry(codec, &bus->codec_list, list) {
5445 if (hda_codec_is_power_on(codec)) 5445 if (hda_codec_is_power_on(codec))
5446 hda_call_codec_suspend(codec); 5446 hda_call_codec_suspend(codec);
5447 else /* forcibly change the power to D3 even if not used */
5448 hda_set_power_state(codec,
5449 codec->afg ? codec->afg : codec->mfg,
5450 AC_PWRST_D3);
5451 if (codec->patch_ops.post_suspend) 5447 if (codec->patch_ops.post_suspend)
5452 codec->patch_ops.post_suspend(codec); 5448 codec->patch_ops.post_suspend(codec);
5453 } 5449 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c19e71a94e1b..1f350522bed4 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -783,11 +783,13 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
783{ 783{
784 struct azx *chip = bus->private_data; 784 struct azx *chip = bus->private_data;
785 unsigned long timeout; 785 unsigned long timeout;
786 unsigned long loopcounter;
786 int do_poll = 0; 787 int do_poll = 0;
787 788
788 again: 789 again:
789 timeout = jiffies + msecs_to_jiffies(1000); 790 timeout = jiffies + msecs_to_jiffies(1000);
790 for (;;) { 791
792 for (loopcounter = 0;; loopcounter++) {
791 if (chip->polling_mode || do_poll) { 793 if (chip->polling_mode || do_poll) {
792 spin_lock_irq(&chip->reg_lock); 794 spin_lock_irq(&chip->reg_lock);
793 azx_update_rirb(chip); 795 azx_update_rirb(chip);
@@ -803,7 +805,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
803 } 805 }
804 if (time_after(jiffies, timeout)) 806 if (time_after(jiffies, timeout))
805 break; 807 break;
806 if (bus->needs_damn_long_delay) 808 if (bus->needs_damn_long_delay || loopcounter > 3000)
807 msleep(2); /* temporary workaround */ 809 msleep(2); /* temporary workaround */
808 else { 810 else {
809 udelay(10); 811 udelay(10);
@@ -2351,6 +2353,17 @@ static void azx_power_notify(struct hda_bus *bus)
2351 * power management 2353 * power management
2352 */ 2354 */
2353 2355
2356static int snd_hda_codecs_inuse(struct hda_bus *bus)
2357{
2358 struct hda_codec *codec;
2359
2360 list_for_each_entry(codec, &bus->codec_list, list) {
2361 if (snd_hda_codec_needs_resume(codec))
2362 return 1;
2363 }
2364 return 0;
2365}
2366
2354static int azx_suspend(struct pci_dev *pci, pm_message_t state) 2367static int azx_suspend(struct pci_dev *pci, pm_message_t state)
2355{ 2368{
2356 struct snd_card *card = pci_get_drvdata(pci); 2369 struct snd_card *card = pci_get_drvdata(pci);
@@ -2397,7 +2410,8 @@ static int azx_resume(struct pci_dev *pci)
2397 return -EIO; 2410 return -EIO;
2398 azx_init_pci(chip); 2411 azx_init_pci(chip);
2399 2412
2400 azx_init_chip(chip, 1); 2413 if (snd_hda_codecs_inuse(chip->bus))
2414 azx_init_chip(chip, 1);
2401 2415
2402 snd_hda_resume(chip->bus); 2416 snd_hda_resume(chip->bus);
2403 snd_power_change_state(card, SNDRV_CTL_POWER_D0); 2417 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index a36488d94aaa..d906c5b74cf0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3971,9 +3971,14 @@ static void cx_auto_init_output(struct hda_codec *codec)
3971 int i; 3971 int i;
3972 3972
3973 mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids); 3973 mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
3974 for (i = 0; i < cfg->hp_outs; i++) 3974 for (i = 0; i < cfg->hp_outs; i++) {
3975 unsigned int val = PIN_OUT;
3976 if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
3977 AC_PINCAP_HP_DRV)
3978 val |= AC_PINCTL_HP_EN;
3975 snd_hda_codec_write(codec, cfg->hp_pins[i], 0, 3979 snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
3976 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); 3980 AC_VERB_SET_PIN_WIDGET_CONTROL, val);
3981 }
3977 mute_outputs(codec, cfg->hp_outs, cfg->hp_pins); 3982 mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
3978 mute_outputs(codec, cfg->line_outs, cfg->line_out_pins); 3983 mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
3979 mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins); 3984 mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
@@ -4391,8 +4396,10 @@ static void apply_pin_fixup(struct hda_codec *codec,
4391 4396
4392enum { 4397enum {
4393 CXT_PINCFG_LENOVO_X200, 4398 CXT_PINCFG_LENOVO_X200,
4399 CXT_PINCFG_LENOVO_TP410,
4394}; 4400};
4395 4401
4402/* ThinkPad X200 & co with cxt5051 */
4396static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = { 4403static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
4397 { 0x16, 0x042140ff }, /* HP (seq# overridden) */ 4404 { 0x16, 0x042140ff }, /* HP (seq# overridden) */
4398 { 0x17, 0x21a11000 }, /* dock-mic */ 4405 { 0x17, 0x21a11000 }, /* dock-mic */
@@ -4401,15 +4408,33 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
4401 {} 4408 {}
4402}; 4409};
4403 4410
4411/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */
4412static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
4413 { 0x19, 0x042110ff }, /* HP (seq# overridden) */
4414 { 0x1a, 0x21a190f0 }, /* dock-mic */
4415 { 0x1c, 0x212140ff }, /* dock-HP */
4416 {}
4417};
4418
4404static const struct cxt_pincfg *cxt_pincfg_tbl[] = { 4419static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
4405 [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200, 4420 [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
4421 [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
4406}; 4422};
4407 4423
4408static const struct snd_pci_quirk cxt_fixups[] = { 4424static const struct snd_pci_quirk cxt5051_fixups[] = {
4409 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), 4425 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
4410 {} 4426 {}
4411}; 4427};
4412 4428
4429static const struct snd_pci_quirk cxt5066_fixups[] = {
4430 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
4431 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
4432 SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
4433 SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
4434 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
4435 {}
4436};
4437
4413/* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches 4438/* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
4414 * can be created (bko#42825) 4439 * can be created (bko#42825)
4415 */ 4440 */
@@ -4446,13 +4471,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
4446 case 0x14f15051: 4471 case 0x14f15051:
4447 add_cx5051_fake_mutes(codec); 4472 add_cx5051_fake_mutes(codec);
4448 codec->pin_amp_workaround = 1; 4473 codec->pin_amp_workaround = 1;
4474 apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
4449 break; 4475 break;
4450 default: 4476 default:
4451 codec->pin_amp_workaround = 1; 4477 codec->pin_amp_workaround = 1;
4478 apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
4452 } 4479 }
4453 4480
4454 apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
4455
4456 /* Show mute-led control only on HP laptops 4481 /* Show mute-led control only on HP laptops
4457 * This is a sort of white-list: on HP laptops, EAPD corresponds 4482 * This is a sort of white-list: on HP laptops, EAPD corresponds
4458 * only to the mute-LED without actualy amp function. Meanwhile, 4483 * only to the mute-LED without actualy amp function. Meanwhile,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2508f8109f11..7810913d07a0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1445,6 +1445,13 @@ enum {
1445 ALC_FIXUP_ACT_BUILD, 1445 ALC_FIXUP_ACT_BUILD,
1446}; 1446};
1447 1447
1448static void alc_apply_pincfgs(struct hda_codec *codec,
1449 const struct alc_pincfg *cfg)
1450{
1451 for (; cfg->nid; cfg++)
1452 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1453}
1454
1448static void alc_apply_fixup(struct hda_codec *codec, int action) 1455static void alc_apply_fixup(struct hda_codec *codec, int action)
1449{ 1456{
1450 struct alc_spec *spec = codec->spec; 1457 struct alc_spec *spec = codec->spec;
@@ -1478,9 +1485,7 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
1478 snd_printdd(KERN_INFO "hda_codec: %s: " 1485 snd_printdd(KERN_INFO "hda_codec: %s: "
1479 "Apply pincfg for %s\n", 1486 "Apply pincfg for %s\n",
1480 codec->chip_name, modelname); 1487 codec->chip_name, modelname);
1481 for (; cfg->nid; cfg++) 1488 alc_apply_pincfgs(codec, cfg);
1482 snd_hda_codec_set_pincfg(codec, cfg->nid,
1483 cfg->val);
1484 break; 1489 break;
1485 case ALC_FIXUP_VERBS: 1490 case ALC_FIXUP_VERBS:
1486 if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs) 1491 if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs)
@@ -4861,6 +4866,7 @@ enum {
4861 ALC260_FIXUP_GPIO1_TOGGLE, 4866 ALC260_FIXUP_GPIO1_TOGGLE,
4862 ALC260_FIXUP_REPLACER, 4867 ALC260_FIXUP_REPLACER,
4863 ALC260_FIXUP_HP_B1900, 4868 ALC260_FIXUP_HP_B1900,
4869 ALC260_FIXUP_KN1,
4864}; 4870};
4865 4871
4866static void alc260_gpio1_automute(struct hda_codec *codec) 4872static void alc260_gpio1_automute(struct hda_codec *codec)
@@ -4888,6 +4894,36 @@ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
4888 } 4894 }
4889} 4895}
4890 4896
4897static void alc260_fixup_kn1(struct hda_codec *codec,
4898 const struct alc_fixup *fix, int action)
4899{
4900 struct alc_spec *spec = codec->spec;
4901 static const struct alc_pincfg pincfgs[] = {
4902 { 0x0f, 0x02214000 }, /* HP/speaker */
4903 { 0x12, 0x90a60160 }, /* int mic */
4904 { 0x13, 0x02a19000 }, /* ext mic */
4905 { 0x18, 0x01446000 }, /* SPDIF out */
4906 /* disable bogus I/O pins */
4907 { 0x10, 0x411111f0 },
4908 { 0x11, 0x411111f0 },
4909 { 0x14, 0x411111f0 },
4910 { 0x15, 0x411111f0 },
4911 { 0x16, 0x411111f0 },
4912 { 0x17, 0x411111f0 },
4913 { 0x19, 0x411111f0 },
4914 { }
4915 };
4916
4917 switch (action) {
4918 case ALC_FIXUP_ACT_PRE_PROBE:
4919 alc_apply_pincfgs(codec, pincfgs);
4920 break;
4921 case ALC_FIXUP_ACT_PROBE:
4922 spec->init_amp = ALC_INIT_NONE;
4923 break;
4924 }
4925}
4926
4891static const struct alc_fixup alc260_fixups[] = { 4927static const struct alc_fixup alc260_fixups[] = {
4892 [ALC260_FIXUP_HP_DC5750] = { 4928 [ALC260_FIXUP_HP_DC5750] = {
4893 .type = ALC_FIXUP_PINS, 4929 .type = ALC_FIXUP_PINS,
@@ -4938,7 +4974,11 @@ static const struct alc_fixup alc260_fixups[] = {
4938 .v.func = alc260_fixup_gpio1_toggle, 4974 .v.func = alc260_fixup_gpio1_toggle,
4939 .chained = true, 4975 .chained = true,
4940 .chain_id = ALC260_FIXUP_COEF, 4976 .chain_id = ALC260_FIXUP_COEF,
4941 } 4977 },
4978 [ALC260_FIXUP_KN1] = {
4979 .type = ALC_FIXUP_FUNC,
4980 .v.func = alc260_fixup_kn1,
4981 },
4942}; 4982};
4943 4983
4944static const struct snd_pci_quirk alc260_fixup_tbl[] = { 4984static const struct snd_pci_quirk alc260_fixup_tbl[] = {
@@ -4948,6 +4988,7 @@ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
4948 SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750), 4988 SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
4949 SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900), 4989 SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
4950 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1), 4990 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
4991 SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
4951 SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER), 4992 SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER),
4952 SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF), 4993 SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF),
4953 {} 4994 {}
@@ -5364,6 +5405,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5364 SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", 5405 SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
5365 ALC882_FIXUP_ACER_ASPIRE_4930G), 5406 ALC882_FIXUP_ACER_ASPIRE_4930G),
5366 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), 5407 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
5408 SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
5409 ALC882_FIXUP_ACER_ASPIRE_4930G),
5367 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), 5410 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
5368 SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G), 5411 SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
5369 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), 5412 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
@@ -5397,6 +5440,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5397 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 5440 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
5398 5441
5399 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 5442 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
5443 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
5400 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 5444 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
5401 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD), 5445 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD),
5402 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 5446 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
@@ -5597,13 +5641,13 @@ static int patch_alc262(struct hda_codec *codec)
5597 snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80); 5641 snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80);
5598 } 5642 }
5599#endif 5643#endif
5600 alc_auto_parse_customize_define(codec);
5601
5602 alc_fix_pll_init(codec, 0x20, 0x0a, 10); 5644 alc_fix_pll_init(codec, 0x20, 0x0a, 10);
5603 5645
5604 alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups); 5646 alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups);
5605 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 5647 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
5606 5648
5649 alc_auto_parse_customize_define(codec);
5650
5607 /* automatic parse from the BIOS config */ 5651 /* automatic parse from the BIOS config */
5608 err = alc262_parse_auto_config(codec); 5652 err = alc262_parse_auto_config(codec);
5609 if (err < 0) 5653 if (err < 0)
@@ -6068,6 +6112,7 @@ static const struct alc_fixup alc269_fixups[] = {
6068 6112
6069static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6113static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6070 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), 6114 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
6115 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
6071 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 6116 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
6072 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 6117 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
6073 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), 6118 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -6207,8 +6252,6 @@ static int patch_alc269(struct hda_codec *codec)
6207 6252
6208 spec->mixer_nid = 0x0b; 6253 spec->mixer_nid = 0x0b;
6209 6254
6210 alc_auto_parse_customize_define(codec);
6211
6212 err = alc_codec_rename_from_preset(codec); 6255 err = alc_codec_rename_from_preset(codec);
6213 if (err < 0) 6256 if (err < 0)
6214 goto error; 6257 goto error;
@@ -6241,6 +6284,8 @@ static int patch_alc269(struct hda_codec *codec)
6241 alc269_fixup_tbl, alc269_fixups); 6284 alc269_fixup_tbl, alc269_fixups);
6242 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 6285 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
6243 6286
6287 alc_auto_parse_customize_define(codec);
6288
6244 /* automatic parse from the BIOS config */ 6289 /* automatic parse from the BIOS config */
6245 err = alc269_parse_auto_config(codec); 6290 err = alc269_parse_auto_config(codec);
6246 if (err < 0) 6291 if (err < 0)
@@ -6817,8 +6862,6 @@ static int patch_alc662(struct hda_codec *codec)
6817 /* handle multiple HPs as is */ 6862 /* handle multiple HPs as is */
6818 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 6863 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
6819 6864
6820 alc_auto_parse_customize_define(codec);
6821
6822 alc_fix_pll_init(codec, 0x20, 0x04, 15); 6865 alc_fix_pll_init(codec, 0x20, 0x04, 15);
6823 6866
6824 err = alc_codec_rename_from_preset(codec); 6867 err = alc_codec_rename_from_preset(codec);
@@ -6835,6 +6878,9 @@ static int patch_alc662(struct hda_codec *codec)
6835 alc_pick_fixup(codec, alc662_fixup_models, 6878 alc_pick_fixup(codec, alc662_fixup_models,
6836 alc662_fixup_tbl, alc662_fixups); 6879 alc662_fixup_tbl, alc662_fixups);
6837 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 6880 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
6881
6882 alc_auto_parse_customize_define(codec);
6883
6838 /* automatic parse from the BIOS config */ 6884 /* automatic parse from the BIOS config */
6839 err = alc662_parse_auto_config(codec); 6885 err = alc662_parse_auto_config(codec);
6840 if (err < 0) 6886 if (err < 0)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 33a9946b492c..4742cac26aa9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5063,12 +5063,11 @@ static void stac92xx_update_led_status(struct hda_codec *codec, int enabled)
5063 if (spec->gpio_led_polarity) 5063 if (spec->gpio_led_polarity)
5064 muted = !muted; 5064 muted = !muted;
5065 5065
5066 /*polarity defines *not* muted state level*/
5067 if (!spec->vref_mute_led_nid) { 5066 if (!spec->vref_mute_led_nid) {
5068 if (muted) 5067 if (muted)
5069 spec->gpio_data &= ~spec->gpio_led; /* orange */ 5068 spec->gpio_data |= spec->gpio_led;
5070 else 5069 else
5071 spec->gpio_data |= spec->gpio_led; /* white */ 5070 spec->gpio_data &= ~spec->gpio_led;
5072 stac_gpio_set(codec, spec->gpio_mask, 5071 stac_gpio_set(codec, spec->gpio_mask,
5073 spec->gpio_dir, spec->gpio_data); 5072 spec->gpio_dir, spec->gpio_data);
5074 } else { 5073 } else {
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b68cdec03b9e..0b2aea2ce172 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5170,6 +5170,7 @@ static int snd_hdsp_create_hwdep(struct snd_card *card, struct hdsp *hdsp)
5170 strcpy(hw->name, "HDSP hwdep interface"); 5170 strcpy(hw->name, "HDSP hwdep interface");
5171 5171
5172 hw->ops.ioctl = snd_hdsp_hwdep_ioctl; 5172 hw->ops.ioctl = snd_hdsp_hwdep_ioctl;
5173 hw->ops.ioctl_compat = snd_hdsp_hwdep_ioctl;
5173 5174
5174 return 0; 5175 return 0;
5175} 5176}
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index df3ac73f8778..b39ad356b92b 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -99,6 +99,7 @@ static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = {
99 .platform_name = "bfin-i2s-pcm-audio", 99 .platform_name = "bfin-i2s-pcm-audio",
100 .codec_name = "ssm2602.0-001b", 100 .codec_name = "ssm2602.0-001b",
101 .ops = &bf5xx_ssm2602_ops, 101 .ops = &bf5xx_ssm2602_ops,
102 .dai_fmt = BF5XX_SSM2602_DAIFMT,
102 }, 103 },
103 { 104 {
104 .name = "ssm2602", 105 .name = "ssm2602",
@@ -108,6 +109,7 @@ static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = {
108 .platform_name = "bfin-i2s-pcm-audio", 109 .platform_name = "bfin-i2s-pcm-audio",
109 .codec_name = "ssm2602.0-001b", 110 .codec_name = "ssm2602.0-001b",
110 .ops = &bf5xx_ssm2602_ops, 111 .ops = &bf5xx_ssm2602_ops,
112 .dai_fmt = BF5XX_SSM2602_DAIFMT,
111 }, 113 },
112}; 114};
113 115
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 6508e8b790bb..59d8efaa17e9 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -57,7 +57,7 @@ config SND_SOC_ALL_CODECS
 	select SND_SOC_TPA6130A2 if I2C
 	select SND_SOC_TLV320DAC33 if I2C
 	select SND_SOC_TWL4030 if TWL4030_CORE
-	select SND_SOC_TWL6040 if TWL4030_CORE
+	select SND_SOC_TWL6040 if TWL6040_CORE
 	select SND_SOC_UDA134X
 	select SND_SOC_UDA1380 if I2C
 	select SND_SOC_WL1273 if MFD_WL1273_CORE
@@ -276,7 +276,6 @@ config SND_SOC_TWL4030
 	tristate

 config SND_SOC_TWL6040
-	select TWL6040_CORE
 	tristate

 config SND_SOC_UDA134X
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 78979b3e0e95..07c44b71f096 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -929,6 +929,8 @@ static int cs42l73_set_mclk(struct snd_soc_dai *dai, unsigned int freq)

 	/* MCLKX -> MCLK */
 	mclkx_coeff = cs42l73_get_mclkx_coeff(freq);
+	if (mclkx_coeff < 0)
+		return mclkx_coeff;

 	mclk = cs42l73_mclkx_coeffs[mclkx_coeff].mclkx /
 	    cs42l73_mclkx_coeffs[mclkx_coeff].ratio;
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 16d55f91a653..df1e07ffac32 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -472,7 +472,7 @@ static int tlv320aic23_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
 				      enum snd_soc_bias_level level)
 {
-	u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f;
+	u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f;

 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -491,7 +491,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
 	case SND_SOC_BIAS_OFF:
 		/* everything off, dac mute, inactive */
 		snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0);
-		snd_soc_write(codec, TLV320AIC23_PWR, 0xffff);
+		snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff);
 		break;
 	}
 	codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 2d8c6b825e57..dc7509b9d53a 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -26,7 +26,6 @@
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/i2c/twl.h>
 #include <linux/mfd/twl6040.h>

 #include <sound/core.h>
@@ -1528,7 +1527,7 @@ static int twl6040_resume(struct snd_soc_codec *codec)
 static int twl6040_probe(struct snd_soc_codec *codec)
 {
 	struct twl6040_data *priv;
-	struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
+	struct twl6040_codec_data *pdata = dev_get_platdata(codec->dev);
 	struct platform_device *pdev = container_of(codec->dev,
 						    struct platform_device, dev);
 	int ret = 0;
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 8c4c9591ec05..aa12c6b6beeb 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -60,7 +60,7 @@ struct wm8350_jack_data {
 };

 struct wm8350_data {
-	struct snd_soc_codec codec;
+	struct wm8350 *wm8350;
 	struct wm8350_output out1;
 	struct wm8350_output out2;
 	struct wm8350_jack_data hpl;
@@ -1309,7 +1309,7 @@ static void wm8350_hp_work(struct wm8350_data *priv,
 			   struct wm8350_jack_data *jack,
 			   u16 mask)
 {
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	u16 reg;
 	int report;

@@ -1342,7 +1342,7 @@ static void wm8350_hpr_work(struct work_struct *work)
 static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
 {
 	struct wm8350_data *priv = data;
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	struct wm8350_jack_data *jack = NULL;

 	switch (irq - wm8350->irq_base) {
@@ -1427,7 +1427,7 @@ EXPORT_SYMBOL_GPL(wm8350_hp_jack_detect);
 static irqreturn_t wm8350_mic_handler(int irq, void *data)
 {
 	struct wm8350_data *priv = data;
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	u16 reg;
 	int report = 0;

@@ -1536,6 +1536,8 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, priv);

+	priv->wm8350 = wm8350;
+
 	for (i = 0; i < ARRAY_SIZE(supply_names); i++)
 		priv->supplies[i].supply = supply_names[i];

@@ -1544,7 +1546,6 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
 	if (ret != 0)
 		return ret;

-	wm8350->codec.codec = codec;
 	codec->control_data = wm8350;

 	/* Put the codec into reset if it wasn't already */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 7c49642af052..6c1fe3afd4b5 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1000,61 +1000,170 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
 	}
 }

-static int late_enable_ev(struct snd_soc_dapm_widget *w,
-			  struct snd_kcontrol *kcontrol, int event)
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
+	int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
+	int dac;
+	int adc;
+	int val;
+
+	switch (control->type) {
+	case WM8994:
+	case WM8958:
+		mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA;
+		break;
+	default:
+		break;
+	}

 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
-		if (wm8994->aif1clk_enable) {
-			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
-					    WM8994_AIF1CLK_ENA_MASK,
-					    WM8994_AIF1CLK_ENA);
-			wm8994->aif1clk_enable = 0;
-		}
-		if (wm8994->aif2clk_enable) {
-			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
-					    WM8994_AIF2CLK_ENA_MASK,
-					    WM8994_AIF2CLK_ENA);
-			wm8994->aif2clk_enable = 0;
-		}
+		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
+		if ((val & WM8994_AIF1ADCL_SRC) &&
+		    (val & WM8994_AIF1ADCR_SRC))
+			adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA;
+		else if (!(val & WM8994_AIF1ADCL_SRC) &&
+			 !(val & WM8994_AIF1ADCR_SRC))
+			adc = WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
+		else
+			adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA |
+				WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
+
+		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2);
+		if ((val & WM8994_AIF1DACL_SRC) &&
+		    (val & WM8994_AIF1DACR_SRC))
+			dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA;
+		else if (!(val & WM8994_AIF1DACL_SRC) &&
+			 !(val & WM8994_AIF1DACR_SRC))
+			dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
+		else
+			dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA |
+				WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
+
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    mask, adc);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    mask, dac);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_AIF1DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA,
+				    WM8994_AIF1DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask,
+				    WM8994_AIF1ADC1R_ENA |
+				    WM8994_AIF1ADC1L_ENA |
+				    WM8994_AIF1ADC2R_ENA |
+				    WM8994_AIF1ADC2L_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask,
+				    WM8994_AIF1DAC1R_ENA |
+				    WM8994_AIF1DAC1L_ENA |
+				    WM8994_AIF1DAC2R_ENA |
+				    WM8994_AIF1DAC2L_ENA);
 		break;
-	}

-	/* We may also have postponed startup of DSP, handle that. */
-	wm8958_aif_ev(w, kcontrol, event);
+	case SND_SOC_DAPM_PRE_PMD:
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    mask, 0);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    mask, 0);
+
+		val = snd_soc_read(codec, WM8994_CLOCKING_1);
+		if (val & WM8994_AIF2DSPCLK_ENA)
+			val = WM8994_SYSDSPCLK_ENA;
+		else
+			val = 0;
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_SYSDSPCLK_ENA |
+				    WM8994_AIF1DSPCLK_ENA, val);
+		break;
+	}

 	return 0;
 }

-static int late_disable_ev(struct snd_soc_dapm_widget *w,
-			   struct snd_kcontrol *kcontrol, int event)
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int dac;
+	int adc;
+	int val;

 	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1);
+		if ((val & WM8994_AIF2ADCL_SRC) &&
+		    (val & WM8994_AIF2ADCR_SRC))
+			adc = WM8994_AIF2ADCR_ENA;
+		else if (!(val & WM8994_AIF2ADCL_SRC) &&
+			 !(val & WM8994_AIF2ADCR_SRC))
+			adc = WM8994_AIF2ADCL_ENA;
+		else
+			adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA;
+
+
+		val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2);
+		if ((val & WM8994_AIF2DACL_SRC) &&
+		    (val & WM8994_AIF2DACR_SRC))
+			dac = WM8994_AIF2DACR_ENA;
+		else if (!(val & WM8994_AIF2DACL_SRC) &&
+			 !(val & WM8994_AIF2DACR_SRC))
+			dac = WM8994_AIF2DACL_ENA;
+		else
+			dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA;
+
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA, adc);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA, dac);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_AIF2DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA,
+				    WM8994_AIF2DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
 	case SND_SOC_DAPM_POST_PMD:
-		if (wm8994->aif1clk_disable) {
-			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
-					    WM8994_AIF1CLK_ENA_MASK, 0);
-			wm8994->aif1clk_disable = 0;
-		}
-		if (wm8994->aif2clk_disable) {
-			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
-					    WM8994_AIF2CLK_ENA_MASK, 0);
-			wm8994->aif2clk_disable = 0;
-		}
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA, 0);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA, 0);
+
+		val = snd_soc_read(codec, WM8994_CLOCKING_1);
+		if (val & WM8994_AIF1DSPCLK_ENA)
+			val = WM8994_SYSDSPCLK_ENA;
+		else
+			val = 0;
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_SYSDSPCLK_ENA |
+				    WM8994_AIF2DSPCLK_ENA, val);
 		break;
 	}

 	return 0;
 }

-static int aif1clk_ev(struct snd_soc_dapm_widget *w,
-		      struct snd_kcontrol *kcontrol, int event)
+static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
@@ -1071,8 +1180,8 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
 	return 0;
 }

-static int aif2clk_ev(struct snd_soc_dapm_widget *w,
-		      struct snd_kcontrol *kcontrol, int event)
+static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
@@ -1089,6 +1198,63 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
 	return 0;
 }

+static int late_enable_ev(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (wm8994->aif1clk_enable) {
+			aif1clk_ev(w, kcontrol, event);
+			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+					    WM8994_AIF1CLK_ENA_MASK,
+					    WM8994_AIF1CLK_ENA);
+			wm8994->aif1clk_enable = 0;
+		}
+		if (wm8994->aif2clk_enable) {
+			aif2clk_ev(w, kcontrol, event);
+			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+					    WM8994_AIF2CLK_ENA_MASK,
+					    WM8994_AIF2CLK_ENA);
+			wm8994->aif2clk_enable = 0;
+		}
+		break;
+	}
+
+	/* We may also have postponed startup of DSP, handle that. */
+	wm8958_aif_ev(w, kcontrol, event);
+
+	return 0;
+}
+
+static int late_disable_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMD:
+		if (wm8994->aif1clk_disable) {
+			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+					    WM8994_AIF1CLK_ENA_MASK, 0);
+			aif1clk_ev(w, kcontrol, event);
+			wm8994->aif1clk_disable = 0;
+		}
+		if (wm8994->aif2clk_disable) {
+			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+					    WM8994_AIF2CLK_ENA_MASK, 0);
+			aif2clk_ev(w, kcontrol, event);
+			wm8994->aif2clk_disable = 0;
+		}
+		break;
+	}
+
+	return 0;
+}
+
 static int adc_mux_ev(struct snd_soc_dapm_widget *w,
 		      struct snd_kcontrol *kcontrol, int event)
 {
@@ -1385,9 +1551,9 @@ static const struct snd_kcontrol_new aif2dacr_src_mux =
 	SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);

 static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
-SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev,
 		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev,
 		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

 SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
@@ -1416,8 +1582,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
 };

 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
 		   left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -1470,30 +1638,30 @@ SND_SOC_DAPM_SUPPLY("VMID", SND_SOC_NOPM, 0, 0, vmid_event,
 SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
 		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),

-SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0),

 SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
+		     0, SND_SOC_NOPM, 9, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
+		     0, SND_SOC_NOPM, 8, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 9, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 8, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),

 SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
+		     0, SND_SOC_NOPM, 11, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
+		     0, SND_SOC_NOPM, 10, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 11, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 10, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),

 SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
@@ -1520,14 +1688,14 @@ SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0,
 		   dac1r_mix, ARRAY_SIZE(dac1r_mix)),

 SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0,
-		     WM8994_POWER_MANAGEMENT_4, 13, 0),
+		     SND_SOC_NOPM, 13, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
-		     WM8994_POWER_MANAGEMENT_4, 12, 0),
+		     SND_SOC_NOPM, 12, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 13, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 12, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),

 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", NULL, 0, SND_SOC_NOPM, 0, 0),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index f13f2886339c..6c028c470601 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1035,7 +1035,7 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec,
 			     enum snd_soc_bias_level level)
 {
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
-	int val;
+	int mask, val;

 	switch (level) {
 	case SND_SOC_BIAS_STANDBY:
@@ -1047,6 +1047,13 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec,
 	case SND_SOC_BIAS_ON:
 		/* Turn off any unneded single ended outputs */
 		val = 0;
+		mask = 0;
+
+		if (hubs->lineout1_se)
+			mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
+
+		if (hubs->lineout2_se)
+			mask |= WM8993_LINEOUT2N_ENA | WM8993_LINEOUT2P_ENA;

 		if (hubs->lineout1_se && hubs->lineout1n_ena)
 			val |= WM8993_LINEOUT1N_ENA;
@@ -1061,11 +1068,7 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec,
 			val |= WM8993_LINEOUT2P_ENA;

 		snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_3,
-				    WM8993_LINEOUT1N_ENA |
-				    WM8993_LINEOUT1P_ENA |
-				    WM8993_LINEOUT2N_ENA |
-				    WM8993_LINEOUT2P_ENA,
-				    val);
+				    mask, val);

 		/* Remove the input clamps */
 		snd_soc_update_bits(codec, WM8993_INPUTS_CLAMP_REG,
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index e00dd0b1139c..deafbfaacdbf 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -97,7 +97,7 @@ config SND_OMAP_SOC_SDP3430

 config SND_OMAP_SOC_OMAP_ABE_TWL6040
 	tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
-	depends on TWL4030_CORE && SND_OMAP_SOC && ARCH_OMAP4
+	depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4
 	select SND_OMAP_SOC_DMIC
 	select SND_OMAP_SOC_MCPDM
 	select SND_SOC_TWL6040
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a59bd352d342..5a649da9122a 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -401,6 +401,10 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
 	}

 out:
+	/* free preallocated buffers in case of error */
+	if (ret)
+		omap_pcm_free_dma_buffers(pcm);
+
 	return ret;
 }

diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 72185078ddf8..79fbeea99d46 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -166,7 +166,7 @@ static struct snd_soc_dai_driver s3c2412_i2s_dai = {

 static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_dai(&pdev->dev, &s3c2412_i2s_dai);
+	return s3c_i2sv2_register_dai(&pdev->dev, -1, &s3c2412_i2s_dai);
 }

 static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 378cc5b056d7..74ed2dffbffd 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1001,11 +1001,10 @@ static void fsi_dma_do_tasklet(unsigned long data)
 	sg_dma_address(&sg) = buf;
 	sg_dma_len(&sg) = len;

-	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
-						  DMA_PREP_INTERRUPT |
-						  DMA_CTRL_ACK);
+	desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		dev_err(dai->dev, "device_prep_slave_sg() fail\n");
+		dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n");
 		return;
 	}

diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index 9d9ad8d61c0a..8526e1edaf45 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -35,7 +35,7 @@ static unsigned long siumckb_recalc(struct clk *clk)
 	return codec_freq;
 }

-static struct clk_ops siumckb_clk_ops = {
+static struct sh_clk_ops siumckb_clk_ops = {
 	.recalc = siumckb_recalc,
 };

diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index accdcb7d4d9d..c88d9741b9e7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3113,6 +3113,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
 			    GFP_KERNEL);
 	if (card->rtd == NULL)
 		return -ENOMEM;
+	card->num_rtd = 0;
 	card->rtd_aux = &card->rtd[card->num_links];

 	for (i = 0; i < card->num_links; i++)
@@ -3624,10 +3625,10 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 	int i, ret;

 	num_routes = of_property_count_strings(np, propname);
-	if (num_routes & 1) {
+	if (num_routes < 0 || num_routes & 1) {
 		dev_err(card->dev,
-			"Property '%s's length is not even\n",
+			"Property '%s' does not exist or its length is not even\n",
 			propname);
 		return -EINVAL;
 	}
 	num_routes /= 2;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5cbd2d7623b8..1bb6d4a63cd8 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -67,6 +67,7 @@ static int dapm_up_seq[] = {
 	[snd_soc_dapm_out_drv] = 10,
 	[snd_soc_dapm_hp] = 10,
 	[snd_soc_dapm_spk] = 10,
+	[snd_soc_dapm_line] = 10,
 	[snd_soc_dapm_post] = 11,
 };

@@ -75,6 +76,7 @@ static int dapm_down_seq[] = {
 	[snd_soc_dapm_adc] = 1,
 	[snd_soc_dapm_hp] = 2,
 	[snd_soc_dapm_spk] = 2,
+	[snd_soc_dapm_line] = 2,
 	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 03059e75665a..9bf3fc759344 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -234,8 +234,8 @@ endif

 export PERL_PATH

-FLEX = $(CROSS_COMPILE)flex
-BISON= $(CROSS_COMPILE)bison
+FLEX = flex
+BISON= bison

 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2e317438980b..cdae9b2db1cc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -374,16 +374,23 @@ static int __cmd_report(struct perf_report *rep)
 	    (kernel_map->dso->hit &&
 	     (kernel_kmap->ref_reloc_sym == NULL ||
 	      kernel_kmap->ref_reloc_sym->addr == 0))) {
-		const struct dso *kdso = kernel_map->dso;
+		const char *desc =
+		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+		    "can't be resolved.";
+
+		if (kernel_map) {
+			const struct dso *kdso = kernel_map->dso;
+			if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
+				desc = "If some relocation was applied (e.g. "
+				       "kexec) symbols may be misresolved.";
+			}
+		}

 		ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
 "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
 "Samples in kernel modules can't be resolved as well.\n\n",
-			RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
-"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
-"can't be resolved." :
-"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+			desc);
 	}

 	if (dump_trace) {
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c5b9801ac61..223ffdcc0fd8 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -851,6 +851,28 @@ static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
 	return test__checkevent_symbolic_name(evlist);
 }

+static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+
+	return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+
+	return test__checkevent_symbolic_name(evlist);
+}
+
 static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -1091,6 +1113,14 @@ static struct test__event_st {
 	{
 		.name = "r1,syscalls:sys_enter_open:k,1:1:hp",
 		.check = test__checkevent_list,
 	},
+	{
+		.name = "instructions:G",
+		.check = test__checkevent_exclude_host_modifier,
+	},
+	{
+		.name = "instructions:H",
+		.check = test__checkevent_exclude_guest_modifier,
+	},
 };

 #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 05d766e3ecb5..1fcf1bbc5458 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -54,7 +54,7 @@ num_dec [0-9]+
 num_hex		0x[a-fA-F0-9]+
 num_raw_hex	[a-fA-F0-9]+
 name		[a-zA-Z_*?][a-zA-Z0-9_*?]*
-modifier_event	[ukhp]{1,5}
+modifier_event	[ukhpGH]{1,8}
 modifier_bp	[rwx]

 %%
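
The 'G' and 'H' letters added to modifier_event above appear to map onto the exclude_host and exclude_guest attribute bits exercised by the new builtin-test.c cases ("instructions:G" sets exclude_host, "instructions:H" sets exclude_guest). A plausible usage sketch once this parser change is in place — the command lines below are illustrative and not taken from this patch set — would be:

	perf record -e instructions:G -a sleep 1    # guest-side samples only (exclude_host set)
	perf record -e instructions:H -a sleep 1    # host-side samples only (exclude_guest set)
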
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c0a028c3ebaf..ab9867b2b433 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -977,8 +977,9 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
  * And always look at the original dso, not at debuginfo packages, that
  * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
  */
-static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map,
-					symbol_filter_t filter)
+static int
+dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map,
+			    symbol_filter_t filter)
 {
 	uint32_t nr_rel_entries, idx;
 	GElf_Sym sym;
@@ -993,10 +994,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map,
 	char sympltname[1024];
 	Elf *elf;
 	int nr = 0, symidx, fd, err = 0;
-	char name[PATH_MAX];

-	snprintf(name, sizeof(name), "%s%s",
-		 symbol_conf.symfs, dso->long_name);
 	fd = open(name, O_RDONLY);
 	if (fd < 0)
 		goto out;
@@ -1703,8 +1701,9 @@ restart:
 			continue;

 		if (ret > 0) {
-			int nr_plt = dso__synthesize_plt_symbols(dso, map,
-								 filter);
+			int nr_plt;
+
+			nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter);
 			if (nr_plt > 0)
 				ret += nr_plt;
 			break;
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 95d6a6f7c33a..4915408f6a98 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -183,6 +183,9 @@ my %force_config;
 # do not force reboots on config problems
 my $no_reboot = 1;

+# reboot on success
+my $reboot_success = 0;
+
 my %option_map = (
     "MACHINE"			=> \$machine,
     "SSH_USER"			=> \$ssh_user,
@@ -2192,7 +2195,7 @@ sub run_bisect {
 	}

     # Are we looking for where it worked, not failed?
-    if ($reverse_bisect) {
+    if ($reverse_bisect && $ret >= 0) {
 	$ret = !$ret;
     }

@@ -3469,6 +3472,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {

     # Do not reboot on failing test options
     $no_reboot = 1;
+    $reboot_success = 0;

     $iteration = $i;

@@ -3554,9 +3558,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
 	    die "failed to checkout $checkout";
 	}

+    $no_reboot = 0;
+
     # A test may opt to not reboot the box
     if ($reboot_on_success) {
-	$no_reboot = 0;
+	$reboot_success = 1;
     }

     if ($test_type eq "bisect") {
@@ -3600,7 +3606,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {

 if ($opt{"POWEROFF_ON_SUCCESS"}) {
     halt;
-} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
+} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot && $reboot_success) {
     reboot_to_good;
 } elsif (defined($switch_to_good)) {
     # still need to get to the good kernel