-rw-r--r--Documentation/ABI/testing/sysfs-block13
-rw-r--r--Documentation/DocBook/uio-howto.tmpl7
-rw-r--r--Documentation/blockdev/cciss.txt14
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/filesystems/btrfs.txt4
-rw-r--r--Documentation/i2c/ten-bit-addresses36
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rw-r--r--Documentation/power/devices.txt111
-rw-r--r--Documentation/power/runtime_pm.txt40
-rw-r--r--Documentation/serial/serial-rs485.txt14
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt8
-rw-r--r--MAINTAINERS39
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig20
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/common/gic.c15
-rw-r--r--arch/arm/common/pl330.c12
-rw-r--r--arch/arm/configs/at91cap9_defconfig (renamed from arch/arm/configs/at91cap9adk_defconfig)7
-rw-r--r--arch/arm/configs/at91rm9200_defconfig47
-rw-r--r--arch/arm/configs/at91sam9260_defconfig (renamed from arch/arm/configs/at91sam9260ek_defconfig)16
-rw-r--r--arch/arm/configs/at91sam9g20_defconfig (renamed from arch/arm/configs/at91sam9g20ek_defconfig)23
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig7
-rw-r--r--arch/arm/configs/at91sam9rl_defconfig (renamed from arch/arm/configs/at91sam9rlek_defconfig)5
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/u300_defconfig13
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/cti.h179
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h2
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/perf_event.h3
-rw-r--r--arch/arm/include/asm/pmu.h25
-rw-r--r--arch/arm/include/asm/topology.h2
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/kprobes-arm.c4
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c27
-rw-r--r--arch/arm/kernel/kprobes-test-thumb.c16
-rw-r--r--arch/arm/kernel/kprobes-test.h100
-rw-r--r--arch/arm/kernel/machine_kexec.c35
-rw-r--r--arch/arm/kernel/perf_event.c30
-rw-r--r--arch/arm/kernel/perf_event_v6.c32
-rw-r--r--arch/arm/kernel/perf_event_v7.c401
-rw-r--r--arch/arm/kernel/perf_event_xscale.c16
-rw-r--r--arch/arm/kernel/pmu.c1
-rw-r--r--arch/arm/kernel/process.c3
-rw-r--r--arch/arm/kernel/setup.c6
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/lib/bitops.h26
-rw-r--r--arch/arm/lib/changebit.S4
-rw-r--r--arch/arm/lib/clearbit.S4
-rw-r--r--arch/arm/lib/setbit.S4
-rw-r--r--arch/arm/lib/testchangebit.S4
-rw-r--r--arch/arm/lib/testclearbit.S4
-rw-r--r--arch/arm/lib/testsetbit.S4
-rw-r--r--arch/arm/mach-bcmring/core.c2
-rw-r--r--arch/arm/mach-bcmring/dma.c1
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-highbank/highbank.c4
-rw-r--r--arch/arm/mach-imx/Kconfig13
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c7
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c109
-rw-r--r--arch/arm/mach-imx/src.c7
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h2
-rw-r--r--arch/arm/mach-mx5/cpu.c5
-rw-r--r--arch/arm/mach-mx5/mm.c6
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c2
-rw-r--r--arch/arm/mach-omap1/Kconfig8
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c10
-rw-r--r--arch/arm/mach-omap1/clock.h3
-rw-r--r--arch/arm/mach-omap1/clock_data.c53
-rw-r--r--arch/arm/mach-omap1/devices.c3
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile5
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c1
-rw-r--r--arch/arm/mach-omap2/display.c159
-rw-r--r--arch/arm/mach-omap2/display.h29
-rw-r--r--arch/arm/mach-omap2/io.h0
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c37
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c2
-rw-r--r--arch/arm/mach-omap2/pm.c6
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/twl-common.c11
-rw-r--r--arch/arm/mach-omap2/twl-common.h3
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/palm27x.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c4
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-pxa/vpac270.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c2
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c33
-rw-r--r--arch/arm/mach-w90x900/dev.c4
-rw-r--r--arch/arm/mach-w90x900/include/mach/mfp.h3
-rw-r--r--arch/arm/mach-w90x900/include/mach/nuc900_spi.h2
-rw-r--r--arch/arm/mach-w90x900/mfp.c48
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/mmap.c23
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h7
-rw-r--r--arch/arm/plat-mxc/system.c3
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h3
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq-debugfs.c2
-rw-r--r--arch/arm/plat-s5p/sysmmu.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/pd.c2
-rw-r--r--arch/arm/plat-samsung/pwm.c2
-rw-r--r--arch/arm/tools/mach-types1
-rw-r--r--arch/cris/arch-v10/drivers/Kconfig2
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig2
-rw-r--r--arch/microblaze/include/asm/namei.h22
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c9
-rw-r--r--arch/mips/cavium-octeon/smp.c5
-rw-r--r--arch/mips/emma/common/prom.c2
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/gpio.h18
-rw-r--r--arch/mips/include/asm/unistd.h18
-rw-r--r--arch/mips/kernel/cevt-r4k.c38
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_clock.c1
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/lantiq/clk.c2
-rw-r--r--arch/mips/lantiq/devices.c2
-rw-r--r--arch/mips/lantiq/prom.c2
-rw-r--r--arch/mips/lantiq/setup.c2
-rw-r--r--arch/mips/lantiq/xway/clk-ase.c2
-rw-r--r--arch/mips/lantiq/xway/clk-xway.c2
-rw-r--r--arch/mips/lantiq/xway/devices.c2
-rw-r--r--arch/mips/lantiq/xway/dma.c1
-rw-r--r--arch/mips/lantiq/xway/gpio.c2
-rw-r--r--arch/mips/lantiq/xway/gpio_ebu.c2
-rw-r--r--arch/mips/lantiq/xway/gpio_stp.c2
-rw-r--r--arch/mips/lantiq/xway/prom-ase.c2
-rw-r--r--arch/mips/lantiq/xway/prom-xway.c2
-rw-r--r--arch/mips/lantiq/xway/reset.c2
-rw-r--r--arch/mips/nxp/pnx8550/common/pci.c134
-rw-r--r--arch/mips/nxp/pnx8550/common/setup.c143
-rw-r--r--arch/mips/pci/pci-alchemy.c1
-rw-r--r--arch/mips/pci/pci-lantiq.c1
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Makefile6
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts17
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/include/asm/atomic.h48
-rw-r--r--arch/powerpc/include/asm/bitops.h12
-rw-r--r--arch/powerpc/include/asm/futex.h7
-rw-r--r--arch/powerpc/include/asm/kvm.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h2
-rw-r--r--arch/powerpc/include/asm/sections.h2
-rw-r--r--arch/powerpc/include/asm/synch.h9
-rw-r--r--arch/powerpc/kernel/entry_32.S15
-rw-r--r--arch/powerpc/kernel/jump_label.c2
-rw-r--r--arch/powerpc/kernel/kvm.c1
-rw-r--r--arch/powerpc/kernel/misc_32.S2
-rw-r--r--arch/powerpc/kernel/process.c24
-rw-r--r--arch/powerpc/kernel/prom_init.c6
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c1
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_pr.c14
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/lib/feature-fixups.c23
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c2
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c23
-rw-r--r--arch/powerpc/platforms/ps3/platform.h1
-rw-r--r--arch/powerpc/platforms/ps3/smp.c62
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/crypto/crypt_s390.h7
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/pgtable.h12
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/timex.h2
-rw-r--r--arch/s390/include/asm/unistd.h4
-rw-r--r--arch/s390/kernel/compat_wrapper.S20
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/topology.c45
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/intercept.c3
-rw-r--r--arch/s390/kvm/interrupt.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c12
-rw-r--r--arch/s390/kvm/priv.c10
-rw-r--r--arch/s390/kvm/sigp.c45
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/sparc/include/asm/pgtable_32.h20
-rw-r--r--arch/sparc/include/asm/pgtable_64.h20
-rw-r--r--arch/sparc/kernel/entry.h7
-rw-r--r--arch/sparc/kernel/module.c27
-rw-r--r--arch/sparc/kernel/setup_64.c48
-rw-r--r--arch/sparc/kernel/signal32.c18
-rw-r--r--arch/sparc/kernel/signal_32.c30
-rw-r--r--arch/sparc/kernel/signal_64.c42
-rw-r--r--arch/sparc/kernel/sigutil_64.c1
-rw-r--r--arch/sparc/mm/Makefile1
-rw-r--r--arch/sparc/mm/generic_32.c99
-rw-r--r--arch/sparc/mm/generic_64.c165
-rw-r--r--arch/unicore32/Kconfig4
-rw-r--r--arch/unicore32/Kconfig.debug14
-rw-r--r--arch/unicore32/boot/compressed/Makefile4
-rw-r--r--arch/unicore32/include/asm/bitops.h12
-rw-r--r--arch/unicore32/include/asm/processor.h1
-rw-r--r--arch/unicore32/kernel/ksyms.c4
-rw-r--r--arch/unicore32/lib/findbit.S14
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/mach_traps.h2
-rw-r--r--arch/x86/include/asm/mce.h5
-rw-r--r--arch/x86/include/asm/mrst.h7
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/apic/apic.c33
-rw-r--r--arch/x86/kernel/apic/io_apic.c9
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c25
-rw-r--r--arch/x86/kernel/kvmclock.c5
-rw-r--r--arch/x86/kernel/nmi.c3
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/kvm/vmx.c131
-rw-r--r--arch/x86/platform/mrst/mrst.c46
-rw-r--r--arch/x86/um/asm/processor.h2
-rw-r--r--arch/x86/xen/enlighten.c3
-rw-r--r--arch/x86/xen/grant-table.c2
-rw-r--r--block/blk-core.c18
-rw-r--r--block/blk-map.c7
-rw-r--r--block/genhd.c71
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/base/core.c3
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/qos.c18
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/cciss_scsi.c1
-rw-r--r--drivers/block/loop.c47
-rw-r--r--drivers/block/paride/pg.c1
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/devfreq/Kconfig41
-rw-r--r--drivers/devfreq/devfreq.c10
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/dmi_scan.c6
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c57
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h21
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c411
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c92
-rw-r--r--drivers/gpu/drm/radeon/r300.c94
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c210
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/vga/vgaarb.c62
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c12
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/ide/cy82c693.c6
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/piix.c18
-rw-r--r--drivers/ide/triflex.c16
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/mouse/elantech.c26
-rw-r--r--drivers/input/serio/ams_delta_serio.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/leds/led-class.c5
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c9
-rw-r--r--drivers/misc/carma/carma-fpga.c9
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/pch_phub.c81
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c106
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c36
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c117
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c7
-rw-r--r--drivers/net/hippi/Kconfig2
-rw-r--r--drivers/net/usb/asix.c68
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/lg-vl600.c25
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/regd.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c15
-rw-r--r--drivers/net/wireless/b43/xmit.h16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c33
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mwifiex/scan.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c22
-rw-r--r--drivers/net/wireless/wl12xx/scan.c2
-rw-r--r--drivers/of/irq.c16
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c14
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c23
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c4
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c5
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/spi/spi-nuc900.c2
-rw-r--r--drivers/spi/spi-pl022.c8
-rw-r--r--drivers/staging/et131x/Kconfig3
-rw-r--r--drivers/staging/et131x/et131x.c12
-rw-r--r--drivers/staging/iio/industrialio-core.c25
-rw-r--r--drivers/staging/media/as102/as102_drv.c4
-rw-r--r--drivers/staging/media/as102/as102_drv.h3
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/slicoss/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_dcc.c2
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/crisv10.c10
-rw-r--r--drivers/tty/serial/mfd.c4
-rw-r--r--drivers/tty/serial/pch_uart.c19
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/quirks.c27
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c21
-rw-r--r--drivers/usb/gadget/f_mass_storage.c6
-rw-r--r--drivers/usb/gadget/f_midi.c138
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c5
-rw-r--r--drivers/usb/gadget/inode.c5
-rw-r--r--drivers/usb/gadget/pch_udc.c10
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c30
-rw-r--r--drivers/usb/gadget/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-sched.c15
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c26
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/pci-quirks.c57
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c34
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.h8
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c63
-rw-r--r--drivers/usb/serial/ark3116.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/option.c28
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/ene_ub6250.c3
-rw-r--r--drivers/usb/storage/protocol.c7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci.c18
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c355
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c2
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/gntdev.c10
-rw-r--r--drivers/xen/xenbus/xenbus_client.c11
-rw-r--r--fs/bio.c7
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h8
-rw-r--r--fs/btrfs/disk-io.c147
-rw-r--r--fs/btrfs/extent-tree.c153
-rw-r--r--fs/btrfs/extent_io.c36
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/free-space-cache.c65
-rw-r--r--fs/btrfs/inode.c8
-rw-r--r--fs/btrfs/ioctl.c17
-rw-r--r--fs/btrfs/scrub.c7
-rw-r--r--fs/btrfs/super.c44
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/ceph/dir.c2
-rw-r--r--fs/ceph/inode.c9
-rw-r--r--fs/ceph/super.c6
-rw-r--r--fs/dcache.c11
-rw-r--r--fs/ecryptfs/crypto.c26
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ecryptfs/file.c23
-rw-r--r--fs/ecryptfs/inode.c52
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/inode.c3
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/minix/bitmap.c55
-rw-r--r--fs/minix/inode.c25
-rw-r--r--fs/minix/minix.h11
-rw-r--r--fs/namespace.c32
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/file.c91
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c26
-rw-r--r--fs/nfs/proc.c1
-rw-r--r--fs/nfs/read.c14
-rw-r--r--fs/nfs/super.c37
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c69
-rw-r--r--fs/ocfs2/aops.h14
-rw-r--r--fs/ocfs2/cluster/heartbeat.c194
-rw-r--r--fs/ocfs2/cluster/netdebug.c102
-rw-r--r--fs/ocfs2/cluster/tcp.c138
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h56
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c44
-rw-r--r--fs/ocfs2/dlm/dlmlock.c54
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c175
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c164
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlmglue.c21
-rw-r--r--fs/ocfs2/extent_map.c96
-rw-r--r--fs/ocfs2/extent_map.h2
-rw-r--r--fs/ocfs2/file.c96
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h3
-rw-r--r--fs/ocfs2/ioctl.c11
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c53
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/ocfs2.h51
-rw-r--r--fs/ocfs2/quota_local.c23
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c71
-rw-r--r--fs/ocfs2/super.c25
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/pstore/platform.c13
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--include/drm/drm_mode.h2
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/drm/exynos_drm.h9
-rw-r--r--include/drm/radeon_drm.h4
-rw-r--r--include/linux/bio.h10
-rw-r--r--include/linux/ceph/osd_client.h8
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/genhd.h4
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/inet_diag.h3
-rw-r--r--include/linux/init_task.h1
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/mfd/tps65910.h3
-rw-r--r--include/linux/nfs_fs.h3
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci-ats.h6
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pm.h231
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pstore.h4
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/serial.h14
-rw-r--r--include/linux/virtio_config.h2
-rw-r--r--include/linux/virtio_mmio.h2
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/net/bluetooth/l2cap.h7
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/xen/platform_pci.h6
-rw-r--r--kernel/cgroup_freezer.c11
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/spurious.c6
-rw-r--r--kernel/power/hibernate.c37
-rw-r--r--kernel/power/main.c3
-rw-r--r--kernel/time/clocksource.c58
-rw-r--r--kernel/time/timekeeping.c92
-rw-r--r--mm/backing-dev.c8
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page-writeback.c23
-rw-r--r--mm/percpu-vm.c17
-rw-r--r--mm/percpu.c62
-rw-r--r--mm/slub.c42
-rw-r--r--mm/vmalloc.c27
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/l2cap_core.c16
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/ipv4/ah4.c8
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/ip_options.c1
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/route.c120
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/ip6_input.c8
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/l2tp/l2tp_core.c8
-rw-r--r--net/mac80211/mlme.c21
-rw-r--r--net/mac80211/rx.c9
-rw-r--r--net/mac80211/sta_info.c8
-rw-r--r--net/mac80211/util.c4
-rw-r--r--net/rds/Kconfig1
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/wireless/scan.c13
-rw-r--r--security/keys/encrypted-keys/Makefile8
-rw-r--r--security/keys/encrypted-keys/encrypted.c2
-rw-r--r--security/keys/encrypted-keys/encrypted.h3
-rw-r--r--security/keys/user_defined.c3
-rw-r--r--security/smack/smackfs.c115
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_eld.c41
-rw-r--r--sound/pci/hda/hda_local.h3
-rw-r--r--sound/pci/hda/patch_cirrus.c55
-rw-r--r--sound/pci/hda/patch_hdmi.c59
-rw-r--r--sound/pci/hda/patch_realtek.c36
-rw-r--r--sound/pci/hda/patch_sigmatel.c35
-rw-r--r--sound/pci/hda/patch_via.c76
-rw-r--r--sound/pci/lx6464es/lx_core.c23
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/cs4271.c8
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sta32x.c63
-rw-r--r--sound/soc/codecs/sta32x.h1
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm9081.c10
-rw-r--r--sound/soc/codecs/wm9090.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c1
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rwxr-xr-xtools/testing/ktest/ktest.pl16
679 files changed, 7356 insertions, 5343 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 2b5d56127fc..c1eb41cb987 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -206,16 +206,3 @@ Description:
 		when a discarded area is read the discard_zeroes_data
 		parameter will be set to one. Otherwise it will be 0 and
 		the result of reading a discarded area is undefined.
-What:		/sys/block/<disk>/alias
-Date:		Aug 2011
-Contact:	Nao Nishijima <nao.nishijima.xt@hitachi.com>
-Description:
-		A raw device name of a disk does not always point a same disk
-		each boot-up time. Therefore, users have to use persistent
-		device names, which udev creates when the kernel finds a disk,
-		instead of raw device name. However, kernel doesn't show those
-		persistent names on its messages (e.g. dmesg).
-		This file can store an alias of the disk and it would be
-		appeared in kernel messages if it is set. A disk can have an
-		alias which length is up to 255bytes. Users can use alphabets,
-		numbers, "-" and "_" in alias name. This file is writeonce.
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 54883de5d5f..ac3d0018140 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -521,6 +521,11 @@ Here's a description of the fields of <varname>struct uio_mem</varname>:
 
 <itemizedlist>
 <listitem><para>
+<varname>const char *name</varname>: Optional. Set this to help identify
+the memory region, it will show up in the corresponding sysfs node.
+</para></listitem>
+
+<listitem><para>
 <varname>int memtype</varname>: Required if the mapping is used. Set this to
 <varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
 card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
@@ -553,7 +558,7 @@ instead to remember such an address.
 </itemizedlist>
 
 <para>
-Please do not touch the <varname>kobj</varname> element of
+Please do not touch the <varname>map</varname> element of
 <varname>struct uio_mem</varname>! It is used by the UIO framework
 to set up sysfs files for this mapping. Simply leave it alone.
 </para>
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index 71464e09ec1..b79d0a13e7c 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -98,14 +98,12 @@ You must enable "SCSI tape drive support for Smart Array 5xxx" and
98"SCSI support" in your kernel configuration to be able to use SCSI 98"SCSI support" in your kernel configuration to be able to use SCSI
99tape drives with your Smart Array 5xxx controller. 99tape drives with your Smart Array 5xxx controller.
100 100
101Additionally, note that the driver will not engage the SCSI core at init 101Additionally, note that the driver will engage the SCSI core at init
102time. The driver must be directed to dynamically engage the SCSI core via 102time if any tape drives or medium changers are detected. The driver may
103the /proc filesystem entry which the "block" side of the driver creates as 103also be directed to dynamically engage the SCSI core via the /proc filesystem
104/proc/driver/cciss/cciss* at runtime. This is because at driver init time, 104entry which the "block" side of the driver creates as
105the SCSI core may not yet be initialized (because the driver is a block 105/proc/driver/cciss/cciss* at runtime. This is best done via a script.
106driver) and attempting to register it with the SCSI core in such a case 106
107would cause a hang. This is best done via an initialization script
108(typically in /etc/init.d, but could vary depending on distribution).
109For example: 107For example:
110 108
111 for x in /proc/driver/cciss/cciss[0-9]* 109 for x in /proc/driver/cciss/cciss[0-9]*
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b44..874921e9780 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ qcom Qualcomm, Inc.
 ramtron	Ramtron International
 samsung	Samsung Semiconductor
 schindler	Schindler
+sil	Silicon Image
 simtek
 sirf	SiRF Technology, Inc.
 stericsson	ST-Ericsson
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327..7671352216f 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
 Userspace tools for creating and manipulating Btrfs file systems are
 available from the git repository at the following location:
 
-	http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
-	git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+	http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+	git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
 
 These include the following tools:
 
diff --git a/Documentation/i2c/ten-bit-addresses b/Documentation/i2c/ten-bit-addresses
index e9890709c50..cdfe13901b9 100644
--- a/Documentation/i2c/ten-bit-addresses
+++ b/Documentation/i2c/ten-bit-addresses
@@ -1,22 +1,24 @@
 The I2C protocol knows about two kinds of device addresses: normal 7 bit
 addresses, and an extended set of 10 bit addresses. The sets of addresses
 do not intersect: the 7 bit address 0x10 is not the same as the 10 bit
-address 0x10 (though a single device could respond to both of them). You
-select a 10 bit address by adding an extra byte after the address
-byte:
-  S Addr7 Rd/Wr ....
-becomes
-  S 11110 Addr10 Rd/Wr
-S is the start bit, Rd/Wr the read/write bit, and if you count the number
-of bits, you will see the there are 8 after the S bit for 7 bit addresses,
-and 16 after the S bit for 10 bit addresses.
+address 0x10 (though a single device could respond to both of them).
 
-WARNING! The current 10 bit address support is EXPERIMENTAL. There are
-several places in the code that will cause SEVERE PROBLEMS with 10 bit
-addresses, even though there is some basic handling and hooks. Also,
-almost no supported adapter handles the 10 bit addresses correctly.
+I2C messages to and from 10-bit address devices have a different format.
+See the I2C specification for the details.
 
-As soon as a real 10 bit address device is spotted 'in the wild', we
-can and will add proper support. Right now, 10 bit address devices
-are defined by the I2C protocol, but we have never seen a single device
-which supports them.
+The current 10 bit address support is minimal. It should work, however
+you can expect some problems along the way:
+* Not all bus drivers support 10-bit addresses. Some don't because the
+  hardware doesn't support them (SMBus doesn't require 10-bit address
+  support for example), some don't because nobody bothered adding the
+  code (or it's there but not working properly.) Software implementation
+  (i2c-algo-bit) is known to work.
+* Some optional features do not support 10-bit addresses. This is the
+  case of automatic detection and instantiation of devices by their,
+  drivers, for example.
+* Many user-space packages (for example i2c-tools) lack support for
+  10-bit addresses.
+
+Note that 10-bit address devices are still pretty rare, so the limitations
+listed above could stay for a long time, maybe even forever if nobody
+needs them to be fixed.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index cb7f3148035..f049a1ca186 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,7 +20,7 @@ ip_no_pmtu_disc - BOOLEAN
 	default FALSE
 
 min_pmtu - INTEGER
-	default 562 - minimum discovered Path MTU
+	default 552 - minimum discovered Path MTU
 
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel. Increase
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07..3139fb505dc 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
 Subsystem-Level Methods
 -----------------------
 The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class. They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class. They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on. Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
 
 /sys/devices/.../power/wakeup files
 -----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state). These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state). These fields are initialized by bus or device driver code using
 device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
 include/linux/pm_wakeup.h.
 
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
 physically support wakeup events. The device_set_wakeup_capable() routine
-affects this flag. The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value. The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool. It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag. The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device. This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
 
 Whether or not a device is capable of issuing wakeup events is a hardware
 matter, and the kernel is responsible for keeping track of it. By contrast,
 whether or not a wakeup-capable device should issue wakeup events is a policy
 decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file. User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively. This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable(). Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file. User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup. This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable(). Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool. It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
 This information is used by subsystems, like the PCI bus type code, to see
 whether or not to enable the devices' wakeup mechanisms. If device wakeup
 mechanisms are enabled or disabled directly by drivers, they also should use
 device_may_wakeup() to decide what to do during a system sleep transition.
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
 
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism. Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state. Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design. On
+some systems it is impossible to trigger them from system sleep states. In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
 
 /sys/devices/.../power/control files
 ------------------------------------
@@ -249,20 +266,31 @@ for every device before the next phase begins. Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks. The
 various phases always run after tasks have been frozen and before they are
 unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
 
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+All phases use PM domain, bus, type, or class callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+These callbacks are regarded by the PM core as mutually exclusive. Moreover,
+PM domain callbacks always take precedence over bus, type and class callbacks,
+while type callbacks take precedence over bus and class callbacks, and class
+callbacks take precedence over bus callbacks. To be precise, the following
+rules are used to determine which callback to execute in the given phase:
+
+	1. If dev->pm_domain is present, the PM core will attempt to execute the
+	   callback included in dev->pm_domain->ops. If that callback is not
+	   present, no action will be carried out for the given device.
+
+	2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+	   included in dev->type->pm will be executed.
+
+	3. Otherwise, if both dev->class and dev->class->pm are present, the
+	   callback included in dev->class->pm will be executed.
+
+	4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+	   included in dev->bus->pm will be executed.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
 
 These callbacks may in turn invoke device- or driver-specific methods stored in
 dev->driver->pm, but they don't have to.
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
 
 	After the prepare callback method returns, no new children may be
 	registered below the device. The method may also prepare the device or
-	driver in some way for the upcoming system power transition (for
-	example, by allocating additional memory required for this purpose), but
-	it should not put the device into a low-power state.
+	driver in some way for the upcoming system power transition, but it
+	should not put the device into a low-power state.
 
 	2. The suspend methods should quiesce the device to stop it from performing
 	I/O. They also may save the device registers and put it into the
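
As an aside (not part of the patch above), the callback-selection rules the new devices.txt text spells out can be summarized in a small C sketch; the helper name choose_pm_ops() is hypothetical, and the PM core's real implementation lives under drivers/base/power/:

	#include <linux/device.h>
	#include <linux/pm.h>

	static const struct dev_pm_ops *choose_pm_ops(struct device *dev)
	{
		if (dev->pm_domain)
			return &dev->pm_domain->ops;	/* 1. PM domain, if present */
		if (dev->type && dev->type->pm)
			return dev->type->pm;		/* 2. device type */
		if (dev->class && dev->class->pm)
			return dev->class->pm;		/* 3. device class */
		if (dev->bus && dev->bus->pm)
			return dev->bus->pm;		/* 4. bus type */
		return NULL;	/* no subsystem-level callbacks for this device */
	}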
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831..c2ae8bf77d4 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,25 +44,33 @@ struct dev_pm_ops {
 };
 
 The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+  1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+     is present.
+
+  2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+  3. Device class of the device, if both dev->class and dev->class->pm are
+     present.
+
+  4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type. Moreover, the high-priority one will always take precedence over
+a low-priority one. The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
 enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
+to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+->runtime_idle() callbacks may be invoked in atomic context with interrupts
+disabled for a given device. This implies that the callback routines in
+question must not block or sleep, but it also means that the synchronous helper
+functions listed at the end of Section 4 may be used for that device within an
+interrupt handler or generally in an atomic context.
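
For illustration only (not part of the patch above), this is roughly how a platform driver provides the runtime PM callbacks that the subsystem-level callbacks described in the updated runtime_pm.txt text end up invoking; the foo_* names are hypothetical:

	#include <linux/platform_device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		/* put the hardware into a low-power state */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* bring the hardware back to the fully functional state */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.pm	= &foo_pm_ops,
		},
	};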
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index 079cb3df62c..41c8378c0b2 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -97,15 +97,23 @@
 
 	struct serial_rs485 rs485conf;
 
-	/* Set RS485 mode: */
+	/* Enable RS485 mode: */
 	rs485conf.flags |= SER_RS485_ENABLED;
 
+	/* Set logical level for RTS pin equal to 1 when sending: */
+	rs485conf.flags |= SER_RS485_RTS_ON_SEND;
+	/* or, set logical level for RTS pin equal to 0 when sending: */
+	rs485conf.flags &= ~(SER_RS485_RTS_ON_SEND);
+
+	/* Set logical level for RTS pin equal to 1 after sending: */
+	rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
+	/* or, set logical level for RTS pin equal to 0 after sending: */
+	rs485conf.flags &= ~(SER_RS485_RTS_AFTER_SEND);
+
 	/* Set rts delay before send, if needed: */
-	rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND;
 	rs485conf.delay_rts_before_send = ...;
 
 	/* Set rts delay after send, if needed: */
-	rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
 	rs485conf.delay_rts_after_send = ...;
 
 	/* Set this flag if you want to receive data even whilst sending data */
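
For context (and as an illustrative sketch rather than part of the patch above), the fragment changed here is normally used from a complete user-space sequence along these lines, assuming a port such as /dev/ttyS1 whose driver supports the TIOCGRS485/TIOCSRS485 ioctls:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	/* Driver-specific ioctls (values from asm-generic/ioctls.h): */
	#ifndef TIOCGRS485
	#define TIOCGRS485	0x542E
	#endif
	#ifndef TIOCSRS485
	#define TIOCSRS485	0x542F
	#endif

	int main(void)
	{
		struct serial_rs485 rs485conf;
		int fd = open("/dev/ttyS1", O_RDWR);	/* device path is only an example */

		if (fd < 0 || ioctl(fd, TIOCGRS485, &rs485conf) < 0) {
			perror("rs485");
			return 1;
		}

		rs485conf.flags |= SER_RS485_ENABLED;		/* enable RS485 mode */
		rs485conf.flags |= SER_RS485_RTS_ON_SEND;	/* RTS high while sending */
		rs485conf.flags &= ~SER_RS485_RTS_AFTER_SEND;	/* RTS low after sending */
		rs485conf.delay_rts_before_send = 0;		/* delays in milliseconds */
		rs485conf.delay_rts_after_send = 0;

		if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
			perror("TIOCSRS485");

		close(fd);
		return 0;
	}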
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 03e2771ddee..91fee3b45fb 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -579,7 +579,7 @@ Development Tree
 ~~~~~~~~~~~~~~~~
 The latest development codes for HD-audio are found on sound git tree:
 
-- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git
+- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
 
 The master branch or for-next branches can be used as the main
 development branches in general while the HD-audio specific patches
@@ -594,7 +594,7 @@ is, installed via the usual spells: configure, make and make
 install(-modules). See INSTALL in the package. The snapshot tarballs
 are found at:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/snapshot/
+- ftp://ftp.suse.com/pub/people/tiwai/snapshot/
 
 
 Sending a Bug Report
@@ -696,7 +696,7 @@ via hda-verb won't change the mixer value.
 
 The hda-verb program is found in the ftp directory:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/
+- ftp://ftp.suse.com/pub/people/tiwai/misc/
 
 Also a git repository is available:
 
@@ -764,7 +764,7 @@ operation, the jack plugging simulation, etc.
 
 The package is found in:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/
+- ftp://ftp.suse.com/pub/people/tiwai/misc/
 
 A git repository is available:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 071a9967434..44756028499 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -789,6 +789,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 T:	git git://git.pengutronix.de/git/imx/linux-2.6.git
 F:	arch/arm/mach-mx*/
+F:	arch/arm/mach-imx/
 F:	arch/arm/plat-mxc/
 
 ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
 T:	git git://git.linaro.org/people/shawnguo/linux-2.6.git
 F:	arch/arm/mach-imx/*imx6*
 
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M:	Shawn Guo <shawn.guo@linaro.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+T:	git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F:	arch/arm/mach-mxs/
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1789,6 +1797,14 @@ F: include/net/cfg80211.h
1789F: net/wireless/* 1797F: net/wireless/*
1790X: net/wireless/wext* 1798X: net/wireless/wext*
1791 1799
1800CHAR and MISC DRIVERS
1801M: Arnd Bergmann <arnd@arndb.de>
1802M: Greg Kroah-Hartman <greg@kroah.com>
1803T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
1804S: Maintained
1805F: drivers/char/*
1806F: drivers/misc/*
1807
1792CHECKPATCH 1808CHECKPATCH
1793M: Andy Whitcroft <apw@canonical.com> 1809M: Andy Whitcroft <apw@canonical.com>
1794S: Supported 1810S: Supported
@@ -1927,9 +1943,11 @@ S: Maintained
1927F: drivers/connector/ 1943F: drivers/connector/
1928 1944
1929CONTROL GROUPS (CGROUPS) 1945CONTROL GROUPS (CGROUPS)
1930M: Paul Menage <paul@paulmenage.org> 1946M: Tejun Heo <tj@kernel.org>
1931M: Li Zefan <lizf@cn.fujitsu.com> 1947M: Li Zefan <lizf@cn.fujitsu.com>
1932L: containers@lists.linux-foundation.org 1948L: containers@lists.linux-foundation.org
1949L: cgroups@vger.kernel.org
1950T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
1933S: Maintained 1951S: Maintained
1934F: include/linux/cgroup* 1952F: include/linux/cgroup*
1935F: kernel/cgroup* 1953F: kernel/cgroup*
@@ -2584,7 +2602,7 @@ S: Maintained
2584F: drivers/net/ethernet/i825xx/eexpress.* 2602F: drivers/net/ethernet/i825xx/eexpress.*
2585 2603
2586ETHERNET BRIDGE 2604ETHERNET BRIDGE
2587M: Stephen Hemminger <shemminger@linux-foundation.org> 2605M: Stephen Hemminger <shemminger@vyatta.com>
2588L: bridge@lists.linux-foundation.org 2606L: bridge@lists.linux-foundation.org
2589L: netdev@vger.kernel.org 2607L: netdev@vger.kernel.org
2590W: http://www.linuxfoundation.org/en/Net:Bridge 2608W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -3718,7 +3736,7 @@ F: fs/jbd2/
3718F: include/linux/jbd2.h 3736F: include/linux/jbd2.h
3719 3737
3720JSM Neo PCI based serial card 3738JSM Neo PCI based serial card
3721M: Breno Leitao <leitao@linux.vnet.ibm.com> 3739M: Lucas Tavares <lucaskt@linux.vnet.ibm.com>
3722L: linux-serial@vger.kernel.org 3740L: linux-serial@vger.kernel.org
3723S: Maintained 3741S: Maintained
3724F: drivers/tty/serial/jsm/ 3742F: drivers/tty/serial/jsm/
@@ -4304,6 +4322,7 @@ MEMORY RESOURCE CONTROLLER
4304M: Balbir Singh <bsingharora@gmail.com> 4322M: Balbir Singh <bsingharora@gmail.com>
4305M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> 4323M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
4306M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 4324M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4325L: cgroups@vger.kernel.org
4307L: linux-mm@kvack.org 4326L: linux-mm@kvack.org
4308S: Maintained 4327S: Maintained
4309F: mm/memcontrol.c 4328F: mm/memcontrol.c
@@ -4337,7 +4356,7 @@ MIPS
4337M: Ralf Baechle <ralf@linux-mips.org> 4356M: Ralf Baechle <ralf@linux-mips.org>
4338L: linux-mips@linux-mips.org 4357L: linux-mips@linux-mips.org
4339W: http://www.linux-mips.org/ 4358W: http://www.linux-mips.org/
4340T: git git://git.linux-mips.org/pub/scm/linux.git 4359T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
4341Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 4360Q: http://patchwork.linux-mips.org/project/linux-mips/list/
4342S: Supported 4361S: Supported
4343F: Documentation/mips/ 4362F: Documentation/mips/
@@ -4470,7 +4489,7 @@ S: Supported
4470F: drivers/infiniband/hw/nes/ 4489F: drivers/infiniband/hw/nes/
4471 4490
4472NETEM NETWORK EMULATOR 4491NETEM NETWORK EMULATOR
4473M: Stephen Hemminger <shemminger@linux-foundation.org> 4492M: Stephen Hemminger <shemminger@vyatta.com>
4474L: netem@lists.linux-foundation.org 4493L: netem@lists.linux-foundation.org
4475S: Maintained 4494S: Maintained
4476F: net/sched/sch_netem.c 4495F: net/sched/sch_netem.c
@@ -4947,7 +4966,7 @@ F: drivers/char/ppdev.c
4947F: include/linux/ppdev.h 4966F: include/linux/ppdev.h
4948 4967
4949PARAVIRT_OPS INTERFACE 4968PARAVIRT_OPS INTERFACE
4950M: Jeremy Fitzhardinge <jeremy@xensource.com> 4969M: Jeremy Fitzhardinge <jeremy@goop.org>
4951M: Chris Wright <chrisw@sous-sol.org> 4970M: Chris Wright <chrisw@sous-sol.org>
4952M: Alok Kataria <akataria@vmware.com> 4971M: Alok Kataria <akataria@vmware.com>
4953M: Rusty Russell <rusty@rustcorp.com.au> 4972M: Rusty Russell <rusty@rustcorp.com.au>
@@ -5656,7 +5675,6 @@ F: drivers/media/video/*7146*
5656F: include/media/*7146* 5675F: include/media/*7146*
5657 5676
5658SAMSUNG AUDIO (ASoC) DRIVERS 5677SAMSUNG AUDIO (ASoC) DRIVERS
5659M: Jassi Brar <jassisinghbrar@gmail.com>
5660M: Sangbeom Kim <sbkim73@samsung.com> 5678M: Sangbeom Kim <sbkim73@samsung.com>
5661L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5679L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5662S: Supported 5680S: Supported
@@ -5985,7 +6003,7 @@ S: Maintained
5985F: drivers/usb/misc/sisusbvga/ 6003F: drivers/usb/misc/sisusbvga/
5986 6004
5987SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS 6005SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
5988M: Stephen Hemminger <shemminger@linux-foundation.org> 6006M: Stephen Hemminger <shemminger@vyatta.com>
5989L: netdev@vger.kernel.org 6007L: netdev@vger.kernel.org
5990S: Maintained 6008S: Maintained
5991F: drivers/net/ethernet/marvell/sk* 6009F: drivers/net/ethernet/marvell/sk*
@@ -7399,8 +7417,8 @@ S: Maintained
7399F: arch/x86/kernel/cpu/mcheck/* 7417F: arch/x86/kernel/cpu/mcheck/*
7400 7418
7401XEN HYPERVISOR INTERFACE 7419XEN HYPERVISOR INTERFACE
7402M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
7403M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 7420M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
7421M: Jeremy Fitzhardinge <jeremy@goop.org>
7404L: xen-devel@lists.xensource.com (moderated for non-subscribers) 7422L: xen-devel@lists.xensource.com (moderated for non-subscribers)
7405L: virtualization@lists.linux-foundation.org 7423L: virtualization@lists.linux-foundation.org
7406S: Supported 7424S: Supported
@@ -7433,7 +7451,8 @@ F: drivers/xen/*swiotlb*
7433 7451
7434XFS FILESYSTEM 7452XFS FILESYSTEM
7435P: Silicon Graphics Inc 7453P: Silicon Graphics Inc
7436M: Alex Elder <aelder@sgi.com> 7454M: Ben Myers <bpm@sgi.com>
7455M: Alex Elder <elder@kernel.org>
7437M: xfs-masters@oss.sgi.com 7456M: xfs-masters@oss.sgi.com
7438L: xfs@oss.sgi.com 7457L: xfs@oss.sgi.com
7439W: http://oss.sgi.com/projects/xfs 7458W: http://oss.sgi.com/projects/xfs
diff --git a/Makefile b/Makefile
index dab8610c4d6..12aafc20efb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc4
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983..e084b7e981e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1231,7 +1231,7 @@ config ARM_ERRATA_742231
1231 capabilities of the processor. 1231 capabilities of the processor.
1232 1232
1233config PL310_ERRATA_588369 1233config PL310_ERRATA_588369
1234 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" 1234 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
1235 depends on CACHE_L2X0 1235 depends on CACHE_L2X0
1236 help 1236 help
1237 The PL310 L2 cache controller implements three types of Clean & 1237 The PL310 L2 cache controller implements three types of Clean &
@@ -1256,7 +1256,7 @@ config ARM_ERRATA_720789
1256 entries regardless of the ASID. 1256 entries regardless of the ASID.
1257 1257
1258config PL310_ERRATA_727915 1258config PL310_ERRATA_727915
1259 bool "Background Clean & Invalidate by Way operation can cause data corruption" 1259 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
1260 depends on CACHE_L2X0 1260 depends on CACHE_L2X0
1261 help 1261 help
1262 PL310 implements the Clean & Invalidate by Way L2 cache maintenance 1262 PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1289,8 +1289,8 @@ config ARM_ERRATA_751472
1289 operation is received by a CPU before the ICIALLUIS has completed, 1289 operation is received by a CPU before the ICIALLUIS has completed,
1290 potentially leading to corrupted entries in the cache or TLB. 1290 potentially leading to corrupted entries in the cache or TLB.
1291 1291
1292config ARM_ERRATA_753970 1292config PL310_ERRATA_753970
1293 bool "ARM errata: cache sync operation may be faulty" 1293 bool "PL310 errata: cache sync operation may be faulty"
1294 depends on CACHE_PL310 1294 depends on CACHE_PL310
1295 help 1295 help
1296 This option enables the workaround for the 753970 PL310 (r3p0) erratum. 1296 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1352,18 @@ config ARM_ERRATA_764369
1352 relevant cache maintenance functions and sets a specific bit 1352 relevant cache maintenance functions and sets a specific bit
1353 in the diagnostic control register of the SCU. 1353 in the diagnostic control register of the SCU.
1354 1354
1355config PL310_ERRATA_769419
1356 bool "PL310 errata: no automatic Store Buffer drain"
1357 depends on CACHE_L2X0
1358 help
1359 On revisions of the PL310 prior to r3p2, the Store Buffer does
1360 not automatically drain. This can cause normal, non-cacheable
1361 writes to be retained when the memory system is idle, leading
1362 to suboptimal I/O performance for drivers using coherent DMA.
1363 This option adds a write barrier to the cpu_idle loop so that,
1364 on systems with an outer cache, the store buffer is drained
1365 explicitly.
1366
1355endmenu 1367endmenu
1356 1368
1357source "arch/arm/common/Kconfig" 1369source "arch/arm/common/Kconfig"
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 176062ac7f0..5df26a9976a 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -65,6 +65,8 @@ $(obj)/%.dtb: $(src)/dts/%.dts
65 65
66$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y)) 66$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
67 67
68clean-files := *.dtb
69
68quiet_cmd_uimage = UIMAGE $@ 70quiet_cmd_uimage = UIMAGE $@
69 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \ 71 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
70 -C none -a $(LOADADDR) -e $(STARTADDR) \ 72 -C none -a $(LOADADDR) -e $(STARTADDR) \
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index a1feb6b4f9f..b2dc2dd7f1d 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -612,7 +612,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
612 sizeof(u32)); 612 sizeof(u32));
613 BUG_ON(!gic->saved_ppi_conf); 613 BUG_ON(!gic->saved_ppi_conf);
614 614
615 cpu_pm_register_notifier(&gic_notifier_block); 615 if (gic == &gic_data[0])
616 cpu_pm_register_notifier(&gic_notifier_block);
616} 617}
617#else 618#else
618static void __init gic_pm_init(struct gic_chip_data *gic) 619static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -696,12 +697,14 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
696 * For primary GICs, skip over SGIs. 697 * For primary GICs, skip over SGIs.
697 * For secondary GICs, skip over PPIs, too. 698 * For secondary GICs, skip over PPIs, too.
698 */ 699 */
700 domain->hwirq_base = 32;
699 if (gic_nr == 0) { 701 if (gic_nr == 0) {
700 domain->hwirq_base = 16; 702 if ((irq_start & 31) > 0) {
701 if (irq_start > 0) 703 domain->hwirq_base = 16;
702 irq_start = (irq_start & ~31) + 16; 704 if (irq_start != -1)
703 } else 705 irq_start = (irq_start & ~31) + 16;
704 domain->hwirq_base = 32; 706 }
707 }
705 708
706 /* 709 /*
707 * Find out how many interrupts are supported. 710 * Find out how many interrupts are supported.
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd..f407a6b35d3 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1211 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); 1211 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1212 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); 1212 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1213 1213
1214 ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT); 1214 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1215 ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT); 1215 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1216 1216
1217 ccr |= (rqc->swap << CC_SWAP_SHFT); 1217 ccr |= (rqc->swap << CC_SWAP_SHFT);
1218 1218
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
1623 return -1; 1623 return -1;
1624} 1624}
1625 1625
1626static bool _chan_ns(const struct pl330_info *pi, int i)
1627{
1628 return pi->pcfg.irq_ns & (1 << i);
1629}
1630
1626/* Upon success, returns IdentityToken for the 1631/* Upon success, returns IdentityToken for the
1627 * allocated channel, NULL otherwise. 1632 * allocated channel, NULL otherwise.
1628 */ 1633 */
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
1647 1652
1648 for (i = 0; i < chans; i++) { 1653 for (i = 0; i < chans; i++) {
1649 thrd = &pl330->channels[i]; 1654 thrd = &pl330->channels[i];
1650 if (thrd->free) { 1655 if ((thrd->free) && (!_manager_ns(thrd) ||
1656 _chan_ns(pi, i))) {
1651 thrd->ev = _alloc_event(thrd); 1657 thrd->ev = _alloc_event(thrd);
1652 if (thrd->ev >= 0) { 1658 if (thrd->ev >= 0) {
1653 thrd->free = false; 1659 thrd->free = false;
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd9336..8826eb218e7 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
38# CONFIG_IPV6 is not set 38# CONFIG_IPV6 is not set
39CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 39CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
40CONFIG_MTD=y 40CONFIG_MTD=y
41CONFIG_MTD_PARTITIONS=y
42CONFIG_MTD_CMDLINE_PARTS=y 41CONFIG_MTD_CMDLINE_PARTS=y
43CONFIG_MTD_CHAR=y 42CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 43CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
52CONFIG_BLK_DEV_LOOP=y 51CONFIG_BLK_DEV_LOOP=y
53CONFIG_BLK_DEV_RAM=y 52CONFIG_BLK_DEV_RAM=y
54CONFIG_BLK_DEV_RAM_SIZE=8192 53CONFIG_BLK_DEV_RAM_SIZE=8192
55CONFIG_ATMEL_SSC=y
56CONFIG_SCSI=y 54CONFIG_SCSI=y
57CONFIG_BLK_DEV_SD=y 55CONFIG_BLK_DEV_SD=y
58CONFIG_SCSI_MULTI_LUN=y 56CONFIG_SCSI_MULTI_LUN=y
59CONFIG_NETDEVICES=y 57CONFIG_NETDEVICES=y
60CONFIG_NET_ETHERNET=y
61CONFIG_MII=y 58CONFIG_MII=y
62CONFIG_MACB=y 59CONFIG_MACB=y
63# CONFIG_NETDEV_1000 is not set
64# CONFIG_NETDEV_10000 is not set
65# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 60# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
66CONFIG_INPUT_EVDEV=y 61CONFIG_INPUT_EVDEV=y
67# CONFIG_INPUT_KEYBOARD is not set 62# CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
81CONFIG_WATCHDOG_NOWAYOUT=y 76CONFIG_WATCHDOG_NOWAYOUT=y
82CONFIG_FB=y 77CONFIG_FB=y
83CONFIG_FB_ATMEL=y 78CONFIG_FB_ATMEL=y
84# CONFIG_VGA_CONSOLE is not set
85CONFIG_LOGO=y 79CONFIG_LOGO=y
86# CONFIG_LOGO_LINUX_MONO is not set 80# CONFIG_LOGO_LINUX_MONO is not set
87# CONFIG_LOGO_LINUX_CLUT224 is not set 81# CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
99CONFIG_RTC_CLASS=y 93CONFIG_RTC_CLASS=y
100CONFIG_RTC_DRV_AT91SAM9=y 94CONFIG_RTC_DRV_AT91SAM9=y
101CONFIG_EXT2_FS=y 95CONFIG_EXT2_FS=y
102CONFIG_INOTIFY=y
103CONFIG_VFAT_FS=y 96CONFIG_VFAT_FS=y
104CONFIG_TMPFS=y 97CONFIG_TMPFS=y
105CONFIG_JFFS2_FS=y 98CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c98542..bbe4e1a1f5d 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
5CONFIG_IKCONFIG=y 5CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 6CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
10CONFIG_MODULES=y 9CONFIG_MODULES=y
11CONFIG_MODULE_FORCE_LOAD=y 10CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
56CONFIG_IP_PNP_DHCP=y 55CONFIG_IP_PNP_DHCP=y
57CONFIG_IP_PNP_BOOTP=y 56CONFIG_IP_PNP_BOOTP=y
58CONFIG_NET_IPIP=m 57CONFIG_NET_IPIP=m
59CONFIG_NET_IPGRE=m
60CONFIG_INET_AH=m 58CONFIG_INET_AH=m
61CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
62CONFIG_INET_IPCOMP=m 60CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
75CONFIG_BRIDGE=m 73CONFIG_BRIDGE=m
76CONFIG_VLAN_8021Q=m 74CONFIG_VLAN_8021Q=m
77CONFIG_BT=m 75CONFIG_BT=m
78CONFIG_BT_L2CAP=m
79CONFIG_BT_SCO=m
80CONFIG_BT_RFCOMM=m
81CONFIG_BT_RFCOMM_TTY=y
82CONFIG_BT_BNEP=m
83CONFIG_BT_BNEP_MC_FILTER=y
84CONFIG_BT_BNEP_PROTO_FILTER=y
85CONFIG_BT_HIDP=m
86CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 76CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
87CONFIG_MTD=y 77CONFIG_MTD=y
88CONFIG_MTD_CONCAT=y
89CONFIG_MTD_PARTITIONS=y
90CONFIG_MTD_CMDLINE_PARTS=y 78CONFIG_MTD_CMDLINE_PARTS=y
91CONFIG_MTD_AFS_PARTS=y 79CONFIG_MTD_AFS_PARTS=y
92CONFIG_MTD_CHAR=y 80CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
108CONFIG_BLK_DEV_NBD=y 96CONFIG_BLK_DEV_NBD=y
109CONFIG_BLK_DEV_RAM=y 97CONFIG_BLK_DEV_RAM=y
110CONFIG_BLK_DEV_RAM_SIZE=8192 98CONFIG_BLK_DEV_RAM_SIZE=8192
111CONFIG_ATMEL_TCLIB=y
112CONFIG_EEPROM_LEGACY=m
113CONFIG_SCSI=y 99CONFIG_SCSI=y
114CONFIG_BLK_DEV_SD=y 100CONFIG_BLK_DEV_SD=y
115CONFIG_BLK_DEV_SR=m 101CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
119# CONFIG_SCSI_LOWLEVEL is not set 105# CONFIG_SCSI_LOWLEVEL is not set
120CONFIG_NETDEVICES=y 106CONFIG_NETDEVICES=y
121CONFIG_TUN=m 107CONFIG_TUN=m
108CONFIG_ARM_AT91_ETHER=y
122CONFIG_PHYLIB=y 109CONFIG_PHYLIB=y
123CONFIG_DAVICOM_PHY=y 110CONFIG_DAVICOM_PHY=y
124CONFIG_SMSC_PHY=y 111CONFIG_SMSC_PHY=y
125CONFIG_MICREL_PHY=y 112CONFIG_MICREL_PHY=y
126CONFIG_NET_ETHERNET=y 113CONFIG_PPP=y
127CONFIG_ARM_AT91_ETHER=y 114CONFIG_PPP_BSDCOMP=y
128# CONFIG_NETDEV_1000 is not set 115CONFIG_PPP_DEFLATE=y
129# CONFIG_NETDEV_10000 is not set 116CONFIG_PPP_FILTER=y
117CONFIG_PPP_MPPE=m
118CONFIG_PPP_MULTILINK=y
119CONFIG_PPPOE=m
120CONFIG_PPP_ASYNC=y
121CONFIG_SLIP=m
122CONFIG_SLIP_COMPRESSED=y
123CONFIG_SLIP_SMART=y
124CONFIG_SLIP_MODE_SLIP6=y
130CONFIG_USB_CATC=m 125CONFIG_USB_CATC=m
131CONFIG_USB_KAWETH=m 126CONFIG_USB_KAWETH=m
132CONFIG_USB_PEGASUS=m 127CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
139CONFIG_USB_ALI_M5632=y 134CONFIG_USB_ALI_M5632=y
140CONFIG_USB_AN2720=y 135CONFIG_USB_AN2720=y
141CONFIG_USB_EPSON2888=y 136CONFIG_USB_EPSON2888=y
142CONFIG_PPP=y
143CONFIG_PPP_MULTILINK=y
144CONFIG_PPP_FILTER=y
145CONFIG_PPP_ASYNC=y
146CONFIG_PPP_DEFLATE=y
147CONFIG_PPP_BSDCOMP=y
148CONFIG_PPP_MPPE=m
149CONFIG_PPPOE=m
150CONFIG_SLIP=m
151CONFIG_SLIP_COMPRESSED=y
152CONFIG_SLIP_SMART=y
153CONFIG_SLIP_MODE_SLIP6=y
154# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 137# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
155CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 138CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
156CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 139CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
158CONFIG_KEYBOARD_GPIO=y 141CONFIG_KEYBOARD_GPIO=y
159# CONFIG_INPUT_MOUSE is not set 142# CONFIG_INPUT_MOUSE is not set
160CONFIG_INPUT_TOUCHSCREEN=y 143CONFIG_INPUT_TOUCHSCREEN=y
144CONFIG_LEGACY_PTY_COUNT=32
161CONFIG_SERIAL_ATMEL=y 145CONFIG_SERIAL_ATMEL=y
162CONFIG_SERIAL_ATMEL_CONSOLE=y 146CONFIG_SERIAL_ATMEL_CONSOLE=y
163CONFIG_LEGACY_PTY_COUNT=32
164CONFIG_HW_RANDOM=y 147CONFIG_HW_RANDOM=y
165CONFIG_I2C=y 148CONFIG_I2C=y
166CONFIG_I2C_CHARDEV=y 149CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
290CONFIG_NFS_V4=y 273CONFIG_NFS_V4=y
291CONFIG_ROOT_NFS=y 274CONFIG_ROOT_NFS=y
292CONFIG_NFSD=y 275CONFIG_NFSD=y
293CONFIG_SMB_FS=m
294CONFIG_CIFS=m 276CONFIG_CIFS=m
295CONFIG_PARTITION_ADVANCED=y 277CONFIG_PARTITION_ADVANCED=y
296CONFIG_MAC_PARTITION=y 278CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
335CONFIG_MAGIC_SYSRQ=y 317CONFIG_MAGIC_SYSRQ=y
336CONFIG_DEBUG_FS=y 318CONFIG_DEBUG_FS=y
337CONFIG_DEBUG_KERNEL=y 319CONFIG_DEBUG_KERNEL=y
338# CONFIG_RCU_CPU_STALL_DETECTOR is not set
339# CONFIG_FTRACE is not set 320# CONFIG_FTRACE is not set
340CONFIG_CRYPTO_PCBC=y 321CONFIG_CRYPTO_PCBC=y
341CONFIG_CRYPTO_SHA1=y 322CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413b..505b3765f87 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
12# CONFIG_IOSCHED_CFQ is not set 12# CONFIG_IOSCHED_CFQ is not set
13CONFIG_ARCH_AT91=y 13CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9260=y 14CONFIG_ARCH_AT91SAM9260=y
15CONFIG_ARCH_AT91SAM9260_SAM9XE=y
15CONFIG_MACH_AT91SAM9260EK=y 16CONFIG_MACH_AT91SAM9260EK=y
17CONFIG_MACH_CAM60=y
18CONFIG_MACH_SAM9_L9260=y
19CONFIG_MACH_AFEB9260=y
20CONFIG_MACH_USB_A9260=y
21CONFIG_MACH_QIL_A9260=y
22CONFIG_MACH_CPU9260=y
23CONFIG_MACH_FLEXIBITY=y
24CONFIG_MACH_SNAPPER_9260=y
25CONFIG_MACH_AT91SAM_DT=y
16CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 26CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
17# CONFIG_ARM_THUMB is not set 27# CONFIG_ARM_THUMB is not set
18CONFIG_ZBOOT_ROM_TEXT=0x0 28CONFIG_ZBOOT_ROM_TEXT=0x0
19CONFIG_ZBOOT_ROM_BSS=0x0 29CONFIG_ZBOOT_ROM_BSS=0x0
30CONFIG_ARM_APPENDED_DTB=y
31CONFIG_ARM_ATAG_DTB_COMPAT=y
20CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 32CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
21CONFIG_FPE_NWFPE=y 33CONFIG_FPE_NWFPE=y
22CONFIG_NET=y 34CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
33CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 45CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
34CONFIG_BLK_DEV_RAM=y 46CONFIG_BLK_DEV_RAM=y
35CONFIG_BLK_DEV_RAM_SIZE=8192 47CONFIG_BLK_DEV_RAM_SIZE=8192
36CONFIG_ATMEL_SSC=y
37CONFIG_SCSI=y 48CONFIG_SCSI=y
38CONFIG_BLK_DEV_SD=y 49CONFIG_BLK_DEV_SD=y
39CONFIG_SCSI_MULTI_LUN=y 50CONFIG_SCSI_MULTI_LUN=y
40CONFIG_NETDEVICES=y 51CONFIG_NETDEVICES=y
41CONFIG_NET_ETHERNET=y
42CONFIG_MII=y 52CONFIG_MII=y
43CONFIG_MACB=y 53CONFIG_MACB=y
44# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 54# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
55CONFIG_WATCHDOG=y 65CONFIG_WATCHDOG=y
56CONFIG_WATCHDOG_NOWAYOUT=y 66CONFIG_WATCHDOG_NOWAYOUT=y
57CONFIG_AT91SAM9X_WATCHDOG=y 67CONFIG_AT91SAM9X_WATCHDOG=y
58# CONFIG_VGA_CONSOLE is not set
59# CONFIG_USB_HID is not set 68# CONFIG_USB_HID is not set
60CONFIG_USB=y 69CONFIG_USB=y
61CONFIG_USB_DEVICEFS=y 70CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
71CONFIG_RTC_CLASS=y 80CONFIG_RTC_CLASS=y
72CONFIG_RTC_DRV_AT91SAM9=y 81CONFIG_RTC_DRV_AT91SAM9=y
73CONFIG_EXT2_FS=y 82CONFIG_EXT2_FS=y
74CONFIG_INOTIFY=y
75CONFIG_VFAT_FS=y 83CONFIG_VFAT_FS=y
76CONFIG_TMPFS=y 84CONFIG_TMPFS=y
77CONFIG_CRAMFS=y 85CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d7929..9123568d9a8 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9G20=y 14CONFIG_ARCH_AT91SAM9G20=y
15CONFIG_MACH_AT91SAM9G20EK=y 15CONFIG_MACH_AT91SAM9G20EK=y
16CONFIG_MACH_AT91SAM9G20EK_2MMC=y 16CONFIG_MACH_AT91SAM9G20EK_2MMC=y
17CONFIG_MACH_CPU9G20=y
18CONFIG_MACH_ACMENETUSFOXG20=y
19CONFIG_MACH_PORTUXG20=y
20CONFIG_MACH_STAMP9G20=y
21CONFIG_MACH_PCONTROL_G20=y
22CONFIG_MACH_GSIA18S=y
23CONFIG_MACH_USB_A9G20=y
24CONFIG_MACH_SNAPPER_9260=y
25CONFIG_MACH_AT91SAM_DT=y
17CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 26CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
18# CONFIG_ARM_THUMB is not set 27# CONFIG_ARM_THUMB is not set
19CONFIG_AEABI=y 28CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
21CONFIG_LEDS_CPU=y 30CONFIG_LEDS_CPU=y
22CONFIG_ZBOOT_ROM_TEXT=0x0 31CONFIG_ZBOOT_ROM_TEXT=0x0
23CONFIG_ZBOOT_ROM_BSS=0x0 32CONFIG_ZBOOT_ROM_BSS=0x0
33CONFIG_ARM_APPENDED_DTB=y
34CONFIG_ARM_ATAG_DTB_COMPAT=y
24CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 35CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
25CONFIG_FPE_NWFPE=y 36CONFIG_FPE_NWFPE=y
26CONFIG_PM=y
27CONFIG_NET=y 37CONFIG_NET=y
28CONFIG_PACKET=y 38CONFIG_PACKET=y
29CONFIG_UNIX=y 39CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
37# CONFIG_IPV6 is not set 47# CONFIG_IPV6 is not set
38CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 48CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
39CONFIG_MTD=y 49CONFIG_MTD=y
40CONFIG_MTD_CONCAT=y
41CONFIG_MTD_PARTITIONS=y
42CONFIG_MTD_CMDLINE_PARTS=y 50CONFIG_MTD_CMDLINE_PARTS=y
43CONFIG_MTD_CHAR=y 51CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 52CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
48CONFIG_BLK_DEV_LOOP=y 56CONFIG_BLK_DEV_LOOP=y
49CONFIG_BLK_DEV_RAM=y 57CONFIG_BLK_DEV_RAM=y
50CONFIG_BLK_DEV_RAM_SIZE=8192 58CONFIG_BLK_DEV_RAM_SIZE=8192
51CONFIG_ATMEL_SSC=y
52CONFIG_SCSI=y 59CONFIG_SCSI=y
53CONFIG_BLK_DEV_SD=y 60CONFIG_BLK_DEV_SD=y
54CONFIG_SCSI_MULTI_LUN=y 61CONFIG_SCSI_MULTI_LUN=y
55# CONFIG_SCSI_LOWLEVEL is not set 62# CONFIG_SCSI_LOWLEVEL is not set
56CONFIG_NETDEVICES=y 63CONFIG_NETDEVICES=y
57CONFIG_NET_ETHERNET=y
58CONFIG_MII=y 64CONFIG_MII=y
59CONFIG_MACB=y 65CONFIG_MACB=y
60# CONFIG_NETDEV_1000 is not set
61# CONFIG_NETDEV_10000 is not set
62# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 66# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
63CONFIG_INPUT_MOUSEDEV_SCREEN_X=320 67CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
64CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240 68CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
66# CONFIG_KEYBOARD_ATKBD is not set 70# CONFIG_KEYBOARD_ATKBD is not set
67CONFIG_KEYBOARD_GPIO=y 71CONFIG_KEYBOARD_GPIO=y
68# CONFIG_INPUT_MOUSE is not set 72# CONFIG_INPUT_MOUSE is not set
73CONFIG_LEGACY_PTY_COUNT=16
69CONFIG_SERIAL_ATMEL=y 74CONFIG_SERIAL_ATMEL=y
70CONFIG_SERIAL_ATMEL_CONSOLE=y 75CONFIG_SERIAL_ATMEL_CONSOLE=y
71CONFIG_LEGACY_PTY_COUNT=16
72CONFIG_HW_RANDOM=y 76CONFIG_HW_RANDOM=y
73CONFIG_SPI=y 77CONFIG_SPI=y
74CONFIG_SPI_ATMEL=y 78CONFIG_SPI_ATMEL=y
75CONFIG_SPI_SPIDEV=y 79CONFIG_SPI_SPIDEV=y
76# CONFIG_HWMON is not set 80# CONFIG_HWMON is not set
77# CONFIG_VGA_CONSOLE is not set
78CONFIG_SOUND=y 81CONFIG_SOUND=y
79CONFIG_SND=y 82CONFIG_SND=y
80CONFIG_SND_SEQUENCER=y 83CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
82CONFIG_SND_PCM_OSS=y 85CONFIG_SND_PCM_OSS=y
83CONFIG_SND_SEQUENCER_OSS=y 86CONFIG_SND_SEQUENCER_OSS=y
84# CONFIG_SND_VERBOSE_PROCFS is not set 87# CONFIG_SND_VERBOSE_PROCFS is not set
85CONFIG_SND_AT73C213=y
86CONFIG_USB=y 88CONFIG_USB=y
87CONFIG_USB_DEVICEFS=y 89CONFIG_USB_DEVICEFS=y
88# CONFIG_USB_DEVICE_CLASS is not set 90# CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
105CONFIG_RTC_CLASS=y 107CONFIG_RTC_CLASS=y
106CONFIG_RTC_DRV_AT91SAM9=y 108CONFIG_RTC_DRV_AT91SAM9=y
107CONFIG_EXT2_FS=y 109CONFIG_EXT2_FS=y
108CONFIG_INOTIFY=y
109CONFIG_MSDOS_FS=y 110CONFIG_MSDOS_FS=y
110CONFIG_VFAT_FS=y 111CONFIG_VFAT_FS=y
111CONFIG_TMPFS=y 112CONFIG_TMPFS=y
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4..606d48f3b8f 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
18CONFIG_ARCH_AT91=y 18CONFIG_ARCH_AT91=y
19CONFIG_ARCH_AT91SAM9G45=y 19CONFIG_ARCH_AT91SAM9G45=y
20CONFIG_MACH_AT91SAM9M10G45EK=y 20CONFIG_MACH_AT91SAM9M10G45EK=y
21CONFIG_MACH_AT91SAM_DT=y
21CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 22CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
22CONFIG_AT91_SLOW_CLOCK=y 23CONFIG_AT91_SLOW_CLOCK=y
23CONFIG_AEABI=y 24CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
73# CONFIG_SCSI_LOWLEVEL is not set 74# CONFIG_SCSI_LOWLEVEL is not set
74CONFIG_NETDEVICES=y 75CONFIG_NETDEVICES=y
75CONFIG_MII=y 76CONFIG_MII=y
76CONFIG_DAVICOM_PHY=y
77CONFIG_NET_ETHERNET=y
78CONFIG_MACB=y 77CONFIG_MACB=y
79# CONFIG_NETDEV_1000 is not set 78CONFIG_DAVICOM_PHY=y
80# CONFIG_NETDEV_10000 is not set
81CONFIG_LIBERTAS_THINFIRM=m 79CONFIG_LIBERTAS_THINFIRM=m
82CONFIG_LIBERTAS_THINFIRM_USB=m 80CONFIG_LIBERTAS_THINFIRM_USB=m
83CONFIG_AT76C50X_USB=m 81CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
131CONFIG_SPI=y 129CONFIG_SPI=y
132CONFIG_SPI_ATMEL=y 130CONFIG_SPI_ATMEL=y
133# CONFIG_HWMON is not set 131# CONFIG_HWMON is not set
134# CONFIG_MFD_SUPPORT is not set
135CONFIG_FB=y 132CONFIG_FB=y
136CONFIG_FB_ATMEL=y 133CONFIG_FB_ATMEL=y
137CONFIG_FB_UDL=m 134CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03f..ad562ee6420 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y
23CONFIG_UNIX=y 23CONFIG_UNIX=y
24CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 24CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
25CONFIG_MTD=y 25CONFIG_MTD=y
26CONFIG_MTD_CONCAT=y
27CONFIG_MTD_PARTITIONS=y
28CONFIG_MTD_CMDLINE_PARTS=y 26CONFIG_MTD_CMDLINE_PARTS=y
29CONFIG_MTD_CHAR=y 27CONFIG_MTD_CHAR=y
30CONFIG_MTD_BLOCK=y 28CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
35CONFIG_BLK_DEV_RAM=y 33CONFIG_BLK_DEV_RAM=y
36CONFIG_BLK_DEV_RAM_COUNT=4 34CONFIG_BLK_DEV_RAM_COUNT=4
37CONFIG_BLK_DEV_RAM_SIZE=24576 35CONFIG_BLK_DEV_RAM_SIZE=24576
38CONFIG_ATMEL_SSC=y
39CONFIG_SCSI=y 36CONFIG_SCSI=y
40CONFIG_BLK_DEV_SD=y 37CONFIG_BLK_DEV_SD=y
41CONFIG_SCSI_MULTI_LUN=y 38CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
62CONFIG_AT91SAM9X_WATCHDOG=y 59CONFIG_AT91SAM9X_WATCHDOG=y
63CONFIG_FB=y 60CONFIG_FB=y
64CONFIG_FB_ATMEL=y 61CONFIG_FB_ATMEL=y
65# CONFIG_VGA_CONSOLE is not set
66CONFIG_MMC=y 62CONFIG_MMC=y
67CONFIG_MMC_AT91=m 63CONFIG_MMC_AT91=m
68CONFIG_RTC_CLASS=y 64CONFIG_RTC_CLASS=y
69CONFIG_RTC_DRV_AT91SAM9=y 65CONFIG_RTC_DRV_AT91SAM9=y
70CONFIG_EXT2_FS=y 66CONFIG_EXT2_FS=y
71CONFIG_INOTIFY=y
72CONFIG_MSDOS_FS=y 67CONFIG_MSDOS_FS=y
73CONFIG_VFAT_FS=y 68CONFIG_VFAT_FS=y
74CONFIG_TMPFS=y 69CONFIG_TMPFS=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346e..d95763d5f0d 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
287# CONFIG_USB_DEVICE_CLASS is not set 287# CONFIG_USB_DEVICE_CLASS is not set
288CONFIG_USB_OHCI_HCD=y 288CONFIG_USB_OHCI_HCD=y
289CONFIG_USB_GADGET=y 289CONFIG_USB_GADGET=y
290CONFIG_USB_GADGET_PXA27X=y 290CONFIG_USB_PXA27X=y
291CONFIG_USB_ETH=m 291CONFIG_USB_ETH=m
292# CONFIG_USB_ETH_RNDIS is not set 292# CONFIG_USB_ETH_RNDIS is not set
293CONFIG_MMC=y 293CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af03..fd996bb1302 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
263# CONFIG_USB_DEVICE_CLASS is not set 263# CONFIG_USB_DEVICE_CLASS is not set
264CONFIG_USB_OHCI_HCD=y 264CONFIG_USB_OHCI_HCD=y
265CONFIG_USB_GADGET=y 265CONFIG_USB_GADGET=y
266CONFIG_USB_GADGET_PXA27X=y 266CONFIG_USB_PXA27X=y
267CONFIG_USB_ETH=m 267CONFIG_USB_ETH=m
268# CONFIG_USB_ETH_RNDIS is not set 268# CONFIG_USB_ETH_RNDIS is not set
269CONFIG_MMC=y 269CONFIG_MMC=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d4e9a..443675d317e 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
132CONFIG_USB_OHCI_HCD=y 132CONFIG_USB_OHCI_HCD=y
133CONFIG_USB_GADGET=y 133CONFIG_USB_GADGET=y
134CONFIG_USB_GADGET_VBUS_DRAW=500 134CONFIG_USB_GADGET_VBUS_DRAW=500
135CONFIG_USB_GADGET_PXA27X=y 135CONFIG_USB_PXA27X=y
136CONFIG_USB_ETH=m 136CONFIG_USB_ETH=m
137# CONFIG_USB_ETH_RNDIS is not set 137# CONFIG_USB_ETH_RNDIS is not set
138CONFIG_USB_GADGETFS=m 138CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7b63462b349..a7e77758137 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,7 +48,6 @@ CONFIG_MACH_SX1=y
48CONFIG_MACH_NOKIA770=y 48CONFIG_MACH_NOKIA770=y
49CONFIG_MACH_AMS_DELTA=y 49CONFIG_MACH_AMS_DELTA=y
50CONFIG_MACH_OMAP_GENERIC=y 50CONFIG_MACH_OMAP_GENERIC=y
51CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
52CONFIG_OMAP_ARM_216MHZ=y 51CONFIG_OMAP_ARM_216MHZ=y
53CONFIG_OMAP_ARM_195MHZ=y 52CONFIG_OMAP_ARM_195MHZ=y
54CONFIG_OMAP_ARM_192MHZ=y 53CONFIG_OMAP_ARM_192MHZ=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 4a5a12681be..374000ec4e4 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
14CONFIG_ARCH_U300=y 14CONFIG_ARCH_U300=y
15CONFIG_MACH_U300=y 15CONFIG_MACH_U300=y
16CONFIG_MACH_U300_BS335=y 16CONFIG_MACH_U300_BS335=y
17CONFIG_MACH_U300_DUAL_RAM=y
18CONFIG_U300_DEBUG=y
19CONFIG_MACH_U300_SPIDUMMY=y 17CONFIG_MACH_U300_SPIDUMMY=y
20CONFIG_NO_HZ=y 18CONFIG_NO_HZ=y
21CONFIG_HIGH_RES_TIMERS=y 19CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
26CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072" 24CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
27CONFIG_CPU_IDLE=y 25CONFIG_CPU_IDLE=y
28CONFIG_FPE_NWFPE=y 26CONFIG_FPE_NWFPE=y
29CONFIG_PM=y
30# CONFIG_SUSPEND is not set 27# CONFIG_SUSPEND is not set
31CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 28CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
32# CONFIG_PREVENT_FIRMWARE_BUILD is not set 29# CONFIG_PREVENT_FIRMWARE_BUILD is not set
33# CONFIG_MISC_DEVICES is not set 30CONFIG_MTD=y
31CONFIG_MTD_CMDLINE_PARTS=y
32CONFIG_MTD_NAND=y
33CONFIG_MTD_NAND_FSMC=y
34# CONFIG_INPUT_MOUSEDEV is not set 34# CONFIG_INPUT_MOUSEDEV is not set
35CONFIG_INPUT_EVDEV=y 35CONFIG_INPUT_EVDEV=y
36# CONFIG_KEYBOARD_ATKBD is not set 36# CONFIG_KEYBOARD_ATKBD is not set
37# CONFIG_INPUT_MOUSE is not set 37# CONFIG_INPUT_MOUSE is not set
38# CONFIG_SERIO is not set 38# CONFIG_SERIO is not set
39CONFIG_LEGACY_PTY_COUNT=16
39CONFIG_SERIAL_AMBA_PL011=y 40CONFIG_SERIAL_AMBA_PL011=y
40CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 41CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
41CONFIG_LEGACY_PTY_COUNT=16
42# CONFIG_HW_RANDOM is not set 42# CONFIG_HW_RANDOM is not set
43CONFIG_I2C=y 43CONFIG_I2C=y
44# CONFIG_HWMON is not set 44# CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
51# CONFIG_HID_SUPPORT is not set 51# CONFIG_HID_SUPPORT is not set
52# CONFIG_USB_SUPPORT is not set 52# CONFIG_USB_SUPPORT is not set
53CONFIG_MMC=y 53CONFIG_MMC=y
54CONFIG_MMC_CLKGATE=y
54CONFIG_MMC_ARMMMCI=y 55CONFIG_MMC_ARMMMCI=y
55CONFIG_RTC_CLASS=y 56CONFIG_RTC_CLASS=y
56# CONFIG_RTC_HCTOSYS is not set 57# CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
65CONFIG_NLS_ISO8859_1=y 66CONFIG_NLS_ISO8859_1=y
66CONFIG_PRINTK_TIME=y 67CONFIG_PRINTK_TIME=y
67CONFIG_DEBUG_FS=y 68CONFIG_DEBUG_FS=y
68CONFIG_DEBUG_KERNEL=y
69# CONFIG_SCHED_DEBUG is not set 69# CONFIG_SCHED_DEBUG is not set
70CONFIG_TIMER_STATS=y 70CONFIG_TIMER_STATS=y
71# CONFIG_DEBUG_PREEMPT is not set 71# CONFIG_DEBUG_PREEMPT is not set
72CONFIG_DEBUG_INFO=y 72CONFIG_DEBUG_INFO=y
73# CONFIG_RCU_CPU_STALL_DETECTOR is not set
74# CONFIG_CRC32 is not set 73# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 97d31a4663d..2d7b6e7b727 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
10CONFIG_ARCH_U8500=y 10CONFIG_ARCH_U8500=y
11CONFIG_UX500_SOC_DB5500=y 11CONFIG_UX500_SOC_DB5500=y
12CONFIG_UX500_SOC_DB8500=y 12CONFIG_UX500_SOC_DB8500=y
13CONFIG_MACH_U8500=y 13CONFIG_MACH_HREFV60=y
14CONFIG_MACH_SNOWBALL=y 14CONFIG_MACH_SNOWBALL=y
15CONFIG_MACH_U5500=y 15CONFIG_MACH_U5500=y
16CONFIG_NO_HZ=y 16CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
24CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 24CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
25CONFIG_VFP=y 25CONFIG_VFP=y
26CONFIG_NEON=y 26CONFIG_NEON=y
27CONFIG_PM_RUNTIME=y
27CONFIG_NET=y 28CONFIG_NET=y
28CONFIG_PACKET=y 29CONFIG_PACKET=y
29CONFIG_UNIX=y 30CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
41CONFIG_AB8500_PWM=y 42CONFIG_AB8500_PWM=y
42CONFIG_SENSORS_BH1780=y 43CONFIG_SENSORS_BH1780=y
43CONFIG_NETDEVICES=y 44CONFIG_NETDEVICES=y
44CONFIG_SMSC_PHY=y
45CONFIG_NET_ETHERNET=y
46CONFIG_SMSC911X=y 45CONFIG_SMSC911X=y
47# CONFIG_NETDEV_1000 is not set 46CONFIG_SMSC_PHY=y
48# CONFIG_NETDEV_10000 is not set
49# CONFIG_WLAN is not set 47# CONFIG_WLAN is not set
50# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 48# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
51CONFIG_INPUT_EVDEV=y 49CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
72CONFIG_SPI_PL022=y 70CONFIG_SPI_PL022=y
73CONFIG_GPIO_STMPE=y 71CONFIG_GPIO_STMPE=y
74CONFIG_GPIO_TC3589X=y 72CONFIG_GPIO_TC3589X=y
75# CONFIG_HWMON is not set
76CONFIG_MFD_STMPE=y 73CONFIG_MFD_STMPE=y
77CONFIG_MFD_TC3589X=y 74CONFIG_MFD_TC3589X=y
75CONFIG_AB5500_CORE=y
78CONFIG_AB8500_CORE=y 76CONFIG_AB8500_CORE=y
79CONFIG_REGULATOR_AB8500=y 77CONFIG_REGULATOR_AB8500=y
80# CONFIG_HID_SUPPORT is not set 78# CONFIG_HID_SUPPORT is not set
81CONFIG_USB_MUSB_HDRC=y
82CONFIG_USB_GADGET_MUSB_HDRC=y
83CONFIG_MUSB_PIO_ONLY=y
84CONFIG_USB_GADGET=y 79CONFIG_USB_GADGET=y
85CONFIG_AB8500_USB=y 80CONFIG_AB8500_USB=y
86CONFIG_MMC=y 81CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
97CONFIG_STE_DMA40=y 92CONFIG_STE_DMA40=y
98CONFIG_STAGING=y 93CONFIG_STAGING=y
99CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y 94CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
95CONFIG_HSEM_U8500=y
100CONFIG_EXT2_FS=y 96CONFIG_EXT2_FS=y
101CONFIG_EXT2_FS_XATTR=y 97CONFIG_EXT2_FS_XATTR=y
102CONFIG_EXT2_FS_POSIX_ACL=y 98CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad3f4e..547a3c1e59d 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
140CONFIG_USB_SERIAL_GENERIC=y 140CONFIG_USB_SERIAL_GENERIC=y
141CONFIG_USB_SERIAL_MCT_U232=m 141CONFIG_USB_SERIAL_MCT_U232=m
142CONFIG_USB_GADGET=m 142CONFIG_USB_GADGET=m
143CONFIG_USB_GADGET_PXA27X=y 143CONFIG_USB_PXA27X=y
144CONFIG_USB_ETH=m 144CONFIG_USB_ETH=m
145CONFIG_USB_GADGETFS=m 145CONFIG_USB_GADGETFS=m
146CONFIG_USB_FILE_STORAGE=m 146CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 00000000000..a0ada3ea435
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
1#ifndef __ASMARM_CTI_H
2#define __ASMARM_CTI_H
3
4#include <asm/io.h>
5
6/* The registers' definition is from section 3.2 of
7 * Embedded Cross Trigger Revision: r0p0
8 */
9#define CTICONTROL 0x000
10#define CTISTATUS 0x004
11#define CTILOCK 0x008
12#define CTIPROTECTION 0x00C
13#define CTIINTACK 0x010
14#define CTIAPPSET 0x014
15#define CTIAPPCLEAR 0x018
16#define CTIAPPPULSE 0x01c
17#define CTIINEN 0x020
18#define CTIOUTEN 0x0A0
19#define CTITRIGINSTATUS 0x130
20#define CTITRIGOUTSTATUS 0x134
21#define CTICHINSTATUS 0x138
22#define CTICHOUTSTATUS 0x13c
23#define CTIPERIPHID0 0xFE0
24#define CTIPERIPHID1 0xFE4
25#define CTIPERIPHID2 0xFE8
26#define CTIPERIPHID3 0xFEC
27#define CTIPCELLID0 0xFF0
28#define CTIPCELLID1 0xFF4
29#define CTIPCELLID2 0xFF8
30#define CTIPCELLID3 0xFFC
31
32/* The below are from section 3.6.4 of
33 * CoreSight v1.0 Architecture Specification
34 */
35#define LOCKACCESS 0xFB0
36#define LOCKSTATUS 0xFB4
37
 38/* writing this value to LOCKACCESS will unlock the module, and any
 39 * other value will lock the module
40 */
41#define LOCKCODE 0xC5ACCE55
42
43/**
44 * struct cti - cross trigger interface struct
45 * @base: mapped virtual address for the cti base
46 * @irq: irq number for the cti
 47 * @trig_out_for_irq: trigger out number which will cause
 48 * the @irq to happen
49 *
50 * cti struct used to operate cti registers.
51 */
52struct cti {
53 void __iomem *base;
54 int irq;
55 int trig_out_for_irq;
56};
57
58/**
59 * cti_init - initialize the cti instance
60 * @cti: cti instance
61 * @base: mapped virtual address for the cti base
62 * @irq: irq number for the cti
 63 * @trig_out: trigger out number which will cause
 64 * the @irq to happen
65 *
66 * called by machine code to pass the board dependent
67 * @base, @irq and @trig_out to cti.
68 */
69static inline void cti_init(struct cti *cti,
70 void __iomem *base, int irq, int trig_out)
71{
72 cti->base = base;
73 cti->irq = irq;
74 cti->trig_out_for_irq = trig_out;
75}
76
77/**
78 * cti_map_trigger - use the @chan to map @trig_in to @trig_out
79 * @cti: cti instance
80 * @trig_in: trigger in number
81 * @trig_out: trigger out number
 82 * @chan: channel number
83 *
 84 * This function maps the trigger input @trig_in to the trigger
 85 * output @trig_out using channel @chan.
86 */
87static inline void cti_map_trigger(struct cti *cti,
88 int trig_in, int trig_out, int chan)
89{
90 void __iomem *base = cti->base;
91 unsigned long val;
92
93 val = __raw_readl(base + CTIINEN + trig_in * 4);
94 val |= BIT(chan);
95 __raw_writel(val, base + CTIINEN + trig_in * 4);
96
97 val = __raw_readl(base + CTIOUTEN + trig_out * 4);
98 val |= BIT(chan);
99 __raw_writel(val, base + CTIOUTEN + trig_out * 4);
100}
101
102/**
103 * cti_enable - enable the cti module
104 * @cti: cti instance
105 *
106 * enable the cti module
107 */
108static inline void cti_enable(struct cti *cti)
109{
110 __raw_writel(0x1, cti->base + CTICONTROL);
111}
112
113/**
114 * cti_disable - disable the cti module
115 * @cti: cti instance
116 *
117 * disable the cti module
118 */
119static inline void cti_disable(struct cti *cti)
120{
121 __raw_writel(0, cti->base + CTICONTROL);
122}
123
124/**
125 * cti_irq_ack - clear the cti irq
126 * @cti: cti instance
127 *
128 * clear the cti irq
129 */
130static inline void cti_irq_ack(struct cti *cti)
131{
132 void __iomem *base = cti->base;
133 unsigned long val;
134
135 val = __raw_readl(base + CTIINTACK);
136 val |= BIT(cti->trig_out_for_irq);
137 __raw_writel(val, base + CTIINTACK);
138}
139
140/**
141 * cti_unlock - unlock cti module
142 * @cti: cti instance
143 *
144 * unlock the cti module; otherwise any writes to the cti
145 * module are not allowed.
146 */
147static inline void cti_unlock(struct cti *cti)
148{
149 void __iomem *base = cti->base;
150 unsigned long val;
151
152 val = __raw_readl(base + LOCKSTATUS);
153
154 if (val & 1) {
155 val = LOCKCODE;
156 __raw_writel(val, base + LOCKACCESS);
157 }
158}
159
160/**
161 * cti_lock - lock cti module
162 * @cti: cti instance
163 *
164 * lock the cti module, so any writes to the cti
165 * module will not be allowed.
166 */
167static inline void cti_lock(struct cti *cti)
168{
169 void __iomem *base = cti->base;
170 unsigned long val;
171
172 val = __raw_readl(base + LOCKSTATUS);
173
174 if (!(val & 1)) {
175 val = ~LOCKCODE;
176 __raw_writel(val, base + LOCKACCESS);
177 }
178}
179#endif
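Since cti.h is entirely inline helpers, a usage sketch may help show how they fit together. Everything board-specific below — the base address, IRQ number, and the trigger/channel numbers — is a made-up example, not something this patch defines; only the cti_* calls come from the header above.

/* hedged usage sketch: trigger/channel numbers and the IRQ name are examples */
#include <linux/interrupt.h>
#include <asm/cti.h>

static struct cti board_cti;

static irqreturn_t board_cti_irq(int irq, void *dev_id)
{
	cti_irq_ack(&board_cti);	/* clear the CTI-generated interrupt */
	return IRQ_HANDLED;
}

static void board_cti_setup(void __iomem *base, int irq)
{
	/* trigger out 6 raises the IRQ; route trigger in 1 to it over channel 2 */
	cti_init(&board_cti, base, irq, 6);
	cti_unlock(&board_cti);
	cti_map_trigger(&board_cti, 1, 6, 2);

	if (request_irq(irq, board_cti_irq, 0, "board-cti", NULL))
		return;

	cti_enable(&board_cti);
}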
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 1db1143a948..7df239bcdf2 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -20,6 +20,8 @@
20#ifndef __ASM_ARM_HARDWARE_L2X0_H 20#ifndef __ASM_ARM_HARDWARE_L2X0_H
21#define __ASM_ARM_HARDWARE_L2X0_H 21#define __ASM_ARM_HARDWARE_L2X0_H
22 22
23#include <linux/errno.h>
24
23#define L2X0_CACHE_ID 0x000 25#define L2X0_CACHE_ID 0x000
24#define L2X0_CACHE_TYPE 0x004 26#define L2X0_CACHE_TYPE 0x004
25#define L2X0_CTRL 0x100 27#define L2X0_CTRL 0x100
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 6fe6cf0895c..bcb0c883e21 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -13,6 +13,7 @@
13struct tag; 13struct tag;
14struct meminfo; 14struct meminfo;
15struct sys_timer; 15struct sys_timer;
16struct pt_regs;
16 17
17struct machine_desc { 18struct machine_desc {
18 unsigned int nr; /* architecture number */ 19 unsigned int nr; /* architecture number */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e3827a89..99cfe360798 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
32extern enum arm_perf_pmu_ids 32extern enum arm_perf_pmu_ids
33armpmu_get_pmu_id(void); 33armpmu_get_pmu_id(void);
34 34
35extern int
36armpmu_get_max_events(void);
37
38#endif /* __ARM_PERF_EVENT_H__ */ 35#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 71d99b83cdb..b5a5be2536c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@ enum arm_pmu_type {
27/* 27/*
28 * struct arm_pmu_platdata - ARM PMU platform data 28 * struct arm_pmu_platdata - ARM PMU platform data
29 * 29 *
30 * @handle_irq: an optional handler which will be called from the interrupt and 30 * @handle_irq: an optional handler which will be called from the
31 * passed the address of the low level handler, and can be used to implement 31 * interrupt and passed the address of the low level handler,
32 * any platform specific handling before or after calling it. 32 * and can be used to implement any platform specific handling
33 * before or after calling it.
34 * @enable_irq: an optional handler which will be called after
35 * request_irq and be used to handle some platform specific
36 * irq enablement
37 * @disable_irq: an optional handler which will be called before
38 * free_irq and be used to handle some platform specific
39 * irq disablement
33 */ 40 */
34struct arm_pmu_platdata { 41struct arm_pmu_platdata {
35 irqreturn_t (*handle_irq)(int irq, void *dev, 42 irqreturn_t (*handle_irq)(int irq, void *dev,
36 irq_handler_t pmu_handler); 43 irq_handler_t pmu_handler);
44 void (*enable_irq)(int irq);
45 void (*disable_irq)(int irq);
37}; 46};
38 47
39#ifdef CONFIG_CPU_HAS_PMU 48#ifdef CONFIG_CPU_HAS_PMU
@@ -55,16 +64,6 @@ reserve_pmu(enum arm_pmu_type type);
55extern void 64extern void
56release_pmu(enum arm_pmu_type type); 65release_pmu(enum arm_pmu_type type);
57 66
58/**
59 * init_pmu() - Initialise the PMU.
60 *
61 * Initialise the system ready for PMU enabling. This should typically set the
62 * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
63 * the actual hardware initialisation.
64 */
65extern int
66init_pmu(enum arm_pmu_type type);
67
68#else /* CONFIG_CPU_HAS_PMU */ 67#else /* CONFIG_CPU_HAS_PMU */
69 68
70#include <linux/err.h> 69#include <linux/err.h>
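The new enable_irq/disable_irq members are plain callbacks taking the IRQ number, called around request_irq/free_irq. A board might provide them roughly as below; the hook bodies are entirely platform specific and the names here are assumptions. The structure would then be handed to the PMU platform device as its platform_data.

/* sketch of board code filling in the new hooks; what the hooks do
 * (re-routing, combiner ungating, ...) depends on the platform */
#include <asm/pmu.h>

static void board_pmu_enable_irq(int irq)
{
	/* e.g. unmask or re-route the PMU interrupt for this board */
}

static void board_pmu_disable_irq(int irq)
{
	/* undo whatever board_pmu_enable_irq set up */
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.enable_irq	= board_pmu_enable_irq,
	.disable_irq	= board_pmu_disable_irq,
};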
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a7e457ed27c..58b8b84adcd 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
25 25
26void init_cpu_topology(void); 26void init_cpu_topology(void);
27void store_cpu_topology(unsigned int cpuid); 27void store_cpu_topology(unsigned int cpuid);
28const struct cpumask *cpu_coregroup_mask(unsigned int cpu); 28const struct cpumask *cpu_coregroup_mask(int cpu);
29 29
30#else 30#else
31 31
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c60a2944f95..4a112378380 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -402,6 +402,8 @@
402#define __NR_syncfs (__NR_SYSCALL_BASE+373) 402#define __NR_syncfs (__NR_SYSCALL_BASE+373)
403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374) 403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
404#define __NR_setns (__NR_SYSCALL_BASE+375) 404#define __NR_setns (__NR_SYSCALL_BASE+375)
405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
405 407
406/* 408/*
407 * The following SWIs are ARM private. 409 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9943e9e74a1..463ff4a0ec8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -385,6 +385,8 @@
385 CALL(sys_syncfs) 385 CALL(sys_syncfs)
386 CALL(sys_sendmmsg) 386 CALL(sys_sendmmsg)
387/* 375 */ CALL(sys_setns) 387/* 375 */ CALL(sys_setns)
388 CALL(sys_process_vm_readv)
389 CALL(sys_process_vm_writev)
388#ifndef syscalls_counted 390#ifndef syscalls_counted
389.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 391.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
390#define syscalls_counted 392#define syscalls_counted
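unistd.h and calls.S above wire up process_vm_readv/process_vm_writev as syscalls 376 and 377. A small userspace sketch of invoking the new call directly by number (reading the calling process's own memory, so it needs no ptrace permissions); the buffer contents are placeholders and the fallback define simply mirrors the table entry added above:

/* hedged userspace sketch of the newly added syscall */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>

#ifndef __NR_process_vm_readv
#define __NR_process_vm_readv 376	/* __NR_SYSCALL_BASE+376; base is 0 on EABI */
#endif

int main(void)
{
	static const char src[] = "hello from the 'remote' side";
	char dst[sizeof(src)];
	struct iovec local  = { .iov_base = dst,         .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = (void *)src, .iov_len = sizeof(src) };

	long n = syscall(__NR_process_vm_readv, getpid(),
			 &local, 1UL, &remote, 1UL, 0UL);
	if (n < 0)
		perror("process_vm_readv");
	else
		printf("copied %ld bytes: %s\n", n, dst);
	return 0;
}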
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bd49a6a2a17..3a456c6c700 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -496,7 +496,7 @@ ENDPROC(__und_usr)
496 .popsection 496 .popsection
497 .pushsection __ex_table,"a" 497 .pushsection __ex_table,"a"
498 .long 1b, 4b 498 .long 1b, 4b
499#if __LINUX_ARM_ARCH__ >= 7 499#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
500 .long 2b, 4b 500 .long 2b, 4b
501 .long 3b, 4b 501 .long 3b, 4b
502#endif 502#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 566c54c2a1f..08c82fd844a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -360,7 +360,7 @@ __secondary_data:
360 * r13 = *virtual* address to jump to upon completion 360 * r13 = *virtual* address to jump to upon completion
361 */ 361 */
362__enable_mmu: 362__enable_mmu:
363#ifdef CONFIG_ALIGNMENT_TRAP 363#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
364 orr r0, r0, #CR_A 364 orr r0, r0, #CR_A
365#else 365#else
366 bic r0, r0, #CR_A 366 bic r0, r0, #CR_A
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 9fe8910308a..8a30c89da70 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
519static const union decode_item arm_cccc_0001_____1001_table[] = { 519static const union decode_item arm_cccc_0001_____1001_table[] = {
520 /* Synchronization primitives */ 520 /* Synchronization primitives */
521 521
522#if __LINUX_ARM_ARCH__ < 6
523 /* Deprecated on ARMv6 and may be UNDEFINED on v7 */
522 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ 524 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
523 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, 525 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
524 REGS(NOPC, NOPC, 0, 0, NOPC)), 526 REGS(NOPC, NOPC, 0, 0, NOPC)),
525 527#endif
526 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ 528 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
527 /* And unallocated instructions... */ 529 /* And unallocated instructions... */
528 DECODE_END 530 DECODE_END
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index fc82de8bdcc..ba32b393b3f 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
427 427
428 TEST_GROUP("Synchronization primitives") 428 TEST_GROUP("Synchronization primitives")
429 429
430 /* 430#if __LINUX_ARM_ARCH__ < 6
431 * Use hard coded constants for SWP instructions to avoid warnings 431 TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
432 * about deprecated instructions. 432 TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
433 */ 433 TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
434 TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]") 434#else
435 TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]") 435 TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
436 TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]") 436 TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
437 TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
438#endif
437 TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") 439 TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
438 TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") 440 TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
439 TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") 441 TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
440 TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]") 442#if __LINUX_ARM_ARCH__ < 6
441 TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]") 443 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
444 TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
445#else
446 TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
447 TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
448#endif
442 TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") 449 TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
443 450
444 TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ 451 TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
550 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") 557 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
551 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") 558 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
552 TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") 559 TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
553 TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"") 560 TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
554 TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") 561 TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
555 TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") 562 TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
556 563
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5e726c31c45..5d8b8579222 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
222DONT_TEST_IN_ITBLOCK( 222DONT_TEST_IN_ITBLOCK(
223 TEST_BF_R( "cbnz r",0,0, ", 2f") 223 TEST_BF_R( "cbnz r",0,0, ", 2f")
224 TEST_BF_R( "cbz r",2,-1,", 2f") 224 TEST_BF_R( "cbz r",2,-1,", 2f")
225 TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20) 225 TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
226 TEST_BF_RX( "cbz r",7,0, ", 2f",0x40) 226 TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
227) 227)
228 TEST_R("sxth r0, r",7, HH1,"") 228 TEST_R("sxth r0, r",7, HH1,"")
229 TEST_R("sxth r7, r",0, HH2,"") 229 TEST_R("sxth r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
246 TESTCASE_START(code) \ 246 TESTCASE_START(code) \
247 TEST_ARG_PTR(13, offset) \ 247 TEST_ARG_PTR(13, offset) \
248 TEST_ARG_END("") \ 248 TEST_ARG_END("") \
249 TEST_BRANCH_F(code,0) \ 249 TEST_BRANCH_F(code) \
250 TESTCASE_END 250 TESTCASE_END
251 251
252 TEST("push {r0}") 252 TEST("push {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
319 319
320 TEST_BF( "b 2f") 320 TEST_BF( "b 2f")
321 TEST_BB( "b 2b") 321 TEST_BB( "b 2b")
322 TEST_BF_X("b 2f", 0x400) 322 TEST_BF_X("b 2f", SPACE_0x400)
323 TEST_BB_X("b 2b", 0x400) 323 TEST_BB_X("b 2b", SPACE_0x400)
324 324
325 TEST_GROUP("Testing instructions in IT blocks") 325 TEST_GROUP("Testing instructions in IT blocks")
326 326
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
746 TEST_BB("bne.w 2b") 746 TEST_BB("bne.w 2b")
747 TEST_BF("bgt.w 2f") 747 TEST_BF("bgt.w 2f")
748 TEST_BB("blt.w 2b") 748 TEST_BB("blt.w 2b")
749 TEST_BF_X("bpl.w 2f",0x1000) 749 TEST_BF_X("bpl.w 2f", SPACE_0x1000)
750) 750)
751 751
752 TEST_UNSUPPORTED("msr cpsr, r0") 752 TEST_UNSUPPORTED("msr cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
786 786
787 TEST_BF( "b.w 2f") 787 TEST_BF( "b.w 2f")
788 TEST_BB( "b.w 2b") 788 TEST_BB( "b.w 2b")
789 TEST_BF_X("b.w 2f", 0x1000) 789 TEST_BF_X("b.w 2f", SPACE_0x1000)
790 790
791 TEST_BF( "bl.w 2f") 791 TEST_BF( "bl.w 2f")
792 TEST_BB( "bl.w 2b") 792 TEST_BB( "bl.w 2b")
793 TEST_BB_X("bl.w 2b", 0x1000) 793 TEST_BB_X("bl.w 2b", SPACE_0x1000)
794 794
795 TEST_X( "blx __dummy_arm_subroutine", 795 TEST_X( "blx __dummy_arm_subroutine",
796 ".arm \n\t" 796 ".arm \n\t"
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index 0dc5d77b935..e28a869b1ae 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -149,23 +149,31 @@ struct test_arg_end {
149 "1: "instruction" \n\t" \ 149 "1: "instruction" \n\t" \
150 " nop \n\t" 150 " nop \n\t"
151 151
152#define TEST_BRANCH_F(instruction, xtra_dist) \ 152#define TEST_BRANCH_F(instruction) \
153 TEST_INSTRUCTION(instruction) \ 153 TEST_INSTRUCTION(instruction) \
154 ".if "#xtra_dist" \n\t" \
155 " b 99f \n\t" \ 154 " b 99f \n\t" \
156 ".space "#xtra_dist" \n\t" \ 155 "2: nop \n\t"
157 ".endif \n\t" \ 156
157#define TEST_BRANCH_B(instruction) \
158 " b 50f \n\t" \
159 " b 99f \n\t" \
160 "2: nop \n\t" \
161 " b 99f \n\t" \
162 TEST_INSTRUCTION(instruction)
163
164#define TEST_BRANCH_FX(instruction, codex) \
165 TEST_INSTRUCTION(instruction) \
166 " b 99f \n\t" \
167 codex" \n\t" \
158 " b 99f \n\t" \ 168 " b 99f \n\t" \
159 "2: nop \n\t" 169 "2: nop \n\t"
160 170
161#define TEST_BRANCH_B(instruction, xtra_dist) \ 171#define TEST_BRANCH_BX(instruction, codex) \
162 " b 50f \n\t" \ 172 " b 50f \n\t" \
163 " b 99f \n\t" \ 173 " b 99f \n\t" \
164 "2: nop \n\t" \ 174 "2: nop \n\t" \
165 " b 99f \n\t" \ 175 " b 99f \n\t" \
166 ".if "#xtra_dist" \n\t" \ 176 codex" \n\t" \
167 ".space "#xtra_dist" \n\t" \
168 ".endif \n\t" \
169 TEST_INSTRUCTION(instruction) 177 TEST_INSTRUCTION(instruction)
170 178
171#define TESTCASE_END \ 179#define TESTCASE_END \
@@ -301,47 +309,60 @@ struct test_arg_end {
301 TESTCASE_START(code1 #reg1 code2) \ 309 TESTCASE_START(code1 #reg1 code2) \
302 TEST_ARG_PTR(reg1, val1) \ 310 TEST_ARG_PTR(reg1, val1) \
303 TEST_ARG_END("") \ 311 TEST_ARG_END("") \
304 TEST_BRANCH_F(code1 #reg1 code2, 0) \ 312 TEST_BRANCH_F(code1 #reg1 code2) \
305 TESTCASE_END 313 TESTCASE_END
306 314
307#define TEST_BF_X(code, xtra_dist) \ 315#define TEST_BF(code) \
308 TESTCASE_START(code) \ 316 TESTCASE_START(code) \
309 TEST_ARG_END("") \ 317 TEST_ARG_END("") \
310 TEST_BRANCH_F(code, xtra_dist) \ 318 TEST_BRANCH_F(code) \
311 TESTCASE_END 319 TESTCASE_END
312 320
313#define TEST_BB_X(code, xtra_dist) \ 321#define TEST_BB(code) \
314 TESTCASE_START(code) \ 322 TESTCASE_START(code) \
315 TEST_ARG_END("") \ 323 TEST_ARG_END("") \
316 TEST_BRANCH_B(code, xtra_dist) \ 324 TEST_BRANCH_B(code) \
317 TESTCASE_END 325 TESTCASE_END
318 326
319#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \ 327#define TEST_BF_R(code1, reg, val, code2) \
320 TESTCASE_START(code1 #reg code2) \ 328 TESTCASE_START(code1 #reg code2) \
321 TEST_ARG_REG(reg, val) \ 329 TEST_ARG_REG(reg, val) \
322 TEST_ARG_END("") \ 330 TEST_ARG_END("") \
323 TEST_BRANCH_F(code1 #reg code2, xtra_dist) \ 331 TEST_BRANCH_F(code1 #reg code2) \
324 TESTCASE_END 332 TESTCASE_END
325 333
326#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \ 334#define TEST_BB_R(code1, reg, val, code2) \
327 TESTCASE_START(code1 #reg code2) \ 335 TESTCASE_START(code1 #reg code2) \
328 TEST_ARG_REG(reg, val) \ 336 TEST_ARG_REG(reg, val) \
329 TEST_ARG_END("") \ 337 TEST_ARG_END("") \
330 TEST_BRANCH_B(code1 #reg code2, xtra_dist) \ 338 TEST_BRANCH_B(code1 #reg code2) \
331 TESTCASE_END 339 TESTCASE_END
332 340
333#define TEST_BF(code) TEST_BF_X(code, 0)
334#define TEST_BB(code) TEST_BB_X(code, 0)
335
336#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
337#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
338
339#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ 341#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
340 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ 342 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
341 TEST_ARG_REG(reg1, val1) \ 343 TEST_ARG_REG(reg1, val1) \
342 TEST_ARG_REG(reg2, val2) \ 344 TEST_ARG_REG(reg2, val2) \
343 TEST_ARG_END("") \ 345 TEST_ARG_END("") \
344 TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \ 346 TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
347 TESTCASE_END
348
349#define TEST_BF_X(code, codex) \
350 TESTCASE_START(code) \
351 TEST_ARG_END("") \
352 TEST_BRANCH_FX(code, codex) \
353 TESTCASE_END
354
355#define TEST_BB_X(code, codex) \
356 TESTCASE_START(code) \
357 TEST_ARG_END("") \
358 TEST_BRANCH_BX(code, codex) \
359 TESTCASE_END
360
361#define TEST_BF_RX(code1, reg, val, code2, codex) \
362 TESTCASE_START(code1 #reg code2) \
363 TEST_ARG_REG(reg, val) \
364 TEST_ARG_END("") \
365 TEST_BRANCH_FX(code1 #reg code2, codex) \
345 TESTCASE_END 366 TESTCASE_END
346 367
347#define TEST_X(code, codex) \ 368#define TEST_X(code, codex) \
@@ -372,6 +393,25 @@ struct test_arg_end {
372 TESTCASE_END 393 TESTCASE_END
373 394
374 395
396/*
397 * Macros for defining space directives spread over multiple lines.
398 * These are required so the compiler guesses better the length of inline asm
399 * code and will spill the literal pool early enough to avoid generating PC
400 * relative loads with out of range offsets.
401 */
402#define TWICE(x) x x
403#define SPACE_0x8 TWICE(".space 4\n\t")
404#define SPACE_0x10 TWICE(SPACE_0x8)
405#define SPACE_0x20 TWICE(SPACE_0x10)
406#define SPACE_0x40 TWICE(SPACE_0x20)
407#define SPACE_0x80 TWICE(SPACE_0x40)
408#define SPACE_0x100 TWICE(SPACE_0x80)
409#define SPACE_0x200 TWICE(SPACE_0x100)
410#define SPACE_0x400 TWICE(SPACE_0x200)
411#define SPACE_0x800 TWICE(SPACE_0x400)
412#define SPACE_0x1000 TWICE(SPACE_0x800)
413
414
375/* Various values used in test cases... */ 415/* Various values used in test cases... */
376#define N(val) (val ^ 0xffffffff) 416#define N(val) (val ^ 0xffffffff)
377#define VAL1 0x12345678 417#define VAL1 0x12345678
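A minimal user-space sketch (illustrative only, not part of this patch) of how the TWICE() doubling above builds the SPACE_* padding strings; it assumes nothing beyond the macro definitions shown in the hunk:

#include <stdio.h>
#include <string.h>

#define TWICE(x) x x
#define SPACE_0x8  TWICE(".space 4\n\t")
#define SPACE_0x10 TWICE(SPACE_0x8)
#define SPACE_0x20 TWICE(SPACE_0x10)
#define SPACE_0x40 TWICE(SPACE_0x20)

int main(void)
{
	/* Count ".space 4" directives in the expanded string literal: each
	 * one assembles to 4 bytes, so SPACE_0x40 should pad 0x40 bytes. */
	const char *p = SPACE_0x40;
	int n = 0;

	while ((p = strstr(p, ".space 4")) != NULL) {
		n++;
		p += strlen(".space 4");
	}
	printf("SPACE_0x40: %d directives, %d bytes\n", n, n * 4);
	return 0;
}

Doubling string literals this way keeps every macro definition on a single preprocessor line while still letting the compiler account for the padding when it estimates the size of the inline asm.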
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index cc40b965d42..29620b632ed 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -32,24 +32,6 @@ static atomic_t waiting_for_crash_ipi;
32 32
33int machine_kexec_prepare(struct kimage *image) 33int machine_kexec_prepare(struct kimage *image)
34{ 34{
35 unsigned long page_list;
36 void *reboot_code_buffer;
37 page_list = image->head & PAGE_MASK;
38
39 reboot_code_buffer = page_address(image->control_code_page);
40
41 /* Prepare parameters for reboot_code_buffer*/
42 kexec_start_address = image->start;
43 kexec_indirection_page = page_list;
44 kexec_mach_type = machine_arch_type;
45 kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
46
47 /* copy our kernel relocation code to the control code page */
48 memcpy(reboot_code_buffer,
49 relocate_new_kernel, relocate_new_kernel_size);
50
51 flush_icache_range((unsigned long) reboot_code_buffer,
52 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
53 return 0; 35 return 0;
54} 36}
55 37
@@ -100,14 +82,31 @@ void (*kexec_reinit)(void);
100 82
101void machine_kexec(struct kimage *image) 83void machine_kexec(struct kimage *image)
102{ 84{
85 unsigned long page_list;
103 unsigned long reboot_code_buffer_phys; 86 unsigned long reboot_code_buffer_phys;
104 void *reboot_code_buffer; 87 void *reboot_code_buffer;
105 88
89
90 page_list = image->head & PAGE_MASK;
91
106 /* we need both effective and real address here */ 92 /* we need both effective and real address here */
107 reboot_code_buffer_phys = 93 reboot_code_buffer_phys =
108 page_to_pfn(image->control_code_page) << PAGE_SHIFT; 94 page_to_pfn(image->control_code_page) << PAGE_SHIFT;
109 reboot_code_buffer = page_address(image->control_code_page); 95 reboot_code_buffer = page_address(image->control_code_page);
110 96
97 /* Prepare parameters for reboot_code_buffer*/
98 kexec_start_address = image->start;
99 kexec_indirection_page = page_list;
100 kexec_mach_type = machine_arch_type;
101 kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
102
103 /* copy our kernel relocation code to the control code page */
104 memcpy(reboot_code_buffer,
105 relocate_new_kernel, relocate_new_kernel_size);
106
107
108 flush_icache_range((unsigned long) reboot_code_buffer,
109 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
111 printk(KERN_INFO "Bye!\n"); 110 printk(KERN_INFO "Bye!\n");
112 111
113 if (kexec_reinit) 112 if (kexec_reinit)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 24e2347be6b..172101ac97d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
59} 59}
60EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); 60EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
61 61
62int 62int perf_num_counters(void)
63armpmu_get_max_events(void)
64{ 63{
65 int max_events = 0; 64 int max_events = 0;
66 65
@@ -69,12 +68,6 @@ armpmu_get_max_events(void)
69 68
70 return max_events; 69 return max_events;
71} 70}
72EXPORT_SYMBOL_GPL(armpmu_get_max_events);
73
74int perf_num_counters(void)
75{
76 return armpmu_get_max_events();
77}
78EXPORT_SYMBOL_GPL(perf_num_counters); 71EXPORT_SYMBOL_GPL(perf_num_counters);
79 72
80#define HW_OP_UNSUPPORTED 0xFFFF 73#define HW_OP_UNSUPPORTED 0xFFFF
@@ -343,8 +336,14 @@ validate_group(struct perf_event *event)
343{ 336{
344 struct perf_event *sibling, *leader = event->group_leader; 337 struct perf_event *sibling, *leader = event->group_leader;
345 struct pmu_hw_events fake_pmu; 338 struct pmu_hw_events fake_pmu;
339 DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
346 340
347 memset(&fake_pmu, 0, sizeof(fake_pmu)); 341 /*
342 * Initialise the fake PMU. We only need to populate the
343 * used_mask for the purposes of validation.
344 */
345 memset(fake_used_mask, 0, sizeof(fake_used_mask));
346 fake_pmu.used_mask = fake_used_mask;
348 347
349 if (!validate_event(&fake_pmu, leader)) 348 if (!validate_event(&fake_pmu, leader))
350 return -ENOSPC; 349 return -ENOSPC;
@@ -374,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
374{ 373{
375 int i, irq, irqs; 374 int i, irq, irqs;
376 struct platform_device *pmu_device = armpmu->plat_device; 375 struct platform_device *pmu_device = armpmu->plat_device;
376 struct arm_pmu_platdata *plat =
377 dev_get_platdata(&pmu_device->dev);
377 378
378 irqs = min(pmu_device->num_resources, num_possible_cpus()); 379 irqs = min(pmu_device->num_resources, num_possible_cpus());
379 380
@@ -381,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
381 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) 382 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
382 continue; 383 continue;
383 irq = platform_get_irq(pmu_device, i); 384 irq = platform_get_irq(pmu_device, i);
384 if (irq >= 0) 385 if (irq >= 0) {
386 if (plat && plat->disable_irq)
387 plat->disable_irq(irq);
385 free_irq(irq, armpmu); 388 free_irq(irq, armpmu);
389 }
386 } 390 }
387 391
388 release_pmu(armpmu->type); 392 release_pmu(armpmu->type);
@@ -396,6 +400,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
396 int i, err, irq, irqs; 400 int i, err, irq, irqs;
397 struct platform_device *pmu_device = armpmu->plat_device; 401 struct platform_device *pmu_device = armpmu->plat_device;
398 402
403 if (!pmu_device)
404 return -ENODEV;
405
399 err = reserve_pmu(armpmu->type); 406 err = reserve_pmu(armpmu->type);
400 if (err) { 407 if (err) {
401 pr_warning("unable to reserve pmu\n"); 408 pr_warning("unable to reserve pmu\n");
@@ -439,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
439 irq); 446 irq);
440 armpmu_release_hardware(armpmu); 447 armpmu_release_hardware(armpmu);
441 return err; 448 return err;
442 } 449 } else if (plat && plat->enable_irq)
450 plat->enable_irq(irq);
443 451
444 cpumask_set_cpu(i, &armpmu->active_irqs); 452 cpumask_set_cpu(i, &armpmu->active_irqs);
445 } 453 }
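For context, a hypothetical board-file sketch of how a platform could supply the enable_irq/disable_irq callbacks that armpmu_reserve_hardware() and armpmu_release_hardware() now invoke. The struct and field names come from the hunk above (struct arm_pmu_platdata, fetched via dev_get_platdata()); everything else, including the function names and what the hooks do, is an assumption for illustration:

#include <linux/platform_device.h>
#include <asm/pmu.h>

/* Hypothetical hooks: board-specific work done around the PMU IRQ being
 * requested and freed, e.g. routing it through a cross-trigger interface. */
static void board_pmu_enable_irq(int irq)
{
	/* board-specific IRQ routing/unmasking would go here */
}

static void board_pmu_disable_irq(int irq)
{
	/* undo whatever board_pmu_enable_irq() set up */
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.enable_irq	= board_pmu_enable_irq,
	.disable_irq	= board_pmu_disable_irq,
};

/* Installed as platform_data of the PMU platform device, so that
 * dev_get_platdata(&pmu_device->dev) in the hunk above returns it. */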
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index e63d8115c01..533be9930ec 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -65,13 +65,15 @@ enum armv6_counters {
65 * accesses/misses in hardware. 65 * accesses/misses in hardware.
66 */ 66 */
67static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { 67static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
68 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, 68 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
69 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, 69 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
70 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 70 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
71 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 71 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
72 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, 72 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
73 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, 73 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
74 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, 74 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
75 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
76 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
75}; 77};
76 78
77static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 79static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
218 * accesses/misses in hardware. 220 * accesses/misses in hardware.
219 */ 221 */
220static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { 222static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
221 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, 223 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
222 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, 224 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
223 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 225 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
224 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 226 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
225 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, 227 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
226 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, 228 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
227 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, 229 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
230 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
231 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
228}; 232};
229 233
230static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 234static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ef6d0034b8..460bbbb6b88 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
28 * they are not available. 28 * they are not available.
29 */ 29 */
30enum armv7_perf_types { 30enum armv7_perf_types {
31 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, 31 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
32 ARMV7_PERFCTR_IFETCH_MISS = 0x01, 32 ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
33 ARMV7_PERFCTR_ITLB_MISS = 0x02, 33 ARMV7_PERFCTR_ITLB_REFILL = 0x02,
34 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */ 34 ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
35 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */ 35 ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
36 ARMV7_PERFCTR_DTLB_REFILL = 0x05, 36 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
37 ARMV7_PERFCTR_DREAD = 0x06, 37 ARMV7_PERFCTR_MEM_READ = 0x06,
38 ARMV7_PERFCTR_DWRITE = 0x07, 38 ARMV7_PERFCTR_MEM_WRITE = 0x07,
39 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, 39 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
40 ARMV7_PERFCTR_EXC_TAKEN = 0x09, 40 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
41 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, 41 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
42 ARMV7_PERFCTR_CID_WRITE = 0x0B, 42 ARMV7_PERFCTR_CID_WRITE = 0x0B,
43 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. 43
44 /*
45 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
44 * It counts: 46 * It counts:
45 * - all branch instructions, 47 * - all (taken) branch instructions,
46 * - instructions that explicitly write the PC, 48 * - instructions that explicitly write the PC,
47 * - exception generating instructions. 49 * - exception generating instructions.
48 */ 50 */
49 ARMV7_PERFCTR_PC_WRITE = 0x0C, 51 ARMV7_PERFCTR_PC_WRITE = 0x0C,
50 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, 52 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
51 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, 53 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
52 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, 54 ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
55 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
56 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
57 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
53 58
54 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ 59 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
55 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, 60 ARMV7_PERFCTR_MEM_ACCESS = 0x13,
56 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, 61 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
57 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, 62 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
58 ARMV7_PERFCTR_MEM_ACCESS = 0x13, 63 ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
59 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, 64 ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
60 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, 65 ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
61 ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16, 66 ARMV7_PERFCTR_BUS_ACCESS = 0x19,
62 ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17, 67 ARMV7_PERFCTR_MEM_ERROR = 0x1A,
63 ARMV7_PERFCTR_L2_DCACHE_WB = 0x18, 68 ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
64 ARMV7_PERFCTR_BUS_ACCESS = 0x19, 69 ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
65 ARMV7_PERFCTR_MEMORY_ERROR = 0x1A, 70 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
66 ARMV7_PERFCTR_INSTR_SPEC = 0x1B, 71
67 ARMV7_PERFCTR_TTBR_WRITE = 0x1C, 72 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
68 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
69
70 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
71}; 73};
72 74
73/* ARMv7 Cortex-A8 specific event types */ 75/* ARMv7 Cortex-A8 specific event types */
74enum armv7_a8_perf_types { 76enum armv7_a8_perf_types {
75 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, 77 ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
76 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, 78 ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
77 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, 79 ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
78 ARMV7_PERFCTR_L2_ACCESS = 0x43, 80 ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
79 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
80 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
81 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
82 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
83 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
84 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
85 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
86 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
87 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
88 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
89 ARMV7_PERFCTR_L2_NEON = 0x4E,
90 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
91 ARMV7_PERFCTR_L1_INST = 0x50,
92 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
93 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
94 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
95 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
96 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
97 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
98 ARMV7_PERFCTR_CYCLES_INST = 0x57,
99 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
100 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
101 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
102
103 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
104 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
105 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
106}; 81};
107 82
108/* ARMv7 Cortex-A9 specific event types */ 83/* ARMv7 Cortex-A9 specific event types */
109enum armv7_a9_perf_types { 84enum armv7_a9_perf_types {
110 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, 85 ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
111 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, 86 ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
112 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, 87 ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
113
114 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
115 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
116
117 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
118 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
119 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
120 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
121 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
122 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
123 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
124 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
125 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
126
127 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
128
129 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
130 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
131 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
132 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
133 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
134
135 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
136 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
137 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
138 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
139 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
140 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
141 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
142
143 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
144 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
145
146 ARMV7_PERFCTR_ISB_INST = 0x90,
147 ARMV7_PERFCTR_DSB_INST = 0x91,
148 ARMV7_PERFCTR_DMB_INST = 0x92,
149 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
150
151 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
152 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
153 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
154 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
155 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
156 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
157}; 88};
158 89
159/* ARMv7 Cortex-A5 specific event types */ 90/* ARMv7 Cortex-A5 specific event types */
160enum armv7_a5_perf_types { 91enum armv7_a5_perf_types {
161 ARMV7_PERFCTR_IRQ_TAKEN = 0x86, 92 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
162 ARMV7_PERFCTR_FIQ_TAKEN = 0x87, 93 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
163
164 ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
165 ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
166 ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
167 ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
168 ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
169 ARMV7_PERFCTR_READ_ALLOC = 0xc5,
170
171 ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
172}; 94};
173 95
174/* ARMv7 Cortex-A15 specific event types */ 96/* ARMv7 Cortex-A15 specific event types */
175enum armv7_a15_perf_types { 97enum armv7_a15_perf_types {
176 ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, 98 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
177 ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, 99 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
178 ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, 100 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
179 ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, 101 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
180 102
181 ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, 103 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
182 ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, 104 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
183 105
184 ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, 106 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
185 ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, 107 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
186 ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, 108 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
187 ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, 109 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
188 110
189 ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, 111 ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
190}; 112};
191 113
192/* 114/*
@@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
197 * accesses/misses in hardware. 119 * accesses/misses in hardware.
198 */ 120 */
199static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { 121static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
200 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 122 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
201 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 123 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
202 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 124 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
203 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 125 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
204 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 126 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
205 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 127 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
206 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, 128 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
129 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
130 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
207}; 131};
208 132
209static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 133static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
217 * combined. 141 * combined.
218 */ 142 */
219 [C(OP_READ)] = { 143 [C(OP_READ)] = {
220 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 144 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
221 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 145 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
222 }, 146 },
223 [C(OP_WRITE)] = { 147 [C(OP_WRITE)] = {
224 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 148 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
225 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 149 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
226 }, 150 },
227 [C(OP_PREFETCH)] = { 151 [C(OP_PREFETCH)] = {
228 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 152 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
231 }, 155 },
232 [C(L1I)] = { 156 [C(L1I)] = {
233 [C(OP_READ)] = { 157 [C(OP_READ)] = {
234 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, 158 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
235 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, 159 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
236 }, 160 },
237 [C(OP_WRITE)] = { 161 [C(OP_WRITE)] = {
238 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, 162 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
239 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, 163 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
240 }, 164 },
241 [C(OP_PREFETCH)] = { 165 [C(OP_PREFETCH)] = {
242 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 166 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
245 }, 169 },
246 [C(LL)] = { 170 [C(LL)] = {
247 [C(OP_READ)] = { 171 [C(OP_READ)] = {
248 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, 172 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
249 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, 173 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
250 }, 174 },
251 [C(OP_WRITE)] = { 175 [C(OP_WRITE)] = {
252 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, 176 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
253 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, 177 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
254 }, 178 },
255 [C(OP_PREFETCH)] = { 179 [C(OP_PREFETCH)] = {
256 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 180 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
274 [C(ITLB)] = { 198 [C(ITLB)] = {
275 [C(OP_READ)] = { 199 [C(OP_READ)] = {
276 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 200 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
277 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 201 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
278 }, 202 },
279 [C(OP_WRITE)] = { 203 [C(OP_WRITE)] = {
280 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 204 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
281 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 205 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
282 }, 206 },
283 [C(OP_PREFETCH)] = { 207 [C(OP_PREFETCH)] = {
284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 208 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
287 }, 211 },
288 [C(BPU)] = { 212 [C(BPU)] = {
289 [C(OP_READ)] = { 213 [C(OP_READ)] = {
290 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 214 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
291 [C(RESULT_MISS)] 215 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
292 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
293 }, 216 },
294 [C(OP_WRITE)] = { 217 [C(OP_WRITE)] = {
295 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 218 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
296 [C(RESULT_MISS)] 219 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
297 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
298 }, 220 },
299 [C(OP_PREFETCH)] = { 221 [C(OP_PREFETCH)] = {
300 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 222 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
321 * Cortex-A9 HW events mapping 243 * Cortex-A9 HW events mapping
322 */ 244 */
323static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { 245static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
324 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 246 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
325 [PERF_COUNT_HW_INSTRUCTIONS] = 247 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
326 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, 248 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
327 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS, 249 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
328 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL, 250 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
329 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 251 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
330 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 252 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
331 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, 253 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
254 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
332}; 255};
333 256
334static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 257static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
342 * combined. 265 * combined.
343 */ 266 */
344 [C(OP_READ)] = { 267 [C(OP_READ)] = {
345 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 268 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
346 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 269 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
347 }, 270 },
348 [C(OP_WRITE)] = { 271 [C(OP_WRITE)] = {
349 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 272 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
350 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 273 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
351 }, 274 },
352 [C(OP_PREFETCH)] = { 275 [C(OP_PREFETCH)] = {
353 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 276 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
357 [C(L1I)] = { 280 [C(L1I)] = {
358 [C(OP_READ)] = { 281 [C(OP_READ)] = {
359 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 282 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
360 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 283 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
361 }, 284 },
362 [C(OP_WRITE)] = { 285 [C(OP_WRITE)] = {
363 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 286 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
364 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 287 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
365 }, 288 },
366 [C(OP_PREFETCH)] = { 289 [C(OP_PREFETCH)] = {
367 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 290 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
399 [C(ITLB)] = { 322 [C(ITLB)] = {
400 [C(OP_READ)] = { 323 [C(OP_READ)] = {
401 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 324 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
402 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 325 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
403 }, 326 },
404 [C(OP_WRITE)] = { 327 [C(OP_WRITE)] = {
405 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 328 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
406 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 329 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
407 }, 330 },
408 [C(OP_PREFETCH)] = { 331 [C(OP_PREFETCH)] = {
409 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 332 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
412 }, 335 },
413 [C(BPU)] = { 336 [C(BPU)] = {
414 [C(OP_READ)] = { 337 [C(OP_READ)] = {
415 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 338 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
416 [C(RESULT_MISS)] 339 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
417 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
418 }, 340 },
419 [C(OP_WRITE)] = { 341 [C(OP_WRITE)] = {
420 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 342 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
421 [C(RESULT_MISS)] 343 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
422 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
423 }, 344 },
424 [C(OP_PREFETCH)] = { 345 [C(OP_PREFETCH)] = {
425 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 346 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
446 * Cortex-A5 HW events mapping 367 * Cortex-A5 HW events mapping
447 */ 368 */
448static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { 369static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
449 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 370 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
450 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 371 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
451 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 372 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
452 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 373 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
453 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 374 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
454 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 375 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
455 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, 376 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
377 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
378 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
456}; 379};
457 380
458static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 381static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
460 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 383 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
461 [C(L1D)] = { 384 [C(L1D)] = {
462 [C(OP_READ)] = { 385 [C(OP_READ)] = {
463 [C(RESULT_ACCESS)] 386 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
464 = ARMV7_PERFCTR_DCACHE_ACCESS, 387 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
465 [C(RESULT_MISS)]
466 = ARMV7_PERFCTR_DCACHE_REFILL,
467 }, 388 },
468 [C(OP_WRITE)] = { 389 [C(OP_WRITE)] = {
469 [C(RESULT_ACCESS)] 390 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
470 = ARMV7_PERFCTR_DCACHE_ACCESS, 391 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
471 [C(RESULT_MISS)]
472 = ARMV7_PERFCTR_DCACHE_REFILL,
473 }, 392 },
474 [C(OP_PREFETCH)] = { 393 [C(OP_PREFETCH)] = {
475 [C(RESULT_ACCESS)] 394 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
476 = ARMV7_PERFCTR_PREFETCH_LINEFILL, 395 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
477 [C(RESULT_MISS)]
478 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
479 }, 396 },
480 }, 397 },
481 [C(L1I)] = { 398 [C(L1I)] = {
482 [C(OP_READ)] = { 399 [C(OP_READ)] = {
483 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 400 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
484 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 401 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
485 }, 402 },
486 [C(OP_WRITE)] = { 403 [C(OP_WRITE)] = {
487 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 404 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
488 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 405 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
489 }, 406 },
490 /* 407 /*
491 * The prefetch counters don't differentiate between the I 408 * The prefetch counters don't differentiate between the I
492 * side and the D side. 409 * side and the D side.
493 */ 410 */
494 [C(OP_PREFETCH)] = { 411 [C(OP_PREFETCH)] = {
495 [C(RESULT_ACCESS)] 412 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
496 = ARMV7_PERFCTR_PREFETCH_LINEFILL, 413 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
497 [C(RESULT_MISS)]
498 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
499 }, 414 },
500 }, 415 },
501 [C(LL)] = { 416 [C(LL)] = {
@@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
529 [C(ITLB)] = { 444 [C(ITLB)] = {
530 [C(OP_READ)] = { 445 [C(OP_READ)] = {
531 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 446 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
532 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 447 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
533 }, 448 },
534 [C(OP_WRITE)] = { 449 [C(OP_WRITE)] = {
535 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 450 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
536 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 451 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
537 }, 452 },
538 [C(OP_PREFETCH)] = { 453 [C(OP_PREFETCH)] = {
539 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 454 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
543 [C(BPU)] = { 458 [C(BPU)] = {
544 [C(OP_READ)] = { 459 [C(OP_READ)] = {
545 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 460 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
546 [C(RESULT_MISS)] 461 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
547 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
548 }, 462 },
549 [C(OP_WRITE)] = { 463 [C(OP_WRITE)] = {
550 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 464 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
551 [C(RESULT_MISS)] 465 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
552 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
553 }, 466 },
554 [C(OP_PREFETCH)] = { 467 [C(OP_PREFETCH)] = {
555 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 468 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
562 * Cortex-A15 HW events mapping 475 * Cortex-A15 HW events mapping
563 */ 476 */
564static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { 477static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
565 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 478 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
566 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 479 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
567 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 480 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
568 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 481 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
569 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, 482 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
570 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 483 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
571 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, 484 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
485 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
486 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
572}; 487};
573 488
574static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 489static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
576 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 491 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
577 [C(L1D)] = { 492 [C(L1D)] = {
578 [C(OP_READ)] = { 493 [C(OP_READ)] = {
579 [C(RESULT_ACCESS)] 494 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
580 = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, 495 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
581 [C(RESULT_MISS)]
582 = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
583 }, 496 },
584 [C(OP_WRITE)] = { 497 [C(OP_WRITE)] = {
585 [C(RESULT_ACCESS)] 498 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
586 = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, 499 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
587 [C(RESULT_MISS)]
588 = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
589 }, 500 },
590 [C(OP_PREFETCH)] = { 501 [C(OP_PREFETCH)] = {
591 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 502 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
601 */ 512 */
602 [C(OP_READ)] = { 513 [C(OP_READ)] = {
603 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 514 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
604 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 515 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
605 }, 516 },
606 [C(OP_WRITE)] = { 517 [C(OP_WRITE)] = {
607 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 518 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
608 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 519 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
609 }, 520 },
610 [C(OP_PREFETCH)] = { 521 [C(OP_PREFETCH)] = {
611 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 522 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
614 }, 525 },
615 [C(LL)] = { 526 [C(LL)] = {
616 [C(OP_READ)] = { 527 [C(OP_READ)] = {
617 [C(RESULT_ACCESS)] 528 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
618 = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, 529 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
619 [C(RESULT_MISS)]
620 = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
621 }, 530 },
622 [C(OP_WRITE)] = { 531 [C(OP_WRITE)] = {
623 [C(RESULT_ACCESS)] 532 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
624 = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, 533 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
625 [C(RESULT_MISS)]
626 = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
627 }, 534 },
628 [C(OP_PREFETCH)] = { 535 [C(OP_PREFETCH)] = {
629 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 536 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
633 [C(DTLB)] = { 540 [C(DTLB)] = {
634 [C(OP_READ)] = { 541 [C(OP_READ)] = {
635 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 542 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
636 [C(RESULT_MISS)] 543 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
637 = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
638 }, 544 },
639 [C(OP_WRITE)] = { 545 [C(OP_WRITE)] = {
640 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 546 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
641 [C(RESULT_MISS)] 547 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
642 = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
643 }, 548 },
644 [C(OP_PREFETCH)] = { 549 [C(OP_PREFETCH)] = {
645 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 550 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
649 [C(ITLB)] = { 554 [C(ITLB)] = {
650 [C(OP_READ)] = { 555 [C(OP_READ)] = {
651 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 556 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
652 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 557 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
653 }, 558 },
654 [C(OP_WRITE)] = { 559 [C(OP_WRITE)] = {
655 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 560 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
656 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 561 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
657 }, 562 },
658 [C(OP_PREFETCH)] = { 563 [C(OP_PREFETCH)] = {
659 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 564 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
663 [C(BPU)] = { 568 [C(BPU)] = {
664 [C(OP_READ)] = { 569 [C(OP_READ)] = {
665 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 570 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
666 [C(RESULT_MISS)] 571 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
667 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
668 }, 572 },
669 [C(OP_WRITE)] = { 573 [C(OP_WRITE)] = {
670 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 574 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
671 [C(RESULT_MISS)] 575 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
672 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
673 }, 576 },
674 [C(OP_PREFETCH)] = { 577 [C(OP_PREFETCH)] = {
675 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 578 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e0cca10a841..3b99d826982 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -48,13 +48,15 @@ enum xscale_counters {
48}; 48};
49 49
50static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { 50static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
51 [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, 51 [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
52 [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, 52 [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
53 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 53 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
54 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 54 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
55 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, 55 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
56 [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, 56 [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
57 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, 57 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
58 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
59 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
58}; 60};
59 61
60static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 62static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c3407ee857..2334bf8a650 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
33{ 33{
34 clear_bit_unlock(type, pmu_lock); 34 clear_bit_unlock(type, pmu_lock);
35} 35}
36EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1e8b3e2de7a..eeb3e16c604 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -191,6 +191,9 @@ void cpu_idle(void)
191#endif 191#endif
192 192
193 local_irq_disable(); 193 local_irq_disable();
194#ifdef CONFIG_PL310_ERRATA_769419
195 wmb();
196#endif
194 if (hlt_counter) { 197 if (hlt_counter) {
195 local_irq_enable(); 198 local_irq_enable();
196 cpu_relax(); 199 cpu_relax();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a753880e984..6c2b0db5d1e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -461,8 +461,10 @@ static void __init setup_processor(void)
461 cpu_name, read_cpuid_id(), read_cpuid_id() & 15, 461 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
462 proc_arch[cpu_architecture()], cr_alignment); 462 proc_arch[cpu_architecture()], cr_alignment);
463 463
464 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); 464 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
465 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); 465 list->arch_name, ENDIANNESS);
466 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
467 list->elf_name, ENDIANNESS);
466 elf_hwcap = list->elf_hwcap; 468 elf_hwcap = list->elf_hwcap;
467#ifndef CONFIG_ARM_THUMB 469#ifndef CONFIG_ARM_THUMB
468 elf_hwcap &= ~HWCAP_THUMB; 470 elf_hwcap &= ~HWCAP_THUMB;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 1040c00405d..8200deaa14f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,7 +43,7 @@
43 43
44struct cputopo_arm cpu_topology[NR_CPUS]; 44struct cputopo_arm cpu_topology[NR_CPUS];
45 45
46const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 46const struct cpumask *cpu_coregroup_mask(int cpu)
47{ 47{
48 return &cpu_topology[cpu].core_sibling; 48 return &cpu_topology[cpu].core_sibling;
49} 49}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 10d868a5a48..d6408d1ee54 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,9 @@
1#include <asm/unwind.h>
2
1#if __LINUX_ARM_ARCH__ >= 6 3#if __LINUX_ARM_ARCH__ >= 6
2 .macro bitop, instr 4 .macro bitop, name, instr
5ENTRY( \name )
6UNWIND( .fnstart )
3 ands ip, r1, #3 7 ands ip, r1, #3
4 strneb r1, [ip] @ assert word-aligned 8 strneb r1, [ip] @ assert word-aligned
5 mov r2, #1 9 mov r2, #1
@@ -13,9 +17,13 @@
13 cmp r0, #0 17 cmp r0, #0
14 bne 1b 18 bne 1b
15 bx lr 19 bx lr
20UNWIND( .fnend )
21ENDPROC(\name )
16 .endm 22 .endm
17 23
18 .macro testop, instr, store 24 .macro testop, name, instr, store
25ENTRY( \name )
26UNWIND( .fnstart )
19 ands ip, r1, #3 27 ands ip, r1, #3
20 strneb r1, [ip] @ assert word-aligned 28 strneb r1, [ip] @ assert word-aligned
21 mov r2, #1 29 mov r2, #1
@@ -34,9 +42,13 @@
34 cmp r0, #0 42 cmp r0, #0
35 movne r0, #1 43 movne r0, #1
362: bx lr 442: bx lr
45UNWIND( .fnend )
46ENDPROC(\name )
37 .endm 47 .endm
38#else 48#else
39 .macro bitop, instr 49 .macro bitop, name, instr
50ENTRY( \name )
51UNWIND( .fnstart )
40 ands ip, r1, #3 52 ands ip, r1, #3
41 strneb r1, [ip] @ assert word-aligned 53 strneb r1, [ip] @ assert word-aligned
42 and r2, r0, #31 54 and r2, r0, #31
@@ -49,6 +61,8 @@
49 str r2, [r1, r0, lsl #2] 61 str r2, [r1, r0, lsl #2]
50 restore_irqs ip 62 restore_irqs ip
51 mov pc, lr 63 mov pc, lr
64UNWIND( .fnend )
65ENDPROC(\name )
52 .endm 66 .endm
53 67
54/** 68/**
@@ -59,7 +73,9 @@
59 * Note: we can trivially conditionalise the store instruction 73 * Note: we can trivially conditionalise the store instruction
60 * to avoid dirtying the data cache. 74 * to avoid dirtying the data cache.
61 */ 75 */
62 .macro testop, instr, store 76 .macro testop, name, instr, store
77ENTRY( \name )
78UNWIND( .fnstart )
63 ands ip, r1, #3 79 ands ip, r1, #3
64 strneb r1, [ip] @ assert word-aligned 80 strneb r1, [ip] @ assert word-aligned
65 and r3, r0, #31 81 and r3, r0, #31
@@ -73,5 +89,7 @@
73 moveq r0, #0 89 moveq r0, #0
74 restore_irqs ip 90 restore_irqs ip
75 mov pc, lr 91 mov pc, lr
92UNWIND( .fnend )
93ENDPROC(\name )
76 .endm 94 .endm
77#endif 95#endif
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 68ed5b62e83..f4027862172 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_change_bit) 15bitop _change_bit, eor
16 bitop eor
17ENDPROC(_change_bit)
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 4c04c3b51ee..f6b75fb64d3 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_clear_bit) 15bitop _clear_bit, bic
16 bitop bic
17ENDPROC(_clear_bit)
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index bbee5c66a23..618fedae4b3 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_set_bit) 15bitop _set_bit, orr
16 bitop orr
17ENDPROC(_set_bit)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 15a4d431f22..4becdc3a59c 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_change_bit) 15testop _test_and_change_bit, eor, str
16 testop eor, str
17ENDPROC(_test_and_change_bit)
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 521b66b5b95..918841dcce7 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_clear_bit) 15testop _test_and_clear_bit, bicne, strne
16 testop bicne, strne
17ENDPROC(_test_and_clear_bit)
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 1c98cc2185b..8d1b2fe9e48 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_set_bit) 15testop _test_and_set_bit, orreq, streq
16 testop orreq, streq
17ENDPROC(_test_and_set_bit)
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 43eadbcc29e..430da120a29 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -235,7 +235,7 @@ void __init bcmring_init_timer(void)
235 */ 235 */
236 bcmring_clocksource_init(); 236 bcmring_clocksource_init();
237 237
238 sp804_clockevents_register(TIMER0_VA_BASE, IRQ_TIMER0, "timer0"); 238 sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMER0, "timer0");
239} 239}
240 240
241struct sys_timer bcmring_timer = { 241struct sys_timer bcmring_timer = {
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index b52b8de91bd..f4d4d6d174d 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -36,6 +36,7 @@
36#include <linux/mm.h> 36#include <linux/mm.h>
37#include <linux/pfn.h> 37#include <linux/pfn.h>
38#include <linux/atomic.h> 38#include <linux/atomic.h>
39#include <linux/sched.h>
39#include <mach/dma.h> 40#include <mach/dma.h>
40 41
41/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */ 42/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 35f6502144a..4ebb382c597 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -12,6 +12,8 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/cpuidle.h> 13#include <linux/cpuidle.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/export.h>
16#include <linux/time.h>
15 17
16#include <asm/proc-fns.h> 18#include <asm/proc-fns.h>
17 19
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 89bdf0039f7..7266dd510f1 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -22,6 +22,7 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_address.h> 24#include <linux/of_address.h>
25#include <linux/smp.h>
25 26
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
27#include <asm/unified.h> 28#include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
72 73
73void highbank_set_cpu_jump(int cpu, void *jump_addr) 74void highbank_set_cpu_jump(int cpu, void *jump_addr)
74{ 75{
76#ifdef CONFIG_SMP
77 cpu = cpu_logical_map(cpu);
78#endif
75 writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu)); 79 writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
76 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); 80 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
77 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), 81 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5f7f9c2a34a..c44aa974e79 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
10config HAVE_IMX_SRC 10config HAVE_IMX_SRC
11 bool 11 bool
12 12
13#
14# ARCH_MX31 and ARCH_MX35 are left for compatibility
15# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
16# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
17# more sensible) names are used: SOC_IMX31 and SOC_IMX35
18config ARCH_MX1 13config ARCH_MX1
19 bool 14 bool
20 15
@@ -27,12 +22,6 @@ config ARCH_MX25
27config MACH_MX27 22config MACH_MX27
28 bool 23 bool
29 24
30config ARCH_MX31
31 bool
32
33config ARCH_MX35
34 bool
35
36config SOC_IMX1 25config SOC_IMX1
37 bool 26 bool
38 select ARCH_MX1 27 select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
72 select CPU_V6 61 select CPU_V6
73 select IMX_HAVE_PLATFORM_MXC_RNGA 62 select IMX_HAVE_PLATFORM_MXC_RNGA
74 select ARCH_MXC_AUDMUX_V2 63 select ARCH_MXC_AUDMUX_V2
75 select ARCH_MX31
76 select MXC_AVIC 64 select MXC_AVIC
77 select SMP_ON_UP if SMP 65 select SMP_ON_UP if SMP
78 66
@@ -82,7 +70,6 @@ config SOC_IMX35
82 select ARCH_MXC_IOMUX_V3 70 select ARCH_MXC_IOMUX_V3
83 select ARCH_MXC_AUDMUX_V2 71 select ARCH_MXC_AUDMUX_V2
84 select HAVE_EPIT 72 select HAVE_EPIT
85 select ARCH_MX35
86 select MXC_AVIC 73 select MXC_AVIC
87 select SMP_ON_UP if SMP 74 select SMP_ON_UP if SMP
88 75
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 613a1b993bf..039a7abb165 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
1953 imx_map_entry(MX6Q, ANATOP, MT_DEVICE), 1953 imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
1954}; 1954};
1955 1955
1956void __init imx6q_clock_map_io(void)
1957{
1958 iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
1959}
1960
1956int __init mx6q_clocks_init(void) 1961int __init mx6q_clocks_init(void)
1957{ 1962{
1958 struct device_node *np; 1963 struct device_node *np;
1959 void __iomem *base; 1964 void __iomem *base;
1960 int i, irq; 1965 int i, irq;
1961 1966
1962 iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
1963
1964 /* retrieve the freqency of fixed clocks from device tree */ 1967 /* retrieve the freqency of fixed clocks from device tree */
1965 for_each_compatible_node(np, NULL, "fixed-clock") { 1968 for_each_compatible_node(np, NULL, "fixed-clock") {
1966 u32 rate; 1969 u32 rate;
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8bf5fa34948..9cd860a27af 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -34,6 +34,7 @@ static void __init imx6q_map_io(void)
34{ 34{
35 imx_lluart_map_io(); 35 imx_lluart_map_io();
36 imx_scu_map_io(); 36 imx_scu_map_io();
37 imx6q_clock_map_io();
37} 38}
38 39
39static void __init imx6q_gpio_add_irq_domain(struct device_node *np, 40static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
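
Note: the two i.MX6Q hunks above move the iotable_init() call for the clock register windows out of mx6q_clocks_init() and into a new imx6q_clock_map_io() helper invoked from the machine's .map_io hook, so the static mapping is in place before anything touches the clock controller. A minimal userspace model of that ordering constraint is sketched below; the function names only mirror the patch and nothing here is the kernel API.

/* build: cc -std=c99 -Wall map_io_order.c */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool clock_regs_mapped;

/* stand-in for imx6q_clock_map_io(): registers the static mapping */
static void model_clock_map_io(void)
{
	clock_regs_mapped = true;
}

/* stand-in for imx6q_map_io(): the machine .map_io hook */
static void model_map_io(void)
{
	model_clock_map_io();
}

/* stand-in for mx6q_clocks_init(): consumes the mapping */
static void model_clocks_init(void)
{
	assert(clock_regs_mapped);          /* mapping must already exist */
	puts("clock registers reachable, init proceeds");
}

int main(void)
{
	model_map_io();       /* early boot: page-table setup time */
	model_clocks_init();  /* later: timer/clock initialisation */
	return 0;
}
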
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9f0e82ec339..31807d2a8b7 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -33,29 +33,32 @@
33static void imx3_idle(void) 33static void imx3_idle(void)
34{ 34{
35 unsigned long reg = 0; 35 unsigned long reg = 0;
36 __asm__ __volatile__( 36
37 /* disable I and D cache */ 37 if (!need_resched())
38 "mrc p15, 0, %0, c1, c0, 0\n" 38 __asm__ __volatile__(
39 "bic %0, %0, #0x00001000\n" 39 /* disable I and D cache */
40 "bic %0, %0, #0x00000004\n" 40 "mrc p15, 0, %0, c1, c0, 0\n"
41 "mcr p15, 0, %0, c1, c0, 0\n" 41 "bic %0, %0, #0x00001000\n"
42 /* invalidate I cache */ 42 "bic %0, %0, #0x00000004\n"
43 "mov %0, #0\n" 43 "mcr p15, 0, %0, c1, c0, 0\n"
44 "mcr p15, 0, %0, c7, c5, 0\n" 44 /* invalidate I cache */
45 /* clear and invalidate D cache */ 45 "mov %0, #0\n"
46 "mov %0, #0\n" 46 "mcr p15, 0, %0, c7, c5, 0\n"
47 "mcr p15, 0, %0, c7, c14, 0\n" 47 /* clear and invalidate D cache */
48 /* WFI */ 48 "mov %0, #0\n"
49 "mov %0, #0\n" 49 "mcr p15, 0, %0, c7, c14, 0\n"
50 "mcr p15, 0, %0, c7, c0, 4\n" 50 /* WFI */
51 "nop\n" "nop\n" "nop\n" "nop\n" 51 "mov %0, #0\n"
52 "nop\n" "nop\n" "nop\n" 52 "mcr p15, 0, %0, c7, c0, 4\n"
53 /* enable I and D cache */ 53 "nop\n" "nop\n" "nop\n" "nop\n"
54 "mrc p15, 0, %0, c1, c0, 0\n" 54 "nop\n" "nop\n" "nop\n"
55 "orr %0, %0, #0x00001000\n" 55 /* enable I and D cache */
56 "orr %0, %0, #0x00000004\n" 56 "mrc p15, 0, %0, c1, c0, 0\n"
57 "mcr p15, 0, %0, c1, c0, 0\n" 57 "orr %0, %0, #0x00001000\n"
58 : "=r" (reg)); 58 "orr %0, %0, #0x00000004\n"
59 "mcr p15, 0, %0, c1, c0, 0\n"
60 : "=r" (reg));
61 local_irq_enable();
59} 62}
60 63
61static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size, 64static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
108 l2x0_init(l2x0_base, 0x00030024, 0x00000000); 111 l2x0_init(l2x0_base, 0x00030024, 0x00000000);
109} 112}
110 113
114#ifdef CONFIG_SOC_IMX31
111static struct map_desc mx31_io_desc[] __initdata = { 115static struct map_desc mx31_io_desc[] __initdata = {
112 imx_map_entry(MX31, X_MEMC, MT_DEVICE), 116 imx_map_entry(MX31, X_MEMC, MT_DEVICE),
113 imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED), 117 imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
126 iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc)); 130 iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
127} 131}
128 132
129static struct map_desc mx35_io_desc[] __initdata = {
130 imx_map_entry(MX35, X_MEMC, MT_DEVICE),
131 imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
132 imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
133 imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
134 imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
135};
136
137void __init mx35_map_io(void)
138{
139 iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
140}
141
142void __init imx31_init_early(void) 133void __init imx31_init_early(void)
143{ 134{
144 mxc_set_cpu_type(MXC_CPU_MX31); 135 mxc_set_cpu_type(MXC_CPU_MX31);
145 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); 136 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
146 imx_idle = imx3_idle; 137 pm_idle = imx3_idle;
147 imx_ioremap = imx3_ioremap;
148}
149
150void __init imx35_init_early(void)
151{
152 mxc_set_cpu_type(MXC_CPU_MX35);
153 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
154 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
155 imx_idle = imx3_idle;
156 imx_ioremap = imx3_ioremap; 138 imx_ioremap = imx3_ioremap;
157} 139}
158 140
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
161 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); 143 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
162} 144}
163 145
164void __init mx35_init_irq(void)
165{
166 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
167}
168
169static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = { 146static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
170 .per_2_per_addr = 1677, 147 .per_2_per_addr = 1677,
171}; 148};
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
199 176
200 imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata); 177 imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
201} 178}
179#endif /* ifdef CONFIG_SOC_IMX31 */
180
181#ifdef CONFIG_SOC_IMX35
182static struct map_desc mx35_io_desc[] __initdata = {
183 imx_map_entry(MX35, X_MEMC, MT_DEVICE),
184 imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
185 imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
186 imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
187 imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
188};
189
190void __init mx35_map_io(void)
191{
192 iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
193}
194
195void __init imx35_init_early(void)
196{
197 mxc_set_cpu_type(MXC_CPU_MX35);
198 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
199 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
200 pm_idle = imx3_idle;
201 imx_ioremap = imx3_ioremap;
202}
203
204void __init mx35_init_irq(void)
205{
206 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
207}
202 208
203static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = { 209static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
204 .ap_2_ap_addr = 642, 210 .ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
254 260
255 imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata); 261 imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
256} 262}
263#endif /* ifdef CONFIG_SOC_IMX35 */
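
Note: the mm-imx3.c change switches imx3_idle from the old imx_idle hook to the generic pm_idle hook, which is entered with interrupts disabled; the routine therefore checks need_resched() before the WFI sequence and re-enables interrupts with local_irq_enable() before returning. Below is a userspace model of that contract, not kernel code; the flags and helpers are invented purely for illustration.

/* build: cc -std=c99 -Wall pm_idle_model.c */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;     /* models the CPU interrupt flag */
static bool resched_pending;  /* models need_resched() */

static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void)  { irqs_enabled = true; }
static bool need_resched(void)      { return resched_pending; }

/* models imx3_idle(): skip the low-power entry if work is pending,
 * and always leave with interrupts enabled, as the hook requires */
static void model_idle(void)
{
	if (!need_resched())
		puts("entering WFI (low-power wait)");
	local_irq_enable();
}

int main(void)
{
	resched_pending = true;   /* pretend a task just became runnable */
	local_irq_disable();      /* the idle loop calls the hook with IRQs off */
	model_idle();
	assert(irqs_enabled);     /* the invariant the patch now honours */
	puts("returned to the idle loop with interrupts on");
	return 0;
}
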
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 36cacbd0dcc..a8e33681b73 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -14,6 +14,7 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_address.h> 16#include <linux/of_address.h>
17#include <linux/smp.h>
17#include <asm/unified.h> 18#include <asm/unified.h>
18 19
19#define SRC_SCR 0x000 20#define SRC_SCR 0x000
@@ -23,10 +24,15 @@
23 24
24static void __iomem *src_base; 25static void __iomem *src_base;
25 26
27#ifndef CONFIG_SMP
28#define cpu_logical_map(cpu) 0
29#endif
30
26void imx_enable_cpu(int cpu, bool enable) 31void imx_enable_cpu(int cpu, bool enable)
27{ 32{
28 u32 mask, val; 33 u32 mask, val;
29 34
35 cpu = cpu_logical_map(cpu);
30 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); 36 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
31 val = readl_relaxed(src_base + SRC_SCR); 37 val = readl_relaxed(src_base + SRC_SCR);
32 val = enable ? val | mask : val & ~mask; 38 val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
35 41
36void imx_set_cpu_jump(int cpu, void *jump_addr) 42void imx_set_cpu_jump(int cpu, void *jump_addr)
37{ 43{
44 cpu = cpu_logical_map(cpu);
38 writel_relaxed(BSYM(virt_to_phys(jump_addr)), 45 writel_relaxed(BSYM(virt_to_phys(jump_addr)),
39 src_base + SRC_GPR1 + cpu * 8); 46 src_base + SRC_GPR1 + cpu * 8);
40} 47}
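
Note: the Highbank and i.MX SRC changes both translate the logical CPU number handed in by the SMP code into the physical core ID before indexing per-core registers, with src.c adding a cpu_logical_map() fallback macro for non-SMP builds. A userspace model of that translation follows; the map contents are made up and the names only echo the patch.

/* build: cc -std=c99 -Wall logical_map_model.c */
#include <stdio.h>

#define MODEL_SMP 1   /* flip to 0 to model the !CONFIG_SMP fallback */

#if MODEL_SMP
/* logical CPU -> physical core ID; values here are illustrative only */
static const int model_logical_map[] = { 0, 2, 1, 3 };
#define cpu_logical_map(cpu) (model_logical_map[(cpu)])
#else
#define cpu_logical_map(cpu) 0
#endif

static unsigned long jump_table[4];  /* stands in for SRC_GPR1 + cpu * 8 */

static void model_set_cpu_jump(int cpu, unsigned long jump_addr)
{
	cpu = cpu_logical_map(cpu);       /* the line both patches add */
	jump_table[cpu] = jump_addr;
}

int main(void)
{
	model_set_cpu_jump(1, 0x80008000UL);
	for (int i = 0; i < 4; i++)
		printf("core %d jump address: %#lx\n", i, jump_table[i]);
	return 0;
}
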
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 69156568bc4..4665767a4f7 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
182 182
183 /* on-chip devices */ 183 /* on-chip devices */
184 pxa168_add_uart(3); 184 pxa168_add_uart(3);
185 pxa168_add_ssp(0); 185 pxa168_add_ssp(1);
186 pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info)); 186 pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
187 187
188 pxa168_add_eth(&gplugd_eth_platform_data); 188 pxa168_add_eth(&gplugd_eth_platform_data);
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
index d14eeaf1632..99b4ce1b656 100644
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
@@ -7,7 +7,7 @@
7#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000) 7#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
8 8
9#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) 9#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
10#define GPIO_REG(x) (GPIO_REGS_VIRT + (x)) 10#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
11 11
12#define NR_BUILTIN_GPIO IRQ_GPIO_NUM 12#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
13 13
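
Note: the gpio-pxa.h change turns GPIO_REG() from an address-valued macro into a dereferencing one, so callers read and write the register directly instead of casting the address themselves. A standalone sketch of the two styles, using a plain array in place of the mapped APB window (nothing here touches real hardware):

/* build: cc -std=c99 -Wall gpio_reg_macro.c */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_apb[0x40];               /* stands in for GPIO_REGS_VIRT */
#define FAKE_GPIO_BASE ((char *)fake_apb)

/* old style: macro yields an address, caller must cast and dereference */
#define GPIO_REG_ADDR(x)  (FAKE_GPIO_BASE + (x))

/* new style (what the patch adopts): macro yields a volatile lvalue */
#define GPIO_REG(x)       (*(volatile uint32_t *)(FAKE_GPIO_BASE + (x)))

int main(void)
{
	*(volatile uint32_t *)GPIO_REG_ADDR(0x10) = 0xa5;  /* old, verbose */
	GPIO_REG(0x14) = GPIO_REG(0x10);                   /* new, direct */
	printf("reg 0x14 = %#x\n", (unsigned)GPIO_REG(0x14));
	return 0;
}
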
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 5c5328257dc..5e2e7a84386 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -16,7 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <mach/hardware.h> 18#include <mach/hardware.h>
19#include <asm/io.h> 19#include <linux/io.h>
20 20
21static int mx5_cpu_rev = -1; 21static int mx5_cpu_rev = -1;
22 22
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
67 if (!cpu_is_mx51()) 67 if (!cpu_is_mx51())
68 return 0; 68 return 0;
69 69
70 if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) { 70 if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
71 (elf_hwcap & HWCAP_NEON)) {
71 elf_hwcap &= ~HWCAP_NEON; 72 elf_hwcap &= ~HWCAP_NEON;
72 pr_info("Turning off NEON support, detected broken NEON implementation\n"); 73 pr_info("Turning off NEON support, detected broken NEON implementation\n");
73 } 74 }
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 26eacc9d0d9..df4a508f240 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -23,7 +23,9 @@
23 23
24static void imx5_idle(void) 24static void imx5_idle(void)
25{ 25{
26 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 26 if (!need_resched())
27 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
28 local_irq_enable();
27} 29}
28 30
29/* 31/*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
89 mxc_set_cpu_type(MXC_CPU_MX51); 91 mxc_set_cpu_type(MXC_CPU_MX51);
90 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); 92 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
91 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR)); 93 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
92 imx_idle = imx5_idle; 94 pm_idle = imx5_idle;
93} 95}
94 96
95void __init imx53_init_early(void) 97void __init imx53_init_early(void)
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 229ae349421..da6e4aad177 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
404 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ 404 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
405 reg &= ~BM_CLKCTRL_##dr##_DIV; \ 405 reg &= ~BM_CLKCTRL_##dr##_DIV; \
406 reg |= div << BP_CLKCTRL_##dr##_DIV; \ 406 reg |= div << BP_CLKCTRL_##dr##_DIV; \
407 if (reg | (1 << clk->enable_shift)) { \ 407 if (reg & (1 << clk->enable_shift)) { \
408 pr_err("%s: clock is gated\n", __func__); \ 408 pr_err("%s: clock is gated\n", __func__); \
409 return -EINVAL; \ 409 return -EINVAL; \
410 } \ 410 } \
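
Note: the clock-mx28.c hunk fixes a classic bitmask test. With the old code, reg | (1 << shift) is non-zero for any register value, so the "clock is gated" error path was taken unconditionally; reg & (1 << shift) actually tests the bit. A two-line demonstration:

/* build: cc -std=c99 -Wall bitmask_test.c */
#include <stdio.h>

int main(void)
{
	unsigned reg = 0x0;        /* gate bit clear: clock is NOT gated */
	unsigned shift = 30;       /* illustrative bit position */

	printf("reg | bit -> %s (always taken, the old bug)\n",
	       (reg | (1u << shift)) ? "true" : "false");
	printf("reg & bit -> %s (the corrected test)\n",
	       (reg & (1u << shift)) ? "true" : "false");
	return 0;
}
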
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index e0a028161dd..73f287d6429 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
171comment "OMAP CPU Speed" 171comment "OMAP CPU Speed"
172 depends on ARCH_OMAP1 172 depends on ARCH_OMAP1
173 173
174config OMAP_CLOCKS_SET_BY_BOOTLOADER
175 bool "OMAP clocks set by bootloader"
176 depends on ARCH_OMAP1
177 help
178 Enable this option to prevent the kernel from overriding the clock
179 frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
180 internal LCD controller and MPU peripherals.
181
182config OMAP_ARM_216MHZ 174config OMAP_ARM_216MHZ
183 bool "OMAP ARM 216 MHz CPU (1710 only)" 175 bool "OMAP ARM 216 MHz CPU (1710 only)"
184 depends on ARCH_OMAP1 && ARCH_OMAP16XX 176 depends on ARCH_OMAP1 && ARCH_OMAP16XX
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 1b374009b1a..af7911963c0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
302 omap_cfg_reg(J19_1610_CAM_D6); 302 omap_cfg_reg(J19_1610_CAM_D6);
303 omap_cfg_reg(J18_1610_CAM_D7); 303 omap_cfg_reg(J18_1610_CAM_D7);
304 304
305 iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
306
307 omap_board_config = ams_delta_config; 305 omap_board_config = ams_delta_config;
308 omap_board_config_size = ARRAY_SIZE(ams_delta_config); 306 omap_board_config_size = ARRAY_SIZE(ams_delta_config);
309 omap_serial_init(); 307 omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
373} 371}
374arch_initcall(ams_delta_modem_init); 372arch_initcall(ams_delta_modem_init);
375 373
374static void __init ams_delta_map_io(void)
375{
376 omap15xx_map_io();
377 iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
378}
379
376MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)") 380MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
377 /* Maintainer: Jonathan McDowell <noodles@earth.li> */ 381 /* Maintainer: Jonathan McDowell <noodles@earth.li> */
378 .atag_offset = 0x100, 382 .atag_offset = 0x100,
379 .map_io = omap15xx_map_io, 383 .map_io = ams_delta_map_io,
380 .init_early = omap1_init_early, 384 .init_early = omap1_init_early,
381 .reserve = omap_reserve, 385 .reserve = omap_reserve,
382 .init_irq = omap1_init_irq, 386 .init_irq = omap1_init_irq,
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index eaf09efb91c..16b1423b454 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -17,7 +17,8 @@
17 17
18#include <plat/clock.h> 18#include <plat/clock.h>
19 19
20extern int __init omap1_clk_init(void); 20int omap1_clk_init(void);
21void omap1_clk_late_init(void);
21extern int omap1_clk_enable(struct clk *clk); 22extern int omap1_clk_enable(struct clk *clk);
22extern void omap1_clk_disable(struct clk *clk); 23extern void omap1_clk_disable(struct clk *clk);
23extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate); 24extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 92400b9eb69..1297bb58869 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -767,6 +767,15 @@ static struct clk_functions omap1_clk_functions = {
767 .clk_disable_unused = omap1_clk_disable_unused, 767 .clk_disable_unused = omap1_clk_disable_unused,
768}; 768};
769 769
770static void __init omap1_show_rates(void)
771{
772 pr_notice("Clocking rate (xtal/DPLL1/MPU): "
773 "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
774 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
775 ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
776 arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
777}
778
770int __init omap1_clk_init(void) 779int __init omap1_clk_init(void)
771{ 780{
772 struct omap_clk *c; 781 struct omap_clk *c;
@@ -835,9 +844,12 @@ int __init omap1_clk_init(void)
835 /* We want to be in syncronous scalable mode */ 844 /* We want to be in syncronous scalable mode */
836 omap_writew(0x1000, ARM_SYSST); 845 omap_writew(0x1000, ARM_SYSST);
837 846
838#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER 847
839 /* Use values set by bootloader. Determine PLL rate and recalculate 848 /*
840 * dependent clocks as if kernel had changed PLL or divisors. 849 * Initially use the values set by bootloader. Determine PLL rate and
850 * recalculate dependent clocks as if kernel had changed PLL or
851 * divisors. See also omap1_clk_late_init() that can reprogram dpll1
852 * after the SRAM is initialized.
841 */ 853 */
842 { 854 {
843 unsigned pll_ctl_val = omap_readw(DPLL_CTL); 855 unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +874,10 @@ int __init omap1_clk_init(void)
862 } 874 }
863 } 875 }
864 } 876 }
865#else
866 /* Find the highest supported frequency and enable it */
867 if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
868 printk(KERN_ERR "System frequencies not set. Check your config.\n");
869 /* Guess sane values (60MHz) */
870 omap_writew(0x2290, DPLL_CTL);
871 omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
872 ck_dpll1.rate = 60000000;
873 }
874#endif
875 propagate_rate(&ck_dpll1); 877 propagate_rate(&ck_dpll1);
876 /* Cache rates for clocks connected to ck_ref (not dpll1) */ 878 /* Cache rates for clocks connected to ck_ref (not dpll1) */
877 propagate_rate(&ck_ref); 879 propagate_rate(&ck_ref);
878 printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): " 880 omap1_show_rates();
879 "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
880 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
881 ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
882 arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
883
884 if (machine_is_omap_perseus2() || machine_is_omap_fsample()) { 881 if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
885 /* Select slicer output as OMAP input clock */ 882 /* Select slicer output as OMAP input clock */
886 omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, 883 omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +922,21 @@ int __init omap1_clk_init(void)
925 922
926 return 0; 923 return 0;
927} 924}
925
926#define OMAP1_DPLL1_SANE_VALUE 60000000
927
928void __init omap1_clk_late_init(void)
929{
930 if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
931 return;
932
933 /* Find the highest supported frequency and enable it */
934 if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
935 pr_err("System frequencies not set, using default. Check your config.\n");
936 omap_writew(0x2290, DPLL_CTL);
937 omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
938 ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
939 }
940 propagate_rate(&ck_dpll1);
941 omap1_show_rates();
942}
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 9d47ca7f80f..1d76a63c098 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -30,6 +30,8 @@
30#include <plat/omap7xx.h> 30#include <plat/omap7xx.h>
31#include <plat/mcbsp.h> 31#include <plat/mcbsp.h>
32 32
33#include "clock.h"
34
33/*-------------------------------------------------------------------------*/ 35/*-------------------------------------------------------------------------*/
34 36
35#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) 37#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
293 return -ENODEV; 295 return -ENODEV;
294 296
295 omap_sram_init(); 297 omap_sram_init();
298 omap1_clk_late_init();
296 299
297 /* please keep these calls, and their implementations above, 300 /* please keep these calls, and their implementations above,
298 * in alphabetical order so they're easier to sort through. 301 * in alphabetical order so they're easier to sort through.
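
Note: taken together, the clock_data.c and devices.c hunks split OMAP1 clock setup into two stages: omap1_clk_init() now always adopts the bootloader-programmed DPLL1 rate, and omap1_clk_late_init(), called from omap1_init_devices() after omap_sram_init(), reprograms DPLL1 only if the bootloader left it below the 60 MHz sanity threshold. A small model of that decision, with made-up rates standing in for the real clock tree:

/* build: cc -std=c99 -Wall omap1_late_clk_model.c */
#include <stdio.h>

#define MODEL_DPLL1_SANE_VALUE 60000000UL   /* mirrors OMAP1_DPLL1_SANE_VALUE */

static unsigned long dpll1_rate;

/* stands in for omap1_clk_init(): adopt whatever the bootloader set */
static void model_clk_init(unsigned long bootloader_rate)
{
	dpll1_rate = bootloader_rate;
	printf("early init: DPLL1 at %lu Hz (from bootloader)\n", dpll1_rate);
}

/* stands in for omap1_clk_late_init(): reprogram only if too slow */
static void model_clk_late_init(void)
{
	if (dpll1_rate >= MODEL_DPLL1_SANE_VALUE)
		return;                           /* bootloader value is fine */
	dpll1_rate = MODEL_DPLL1_SANE_VALUE;      /* pick a sane default */
	printf("late init: DPLL1 raised to %lu Hz\n", dpll1_rate);
}

int main(void)
{
	model_clk_init(12000000UL);   /* e.g. running straight off the crystal */
	/* ... SRAM init happens here in the real boot sequence ... */
	model_clk_late_init();
	return 0;
}
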
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 22f7c97a272..b6625130831 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -336,6 +336,7 @@ config MACH_OMAP4_PANDA
336config OMAP3_EMU 336config OMAP3_EMU
337 bool "OMAP3 debugging peripherals" 337 bool "OMAP3 debugging peripherals"
338 depends on ARCH_OMAP3 338 depends on ARCH_OMAP3
339 select ARM_AMBA
339 select OC_ETM 340 select OC_ETM
340 help 341 help
341 Say Y here to enable debugging hardware of omap3 342 Say Y here to enable debugging hardware of omap3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 69ab1c06913..b009f17dee5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
4 4
5# Common support 5# Common support
6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \ 6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
7 common.o gpio.o dma.o wd_timer.o 7 common.o gpio.o dma.o wd_timer.o display.o
8 8
9omap-2-3-common = irq.o sdrc.o 9omap-2-3-common = irq.o sdrc.o
10hwmod-common = omap_hwmod.o \ 10hwmod-common = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
264obj-y += $(smsc911x-m) $(smsc911x-y) 264obj-y += $(smsc911x-m) $(smsc911x-y)
265obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o 265obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
266 266
267disp-$(CONFIG_OMAP2_DSS) := display.o
268obj-y += $(disp-m) $(disp-y)
269
270obj-y += common-board-devices.o twl-common.o 267obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 876f6477973..e20332f4abd 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/cpuidle.h> 26#include <linux/cpuidle.h>
27#include <linux/export.h>
27 28
28#include <plat/prcm.h> 29#include <plat/prcm.h>
29#include <plat/irqs.h> 30#include <plat/irqs.h>
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index adb2756e242..dce9905d64b 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -27,8 +27,35 @@
27#include <plat/omap_hwmod.h> 27#include <plat/omap_hwmod.h>
28#include <plat/omap_device.h> 28#include <plat/omap_device.h>
29#include <plat/omap-pm.h> 29#include <plat/omap-pm.h>
30#include <plat/common.h>
30 31
31#include "control.h" 32#include "control.h"
33#include "display.h"
34
35#define DISPC_CONTROL 0x0040
36#define DISPC_CONTROL2 0x0238
37#define DISPC_IRQSTATUS 0x0018
38
39#define DSS_SYSCONFIG 0x10
40#define DSS_SYSSTATUS 0x14
41#define DSS_CONTROL 0x40
42#define DSS_SDI_CONTROL 0x44
43#define DSS_PLL_CONTROL 0x48
44
45#define LCD_EN_MASK (0x1 << 0)
46#define DIGIT_EN_MASK (0x1 << 1)
47
48#define FRAMEDONE_IRQ_SHIFT 0
49#define EVSYNC_EVEN_IRQ_SHIFT 2
50#define EVSYNC_ODD_IRQ_SHIFT 3
51#define FRAMEDONE2_IRQ_SHIFT 22
52#define FRAMEDONETV_IRQ_SHIFT 24
53
54/*
55 * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
56 * reset before deciding that something has gone wrong
57 */
58#define FRAMEDONE_IRQ_TIMEOUT 100
32 59
33static struct platform_device omap_display_device = { 60static struct platform_device omap_display_device = {
34 .name = "omapdss", 61 .name = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
172 199
173 return r; 200 return r;
174} 201}
202
203static void dispc_disable_outputs(void)
204{
205 u32 v, irq_mask = 0;
206 bool lcd_en, digit_en, lcd2_en = false;
207 int i;
208 struct omap_dss_dispc_dev_attr *da;
209 struct omap_hwmod *oh;
210
211 oh = omap_hwmod_lookup("dss_dispc");
212 if (!oh) {
213 WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
214 return;
215 }
216
217 if (!oh->dev_attr) {
218 pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
219 return;
220 }
221
222 da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
223
224 /* store value of LCDENABLE and DIGITENABLE bits */
225 v = omap_hwmod_read(oh, DISPC_CONTROL);
226 lcd_en = v & LCD_EN_MASK;
227 digit_en = v & DIGIT_EN_MASK;
228
229 /* store value of LCDENABLE for LCD2 */
230 if (da->manager_count > 2) {
231 v = omap_hwmod_read(oh, DISPC_CONTROL2);
232 lcd2_en = v & LCD_EN_MASK;
233 }
234
235 if (!(lcd_en | digit_en | lcd2_en))
236 return; /* no managers currently enabled */
237
238 /*
239 * If any manager was enabled, we need to disable it before
240 * DSS clocks are disabled or DISPC module is reset
241 */
242 if (lcd_en)
243 irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
244
245 if (digit_en) {
246 if (da->has_framedonetv_irq) {
247 irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
248 } else {
249 irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
250 1 << EVSYNC_ODD_IRQ_SHIFT;
251 }
252 }
253
254 if (lcd2_en)
255 irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
256
257 /*
258 * clear any previous FRAMEDONE, FRAMEDONETV,
259 * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
260 */
261 omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
262
263 /* disable LCD and TV managers */
264 v = omap_hwmod_read(oh, DISPC_CONTROL);
265 v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
266 omap_hwmod_write(v, oh, DISPC_CONTROL);
267
268 /* disable LCD2 manager */
269 if (da->manager_count > 2) {
270 v = omap_hwmod_read(oh, DISPC_CONTROL2);
271 v &= ~LCD_EN_MASK;
272 omap_hwmod_write(v, oh, DISPC_CONTROL2);
273 }
274
275 i = 0;
276 while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
277 irq_mask) {
278 i++;
279 if (i > FRAMEDONE_IRQ_TIMEOUT) {
280 pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
281 break;
282 }
283 mdelay(1);
284 }
285}
286
287#define MAX_MODULE_SOFTRESET_WAIT 10000
288int omap_dss_reset(struct omap_hwmod *oh)
289{
290 struct omap_hwmod_opt_clk *oc;
291 int c = 0;
292 int i, r;
293
294 if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
295 pr_err("dss_core: hwmod data doesn't contain reset data\n");
296 return -EINVAL;
297 }
298
299 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
300 if (oc->_clk)
301 clk_enable(oc->_clk);
302
303 dispc_disable_outputs();
304
305 /* clear SDI registers */
306 if (cpu_is_omap3430()) {
307 omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
308 omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
309 }
310
311 /*
312 * clear DSS_CONTROL register to switch DSS clock sources to
313 * PRCM clock, if any
314 */
315 omap_hwmod_write(0x0, oh, DSS_CONTROL);
316
317 omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
318 & SYSS_RESETDONE_MASK),
319 MAX_MODULE_SOFTRESET_WAIT, c);
320
321 if (c == MAX_MODULE_SOFTRESET_WAIT)
322 pr_warning("dss_core: waiting for reset to finish failed\n");
323 else
324 pr_debug("dss_core: softreset done\n");
325
326 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
327 if (oc->_clk)
328 clk_disable(oc->_clk);
329
330 r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
331
332 return r;
333}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644
index 00000000000..b871b017b35
--- /dev/null
+++ b/arch/arm/mach-omap2/display.h
@@ -0,0 +1,29 @@
1/*
2 * display.h - OMAP2+ integration-specific DSS header
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
20#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
21
22#include <linux/kernel.h>
23
24struct omap_dss_dispc_dev_attr {
25 u8 manager_count;
26 bool has_framedonetv_irq;
27};
28
29#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/arch/arm/mach-omap2/io.h
+++ /dev/null
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 00fcd2c311e..529142aff76 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
749 ohii = &oh->mpu_irqs[i++]; 749 ohii = &oh->mpu_irqs[i++];
750 } while (ohii->irq != -1); 750 } while (ohii->irq != -1);
751 751
752 return i; 752 return i-1;
753} 753}
754 754
755/** 755/**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
772 ohdi = &oh->sdma_reqs[i++]; 772 ohdi = &oh->sdma_reqs[i++];
773 } while (ohdi->dma_req != -1); 773 } while (ohdi->dma_req != -1);
774 774
775 return i; 775 return i-1;
776} 776}
777 777
778/** 778/**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
795 mem = &os->addr[i++]; 795 mem = &os->addr[i++];
796 } while (mem->pa_start != mem->pa_end); 796 } while (mem->pa_start != mem->pa_end);
797 797
798 return i; 798 return i-1;
799} 799}
800 800
801/** 801/**
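
Note: the three _count_*() fixes in omap_hwmod.c correct the same off-by-one: the do/while loop increments the index once more on the iteration that hits the terminator (-1, or pa_start == pa_end), so the old "return i" counted the sentinel as a real entry. A compact reproduction of the pattern:

/* build: cc -std=c99 -Wall sentinel_count.c */
#include <stdio.h>

/* three real IRQs followed by the -1 terminator, as in oh->mpu_irqs */
static const int irqs[] = { 25, 26, 27, -1 };

static int count_irqs(const int *list)
{
	int i = 0;

	do {
		/* i advances even on the iteration that reads the -1 */
	} while (list[i++] != -1);

	return i - 1;          /* the fix: don't count the terminator */
}

int main(void)
{
	printf("entries: %d (expected 3)\n", count_irqs(irqs));
	return 0;
}
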
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 6d720621352..a5409ce3f32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
875}; 875};
876 876
877static struct omap_hwmod_opt_clk dss_opt_clks[] = { 877static struct omap_hwmod_opt_clk dss_opt_clks[] = {
878 /*
879 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
880 * driver does not use these clocks.
881 */
878 { .role = "tv_clk", .clk = "dss_54m_fck" }, 882 { .role = "tv_clk", .clk = "dss_54m_fck" },
879 { .role = "sys_clk", .clk = "dss2_fck" }, 883 { .role = "sys_clk", .clk = "dss2_fck" },
880}; 884};
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
899 .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves), 903 .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
900 .masters = omap2420_dss_masters, 904 .masters = omap2420_dss_masters,
901 .masters_cnt = ARRAY_SIZE(omap2420_dss_masters), 905 .masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
902 .flags = HWMOD_NO_IDLEST, 906 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
903}; 907};
904 908
905/* l4_core -> dss_dispc */ 909/* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
939 .slaves = omap2420_dss_dispc_slaves, 943 .slaves = omap2420_dss_dispc_slaves,
940 .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves), 944 .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
941 .flags = HWMOD_NO_IDLEST, 945 .flags = HWMOD_NO_IDLEST,
946 .dev_attr = &omap2_3_dss_dispc_dev_attr
942}; 947};
943 948
944/* l4_core -> dss_rfbi */ 949/* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
961 &omap2420_l4_core__dss_rfbi, 966 &omap2420_l4_core__dss_rfbi,
962}; 967};
963 968
969static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
970 { .role = "ick", .clk = "dss_ick" },
971};
972
964static struct omap_hwmod omap2420_dss_rfbi_hwmod = { 973static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
965 .name = "dss_rfbi", 974 .name = "dss_rfbi",
966 .class = &omap2_rfbi_hwmod_class, 975 .class = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
972 .module_offs = CORE_MOD, 981 .module_offs = CORE_MOD,
973 }, 982 },
974 }, 983 },
984 .opt_clks = dss_rfbi_opt_clks,
985 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
975 .slaves = omap2420_dss_rfbi_slaves, 986 .slaves = omap2420_dss_rfbi_slaves,
976 .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves), 987 .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
977 .flags = HWMOD_NO_IDLEST, 988 .flags = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
981static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = { 992static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
982 .master = &omap2420_l4_core_hwmod, 993 .master = &omap2420_l4_core_hwmod,
983 .slave = &omap2420_dss_venc_hwmod, 994 .slave = &omap2420_dss_venc_hwmod,
984 .clk = "dss_54m_fck", 995 .clk = "dss_ick",
985 .addr = omap2_dss_venc_addrs, 996 .addr = omap2_dss_venc_addrs,
986 .fw = { 997 .fw = {
987 .omap2 = { 998 .omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
1001static struct omap_hwmod omap2420_dss_venc_hwmod = { 1012static struct omap_hwmod omap2420_dss_venc_hwmod = {
1002 .name = "dss_venc", 1013 .name = "dss_venc",
1003 .class = &omap2_venc_hwmod_class, 1014 .class = &omap2_venc_hwmod_class,
1004 .main_clk = "dss1_fck", 1015 .main_clk = "dss_54m_fck",
1005 .prcm = { 1016 .prcm = {
1006 .omap2 = { 1017 .omap2 = {
1007 .prcm_reg_id = 1, 1018 .prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index a2580d01c3f..c4f56cb60d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
942}; 942};
943 943
944static struct omap_hwmod_opt_clk dss_opt_clks[] = { 944static struct omap_hwmod_opt_clk dss_opt_clks[] = {
945 /*
946 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
947 * driver does not use these clocks.
948 */
945 { .role = "tv_clk", .clk = "dss_54m_fck" }, 949 { .role = "tv_clk", .clk = "dss_54m_fck" },
946 { .role = "sys_clk", .clk = "dss2_fck" }, 950 { .role = "sys_clk", .clk = "dss2_fck" },
947}; 951};
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
966 .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves), 970 .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
967 .masters = omap2430_dss_masters, 971 .masters = omap2430_dss_masters,
968 .masters_cnt = ARRAY_SIZE(omap2430_dss_masters), 972 .masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
969 .flags = HWMOD_NO_IDLEST, 973 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
970}; 974};
971 975
972/* l4_core -> dss_dispc */ 976/* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
1000 .slaves = omap2430_dss_dispc_slaves, 1004 .slaves = omap2430_dss_dispc_slaves,
1001 .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves), 1005 .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
1002 .flags = HWMOD_NO_IDLEST, 1006 .flags = HWMOD_NO_IDLEST,
1007 .dev_attr = &omap2_3_dss_dispc_dev_attr
1003}; 1008};
1004 1009
1005/* l4_core -> dss_rfbi */ 1010/* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
1016 &omap2430_l4_core__dss_rfbi, 1021 &omap2430_l4_core__dss_rfbi,
1017}; 1022};
1018 1023
1024static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
1025 { .role = "ick", .clk = "dss_ick" },
1026};
1027
1019static struct omap_hwmod omap2430_dss_rfbi_hwmod = { 1028static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1020 .name = "dss_rfbi", 1029 .name = "dss_rfbi",
1021 .class = &omap2_rfbi_hwmod_class, 1030 .class = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1027 .module_offs = CORE_MOD, 1036 .module_offs = CORE_MOD,
1028 }, 1037 },
1029 }, 1038 },
1039 .opt_clks = dss_rfbi_opt_clks,
1040 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
1030 .slaves = omap2430_dss_rfbi_slaves, 1041 .slaves = omap2430_dss_rfbi_slaves,
1031 .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves), 1042 .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
1032 .flags = HWMOD_NO_IDLEST, 1043 .flags = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1036static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = { 1047static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
1037 .master = &omap2430_l4_core_hwmod, 1048 .master = &omap2430_l4_core_hwmod,
1038 .slave = &omap2430_dss_venc_hwmod, 1049 .slave = &omap2430_dss_venc_hwmod,
1039 .clk = "dss_54m_fck", 1050 .clk = "dss_ick",
1040 .addr = omap2_dss_venc_addrs, 1051 .addr = omap2_dss_venc_addrs,
1041 .flags = OCPIF_SWSUP_IDLE, 1052 .flags = OCPIF_SWSUP_IDLE,
1042 .user = OCP_USER_MPU | OCP_USER_SDMA, 1053 .user = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
1050static struct omap_hwmod omap2430_dss_venc_hwmod = { 1061static struct omap_hwmod omap2430_dss_venc_hwmod = {
1051 .name = "dss_venc", 1062 .name = "dss_venc",
1052 .class = &omap2_venc_hwmod_class, 1063 .class = &omap2_venc_hwmod_class,
1053 .main_clk = "dss1_fck", 1064 .main_clk = "dss_54m_fck",
1054 .prcm = { 1065 .prcm = {
1055 .omap2 = { 1066 .omap2 = {
1056 .prcm_reg_id = 1, 1067 .prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c451729d289..c11273da5dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -11,6 +11,7 @@
11#include <plat/omap_hwmod.h> 11#include <plat/omap_hwmod.h>
12#include <plat/serial.h> 12#include <plat/serial.h>
13#include <plat/dma.h> 13#include <plat/dma.h>
14#include <plat/common.h>
14 15
15#include <mach/irqs.h> 16#include <mach/irqs.h>
16 17
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
43 .rev_offs = 0x0000, 44 .rev_offs = 0x0000,
44 .sysc_offs = 0x0010, 45 .sysc_offs = 0x0010,
45 .syss_offs = 0x0014, 46 .syss_offs = 0x0014,
46 .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), 47 .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
48 SYSS_HAS_RESET_STATUS),
47 .sysc_fields = &omap_hwmod_sysc_type1, 49 .sysc_fields = &omap_hwmod_sysc_type1,
48}; 50};
49 51
50struct omap_hwmod_class omap2_dss_hwmod_class = { 52struct omap_hwmod_class omap2_dss_hwmod_class = {
51 .name = "dss", 53 .name = "dss",
52 .sysc = &omap2_dss_sysc, 54 .sysc = &omap2_dss_sysc,
55 .reset = omap_dss_reset,
53}; 56};
54 57
55/* 58/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index bc9035ec87f..7f8915ad509 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
1369}; 1369};
1370 1370
1371static struct omap_hwmod_opt_clk dss_opt_clks[] = { 1371static struct omap_hwmod_opt_clk dss_opt_clks[] = {
1372 { .role = "tv_clk", .clk = "dss_tv_fck" }, 1372 /*
1373 { .role = "video_clk", .clk = "dss_96m_fck" }, 1373 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
1374 * driver does not use these clocks.
1375 */
1374 { .role = "sys_clk", .clk = "dss2_alwon_fck" }, 1376 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
1377 { .role = "tv_clk", .clk = "dss_tv_fck" },
1378 /* required only on OMAP3430 */
1379 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
1375}; 1380};
1376 1381
1377static struct omap_hwmod omap3430es1_dss_core_hwmod = { 1382static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
1394 .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves), 1399 .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
1395 .masters = omap3xxx_dss_masters, 1400 .masters = omap3xxx_dss_masters,
1396 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), 1401 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
1397 .flags = HWMOD_NO_IDLEST, 1402 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1398}; 1403};
1399 1404
1400static struct omap_hwmod omap3xxx_dss_core_hwmod = { 1405static struct omap_hwmod omap3xxx_dss_core_hwmod = {
1401 .name = "dss_core", 1406 .name = "dss_core",
1407 .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1402 .class = &omap2_dss_hwmod_class, 1408 .class = &omap2_dss_hwmod_class,
1403 .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ 1409 .main_clk = "dss1_alwon_fck", /* instead of dss_fck */
1404 .sdma_reqs = omap3xxx_dss_sdma_chs, 1410 .sdma_reqs = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
1456 .slaves = omap3xxx_dss_dispc_slaves, 1462 .slaves = omap3xxx_dss_dispc_slaves,
1457 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves), 1463 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
1458 .flags = HWMOD_NO_IDLEST, 1464 .flags = HWMOD_NO_IDLEST,
1465 .dev_attr = &omap2_3_dss_dispc_dev_attr
1459}; 1466};
1460 1467
1461/* 1468/*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
1486static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = { 1493static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
1487 .master = &omap3xxx_l4_core_hwmod, 1494 .master = &omap3xxx_l4_core_hwmod,
1488 .slave = &omap3xxx_dss_dsi1_hwmod, 1495 .slave = &omap3xxx_dss_dsi1_hwmod,
1496 .clk = "dss_ick",
1489 .addr = omap3xxx_dss_dsi1_addrs, 1497 .addr = omap3xxx_dss_dsi1_addrs,
1490 .fw = { 1498 .fw = {
1491 .omap2 = { 1499 .omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
1502 &omap3xxx_l4_core__dss_dsi1, 1510 &omap3xxx_l4_core__dss_dsi1,
1503}; 1511};
1504 1512
1513static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
1514 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
1515};
1516
1505static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { 1517static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
1506 .name = "dss_dsi1", 1518 .name = "dss_dsi1",
1507 .class = &omap3xxx_dsi_hwmod_class, 1519 .class = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
1514 .module_offs = OMAP3430_DSS_MOD, 1526 .module_offs = OMAP3430_DSS_MOD,
1515 }, 1527 },
1516 }, 1528 },
1529 .opt_clks = dss_dsi1_opt_clks,
1530 .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
1517 .slaves = omap3xxx_dss_dsi1_slaves, 1531 .slaves = omap3xxx_dss_dsi1_slaves,
1518 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves), 1532 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
1519 .flags = HWMOD_NO_IDLEST, 1533 .flags = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
1540 &omap3xxx_l4_core__dss_rfbi, 1554 &omap3xxx_l4_core__dss_rfbi,
1541}; 1555};
1542 1556
1557static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
1558 { .role = "ick", .clk = "dss_ick" },
1559};
1560
1543static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { 1561static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1544 .name = "dss_rfbi", 1562 .name = "dss_rfbi",
1545 .class = &omap2_rfbi_hwmod_class, 1563 .class = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1551 .module_offs = OMAP3430_DSS_MOD, 1569 .module_offs = OMAP3430_DSS_MOD,
1552 }, 1570 },
1553 }, 1571 },
1572 .opt_clks = dss_rfbi_opt_clks,
1573 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
1554 .slaves = omap3xxx_dss_rfbi_slaves, 1574 .slaves = omap3xxx_dss_rfbi_slaves,
1555 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves), 1575 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
1556 .flags = HWMOD_NO_IDLEST, 1576 .flags = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1560static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { 1580static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
1561 .master = &omap3xxx_l4_core_hwmod, 1581 .master = &omap3xxx_l4_core_hwmod,
1562 .slave = &omap3xxx_dss_venc_hwmod, 1582 .slave = &omap3xxx_dss_venc_hwmod,
1563 .clk = "dss_tv_fck", 1583 .clk = "dss_ick",
1564 .addr = omap2_dss_venc_addrs, 1584 .addr = omap2_dss_venc_addrs,
1565 .fw = { 1585 .fw = {
1566 .omap2 = { 1586 .omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
1578 &omap3xxx_l4_core__dss_venc, 1598 &omap3xxx_l4_core__dss_venc,
1579}; 1599};
1580 1600
1601static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
1602 /* required only on OMAP3430 */
1603 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
1604};
1605
1581static struct omap_hwmod omap3xxx_dss_venc_hwmod = { 1606static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
1582 .name = "dss_venc", 1607 .name = "dss_venc",
1583 .class = &omap2_venc_hwmod_class, 1608 .class = &omap2_venc_hwmod_class,
1584 .main_clk = "dss1_alwon_fck", 1609 .main_clk = "dss_tv_fck",
1585 .prcm = { 1610 .prcm = {
1586 .omap2 = { 1611 .omap2 = {
1587 .prcm_reg_id = 1, 1612 .prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
1589 .module_offs = OMAP3430_DSS_MOD, 1614 .module_offs = OMAP3430_DSS_MOD,
1590 }, 1615 },
1591 }, 1616 },
1617 .opt_clks = dss_venc_opt_clks,
1618 .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
1592 .slaves = omap3xxx_dss_venc_slaves, 1619 .slaves = omap3xxx_dss_venc_slaves,
1593 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves), 1620 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
1594 .flags = HWMOD_NO_IDLEST, 1621 .flags = HWMOD_NO_IDLEST,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7695e5d4331..daaf165af69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -30,6 +30,7 @@
30#include <plat/mmc.h> 30#include <plat/mmc.h>
31#include <plat/i2c.h> 31#include <plat/i2c.h>
32#include <plat/dmtimer.h> 32#include <plat/dmtimer.h>
33#include <plat/common.h>
33 34
34#include "omap_hwmod_common_data.h" 35#include "omap_hwmod_common_data.h"
35 36
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
1187static struct omap_hwmod_class omap44xx_dss_hwmod_class = { 1188static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
1188 .name = "dss", 1189 .name = "dss",
1189 .sysc = &omap44xx_dss_sysc, 1190 .sysc = &omap44xx_dss_sysc,
1191 .reset = omap_dss_reset,
1190}; 1192};
1191 1193
1192/* dss */ 1194/* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
1240static struct omap_hwmod_opt_clk dss_opt_clks[] = { 1242static struct omap_hwmod_opt_clk dss_opt_clks[] = {
1241 { .role = "sys_clk", .clk = "dss_sys_clk" }, 1243 { .role = "sys_clk", .clk = "dss_sys_clk" },
1242 { .role = "tv_clk", .clk = "dss_tv_clk" }, 1244 { .role = "tv_clk", .clk = "dss_tv_clk" },
1243 { .role = "dss_clk", .clk = "dss_dss_clk" }, 1245 { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
1244 { .role = "video_clk", .clk = "dss_48mhz_clk" },
1245}; 1246};
1246 1247
1247static struct omap_hwmod omap44xx_dss_hwmod = { 1248static struct omap_hwmod omap44xx_dss_hwmod = {
1248 .name = "dss_core", 1249 .name = "dss_core",
1250 .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1249 .class = &omap44xx_dss_hwmod_class, 1251 .class = &omap44xx_dss_hwmod_class,
1250 .clkdm_name = "l3_dss_clkdm", 1252 .clkdm_name = "l3_dss_clkdm",
1251 .main_clk = "dss_dss_clk", 1253 .main_clk = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
1325 { } 1327 { }
1326}; 1328};
1327 1329
1330static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
1331 .manager_count = 3,
1332 .has_framedonetv_irq = 1
1333};
1334
1328/* l4_per -> dss_dispc */ 1335/* l4_per -> dss_dispc */
1329static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = { 1336static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
1330 .master = &omap44xx_l4_per_hwmod, 1337 .master = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
1340 &omap44xx_l4_per__dss_dispc, 1347 &omap44xx_l4_per__dss_dispc,
1341}; 1348};
1342 1349
1343static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
1344 { .role = "sys_clk", .clk = "dss_sys_clk" },
1345 { .role = "tv_clk", .clk = "dss_tv_clk" },
1346 { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
1347};
1348
1349static struct omap_hwmod omap44xx_dss_dispc_hwmod = { 1350static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
1350 .name = "dss_dispc", 1351 .name = "dss_dispc",
1351 .class = &omap44xx_dispc_hwmod_class, 1352 .class = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
1359 .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, 1360 .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
1360 }, 1361 },
1361 }, 1362 },
1362 .opt_clks = dss_dispc_opt_clks,
1363 .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
1364 .slaves = omap44xx_dss_dispc_slaves, 1363 .slaves = omap44xx_dss_dispc_slaves,
1365 .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), 1364 .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
1365 .dev_attr = &omap44xx_dss_dispc_dev_attr
1366}; 1366};
1367 1367
1368/* 1368/*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
1624 .clkdm_name = "l3_dss_clkdm", 1624 .clkdm_name = "l3_dss_clkdm",
1625 .mpu_irqs = omap44xx_dss_hdmi_irqs, 1625 .mpu_irqs = omap44xx_dss_hdmi_irqs,
1626 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, 1626 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
1627 .main_clk = "dss_dss_clk", 1627 .main_clk = "dss_48mhz_clk",
1628 .prcm = { 1628 .prcm = {
1629 .omap4 = { 1629 .omap4 = {
1630 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, 1630 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
1785 .name = "dss_venc", 1785 .name = "dss_venc",
1786 .class = &omap44xx_venc_hwmod_class, 1786 .class = &omap44xx_venc_hwmod_class,
1787 .clkdm_name = "l3_dss_clkdm", 1787 .clkdm_name = "l3_dss_clkdm",
1788 .main_clk = "dss_dss_clk", 1788 .main_clk = "dss_tv_clk",
1789 .prcm = { 1789 .prcm = {
1790 .omap4 = { 1790 .omap4 = {
1791 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, 1791 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index de832ebc93a..51e5418899f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
49 .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT, 49 .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
50}; 50};
51 51
52struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
53 .manager_count = 2,
54 .has_framedonetv_irq = 0
55};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 39a7c37f458..ad5d8f04c0b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -16,6 +16,8 @@
16 16
17#include <plat/omap_hwmod.h> 17#include <plat/omap_hwmod.h>
18 18
19#include "display.h"
20
19/* Common address space across OMAP2xxx */ 21/* Common address space across OMAP2xxx */
20extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[]; 22extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
21extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[]; 23extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
111extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class; 113extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
112extern struct omap_hwmod_class omap2xxx_mcspi_class; 114extern struct omap_hwmod_class omap2xxx_mcspi_class;
113 115
116extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
117
114#endif 118#endif
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 6a66aa5e2a5..d15225ff5c4 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
237static const struct of_device_id l3_noc_match[] = { 237static const struct of_device_id l3_noc_match[] = {
238 {.compatible = "ti,omap4-l3-noc", }, 238 {.compatible = "ti,omap4-l3-noc", },
239 {}, 239 {},
240} 240};
241MODULE_DEVICE_TABLE(of, l3_noc_match); 241MODULE_DEVICE_TABLE(of, l3_noc_match);
242#else 242#else
243#define l3_noc_match NULL 243#define l3_noc_match NULL
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index e7bee5ca407..1881fe91514 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -24,6 +24,7 @@
24#include "powerdomain.h" 24#include "powerdomain.h"
25#include "clockdomain.h" 25#include "clockdomain.h"
26#include "pm.h" 26#include "pm.h"
27#include "twl-common.h"
27 28
28static struct omap_device_pm_latency *pm_lats; 29static struct omap_device_pm_latency *pm_lats;
29 30
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
226 227
227static int __init omap2_common_pm_late_init(void) 228static int __init omap2_common_pm_late_init(void)
228{ 229{
229 /* Init the OMAP TWL parameters */
230 omap3_twl_init();
231 omap4_twl_init();
232
233 /* Init the voltage layer */ 230 /* Init the voltage layer */
231 omap_pmic_late_init();
234 omap_voltage_late_init(); 232 omap_voltage_late_init();
235 233
236 /* Initialize the voltages */ 234 /* Initialize the voltages */
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 919d827ed70..9dd93453e56 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
139 sr_write_reg(sr_info, ERRCONFIG_V1, status); 139 sr_write_reg(sr_info, ERRCONFIG_V1, status);
140 } else if (sr_info->ip_type == SR_TYPE_V2) { 140 } else if (sr_info->ip_type == SR_TYPE_V2) {
141 /* Read the status bits */ 141 /* Read the status bits */
142 sr_read_reg(sr_info, IRQSTATUS); 142 status = sr_read_reg(sr_info, IRQSTATUS);
143 143
144 /* Clear them by writing back */ 144 /* Clear them by writing back */
145 sr_write_reg(sr_info, IRQSTATUS, status); 145 sr_write_reg(sr_info, IRQSTATUS, status);
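
Note: the smartreflex fix captures the value returned by sr_read_reg() into status before writing it back; the old SR_TYPE_V2 path discarded the read and wrote back whatever stale value status already held, so the pending IRQSTATUS bits were not reliably acknowledged. Below is a small model of the write-1-to-clear idiom the fix restores; the register behaviour is simulated, not real hardware.

/* build: cc -std=c99 -Wall w1c_model.c */
#include <stdint.h>
#include <stdio.h>

static uint32_t irqstatus = 0x5;   /* two pending interrupt bits */

static uint32_t model_read_reg(void)
{
	return irqstatus;
}

/* write-1-to-clear: writing a set bit clears that pending bit */
static void model_write_reg(uint32_t val)
{
	irqstatus &= ~val;
}

int main(void)
{
	uint32_t status;

	status = model_read_reg();       /* the assignment the patch adds */
	model_write_reg(status);         /* clear exactly what was pending */

	printf("IRQSTATUS after ack: %#x (expected 0)\n", (unsigned)irqstatus);
	return 0;
}
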
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 52243577216..10b20c652e5 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -30,6 +30,7 @@
30#include <plat/usb.h> 30#include <plat/usb.h>
31 31
32#include "twl-common.h" 32#include "twl-common.h"
33#include "pm.h"
33 34
34static struct i2c_board_info __initdata pmic_i2c_board_info = { 35static struct i2c_board_info __initdata pmic_i2c_board_info = {
35 .addr = 0x48, 36 .addr = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
48 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); 49 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
49} 50}
50 51
52void __init omap_pmic_late_init(void)
53{
54 /* Init the OMAP TWL parameters (if PMIC has been registerd) */
55 if (!pmic_i2c_board_info.irq)
56 return;
57
58 omap3_twl_init();
59 omap4_twl_init();
60}
61
51#if defined(CONFIG_ARCH_OMAP3) 62#if defined(CONFIG_ARCH_OMAP3)
52static struct twl4030_usb_data omap3_usb_pdata = { 63static struct twl4030_usb_data omap3_usb_pdata = {
53 .usb_mode = T2_USB_MODE_ULPI, 64 .usb_mode = T2_USB_MODE_ULPI,
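omap_pmic_late_init() above only runs the TWL setup when a PMIC was actually registered, using the board-info IRQ as the sentinel. A minimal userspace sketch of that guard follows (hypothetical names; a plain struct field stands in for pmic_i2c_board_info.irq):

#include <stdio.h>

/* Sketch of the "only run late init if the PMIC was registered" guard. */
struct pmic_info { int irq; };

static struct pmic_info pmic = { .irq = 0 };	/* 0 == never registered */

static void pmic_register(int irq) { pmic.irq = irq; }

static void pmic_late_init(void)
{
	if (!pmic.irq)		/* board never called the init path: bail out */
		return;
	printf("running TWL late init for irq %d\n", pmic.irq);
}

int main(void)
{
	pmic_late_init();	/* no-op: nothing registered yet */
	pmic_register(7);
	pmic_late_init();	/* now performs the late setup */
	return 0;
}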
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 5e83a5bd37f..275dde8cb27 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -1,6 +1,8 @@
1#ifndef __OMAP_PMIC_COMMON__ 1#ifndef __OMAP_PMIC_COMMON__
2#define __OMAP_PMIC_COMMON__ 2#define __OMAP_PMIC_COMMON__
3 3
4#include <plat/irqs.h>
5
4#define TWL_COMMON_PDATA_USB (1 << 0) 6#define TWL_COMMON_PDATA_USB (1 << 0)
5#define TWL_COMMON_PDATA_BCI (1 << 1) 7#define TWL_COMMON_PDATA_BCI (1 << 1)
6#define TWL_COMMON_PDATA_MADC (1 << 2) 8#define TWL_COMMON_PDATA_MADC (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
30 32
31void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, 33void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
32 struct twl4030_platform_data *pmic_data); 34 struct twl4030_platform_data *pmic_data);
35void omap_pmic_late_init(void);
33 36
34static inline void omap2_pmic_init(const char *pmic_type, 37static inline void omap2_pmic_init(const char *pmic_type,
35 struct twl4030_platform_data *pmic_data) 38 struct twl4030_platform_data *pmic_data)
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index fc0b8544e17..4b81f59a4cb 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
307/****************************************************************************** 307/******************************************************************************
308 * USB Gadget 308 * USB Gadget
309 ******************************************************************************/ 309 ******************************************************************************/
310#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 310#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
311static void balloon3_udc_command(int cmd) 311static void balloon3_udc_command(int cmd)
312{ 312{
313 if (cmd == PXA2XX_UDC_CMD_CONNECT) 313 if (cmd == PXA2XX_UDC_CMD_CONNECT)
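This and the following PXA board hunks only track the Kconfig rename from CONFIG_USB_GADGET_PXA27X/PXA25X to CONFIG_USB_PXA27X/PXA25X; the guard itself is the usual builtin-or-module test, since Kbuild defines FOO for built-in and FOO_MODULE for modular builds (later kernels spell the same test IS_ENABLED(CONFIG_FOO)). A compilable illustration with a made-up symbol:

#include <stdio.h>

/* Illustrative only: FAKE_UDC stands in for a Kconfig symbol such as
 * CONFIG_USB_PXA27X; the _MODULE suffix is what Kbuild defines for =m. */
#define FAKE_UDC_MODULE 1

#if defined(FAKE_UDC) || defined(FAKE_UDC_MODULE)
static void udc_init(void) { puts("UDC support compiled in (builtin or module)"); }
#else
static void udc_init(void) { puts("UDC support compiled out"); }
#endif

int main(void)
{
	udc_init();
	return 0;
}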
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 692e1ffc558..d23b92b8048 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
146static inline void __init colibri_pxa320_init_eth(void) {} 146static inline void __init colibri_pxa320_init_eth(void) {}
147#endif /* CONFIG_AX88796 */ 147#endif /* CONFIG_AX88796 */
148 148
149#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 149#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
150static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { 150static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
151 .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), 151 .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
152 .gpio_pullup = -1, 152 .gpio_pullup = -1,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 9c8208ca041..ffdd70dad32 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
106} 106}
107#endif 107#endif
108 108
109#ifdef CONFIG_USB_GADGET_PXA25X 109#ifdef CONFIG_USB_PXA25X
110static struct gpio_vbus_mach_info gumstix_udc_info = { 110static struct gpio_vbus_mach_info gumstix_udc_info = {
111 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn, 111 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
112 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx, 112 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index f80bbe246af..d4eac3d6ffb 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
37#define palm27x_lcd_init(power, mode) do {} while (0) 37#define palm27x_lcd_init(power, mode) do {} while (0)
38#endif 38#endif
39 39
40#if defined(CONFIG_USB_GADGET_PXA27X) || \ 40#if defined(CONFIG_USB_PXA27X) || \
41 defined(CONFIG_USB_GADGET_PXA27X_MODULE) 41 defined(CONFIG_USB_PXA27X_MODULE)
42extern void __init palm27x_udc_init(int vbus, int pullup, 42extern void __init palm27x_udc_init(int vbus, int pullup,
43 int vbus_inverted); 43 int vbus_inverted);
44#else 44#else
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245c0a0..fbc10d7b95d 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
164/****************************************************************************** 164/******************************************************************************
165 * USB Gadget 165 * USB Gadget
166 ******************************************************************************/ 166 ******************************************************************************/
167#if defined(CONFIG_USB_GADGET_PXA27X) || \ 167#if defined(CONFIG_USB_PXA27X) || \
168 defined(CONFIG_USB_GADGET_PXA27X_MODULE) 168 defined(CONFIG_USB_PXA27X_MODULE)
169static struct gpio_vbus_mach_info palm27x_udc_info = { 169static struct gpio_vbus_mach_info palm27x_udc_info = {
170 .gpio_vbus_inverted = 1, 170 .gpio_vbus_inverted = 1,
171}; 171};
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 6ec7caefb37..2c24c67fd92 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
338/****************************************************************************** 338/******************************************************************************
339 * UDC 339 * UDC
340 ******************************************************************************/ 340 ******************************************************************************/
341#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE) 341#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
342static struct gpio_vbus_mach_info palmtc_udc_info = { 342static struct gpio_vbus_mach_info palmtc_udc_info = {
343 .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N, 343 .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
344 .gpio_vbus_inverted = 1, 344 .gpio_vbus_inverted = 1,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index a7539a6ed1f..ca0c6615028 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
343/****************************************************************************** 343/******************************************************************************
344 * USB Gadget 344 * USB Gadget
345 ******************************************************************************/ 345 ******************************************************************************/
346#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 346#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
347static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = { 347static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
348 .gpio_vbus = GPIO41_VPAC270_UDC_DETECT, 348 .gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
349 .gpio_pullup = -1, 349 .gpio_pullup = -1,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 66668565ee7..f208154b138 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/export.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/i2c.h> 13#include <linux/i2c.h>
14 14
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 0a5b22942fd..34bbcbfb170 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/bitrev.h> 22#include <linux/bitrev.h>
23#include <linux/console.h>
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
@@ -106,9 +107,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
106 return 0; 107 return 0;
107} 108}
108 109
109static int pd_power_up(struct generic_pm_domain *genpd) 110static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
110{ 111{
111 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
112 unsigned int mask = 1 << sh7372_pd->bit_shift; 112 unsigned int mask = 1 << sh7372_pd->bit_shift;
113 unsigned int retry_count; 113 unsigned int retry_count;
114 int ret = 0; 114 int ret = 0;
@@ -123,13 +123,13 @@ static int pd_power_up(struct generic_pm_domain *genpd)
123 123
124 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { 124 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
125 if (!(__raw_readl(SWUCR) & mask)) 125 if (!(__raw_readl(SWUCR) & mask))
126 goto out; 126 break;
127 if (retry_count > PSTR_RETRIES) 127 if (retry_count > PSTR_RETRIES)
128 udelay(PSTR_DELAY_US); 128 udelay(PSTR_DELAY_US);
129 else 129 else
130 cpu_relax(); 130 cpu_relax();
131 } 131 }
132 if (__raw_readl(SWUCR) & mask) 132 if (!retry_count)
133 ret = -EIO; 133 ret = -EIO;
134 134
135 if (!sh7372_pd->no_debug) 135 if (!sh7372_pd->no_debug)
@@ -137,12 +137,17 @@ static int pd_power_up(struct generic_pm_domain *genpd)
137 mask, __raw_readl(PSTR)); 137 mask, __raw_readl(PSTR));
138 138
139 out: 139 out:
140 if (ret == 0 && sh7372_pd->resume) 140 if (ret == 0 && sh7372_pd->resume && do_resume)
141 sh7372_pd->resume(); 141 sh7372_pd->resume();
142 142
143 return ret; 143 return ret;
144} 144}
145 145
146static int pd_power_up(struct generic_pm_domain *genpd)
147{
148 return __pd_power_up(to_sh7372_pd(genpd), true);
149}
150
146static void sh7372_a4r_suspend(void) 151static void sh7372_a4r_suspend(void)
147{ 152{
148 sh7372_intcs_suspend(); 153 sh7372_intcs_suspend();
@@ -174,7 +179,7 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
174 genpd->active_wakeup = pd_active_wakeup; 179 genpd->active_wakeup = pd_active_wakeup;
175 genpd->power_off = pd_power_down; 180 genpd->power_off = pd_power_down;
176 genpd->power_on = pd_power_up; 181 genpd->power_on = pd_power_up;
177 genpd->power_on(&sh7372_pd->genpd); 182 __pd_power_up(sh7372_pd, false);
178} 183}
179 184
180void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, 185void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
@@ -227,11 +232,23 @@ struct sh7372_pm_domain sh7372_a3sp = {
227 .no_debug = true, 232 .no_debug = true,
228}; 233};
229 234
235static void sh7372_a3sp_init(void)
236{
237 /* serial consoles make use of SCIF hardware located in A3SP,
238 * keep such power domain on if "no_console_suspend" is set.
239 */
240 sh7372_a3sp.stay_on = !console_suspend_enabled;
241}
242
230struct sh7372_pm_domain sh7372_a3sg = { 243struct sh7372_pm_domain sh7372_a3sg = {
231 .bit_shift = 13, 244 .bit_shift = 13,
232}; 245};
233 246
234#endif /* CONFIG_PM */ 247#else /* !CONFIG_PM */
248
249static inline void sh7372_a3sp_init(void) {}
250
251#endif /* !CONFIG_PM */
235 252
236#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) 253#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
237static int sh7372_do_idle_core_standby(unsigned long unused) 254static int sh7372_do_idle_core_standby(unsigned long unused)
@@ -465,6 +482,8 @@ void __init sh7372_pm_init(void)
465 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */ 482 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
466 __raw_writel(0, PDNSEL); 483 __raw_writel(0, PDNSEL);
467 484
485 sh7372_a3sp_init();
486
468 sh7372_suspend_init(); 487 sh7372_suspend_init();
469 sh7372_cpuidle_init(); 488 sh7372_cpuidle_init();
470} 489}
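The pm-sh7372.c change splits the power-up path into __pd_power_up() so domain setup can power a domain on without invoking the device resume hook, and makes the failure test use the retry counter directly instead of re-reading the status register. A standalone model of that bounded poll and the do_resume flag (hardware access is faked; PSTR_RETRIES and the delay calls are placeholders only):

#include <stdbool.h>
#include <stdio.h>

#define PSTR_RETRIES 10

/* Pretend hardware: the domain reports "up" after a few polls. */
static int ticks_until_up = 6;

static bool domain_is_up(void)
{
	if (ticks_until_up > 0)
		ticks_until_up--;
	return ticks_until_up == 0;
}

static int power_up(bool do_resume)
{
	unsigned int retry;

	for (retry = 2 * PSTR_RETRIES; retry; retry--) {
		if (domain_is_up())
			break;
		/* first half of the budget: udelay(PSTR_DELAY_US) upstream;
		 * second half: cpu_relax() — both are no-ops in this model */
	}
	if (!retry)
		return -1;	/* -EIO upstream */

	if (do_resume)
		printf("running domain resume hook\n");
	return 0;
}

int main(void)
{
	/* Initial domain setup powers up without resuming devices... */
	printf("setup: %d\n", power_up(false));
	/* ...while the genpd power_on callback does the full sequence. */
	printf("runtime: %d\n", power_up(true));
	return 0;
}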
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7a1fa6adb7c..5b0c38abacc 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -422,7 +422,7 @@ struct platform_device nuc900_device_kpi = {
422 422
423/* LCD controller*/ 423/* LCD controller*/
424 424
425static struct nuc900fb_display __initdata nuc900_lcd_info[] = { 425static struct nuc900fb_display nuc900_lcd_info[] = {
426 /* Giantplus Technology GPM1040A0 320x240 Color TFT LCD */ 426 /* Giantplus Technology GPM1040A0 320x240 Color TFT LCD */
427 [0] = { 427 [0] = {
428 .type = LCM_DCCS_VA_SRC_RGB565, 428 .type = LCM_DCCS_VA_SRC_RGB565,
@@ -445,7 +445,7 @@ static struct nuc900fb_display __initdata nuc900_lcd_info[] = {
445 }, 445 },
446}; 446};
447 447
448static struct nuc900fb_mach_info nuc900_fb_info __initdata = { 448static struct nuc900fb_mach_info nuc900_fb_info = {
449#if defined(CONFIG_GPM1040A0_320X240) 449#if defined(CONFIG_GPM1040A0_320X240)
450 .displays = &nuc900_lcd_info[0], 450 .displays = &nuc900_lcd_info[0],
451#else 451#else
diff --git a/arch/arm/mach-w90x900/include/mach/mfp.h b/arch/arm/mach-w90x900/include/mach/mfp.h
index 94c0e71617c..23ef1f573ab 100644
--- a/arch/arm/mach-w90x900/include/mach/mfp.h
+++ b/arch/arm/mach-w90x900/include/mach/mfp.h
@@ -19,6 +19,7 @@
19extern void mfp_set_groupf(struct device *dev); 19extern void mfp_set_groupf(struct device *dev);
20extern void mfp_set_groupc(struct device *dev); 20extern void mfp_set_groupc(struct device *dev);
21extern void mfp_set_groupi(struct device *dev); 21extern void mfp_set_groupi(struct device *dev);
22extern void mfp_set_groupg(struct device *dev); 22extern void mfp_set_groupg(struct device *dev, const char *subname);
23extern void mfp_set_groupd(struct device *dev, const char *subname);
23 24
24#endif /* __ASM_ARCH_MFP_H */ 25#endif /* __ASM_ARCH_MFP_H */
diff --git a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
index bd94819e314..2c4e0c12850 100644
--- a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
+++ b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
@@ -14,7 +14,7 @@
14#ifndef __ASM_ARCH_SPI_H 14#ifndef __ASM_ARCH_SPI_H
15#define __ASM_ARCH_SPI_H 15#define __ASM_ARCH_SPI_H
16 16
17extern void mfp_set_groupg(struct device *dev); 17extern void mfp_set_groupg(struct device *dev, const char *subname);
18 18
19struct nuc900_spi_info { 19struct nuc900_spi_info {
20 unsigned int num_cs; 20 unsigned int num_cs;
diff --git a/arch/arm/mach-w90x900/mfp.c b/arch/arm/mach-w90x900/mfp.c
index fb7fb627b1a..9dd74612bb8 100644
--- a/arch/arm/mach-w90x900/mfp.c
+++ b/arch/arm/mach-w90x900/mfp.c
@@ -26,10 +26,8 @@
26#define REG_MFSEL (W90X900_VA_GCR + 0xC) 26#define REG_MFSEL (W90X900_VA_GCR + 0xC)
27 27
28#define GPSELF (0x01 << 1) 28#define GPSELF (0x01 << 1)
29
30#define GPSELC (0x03 << 2) 29#define GPSELC (0x03 << 2)
31#define ENKPI (0x02 << 2) 30#define GPSELD (0x0f << 4)
32#define ENNAND (0x01 << 2)
33 31
34#define GPSELEI0 (0x01 << 26) 32#define GPSELEI0 (0x01 << 26)
35#define GPSELEI1 (0x01 << 27) 33#define GPSELEI1 (0x01 << 27)
@@ -37,11 +35,16 @@
37#define GPIOG0TO1 (0x03 << 14) 35#define GPIOG0TO1 (0x03 << 14)
38#define GPIOG2TO3 (0x03 << 16) 36#define GPIOG2TO3 (0x03 << 16)
39#define GPIOG22TO23 (0x03 << 22) 37#define GPIOG22TO23 (0x03 << 22)
38#define GPIOG18TO20 (0x07 << 18)
40 39
41#define ENSPI (0x0a << 14) 40#define ENSPI (0x0a << 14)
42#define ENI2C0 (0x01 << 14) 41#define ENI2C0 (0x01 << 14)
43#define ENI2C1 (0x01 << 16) 42#define ENI2C1 (0x01 << 16)
44#define ENAC97 (0x02 << 22) 43#define ENAC97 (0x02 << 22)
44#define ENSD1 (0x02 << 18)
45#define ENSD0 (0x0a << 4)
46#define ENKPI (0x02 << 2)
47#define ENNAND (0x01 << 2)
45 48
46static DEFINE_MUTEX(mfp_mutex); 49static DEFINE_MUTEX(mfp_mutex);
47 50
@@ -127,16 +130,19 @@ void mfp_set_groupi(struct device *dev)
127} 130}
128EXPORT_SYMBOL(mfp_set_groupi); 131EXPORT_SYMBOL(mfp_set_groupi);
129 132
130void mfp_set_groupg(struct device *dev) 133void mfp_set_groupg(struct device *dev, const char *subname)
131{ 134{
132 unsigned long mfpen; 135 unsigned long mfpen;
133 const char *dev_id; 136 const char *dev_id;
134 137
135 BUG_ON(!dev); 138 BUG_ON((!dev) && (!subname));
136 139
137 mutex_lock(&mfp_mutex); 140 mutex_lock(&mfp_mutex);
138 141
139 dev_id = dev_name(dev); 142 if (subname != NULL)
143 dev_id = subname;
144 else
145 dev_id = dev_name(dev);
140 146
141 mfpen = __raw_readl(REG_MFSEL); 147 mfpen = __raw_readl(REG_MFSEL);
142 148
@@ -152,6 +158,9 @@ void mfp_set_groupg(struct device *dev)
152 } else if (strcmp(dev_id, "nuc900-audio") == 0) { 158 } else if (strcmp(dev_id, "nuc900-audio") == 0) {
153 mfpen &= ~(GPIOG22TO23); 159 mfpen &= ~(GPIOG22TO23);
154 mfpen |= ENAC97;/*enable AC97*/ 160 mfpen |= ENAC97;/*enable AC97*/
161 } else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
162 mfpen &= ~(GPIOG18TO20);
163 mfpen |= (ENSD1 | 0x01);/*enable sd1*/
155 } else { 164 } else {
156 mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/ 165 mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
157 } 166 }
@@ -162,3 +171,30 @@ void mfp_set_groupg(struct device *dev)
162} 171}
163EXPORT_SYMBOL(mfp_set_groupg); 172EXPORT_SYMBOL(mfp_set_groupg);
164 173
174void mfp_set_groupd(struct device *dev, const char *subname)
175{
176 unsigned long mfpen;
177 const char *dev_id;
178
179 BUG_ON((!dev) && (!subname));
180
181 mutex_lock(&mfp_mutex);
182
183 if (subname != NULL)
184 dev_id = subname;
185 else
186 dev_id = dev_name(dev);
187
188 mfpen = __raw_readl(REG_MFSEL);
189
190 if (strcmp(dev_id, "nuc900-mmc-port0") == 0) {
191 mfpen &= ~GPSELD;/*enable sd0*/
192 mfpen |= ENSD0;
193 } else
194 mfpen &= (~GPSELD);
195
196 __raw_writel(mfpen, REG_MFSEL);
197
198 mutex_unlock(&mfp_mutex);
199}
200EXPORT_SYMBOL(mfp_set_groupd);
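mfp_set_groupg() and the new mfp_set_groupd() now dispatch on either the device name or an explicit subname, so one device can claim several pin groups (for example the two MMC ports). A small userspace model of that dispatch, with the register writes replaced by prints and only illustrative names:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the dev-name/subname dispatch used by mfp_set_groupg()/groupd():
 * the caller may pass an explicit subname (e.g. "nuc900-mmc-port1") and the
 * helper falls back to the device name otherwise. */
static void mfp_set_group(const char *dev_name, const char *subname)
{
	const char *id;

	assert(dev_name || subname);	/* BUG_ON((!dev) && (!subname)) upstream */
	id = subname ? subname : dev_name;

	if (strcmp(id, "nuc900-spi") == 0)
		puts("mux group G pins for SPI");
	else if (strcmp(id, "nuc900-mmc-port1") == 0)
		puts("mux group G pins for SD port 1");
	else
		puts("leave pins configured as GPIO");
}

int main(void)
{
	mfp_set_group("nuc900-spi", NULL);
	mfp_set_group("nuc900-mmc", "nuc900-mmc-port1");
	return 0;
}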
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8ac9e9f8479..b1e192ba8c2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
61{ 61{
62 void __iomem *base = l2x0_base; 62 void __iomem *base = l2x0_base;
63 63
64#ifdef CONFIG_ARM_ERRATA_753970 64#ifdef CONFIG_PL310_ERRATA_753970
 65 /* write to an unmapped register */ 65 /* write to an unmapped register */
66 writel_relaxed(0, base + L2X0_DUMMY_REG); 66 writel_relaxed(0, base + L2X0_DUMMY_REG);
67#else 67#else
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4e7f6cba1a..1aa664a1999 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
168 pte_t *pte; 168 pte_t *pte;
169 int i = 0; 169 int i = 0;
170 unsigned long base = consistent_base; 170 unsigned long base = consistent_base;
171 unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT; 171 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
172 172
173 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 173 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
174 if (!consistent_pte) { 174 if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
332 struct page *page; 332 struct page *page;
333 void *addr; 333 void *addr;
334 334
335 /*
336 * Following is a work-around (a.k.a. hack) to prevent pages
337 * with __GFP_COMP being passed to split_page() which cannot
338 * handle them. The real problem is that this flag probably
339 * should be 0 on ARM as it is not supported on this
340 * platform; see CONFIG_HUGETLBFS.
341 */
342 gfp &= ~(__GFP_COMP);
343
335 *handle = ~0; 344 *handle = ~0;
336 size = PAGE_ALIGN(size); 345 size = PAGE_ALIGN(size);
337 346
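The dma-mapping.c workaround masks __GFP_COMP out of the allocation flags because the buffer is later handed to split_page(), which cannot split compound pages. The pattern is just a flag mask; a trivial standalone illustration with fake flag values (the real constants live in linux/gfp.h):

#include <stdio.h>

/* Illustrative flag values only. */
#define FAKE_GFP_KERNEL (1u << 0)
#define FAKE_GFP_COMP   (1u << 1)

static unsigned int sanitize_dma_gfp(unsigned int gfp)
{
	/* Mirror of the hunk above: drop the compound-page request before
	 * the result is handed to code that will split_page() it. */
	return gfp & ~FAKE_GFP_COMP;
}

int main(void)
{
	unsigned int gfp = FAKE_GFP_KERNEL | FAKE_GFP_COMP;

	printf("before 0x%x, after 0x%x\n", gfp, sanitize_dma_gfp(gfp));
	return 0;
}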
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 74be05f3e03..44b628e4d6e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -9,8 +9,7 @@
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/personality.h> 10#include <linux/personality.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <asm/cputype.h> 12#include <asm/cachetype.h>
13#include <asm/system.h>
14 13
15#define COLOUR_ALIGN(addr,pgoff) \ 14#define COLOUR_ALIGN(addr,pgoff) \
16 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ 15 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
32 struct mm_struct *mm = current->mm; 31 struct mm_struct *mm = current->mm;
33 struct vm_area_struct *vma; 32 struct vm_area_struct *vma;
34 unsigned long start_addr; 33 unsigned long start_addr;
35#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) 34 int do_align = 0;
36 unsigned int cache_type; 35 int aliasing = cache_is_vipt_aliasing();
37 int do_align = 0, aliasing = 0;
38 36
39 /* 37 /*
40 * We only need to do colour alignment if either the I or D 38 * We only need to do colour alignment if either the I or D
41 * caches alias. This is indicated by bits 9 and 21 of the 39 * caches alias.
42 * cache type register.
43 */ 40 */
44 cache_type = read_cpuid_cachetype(); 41 if (aliasing)
45 if (cache_type != read_cpuid_id()) { 42 do_align = filp || (flags & MAP_SHARED);
46 aliasing = (cache_type | cache_type >> 12) & (1 << 11);
47 if (aliasing)
48 do_align = filp || flags & MAP_SHARED;
49 }
50#else
51#define do_align 0
52#define aliasing 0
53#endif
54 43
55 /* 44 /*
56 * We enforce the MAP_FIXED case. 45 * We enforce the MAP_FIXED case.
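The mmap.c rewrite drops the hand-rolled cache-type-register decoding and asks cache_is_vipt_aliasing() instead; when the caches alias, shared file mappings are colour-aligned to SHMLBA. The arithmetic is the COLOUR_ALIGN macro already visible at the top of the file; here is a standalone version of it, assuming 4 KiB pages and ARM's 4-page SHMLBA:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA (4UL << PAGE_SHIFT)	/* 4 pages, as on ARM with 4 KiB pages */

/* Same arithmetic as COLOUR_ALIGN in arch/arm/mm/mmap.c: round the address
 * up to an SHMLBA boundary, then add the file offset's colour so the mapping
 * lands on the same cache colour as the kernel's page cache copy. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}

int main(void)
{
	printf("0x%lx\n", colour_align(0x10001234UL, 3));
	return 0;
}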
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 14b4703e6e4..6698cae942f 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
85}; 85};
86 86
87extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode); 87extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
88extern void (*imx_idle)(void);
89extern void imx_print_silicon_rev(const char *cpu, int srev); 88extern void imx_print_silicon_rev(const char *cpu, int srev);
90 89
91void avic_handle_irq(struct pt_regs *); 90void avic_handle_irq(struct pt_regs *);
@@ -132,4 +131,5 @@ extern void imx53_qsb_common_init(void);
132extern void imx53_smd_common_init(void); 131extern void imx53_smd_common_init(void);
133extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); 132extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
134extern void imx6q_pm_init(void); 133extern void imx6q_pm_init(void);
134extern void imx6q_clock_map_io(void);
135#endif 135#endif
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 00a78193c68..a4d36d601d5 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -50,20 +50,6 @@
50#define IMX_CHIP_REVISION_3_3 0x33 50#define IMX_CHIP_REVISION_3_3 0x33
51#define IMX_CHIP_REVISION_UNKNOWN 0xff 51#define IMX_CHIP_REVISION_UNKNOWN 0xff
52 52
53#define IMX_CHIP_REVISION_1_0_STRING "1.0"
54#define IMX_CHIP_REVISION_1_1_STRING "1.1"
55#define IMX_CHIP_REVISION_1_2_STRING "1.2"
56#define IMX_CHIP_REVISION_1_3_STRING "1.3"
57#define IMX_CHIP_REVISION_2_0_STRING "2.0"
58#define IMX_CHIP_REVISION_2_1_STRING "2.1"
59#define IMX_CHIP_REVISION_2_2_STRING "2.2"
60#define IMX_CHIP_REVISION_2_3_STRING "2.3"
61#define IMX_CHIP_REVISION_3_0_STRING "3.0"
62#define IMX_CHIP_REVISION_3_1_STRING "3.1"
63#define IMX_CHIP_REVISION_3_2_STRING "3.2"
64#define IMX_CHIP_REVISION_3_3_STRING "3.3"
65#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
66
67#ifndef __ASSEMBLY__ 53#ifndef __ASSEMBLY__
68extern unsigned int __mxc_cpu_type; 54extern unsigned int __mxc_cpu_type;
69#endif 55#endif
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index cf88b3593fb..b9895d25016 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -17,14 +17,9 @@
17#ifndef __ASM_ARCH_MXC_SYSTEM_H__ 17#ifndef __ASM_ARCH_MXC_SYSTEM_H__
18#define __ASM_ARCH_MXC_SYSTEM_H__ 18#define __ASM_ARCH_MXC_SYSTEM_H__
19 19
20extern void (*imx_idle)(void);
21
22static inline void arch_idle(void) 20static inline void arch_idle(void)
23{ 21{
24 if (imx_idle != NULL) 22 cpu_do_idle();
25 (imx_idle)();
26 else
27 cpu_do_idle();
28} 23}
29 24
30void arch_reset(char mode, const char *cmd); 25void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index b1cfc6a4971..7e5c76ea446 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -21,6 +21,7 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/module.h>
24 25
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26#include <mach/common.h> 27#include <mach/common.h>
@@ -28,8 +29,8 @@
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
30 31
31void (*imx_idle)(void) = NULL;
32void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL; 32void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
33EXPORT_SYMBOL_GPL(imx_ioremap);
33 34
34static void __iomem *wdog_base; 35static void __iomem *wdog_base;
35 36
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 197ca03c3f7..eb73ab40e95 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -165,8 +165,8 @@ struct dpll_data {
165 u8 auto_recal_bit; 165 u8 auto_recal_bit;
166 u8 recal_en_bit; 166 u8 recal_en_bit;
167 u8 recal_st_bit; 167 u8 recal_st_bit;
168 u8 flags;
169# endif 168# endif
169 u8 flags;
170}; 170};
171 171
172#endif 172#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 346098fb921..257f9770b2d 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -28,11 +28,14 @@
28#define __ARCH_ARM_MACH_OMAP_COMMON_H 28#define __ARCH_ARM_MACH_OMAP_COMMON_H
29 29
30#include <plat/i2c.h> 30#include <plat/i2c.h>
31#include <plat/omap_hwmod.h>
31 32
32extern int __init omap_init_clocksource_32k(void); 33extern int __init omap_init_clocksource_32k(void);
33extern unsigned long long notrace omap_32k_sched_clock(void); 34extern unsigned long long notrace omap_32k_sched_clock(void);
34 35
35extern void omap_reserve(void); 36extern void omap_reserve(void);
37extern int omap_dss_reset(struct omap_hwmod *);
38
36void omap_sram_init(void); 39void omap_sram_init(void);
37 40
38#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */ 41#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
index a9276667c2f..c7adad0e8de 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
@@ -12,7 +12,7 @@
12*/ 12*/
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/export.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index e1cbc728c77..c8bec9c7655 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -11,6 +11,7 @@
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/export.h>
14 15
15#include <asm/pgtable.h> 16#include <asm/pgtable.h>
16 17
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index d48245bb02b..df8155b9d4d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,6 +24,8 @@
24#ifndef __PLAT_GPIO_CFG_H 24#ifndef __PLAT_GPIO_CFG_H
25#define __PLAT_GPIO_CFG_H __FILE__ 25#define __PLAT_GPIO_CFG_H __FILE__
26 26
27#include<linux/types.h>
28
27typedef unsigned int __bitwise__ samsung_gpio_pull_t; 29typedef unsigned int __bitwise__ samsung_gpio_pull_t;
28typedef unsigned int __bitwise__ s5p_gpio_drvstr_t; 30typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
29 31
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
index efe1d564473..312b510d86b 100644
--- a/arch/arm/plat-samsung/pd.c
+++ b/arch/arm/plat-samsung/pd.c
@@ -11,7 +11,7 @@
11*/ 11*/
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/module.h> 14#include <linux/export.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index dc1185dcf80..c559d8438c7 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -11,7 +11,7 @@
11 * the Free Software Foundation; either version 2 of the License. 11 * the Free Software Foundation; either version 2 of the License.
12*/ 12*/
13 13
14#include <linux/module.h> 14#include <linux/export.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5bdeef96984..ccbe16f4722 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1123,5 +1123,6 @@ blissc MACH_BLISSC BLISSC 3491
1123thales_adc MACH_THALES_ADC THALES_ADC 3492 1123thales_adc MACH_THALES_ADC THALES_ADC 3492
1124ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493 1124ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
1125atdgp318 MACH_ATDGP318 ATDGP318 3494 1125atdgp318 MACH_ATDGP318 ATDGP318 3494
1126m28evk MACH_M28EVK M28EVK 3613
1126smdk4212 MACH_SMDK4212 SMDK4212 3638 1127smdk4212 MACH_SMDK4212 SMDK4212 3638
1127smdk4412 MACH_SMDK4412 SMDK4412 3765 1128smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index 32d90867a98..5f2cdb3e428 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V10
3config ETRAX_ETHERNET 3config ETRAX_ETHERNET
4 bool "Ethernet support" 4 bool "Ethernet support"
5 depends on ETRAX_ARCH_V10 5 depends on ETRAX_ARCH_V10
6 select NET_ETHERNET 6 select ETHERNET
7 select NET_CORE 7 select NET_CORE
8 select MII 8 select MII
9 help 9 help
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index e47e9c3401b..de43aadcdbc 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V32
3config ETRAX_ETHERNET 3config ETRAX_ETHERNET
4 bool "Ethernet support" 4 bool "Ethernet support"
5 depends on ETRAX_ARCH_V32 5 depends on ETRAX_ARCH_V32
6 select NET_ETHERNET 6 select ETHERNET
7 select NET_CORE 7 select NET_CORE
8 select MII 8 select MII
9 help 9 help
diff --git a/arch/microblaze/include/asm/namei.h b/arch/microblaze/include/asm/namei.h
deleted file mode 100644
index 61d60b8a07d..00000000000
--- a/arch/microblaze/include/asm/namei.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_NAMEI_H
10#define _ASM_MICROBLAZE_NAMEI_H
11
12#ifdef __KERNEL__
13
14/* This dummy routine maybe changed to something useful
15 * for /usr/gnemul/ emulation stuff.
16 * Look at asm-sparc/namei.h for details.
17 */
18#define __emul_prefix() NULL
19
20#endif /* __KERNEL__ */
21
22#endif /* _ASM_MICROBLAZE_NAMEI_H */
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 975c20327bb..0a430e06f5e 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -17,8 +17,6 @@
17 17
18static struct map_info flash_map; 18static struct map_info flash_map;
19static struct mtd_info *mymtd; 19static struct mtd_info *mymtd;
20static int nr_parts;
21static struct mtd_partition *parts;
22static const char *part_probe_types[] = { 20static const char *part_probe_types[] = {
23 "cmdlinepart", 21 "cmdlinepart",
24#ifdef CONFIG_MTD_REDBOOT_PARTS 22#ifdef CONFIG_MTD_REDBOOT_PARTS
@@ -61,11 +59,8 @@ static int __init flash_init(void)
61 mymtd = do_map_probe("cfi_probe", &flash_map); 59 mymtd = do_map_probe("cfi_probe", &flash_map);
62 if (mymtd) { 60 if (mymtd) {
63 mymtd->owner = THIS_MODULE; 61 mymtd->owner = THIS_MODULE;
64 62 mtd_device_parse_register(mymtd, part_probe_types,
65 nr_parts = parse_mtd_partitions(mymtd, 63 0, NULL, 0);
66 part_probe_types,
67 &parts, 0);
68 mtd_device_register(mymtd, parts, nr_parts);
69 } else { 64 } else {
70 pr_err("Failed to register MTD device for flash\n"); 65 pr_err("Failed to register MTD device for flash\n");
71 } 66 }
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 8b606423bbd..efcfff4d462 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -207,8 +207,9 @@ void octeon_prepare_cpus(unsigned int max_cpus)
207 * the other bits alone. 207 * the other bits alone.
208 */ 208 */
209 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); 209 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
210 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 210 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
211 "SMP-IPI", mailbox_interrupt)) { 211 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
212 mailbox_interrupt)) {
212 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); 213 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
213 } 214 }
214} 215}
diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c
index 708f0876140..cae42259d6d 100644
--- a/arch/mips/emma/common/prom.c
+++ b/arch/mips/emma/common/prom.c
@@ -50,7 +50,7 @@ void __init prom_init(void)
50 50
51 /* arg[0] is "g", the rest is boot parameters */ 51 /* arg[0] is "g", the rest is boot parameters */
52 for (i = 1; i < argc; i++) { 52 for (i = 1; i < argc; i++) {
53 if (strlen(arcs_cmdline) + strlen(arg[i] + 1) 53 if (strlen(arcs_cmdline) + strlen(arg[i]) + 1
54 >= sizeof(arcs_cmdline)) 54 >= sizeof(arcs_cmdline))
55 break; 55 break;
56 strcat(arcs_cmdline, arg[i]); 56 strcat(arcs_cmdline, arg[i]);
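The emma prom.c fix is a one-character change to the overflow check: the old code measured strlen(arg[i] + 1) — the argument minus its first byte — where it meant strlen(arg[i]) + 1, the argument plus the separating space. A small standalone reproduction of the corrected check (buffer size and strings are arbitrary):

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 16

/* Model of the boot-argument concatenation fixed above: reject an argument
 * if appending it plus the trailing space would overflow the buffer. */
static void append_arg(char *cmdline, const char *arg)
{
	if (strlen(cmdline) + strlen(arg) + 1 >= CMDLINE_SIZE)
		return;			/* would overflow: drop the argument */
	strcat(cmdline, arg);
	strcat(cmdline, " ");
}

int main(void)
{
	char cmdline[CMDLINE_SIZE] = "";

	append_arg(cmdline, "console=1");
	append_arg(cmdline, "debug");	/* no longer squeezes past the check */
	printf("'%s'\n", cmdline);
	return 0;
}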
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
index 76961cabeed..2ef17e8df40 100644
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h
@@ -36,6 +36,8 @@ static inline int gpio_get_value(unsigned gpio)
36 return -EINVAL; 36 return -EINVAL;
37} 37}
38 38
39#define gpio_get_value_cansleep gpio_get_value
40
39static inline void gpio_set_value(unsigned gpio, int value) 41static inline void gpio_set_value(unsigned gpio, int value)
40{ 42{
41 switch (bcm47xx_bus_type) { 43 switch (bcm47xx_bus_type) {
@@ -54,6 +56,19 @@ static inline void gpio_set_value(unsigned gpio, int value)
54 } 56 }
55} 57}
56 58
59#define gpio_set_value_cansleep gpio_set_value
60
61static inline int gpio_cansleep(unsigned gpio)
62{
63 return 0;
64}
65
66static inline int gpio_is_valid(unsigned gpio)
67{
68 return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
69}
70
71
57static inline int gpio_direction_input(unsigned gpio) 72static inline int gpio_direction_input(unsigned gpio)
58{ 73{
59 switch (bcm47xx_bus_type) { 74 switch (bcm47xx_bus_type) {
@@ -137,7 +152,4 @@ static inline int gpio_polarity(unsigned gpio, int value)
137} 152}
138 153
139 154
140/* cansleep wrappers */
141#include <asm-generic/gpio.h>
142
143#endif /* __BCM47XX_GPIO_H */ 155#endif /* __BCM47XX_GPIO_H */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index ecea7871dec..d8dad5340ea 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -365,16 +365,18 @@
365#define __NR_syncfs (__NR_Linux + 342) 365#define __NR_syncfs (__NR_Linux + 342)
366#define __NR_sendmmsg (__NR_Linux + 343) 366#define __NR_sendmmsg (__NR_Linux + 343)
367#define __NR_setns (__NR_Linux + 344) 367#define __NR_setns (__NR_Linux + 344)
368#define __NR_process_vm_readv (__NR_Linux + 345)
369#define __NR_process_vm_writev (__NR_Linux + 346)
368 370
369/* 371/*
370 * Offset of the last Linux o32 flavoured syscall 372 * Offset of the last Linux o32 flavoured syscall
371 */ 373 */
372#define __NR_Linux_syscalls 344 374#define __NR_Linux_syscalls 346
373 375
374#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 376#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
375 377
376#define __NR_O32_Linux 4000 378#define __NR_O32_Linux 4000
377#define __NR_O32_Linux_syscalls 344 379#define __NR_O32_Linux_syscalls 346
378 380
379#if _MIPS_SIM == _MIPS_SIM_ABI64 381#if _MIPS_SIM == _MIPS_SIM_ABI64
380 382
@@ -686,16 +688,18 @@
686#define __NR_syncfs (__NR_Linux + 301) 688#define __NR_syncfs (__NR_Linux + 301)
687#define __NR_sendmmsg (__NR_Linux + 302) 689#define __NR_sendmmsg (__NR_Linux + 302)
688#define __NR_setns (__NR_Linux + 303) 690#define __NR_setns (__NR_Linux + 303)
691#define __NR_process_vm_readv (__NR_Linux + 304)
692#define __NR_process_vm_writev (__NR_Linux + 305)
689 693
690/* 694/*
691 * Offset of the last Linux 64-bit flavoured syscall 695 * Offset of the last Linux 64-bit flavoured syscall
692 */ 696 */
693#define __NR_Linux_syscalls 303 697#define __NR_Linux_syscalls 305
694 698
695#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 699#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
696 700
697#define __NR_64_Linux 5000 701#define __NR_64_Linux 5000
698#define __NR_64_Linux_syscalls 303 702#define __NR_64_Linux_syscalls 305
699 703
700#if _MIPS_SIM == _MIPS_SIM_NABI32 704#if _MIPS_SIM == _MIPS_SIM_NABI32
701 705
@@ -1012,16 +1016,18 @@
1012#define __NR_syncfs (__NR_Linux + 306) 1016#define __NR_syncfs (__NR_Linux + 306)
1013#define __NR_sendmmsg (__NR_Linux + 307) 1017#define __NR_sendmmsg (__NR_Linux + 307)
1014#define __NR_setns (__NR_Linux + 308) 1018#define __NR_setns (__NR_Linux + 308)
1019#define __NR_process_vm_readv (__NR_Linux + 309)
1020#define __NR_process_vm_writev (__NR_Linux + 310)
1015 1021
1016/* 1022/*
1017 * Offset of the last N32 flavoured syscall 1023 * Offset of the last N32 flavoured syscall
1018 */ 1024 */
1019#define __NR_Linux_syscalls 308 1025#define __NR_Linux_syscalls 310
1020 1026
1021#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1027#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1022 1028
1023#define __NR_N32_Linux 6000 1029#define __NR_N32_Linux 6000
1024#define __NR_N32_Linux_syscalls 308 1030#define __NR_N32_Linux_syscalls 310
1025 1031
1026#ifdef __KERNEL__ 1032#ifdef __KERNEL__
1027 1033
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 98c5a9737c1..e2d8e199be3 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -103,19 +103,10 @@ static int c0_compare_int_pending(void)
103 103
104/* 104/*
105 * Compare interrupt can be routed and latched outside the core, 105 * Compare interrupt can be routed and latched outside the core,
106 * so a single execution hazard barrier may not be enough to give 106 * so wait up to worst case number of cycle counter ticks for timer interrupt
107 * it time to clear as seen in the Cause register. 4 time the 107 * changes to propagate to the cause register.
108 * pipeline depth seems reasonably conservative, and empirically
109 * works better in configurations with high CPU/bus clock ratios.
110 */ 108 */
111 109#define COMPARE_INT_SEEN_TICKS 50
112#define compare_change_hazard() \
113 do { \
114 irq_disable_hazard(); \
115 irq_disable_hazard(); \
116 irq_disable_hazard(); \
117 irq_disable_hazard(); \
118 } while (0)
119 110
120int c0_compare_int_usable(void) 111int c0_compare_int_usable(void)
121{ 112{
@@ -126,8 +117,12 @@ int c0_compare_int_usable(void)
126 * IP7 already pending? Try to clear it by acking the timer. 117 * IP7 already pending? Try to clear it by acking the timer.
127 */ 118 */
128 if (c0_compare_int_pending()) { 119 if (c0_compare_int_pending()) {
129 write_c0_compare(read_c0_count()); 120 cnt = read_c0_count();
130 compare_change_hazard(); 121 write_c0_compare(cnt);
122 back_to_back_c0_hazard();
123 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
124 if (!c0_compare_int_pending())
125 break;
131 if (c0_compare_int_pending()) 126 if (c0_compare_int_pending())
132 return 0; 127 return 0;
133 } 128 }
@@ -136,7 +131,7 @@ int c0_compare_int_usable(void)
136 cnt = read_c0_count(); 131 cnt = read_c0_count();
137 cnt += delta; 132 cnt += delta;
138 write_c0_compare(cnt); 133 write_c0_compare(cnt);
139 compare_change_hazard(); 134 back_to_back_c0_hazard();
140 if ((int)(read_c0_count() - cnt) < 0) 135 if ((int)(read_c0_count() - cnt) < 0)
141 break; 136 break;
142 /* increase delta if the timer was already expired */ 137 /* increase delta if the timer was already expired */
@@ -145,12 +140,17 @@ int c0_compare_int_usable(void)
145 while ((int)(read_c0_count() - cnt) <= 0) 140 while ((int)(read_c0_count() - cnt) <= 0)
146 ; /* Wait for expiry */ 141 ; /* Wait for expiry */
147 142
148 compare_change_hazard(); 143 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
144 if (c0_compare_int_pending())
145 break;
149 if (!c0_compare_int_pending()) 146 if (!c0_compare_int_pending())
150 return 0; 147 return 0;
151 148 cnt = read_c0_count();
152 write_c0_compare(read_c0_count()); 149 write_c0_compare(cnt);
153 compare_change_hazard(); 150 back_to_back_c0_hazard();
151 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
152 if (!c0_compare_int_pending())
153 break;
154 if (c0_compare_int_pending()) 154 if (c0_compare_int_pending())
155 return 0; 155 return 0;
156 156
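cevt-r4k.c replaces the pile of hazard barriers with a bounded wait: because the compare interrupt can be latched outside the core, the probe now polls Cause.IP7 for up to COMPARE_INT_SEEN_TICKS cycle-counter ticks after acking or re-arming the timer. A toy standalone model of that bounded poll (the counter and the pending bit are simulated):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COMPARE_INT_SEEN_TICKS 50

/* Toy counter/pending-bit pair standing in for the c0 count register and
 * Cause.IP7; the pending bit clears a few "cycles" after the ack. */
static uint32_t count;
static uint32_t clear_at = 7;

static uint32_t read_count(void) { return ++count; }
static bool int_pending(void)    { return count < clear_at; }

int main(void)
{
	/* Mirror of the fixed probe: ack the timer, then poll the pending
	 * bit for at most COMPARE_INT_SEEN_TICKS ticks before deciding. */
	uint32_t cnt = read_count();

	while (read_count() < cnt + COMPARE_INT_SEEN_TICKS)
		if (!int_pending())
			break;

	printf("pending after bounded wait: %s\n", int_pending() ? "yes" : "no");
	return 0;
}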
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
index cefc6e259ba..5426779d9fd 100644
--- a/arch/mips/kernel/cpufreq/loongson2_clock.c
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -7,6 +7,7 @@
7 * for more details. 7 * for more details.
8 */ 8 */
9 9
10#include <linux/module.h>
10#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12 13
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 47920657968..a632bc144ef 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -591,6 +591,8 @@ einval: li v0, -ENOSYS
591 sys sys_syncfs 1 591 sys sys_syncfs 1
592 sys sys_sendmmsg 4 592 sys sys_sendmmsg 4
593 sys sys_setns 2 593 sys sys_setns 2
594 sys sys_process_vm_readv 6 /* 4345 */
595 sys sys_process_vm_writev 6
594 .endm 596 .endm
595 597
596 /* We pre-compute the number of _instruction_ bytes needed to 598 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index fb7334bea73..3b5a5e9ae49 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -430,4 +430,6 @@ sys_call_table:
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR sys_sendmmsg 431 PTR sys_sendmmsg
432 PTR sys_setns 432 PTR sys_setns
433 PTR sys_process_vm_readv
434 PTR sys_process_vm_writev /* 5305 */
433 .size sys_call_table,.-sys_call_table 435 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 6de1f598346..6be6f702092 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -430,4 +430,6 @@ EXPORT(sysn32_call_table)
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR compat_sys_sendmmsg 431 PTR compat_sys_sendmmsg
432 PTR sys_setns 432 PTR sys_setns
433 PTR compat_sys_process_vm_readv
434 PTR compat_sys_process_vm_writev /* 6310 */
433 .size sysn32_call_table,.-sysn32_call_table 435 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 1d813169e45..54228553691 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -548,4 +548,6 @@ sys_call_table:
548 PTR sys_syncfs 548 PTR sys_syncfs
549 PTR compat_sys_sendmmsg 549 PTR compat_sys_sendmmsg
550 PTR sys_setns 550 PTR sys_setns
551 PTR compat_sys_process_vm_readv /* 4345 */
552 PTR compat_sys_process_vm_writev
551 .size sys_call_table,.-sys_call_table 553 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 261ccbc0774..5c8a49d5505 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1596,7 +1596,8 @@ void __cpuinit per_cpu_trap_init(void)
1596 } 1596 }
1597#endif /* CONFIG_MIPS_MT_SMTC */ 1597#endif /* CONFIG_MIPS_MT_SMTC */
1598 1598
1599 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1599 if (!cpu_data[cpu].asid_cache)
1600 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1600 1601
1601 atomic_inc(&init_mm.mm_count); 1602 atomic_inc(&init_mm.mm_count);
1602 current->active_mm = &init_mm; 1603 current->active_mm = &init_mm;
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 7e9c0ffc11a..77ed70fc2fe 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/types.h> 13#include <linux/types.h>
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
index 44a36771c81..de1cb2bcd79 100644
--- a/arch/mips/lantiq/devices.c
+++ b/arch/mips/lantiq/devices.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 56ba007bf1e..e34fcfd0d5c 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
index 9b8af77ed0f..1ff6c9d6cb9 100644
--- a/arch/mips/lantiq/setup.c
+++ b/arch/mips/lantiq/setup.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/ioport.h> 12#include <linux/ioport.h>
13#include <asm/bootinfo.h> 13#include <asm/bootinfo.h>
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
index 22d823acd53..652258309c9 100644
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ b/arch/mips/lantiq/xway/clk-ase.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13 13
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
index ddd39593c58..696b1a3e064 100644
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ b/arch/mips/lantiq/xway/clk-xway.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13 13
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
index d0e32ab2ea0..d614aa7ff07 100644
--- a/arch/mips/lantiq/xway/devices.c
+++ b/arch/mips/lantiq/xway/devices.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mtd/physmap.h> 13#include <linux/mtd/physmap.h>
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 4278a459d6c..cbb6ae5747b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/export.h>
22 23
23#include <lantiq_soc.h> 24#include <lantiq_soc.h>
24#include <xway_dma.h> 25#include <xway_dma.h>
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index a321451a545..d2fa98f3c78 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/ioport.h> 13#include <linux/ioport.h>
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
index a479355abdb..b91c7f17f10 100644
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ b/arch/mips/lantiq/xway/gpio_ebu.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/mutex.h> 13#include <linux/mutex.h>
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
index 67d59d69034..ff9991cddea 100644
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ b/arch/mips/lantiq/xway/gpio_stp.c
@@ -9,7 +9,7 @@
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/export.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
index abe49f4db57..ae4959ae865 100644
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ b/arch/mips/lantiq/xway/prom-ase.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
index 1686692ac24..2228133ca35 100644
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ b/arch/mips/lantiq/xway/prom-xway.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index a1be36d0e49..3d41f0bb5bf 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -10,7 +10,7 @@
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/ioport.h> 11#include <linux/ioport.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/module.h> 13#include <linux/export.h>
14#include <asm/reboot.h> 14#include <asm/reboot.h>
15 15
16#include <lantiq_soc.h> 16#include <lantiq_soc.h>
diff --git a/arch/mips/nxp/pnx8550/common/pci.c b/arch/mips/nxp/pnx8550/common/pci.c
deleted file mode 100644
index 98e86ddb86c..00000000000
--- a/arch/mips/nxp/pnx8550/common/pci.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 *
3 * BRIEF MODULE DESCRIPTION
4 *
5 * Author: source@mvista.com
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 */
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24
25#include <pci.h>
26#include <glb.h>
27#include <nand.h>
28
29static struct resource pci_io_resource = {
30 .start = PNX8550_PCIIO + 0x1000, /* reserve regacy I/O space */
31 .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
32 .name = "pci IO space",
33 .flags = IORESOURCE_IO
34};
35
36static struct resource pci_mem_resource = {
37 .start = PNX8550_PCIMEM,
38 .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
39 .name = "pci memory space",
40 .flags = IORESOURCE_MEM
41};
42
43extern struct pci_ops pnx8550_pci_ops;
44
45static struct pci_controller pnx8550_controller = {
46 .pci_ops = &pnx8550_pci_ops,
47 .io_map_base = PNX8550_PORT_BASE,
48 .io_resource = &pci_io_resource,
49 .mem_resource = &pci_mem_resource,
50};
51
52/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
53static inline unsigned long get_system_mem_size(void)
54{
55 /* Read IP2031_RANK0_ADDR_LO */
56 unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
57 /* Read IP2031_RANK1_ADDR_HI */
58 unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
59
60 return dram_r1_hi - dram_r0_lo + 1;
61}
62
63static int __init pnx8550_pci_setup(void)
64{
65 int pci_mem_code;
66 int mem_size = get_system_mem_size() >> 20;
67
68 /* Clear the Global 2 Register, PCI Inta Output Enable Registers
69 Bit 1:Enable DAC Powerdown
70 -> 0:DACs are enabled and are working normally
71 1:DACs are powerdown
72 Bit 0:Enable of PCI inta output
73 -> 0 = Disable PCI inta output
74 1 = Enable PCI inta output
75 */
76 PNX8550_GLB2_ENAB_INTA_O = 0;
77
78 /* Calc the PCI mem size code */
79 if (mem_size >= 128)
80 pci_mem_code = SIZE_128M;
81 else if (mem_size >= 64)
82 pci_mem_code = SIZE_64M;
83 else if (mem_size >= 32)
84 pci_mem_code = SIZE_32M;
85 else
86 pci_mem_code = SIZE_16M;
87
88 /* Set PCI_XIO registers */
89 outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
90 outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
91 outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
92 outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
93
94 /* Send memory transaction via PCI_BASE2 */
95 outl(0x00000001, PCI_BASE | PCI_IO);
96
97 /* Unlock the setup register */
98 outl(0xca, PCI_BASE | PCI_UNLOCKREG);
99
100 /*
101 * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
102 * to work, and in order for bus_to_baddr to work without any
103 * hacks.
104 */
105 outl(0x00000000, PCI_BASE | PCI_BASE10);
106
107 /*
108 *These two bars are set by default or the boot code.
109 * However, it's safer to set them here so we're not boot
110 * code dependent.
111 */
112 outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */
113 outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */
114
115 outl(PCI_EN_TA |
116 PCI_EN_PCI2MMI |
117 PCI_EN_XIO |
118 PCI_SETUP_BASE18_SIZE(SIZE_32M) |
119 PCI_SETUP_BASE18_EN |
120 PCI_SETUP_BASE14_EN |
121 PCI_SETUP_BASE10_PREF |
122 PCI_SETUP_BASE10_SIZE(pci_mem_code) |
123 PCI_SETUP_CFGMANAGE_EN |
124 PCI_SETUP_PCIARB_EN,
125 PCI_BASE |
126 PCI_SETUP); /* PCI_SETUP */
127 outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */
128
129 register_pci_controller(&pnx8550_controller);
130
131 return 0;
132}
133
134arch_initcall(pnx8550_pci_setup);
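
A worked example of the sizing logic above (illustrative only; the register
values are assumed): get_system_mem_size() returns RANK1_ADDR_HI -
RANK0_ADDR_LO + 1 bytes, and pnx8550_pci_setup() shifts that down to
megabytes before picking the SIZE_* code programmed into PCI_SETUP.

	/* Assume IP2031_RANK0_ADDR_LO reads 0x00000000 and
	 * IP2031_RANK1_ADDR_HI reads 0x03ffffff. */
	unsigned long size_bytes = 0x03ffffff - 0x00000000 + 1;  /* 0x04000000 */
	int mem_size_mb = size_bytes >> 20;                      /* 64 MiB     */
	/* 64 >= 64 and < 128, so pci_mem_code = SIZE_64M and BAR10 is sized
	 * through PCI_SETUP_BASE10_SIZE(SIZE_64M). */
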
diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c
deleted file mode 100644
index 71adac32332..00000000000
--- a/arch/mips/nxp/pnx8550/common/setup.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 *
3 * 2.6 port, Embedded Alley Solutions, Inc
4 *
5 * Based on Per Hallsmark, per.hallsmark@mvista.com
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 */
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/ioport.h>
23#include <linux/irq.h>
24#include <linux/mm.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/serial_pnx8xxx.h>
28#include <linux/pm.h>
29
30#include <asm/cpu.h>
31#include <asm/bootinfo.h>
32#include <asm/irq.h>
33#include <asm/mipsregs.h>
34#include <asm/reboot.h>
35#include <asm/pgtable.h>
36#include <asm/time.h>
37
38#include <glb.h>
39#include <int.h>
40#include <pci.h>
41#include <uart.h>
42#include <nand.h>
43
44extern void __init board_setup(void);
45extern void pnx8550_machine_restart(char *);
46extern void pnx8550_machine_halt(void);
47extern void pnx8550_machine_power_off(void);
48extern struct resource ioport_resource;
49extern struct resource iomem_resource;
50extern char *prom_getcmdline(void);
51
52struct resource standard_io_resources[] = {
53 {
54 .start = 0x00,
55 .end = 0x1f,
56 .name = "dma1",
57 .flags = IORESOURCE_BUSY
58 }, {
59 .start = 0x40,
60 .end = 0x5f,
61 .name = "timer",
62 .flags = IORESOURCE_BUSY
63 }, {
64 .start = 0x80,
65 .end = 0x8f,
66 .name = "dma page reg",
67 .flags = IORESOURCE_BUSY
68 }, {
69 .start = 0xc0,
70 .end = 0xdf,
71 .name = "dma2",
72 .flags = IORESOURCE_BUSY
73 },
74};
75
76#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
77
78extern struct resource pci_io_resource;
79extern struct resource pci_mem_resource;
80
81/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
82unsigned long get_system_mem_size(void)
83{
84 /* Read IP2031_RANK0_ADDR_LO */
85 unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
86 /* Read IP2031_RANK1_ADDR_HI */
87 unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
88
89 return dram_r1_hi - dram_r0_lo + 1;
90}
91
92int pnx8550_console_port = -1;
93
94void __init plat_mem_setup(void)
95{
96 int i;
97 char* argptr;
98
99 board_setup(); /* board specific setup */
100
101 _machine_restart = pnx8550_machine_restart;
102 _machine_halt = pnx8550_machine_halt;
103 pm_power_off = pnx8550_machine_power_off;
104
105 /* Clear the Global 2 Register, PCI Inta Output Enable Registers
106 Bit 1:Enable DAC Powerdown
107 -> 0:DACs are enabled and are working normally
108 1:DACs are powerdown
109 Bit 0:Enable of PCI inta output
110 -> 0 = Disable PCI inta output
111 1 = Enable PCI inta output
112 */
113 PNX8550_GLB2_ENAB_INTA_O = 0;
114
115 /* IO/MEM resources. */
116 set_io_port_base(PNX8550_PORT_BASE);
117 ioport_resource.start = 0;
118 ioport_resource.end = ~0;
119 iomem_resource.start = 0;
120 iomem_resource.end = ~0;
121
122 /* Request I/O space for devices on this board */
123 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
124 request_resource(&ioport_resource, standard_io_resources + i);
125
126 /* Place the Mode Control bit for GPIO pin 16 in primary function */
127 /* Pin 16 is used by UART1, UA1_TX */
128 outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
129 (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
130 PNX8550_GPIO_MC1);
131
132 argptr = prom_getcmdline();
133 if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
134 argptr += strlen("console=ttyS");
135 pnx8550_console_port = *argptr == '0' ? 0 : 1;
136
137 /* We must initialize the UART (console) before early printk */
138 /* Set LCR to 8-bit and BAUD to 38400 (no 5) */
139 ip3106_lcr(UART_BASE, pnx8550_console_port) =
140 PNX8XXX_UART_LCR_8BIT;
141 ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
142 }
143}
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 4ee57104e47..b5ce041cdaf 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -7,6 +7,7 @@
7 * Support for all devices (greater than 16) added by David Gathright. 7 * Support for all devices (greater than 16) added by David Gathright.
8 */ 8 */
9 9
10#include <linux/export.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/pci.h> 12#include <linux/pci.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 8656388b34b..be1e1afe12c 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/export.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17 18
18#include <asm/pci.h> 19#include <asm/pci.h>
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index cf4c868715a..dcc926e06fc 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -102,7 +102,7 @@ void __init prom_init(void)
102 102
103 /* Get the boot parameters */ 103 /* Get the boot parameters */
104 for (i = 1; i < argc; i++) { 104 for (i = 1; i < argc; i++) {
105 if (strlen(arcs_cmdline) + strlen(arg[i] + 1) >= 105 if (strlen(arcs_cmdline) + strlen(arg[i]) + 1 >=
106 sizeof(arcs_cmdline)) 106 sizeof(arcs_cmdline))
107 break; 107 break;
108 108
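
The hunk above fixes an operator-placement bug in the command-line length
check: the old expression measured the argument starting at its second
character, while the intent is the full argument length plus one byte for
the separator. A minimal illustration (strlen_example() is made up for this
note, not part of the patch):

	#include <string.h>

	static void strlen_example(void)
	{
		const char *arg = "console=ttyS0";
		size_t old_len, new_len;

		old_len = strlen(arg + 1);  /* 12: skips 'c', measures "onsole=ttyS0" */
		new_len = strlen(arg) + 1;  /* 14: whole argument plus one separator  */
		(void)old_len;
		(void)new_len;
	}
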
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b177caa56d9..951e18f5335 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
345 345
346config KEXEC 346config KEXEC
347 bool "kexec system call (EXPERIMENTAL)" 347 bool "kexec system call (EXPERIMENTAL)"
348 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL 348 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
349 help 349 help
350 kexec is a system call that implements the ability to shutdown your 350 kexec is a system call that implements the ability to shutdown your
351 current kernel, and to start another kernel. It is like a reboot 351 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 57af16edc19..70ba0c0a122 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -255,12 +255,6 @@ checkbin:
255 echo 'disable kernel modules' ; \ 255 echo 'disable kernel modules' ; \
256 false ; \ 256 false ; \
257 fi 257 fi
258 @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
259 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
260 echo 'correctly with old versions of binutils.' ; \
261 echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
262 false ; \
263 fi
264 258
265CLEAN_FILES += $(TOUT) 259CLEAN_FILES += $(TOUT)
266 260
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d9b776740a6..d3b478242ea 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -449,6 +449,7 @@
449 interrupt-parent = <&mpic>; 449 interrupt-parent = <&mpic>;
450 interrupts = <16 2>; 450 interrupts = <16 2>;
451 interrupt-map-mask = <0xf800 0 0 7>; 451 interrupt-map-mask = <0xf800 0 0 7>;
452 /* IRQ[0:3] are pulled up on board, set to active-low */
452 interrupt-map = < 453 interrupt-map = <
453 /* IDSEL 0x0 */ 454 /* IDSEL 0x0 */
454 0000 0 0 1 &mpic 0 1 455 0000 0 0 1 &mpic 0 1
@@ -488,11 +489,15 @@
488 interrupt-parent = <&mpic>; 489 interrupt-parent = <&mpic>;
489 interrupts = <16 2>; 490 interrupts = <16 2>;
490 interrupt-map-mask = <0xf800 0 0 7>; 491 interrupt-map-mask = <0xf800 0 0 7>;
492 /*
493 * IRQ[4:6] only for PCIe, set to active-high,
494 * IRQ[7] is pulled up on board, set to active-low
495 */
491 interrupt-map = < 496 interrupt-map = <
492 /* IDSEL 0x0 */ 497 /* IDSEL 0x0 */
493 0000 0 0 1 &mpic 4 1 498 0000 0 0 1 &mpic 4 2
494 0000 0 0 2 &mpic 5 1 499 0000 0 0 2 &mpic 5 2
495 0000 0 0 3 &mpic 6 1 500 0000 0 0 3 &mpic 6 2
496 0000 0 0 4 &mpic 7 1 501 0000 0 0 4 &mpic 7 1
497 >; 502 >;
498 ranges = <0x2000000 0x0 0xa0000000 503 ranges = <0x2000000 0x0 0xa0000000
@@ -527,12 +532,16 @@
527 interrupt-parent = <&mpic>; 532 interrupt-parent = <&mpic>;
528 interrupts = <16 2>; 533 interrupts = <16 2>;
529 interrupt-map-mask = <0xf800 0 0 7>; 534 interrupt-map-mask = <0xf800 0 0 7>;
535 /*
536 * IRQ[8:10] are pulled up on board, set to active-low
537 * IRQ[11] only for PCIe, set to active-high,
538 */
530 interrupt-map = < 539 interrupt-map = <
531 /* IDSEL 0x0 */ 540 /* IDSEL 0x0 */
532 0000 0 0 1 &mpic 8 1 541 0000 0 0 1 &mpic 8 1
533 0000 0 0 2 &mpic 9 1 542 0000 0 0 2 &mpic 9 1
534 0000 0 0 3 &mpic 10 1 543 0000 0 0 3 &mpic 10 1
535 0000 0 0 4 &mpic 11 1 544 0000 0 0 4 &mpic 11 2
536 >; 545 >;
537 ranges = <0x2000000 0x0 0x80000000 546 ranges = <0x2000000 0x0 0x80000000
538 0x2000000 0x0 0x80000000 547 0x2000000 0x0 0x80000000
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 6cdf1c0d2c8..3b98d735434 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
52CONFIG_MTD_JEDECPROBE=y 52CONFIG_MTD_JEDECPROBE=y
53CONFIG_MTD_CFI_AMDSTD=y 53CONFIG_MTD_CFI_AMDSTD=y
54CONFIG_MTD_PHYSMAP_OF=y 54CONFIG_MTD_PHYSMAP_OF=y
55CONFIG_MTD_NAND=m
56CONFIG_MTD_NAND_NDFC=m
55CONFIG_MTD_UBI=m 57CONFIG_MTD_UBI=m
56CONFIG_MTD_UBI_GLUEBI=m 58CONFIG_MTD_UBI_GLUEBI=m
57CONFIG_PROC_DEVICETREE=y 59CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad37..02e41b53488 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
49 int t; 49 int t;
50 50
51 __asm__ __volatile__( 51 __asm__ __volatile__(
52 PPC_RELEASE_BARRIER 52 PPC_ATOMIC_ENTRY_BARRIER
53"1: lwarx %0,0,%2 # atomic_add_return\n\ 53"1: lwarx %0,0,%2 # atomic_add_return\n\
54 add %0,%1,%0\n" 54 add %0,%1,%0\n"
55 PPC405_ERR77(0,%2) 55 PPC405_ERR77(0,%2)
56" stwcx. %0,0,%2 \n\ 56" stwcx. %0,0,%2 \n\
57 bne- 1b" 57 bne- 1b"
58 PPC_ACQUIRE_BARRIER 58 PPC_ATOMIC_EXIT_BARRIER
59 : "=&r" (t) 59 : "=&r" (t)
60 : "r" (a), "r" (&v->counter) 60 : "r" (a), "r" (&v->counter)
61 : "cc", "memory"); 61 : "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
85 int t; 85 int t;
86 86
87 __asm__ __volatile__( 87 __asm__ __volatile__(
88 PPC_RELEASE_BARRIER 88 PPC_ATOMIC_ENTRY_BARRIER
89"1: lwarx %0,0,%2 # atomic_sub_return\n\ 89"1: lwarx %0,0,%2 # atomic_sub_return\n\
90 subf %0,%1,%0\n" 90 subf %0,%1,%0\n"
91 PPC405_ERR77(0,%2) 91 PPC405_ERR77(0,%2)
92" stwcx. %0,0,%2 \n\ 92" stwcx. %0,0,%2 \n\
93 bne- 1b" 93 bne- 1b"
94 PPC_ACQUIRE_BARRIER 94 PPC_ATOMIC_EXIT_BARRIER
95 : "=&r" (t) 95 : "=&r" (t)
96 : "r" (a), "r" (&v->counter) 96 : "r" (a), "r" (&v->counter)
97 : "cc", "memory"); 97 : "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
119 int t; 119 int t;
120 120
121 __asm__ __volatile__( 121 __asm__ __volatile__(
122 PPC_RELEASE_BARRIER 122 PPC_ATOMIC_ENTRY_BARRIER
123"1: lwarx %0,0,%1 # atomic_inc_return\n\ 123"1: lwarx %0,0,%1 # atomic_inc_return\n\
124 addic %0,%0,1\n" 124 addic %0,%0,1\n"
125 PPC405_ERR77(0,%1) 125 PPC405_ERR77(0,%1)
126" stwcx. %0,0,%1 \n\ 126" stwcx. %0,0,%1 \n\
127 bne- 1b" 127 bne- 1b"
128 PPC_ACQUIRE_BARRIER 128 PPC_ATOMIC_EXIT_BARRIER
129 : "=&r" (t) 129 : "=&r" (t)
130 : "r" (&v->counter) 130 : "r" (&v->counter)
131 : "cc", "xer", "memory"); 131 : "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
163 int t; 163 int t;
164 164
165 __asm__ __volatile__( 165 __asm__ __volatile__(
166 PPC_RELEASE_BARRIER 166 PPC_ATOMIC_ENTRY_BARRIER
167"1: lwarx %0,0,%1 # atomic_dec_return\n\ 167"1: lwarx %0,0,%1 # atomic_dec_return\n\
168 addic %0,%0,-1\n" 168 addic %0,%0,-1\n"
169 PPC405_ERR77(0,%1) 169 PPC405_ERR77(0,%1)
170" stwcx. %0,0,%1\n\ 170" stwcx. %0,0,%1\n\
171 bne- 1b" 171 bne- 1b"
172 PPC_ACQUIRE_BARRIER 172 PPC_ATOMIC_EXIT_BARRIER
173 : "=&r" (t) 173 : "=&r" (t)
174 : "r" (&v->counter) 174 : "r" (&v->counter)
175 : "cc", "xer", "memory"); 175 : "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
194 int t; 194 int t;
195 195
196 __asm__ __volatile__ ( 196 __asm__ __volatile__ (
197 PPC_RELEASE_BARRIER 197 PPC_ATOMIC_ENTRY_BARRIER
198"1: lwarx %0,0,%1 # __atomic_add_unless\n\ 198"1: lwarx %0,0,%1 # __atomic_add_unless\n\
199 cmpw 0,%0,%3 \n\ 199 cmpw 0,%0,%3 \n\
200 beq- 2f \n\ 200 beq- 2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
202 PPC405_ERR77(0,%2) 202 PPC405_ERR77(0,%2)
203" stwcx. %0,0,%1 \n\ 203" stwcx. %0,0,%1 \n\
204 bne- 1b \n" 204 bne- 1b \n"
205 PPC_ACQUIRE_BARRIER 205 PPC_ATOMIC_EXIT_BARRIER
206" subf %0,%2,%0 \n\ 206" subf %0,%2,%0 \n\
2072:" 2072:"
208 : "=&r" (t) 208 : "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
226 int t; 226 int t;
227 227
228 __asm__ __volatile__( 228 __asm__ __volatile__(
229 PPC_RELEASE_BARRIER 229 PPC_ATOMIC_ENTRY_BARRIER
230"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 230"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
231 cmpwi %0,1\n\ 231 cmpwi %0,1\n\
232 addi %0,%0,-1\n\ 232 addi %0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
234 PPC405_ERR77(0,%1) 234 PPC405_ERR77(0,%1)
235" stwcx. %0,0,%1\n\ 235" stwcx. %0,0,%1\n\
236 bne- 1b" 236 bne- 1b"
237 PPC_ACQUIRE_BARRIER 237 PPC_ATOMIC_EXIT_BARRIER
238 "\n\ 238 "\n\
2392:" : "=&b" (t) 2392:" : "=&b" (t)
240 : "r" (&v->counter) 240 : "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
285 long t; 285 long t;
286 286
287 __asm__ __volatile__( 287 __asm__ __volatile__(
288 PPC_RELEASE_BARRIER 288 PPC_ATOMIC_ENTRY_BARRIER
289"1: ldarx %0,0,%2 # atomic64_add_return\n\ 289"1: ldarx %0,0,%2 # atomic64_add_return\n\
290 add %0,%1,%0\n\ 290 add %0,%1,%0\n\
291 stdcx. %0,0,%2 \n\ 291 stdcx. %0,0,%2 \n\
292 bne- 1b" 292 bne- 1b"
293 PPC_ACQUIRE_BARRIER 293 PPC_ATOMIC_EXIT_BARRIER
294 : "=&r" (t) 294 : "=&r" (t)
295 : "r" (a), "r" (&v->counter) 295 : "r" (a), "r" (&v->counter)
296 : "cc", "memory"); 296 : "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
319 long t; 319 long t;
320 320
321 __asm__ __volatile__( 321 __asm__ __volatile__(
322 PPC_RELEASE_BARRIER 322 PPC_ATOMIC_ENTRY_BARRIER
323"1: ldarx %0,0,%2 # atomic64_sub_return\n\ 323"1: ldarx %0,0,%2 # atomic64_sub_return\n\
324 subf %0,%1,%0\n\ 324 subf %0,%1,%0\n\
325 stdcx. %0,0,%2 \n\ 325 stdcx. %0,0,%2 \n\
326 bne- 1b" 326 bne- 1b"
327 PPC_ACQUIRE_BARRIER 327 PPC_ATOMIC_EXIT_BARRIER
328 : "=&r" (t) 328 : "=&r" (t)
329 : "r" (a), "r" (&v->counter) 329 : "r" (a), "r" (&v->counter)
330 : "cc", "memory"); 330 : "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
351 long t; 351 long t;
352 352
353 __asm__ __volatile__( 353 __asm__ __volatile__(
354 PPC_RELEASE_BARRIER 354 PPC_ATOMIC_ENTRY_BARRIER
355"1: ldarx %0,0,%1 # atomic64_inc_return\n\ 355"1: ldarx %0,0,%1 # atomic64_inc_return\n\
356 addic %0,%0,1\n\ 356 addic %0,%0,1\n\
357 stdcx. %0,0,%1 \n\ 357 stdcx. %0,0,%1 \n\
358 bne- 1b" 358 bne- 1b"
359 PPC_ACQUIRE_BARRIER 359 PPC_ATOMIC_EXIT_BARRIER
360 : "=&r" (t) 360 : "=&r" (t)
361 : "r" (&v->counter) 361 : "r" (&v->counter)
362 : "cc", "xer", "memory"); 362 : "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
393 long t; 393 long t;
394 394
395 __asm__ __volatile__( 395 __asm__ __volatile__(
396 PPC_RELEASE_BARRIER 396 PPC_ATOMIC_ENTRY_BARRIER
397"1: ldarx %0,0,%1 # atomic64_dec_return\n\ 397"1: ldarx %0,0,%1 # atomic64_dec_return\n\
398 addic %0,%0,-1\n\ 398 addic %0,%0,-1\n\
399 stdcx. %0,0,%1\n\ 399 stdcx. %0,0,%1\n\
400 bne- 1b" 400 bne- 1b"
401 PPC_ACQUIRE_BARRIER 401 PPC_ATOMIC_EXIT_BARRIER
402 : "=&r" (t) 402 : "=&r" (t)
403 : "r" (&v->counter) 403 : "r" (&v->counter)
404 : "cc", "xer", "memory"); 404 : "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
418 long t; 418 long t;
419 419
420 __asm__ __volatile__( 420 __asm__ __volatile__(
421 PPC_RELEASE_BARRIER 421 PPC_ATOMIC_ENTRY_BARRIER
422"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ 422"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
423 addic. %0,%0,-1\n\ 423 addic. %0,%0,-1\n\
424 blt- 2f\n\ 424 blt- 2f\n\
425 stdcx. %0,0,%1\n\ 425 stdcx. %0,0,%1\n\
426 bne- 1b" 426 bne- 1b"
427 PPC_ACQUIRE_BARRIER 427 PPC_ATOMIC_EXIT_BARRIER
428 "\n\ 428 "\n\
4292:" : "=&r" (t) 4292:" : "=&r" (t)
430 : "r" (&v->counter) 430 : "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
450 long t; 450 long t;
451 451
452 __asm__ __volatile__ ( 452 __asm__ __volatile__ (
453 PPC_RELEASE_BARRIER 453 PPC_ATOMIC_ENTRY_BARRIER
454"1: ldarx %0,0,%1 # __atomic_add_unless\n\ 454"1: ldarx %0,0,%1 # __atomic_add_unless\n\
455 cmpd 0,%0,%3 \n\ 455 cmpd 0,%0,%3 \n\
456 beq- 2f \n\ 456 beq- 2f \n\
457 add %0,%2,%0 \n" 457 add %0,%2,%0 \n"
458" stdcx. %0,0,%1 \n\ 458" stdcx. %0,0,%1 \n\
459 bne- 1b \n" 459 bne- 1b \n"
460 PPC_ACQUIRE_BARRIER 460 PPC_ATOMIC_EXIT_BARRIER
461" subf %0,%2,%0 \n\ 461" subf %0,%2,%0 \n\
4622:" 4622:"
463 : "=&r" (t) 463 : "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10f..efdc92618b3 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \
124 return (old & mask); \ 124 return (old & mask); \
125} 125}
126 126
127DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, 127DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
128 PPC_ACQUIRE_BARRIER, 0) 128 PPC_ATOMIC_EXIT_BARRIER, 0)
129DEFINE_TESTOP(test_and_set_bits_lock, or, "", 129DEFINE_TESTOP(test_and_set_bits_lock, or, "",
130 PPC_ACQUIRE_BARRIER, 1) 130 PPC_ACQUIRE_BARRIER, 1)
131DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, 131DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
132 PPC_ACQUIRE_BARRIER, 0) 132 PPC_ATOMIC_EXIT_BARRIER, 0)
133DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, 133DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
134 PPC_ACQUIRE_BARRIER, 0) 134 PPC_ATOMIC_EXIT_BARRIER, 0)
135 135
136static __inline__ int test_and_set_bit(unsigned long nr, 136static __inline__ int test_and_set_bit(unsigned long nr,
137 volatile unsigned long *addr) 137 volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2e..2a9cf845473 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
11 11
12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile ( \ 13 __asm__ __volatile ( \
14 PPC_RELEASE_BARRIER \ 14 PPC_ATOMIC_ENTRY_BARRIER \
15"1: lwarx %0,0,%2\n" \ 15"1: lwarx %0,0,%2\n" \
16 insn \ 16 insn \
17 PPC405_ERR77(0, %2) \ 17 PPC405_ERR77(0, %2) \
18"2: stwcx. %1,0,%2\n" \ 18"2: stwcx. %1,0,%2\n" \
19 "bne- 1b\n" \ 19 "bne- 1b\n" \
20 PPC_ATOMIC_EXIT_BARRIER \
20 "li %1,0\n" \ 21 "li %1,0\n" \
21"3: .section .fixup,\"ax\"\n" \ 22"3: .section .fixup,\"ax\"\n" \
22"4: li %1,%3\n" \ 23"4: li %1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
92 return -EFAULT; 93 return -EFAULT;
93 94
94 __asm__ __volatile__ ( 95 __asm__ __volatile__ (
95 PPC_RELEASE_BARRIER 96 PPC_ATOMIC_ENTRY_BARRIER
96"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ 97"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
97 cmpw 0,%1,%4\n\ 98 cmpw 0,%1,%4\n\
98 bne- 3f\n" 99 bne- 3f\n"
99 PPC405_ERR77(0,%3) 100 PPC405_ERR77(0,%3)
100"2: stwcx. %5,0,%3\n\ 101"2: stwcx. %5,0,%3\n\
101 bne- 1b\n" 102 bne- 1b\n"
102 PPC_ACQUIRE_BARRIER 103 PPC_ATOMIC_EXIT_BARRIER
103"3: .section .fixup,\"ax\"\n\ 104"3: .section .fixup,\"ax\"\n\
1044: li %0,%6\n\ 1054: li %0,%6\n\
105 b 3b\n\ 106 b 3b\n\
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd1..0ad432bc81d 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
149#define KVM_SREGS_E_UPDATE_DBSR (1 << 3) 149#define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
150 150
151/* 151/*
152 * Book3S special bits to indicate contents in the struct by maintaining
153 * backwards compatibility with older structs. If adding a new field,
154 * please make sure to add a flag for that new field */
155#define KVM_SREGS_S_HIOR (1 << 0)
156
157/*
158 * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a 152 * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
159 * previous KVM_GET_REGS. 153 * previous KVM_GET_REGS.
160 * 154 *
@@ -179,8 +173,6 @@ struct kvm_sregs {
179 __u64 ibat[8]; 173 __u64 ibat[8];
180 __u64 dbat[8]; 174 __u64 dbat[8];
181 } ppc32; 175 } ppc32;
182 __u64 flags; /* KVM_SREGS_S_ */
183 __u64 hior;
184 } s; 176 } s;
185 struct { 177 struct {
186 union { 178 union {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33d..d4df013ad77 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
90#endif 90#endif
91 int context_id[SID_CONTEXTS]; 91 int context_id[SID_CONTEXTS];
92 92
93 bool hior_sregs; /* HIOR is set by SREGS, not PVR */
94
95 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 93 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
96 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 94 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
97 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 95 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399..03c48e819c8 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
31 31
32#define MSR_ MSR_ME | MSR_CE 32#define MSR_ MSR_ME | MSR_CE
33#define MSR_KERNEL MSR_ | MSR_64BIT 33#define MSR_KERNEL MSR_ | MSR_64BIT
34#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE 34#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
35#define MSR_USER64 MSR_USER32 | MSR_64BIT 35#define MSR_USER64 MSR_USER32 | MSR_64BIT
36#elif defined (CONFIG_40x) 36#elif defined (CONFIG_40x)
37#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) 37#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c71..a0f358d4a00 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
8 8
9#ifdef __powerpc64__ 9#ifdef __powerpc64__
10 10
11extern char _end[]; 11extern char __end_interrupts[];
12 12
13static inline int in_kernel_text(unsigned long addr) 13static inline int in_kernel_text(unsigned long addr)
14{ 14{
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c..e682a7143ed 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
14extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 14extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
15 void *fixup_end); 15 void *fixup_end);
16extern void do_final_fixups(void);
16 17
17static inline void eieio(void) 18static inline void eieio(void)
18{ 19{
@@ -41,11 +42,15 @@ static inline void isync(void)
41 START_LWSYNC_SECTION(97); \ 42 START_LWSYNC_SECTION(97); \
42 isync; \ 43 isync; \
43 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); 44 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
44#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) 45#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
45#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" 46#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
47#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
48#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
46#else 49#else
47#define PPC_ACQUIRE_BARRIER 50#define PPC_ACQUIRE_BARRIER
48#define PPC_RELEASE_BARRIER 51#define PPC_RELEASE_BARRIER
52#define PPC_ATOMIC_ENTRY_BARRIER
53#define PPC_ATOMIC_EXIT_BARRIER
49#endif 54#endif
50 55
51#endif /* __KERNEL__ */ 56#endif /* __KERNEL__ */
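
With the synch.h hunk above, value-returning atomics get their own barrier
pair: in the SMP branch PPC_ATOMIC_ENTRY_BARRIER is an lwsync and
PPC_ATOMIC_EXIT_BARRIER a full sync, while in the #else branch both are
empty, the intent being that the whole load-reserve/store-conditional
sequence acts as a full memory barrier. A minimal sketch in the same style
as the atomic_add_return() hunk earlier in this patch (example_add_return()
is illustrative; the PPC405_ERR77 workaround is omitted):

	static inline int example_add_return(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
		PPC_ATOMIC_ENTRY_BARRIER        /* lwsync before the reservation */
	"1:	lwarx	%0,0,%2\n\
		add	%0,%1,%0\n\
		stwcx.	%0,0,%2\n\
		bne-	1b"
		PPC_ATOMIC_EXIT_BARRIER         /* full sync after the update */
		: "=&r" (t)
		: "r" (a), "r" (&v->counter)
		: "cc", "memory");

		return t;
	}
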
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc0ab0..4f80cf1ce77 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -215,7 +215,22 @@ reenable_mmu: /* re-enable mmu so we can */
215 stw r9,8(r1) 215 stw r9,8(r1)
216 stw r11,12(r1) 216 stw r11,12(r1)
217 stw r3,ORIG_GPR3(r1) 217 stw r3,ORIG_GPR3(r1)
218 /*
 219	 * trace_hardirqs_off() uses CALLER_ADDR0 and CALLER_ADDR1. When
 220	 * entering from user mode there is only one stack frame on the stack,
 221	 * so accessing CALLER_ADDR1 would oops. Create a dummy stack frame
 222	 * here to keep trace_hardirqs_off() happy.
223 */
224 andi. r12,r12,MSR_PR
225 beq 11f
226 stwu r1,-16(r1)
227 bl trace_hardirqs_off
228 addi r1,r1,16
229 b 12f
230
23111:
218 bl trace_hardirqs_off 232 bl trace_hardirqs_off
23312:
219 lwz r0,GPR0(r1) 234 lwz r0,GPR0(r1)
220 lwz r3,ORIG_GPR3(r1) 235 lwz r3,ORIG_GPR3(r1)
221 lwz r4,GPR4(r1) 236 lwz r4,GPR4(r1)
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 368d158d665..a1ed8a8c7cb 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,6 +11,7 @@
11#include <linux/jump_label.h> 11#include <linux/jump_label.h>
12#include <asm/code-patching.h> 12#include <asm/code-patching.h>
13 13
14#ifdef HAVE_JUMP_LABEL
14void arch_jump_label_transform(struct jump_entry *entry, 15void arch_jump_label_transform(struct jump_entry *entry,
15 enum jump_label_type type) 16 enum jump_label_type type)
16{ 17{
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
21 else 22 else
22 patch_instruction(addr, PPC_INST_NOP); 23 patch_instruction(addr, PPC_INST_NOP);
23} 24}
25#endif
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 35f27646c4f..2985338d0e1 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
132 /* On relocatable kernels interrupts handlers and our code 132 /* On relocatable kernels interrupts handlers and our code
133 can be in different regions, so we don't patch them */ 133 can be in different regions, so we don't patch them */
134 134
135 extern u32 __end_interrupts;
136 if ((ulong)inst < (ulong)&__end_interrupts) 135 if ((ulong)inst < (ulong)&__end_interrupts)
137 return; 136 return;
138#endif 137#endif
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index f7d760ab5ca..7cd07b42ca1 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,7 +738,7 @@ relocate_new_kernel:
738 mr r5, r31 738 mr r5, r31
739 739
740 li r0, 0 740 li r0, 0
741#elif defined(CONFIG_44x) && !defined(CONFIG_47x) 741#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
742 742
743/* 743/*
744 * Code for setting up 1:1 mapping for PPC440x for KEXEC 744 * Code for setting up 1:1 mapping for PPC440x for KEXEC
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9054ca9ab4f..6457574c0b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
486 new_thread = &new->thread; 486 new_thread = &new->thread;
487 old_thread = &current->thread; 487 old_thread = &current->thread;
488 488
489#if defined(CONFIG_PPC_BOOK3E_64)
490 /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
491 * we always hold the user values, so we set it now.
492 *
493 * However, we ensure the kernel MSR:DE is appropriately cleared too
494 * to avoid spurrious single step exceptions in the kernel.
495 *
496 * This will have to change to merge with the ppc32 code at some point,
497 * but I don't like much what ppc32 is doing today so there's some
498 * thinking needed there
499 */
500 if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
501 u32 dbcr0;
502
503 mtmsr(mfmsr() & ~MSR_DE);
504 isync();
505 dbcr0 = mfspr(SPRN_DBCR0);
506 dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
507 mtspr(SPRN_DBCR0, dbcr0);
508 }
509#endif /* CONFIG_PPC64_BOOK3E */
510
511#ifdef CONFIG_PPC64 489#ifdef CONFIG_PPC64
512 /* 490 /*
513 * Collect processor utilization data per process 491 * Collect processor utilization data per process
@@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs)
657 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 635 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
658 printk("CFAR: "REG"\n", regs->orig_gpr3); 636 printk("CFAR: "REG"\n", regs->orig_gpr3);
659 if (trap == 0x300 || trap == 0x600) 637 if (trap == 0x300 || trap == 0x600)
660#ifdef CONFIG_PPC_ADV_DEBUG_REGS 638#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
661 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); 639 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
662#else 640#else
663 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 641 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b4fa6612749..cc584865b3d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
1579 return; 1579 return;
1580 1580
1581 base = alloc_down(size, PAGE_SIZE, 0); 1581 base = alloc_down(size, PAGE_SIZE, 0);
1582 if (base == 0) { 1582 if (base == 0)
1583 prom_printf("RTAS allocation failed !\n"); 1583 prom_panic("Could not allocate memory for RTAS\n");
1584 return;
1585 }
1586 1584
1587 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1585 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1588 if (!IHANDLE_VALID(rtas_inst)) { 1586 if (!IHANDLE_VALID(rtas_inst)) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1ce86357ec..ac761081511 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
107 PTRRELOC(&__start___lwsync_fixup), 107 PTRRELOC(&__start___lwsync_fixup),
108 PTRRELOC(&__stop___lwsync_fixup)); 108 PTRRELOC(&__stop___lwsync_fixup));
109 109
110 do_final_fixups();
111
110 return KERNELBASE + offset; 112 return KERNELBASE + offset;
111} 113}
112 114
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1a9dea80a69..fb9bb46e7e8 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -359,6 +359,7 @@ void __init setup_system(void)
359 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 359 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
360 do_lwsync_fixups(cur_cpu_spec->cpu_features, 360 do_lwsync_fixups(cur_cpu_spec->cpu_features,
361 &__start___lwsync_fixup, &__stop___lwsync_fixup); 361 &__start___lwsync_fixup, &__stop___lwsync_fixup);
362 do_final_fixups();
362 363
363 /* 364 /*
364 * Unflatten the device-tree passed by prom_init or kexec 365 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 78b76dc54df..836a5a19eb2 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
97 compat_sigset_t cset; 97 compat_sigset_t cset;
98 98
99 switch (_NSIG_WORDS) { 99 switch (_NSIG_WORDS) {
100 case 4: cset.sig[5] = set->sig[3] & 0xffffffffull; 100 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
101 cset.sig[7] = set->sig[3] >> 32; 101 cset.sig[7] = set->sig[3] >> 32;
102 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; 102 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
103 cset.sig[5] = set->sig[2] >> 32; 103 cset.sig[5] = set->sig[2] >> 32;
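
The signal_32.c fix above corrects the compat sigset conversion for the
_NSIG_WORDS == 4 case: every native 64-bit word maps to a pair of 32-bit
compat words, low half first, so set->sig[3] belongs in cset.sig[6] and
cset.sig[7]; writing its low half into cset.sig[5] clobbered the high half
of set->sig[2]. The general mapping, written as a loop instead of the
switch (illustrative only):

	int n;

	for (n = 0; n < _NSIG_WORDS; n++) {
		cset.sig[2 * n]     = set->sig[n] & 0xffffffffull;  /* low 32 bits  */
		cset.sig[2 * n + 1] = set->sig[n] >> 32;            /* high 32 bits */
	}
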
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4e5908264d1..5459d148a0f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1298 1298
1299 if (user_mode(regs)) { 1299 if (user_mode(regs)) {
1300 current->thread.dbcr0 &= ~DBCR0_IC; 1300 current->thread.dbcr0 &= ~DBCR0_IC;
1301#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1302 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, 1301 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
1303 current->thread.dbcr1)) 1302 current->thread.dbcr1))
1304 regs->msr |= MSR_DE; 1303 regs->msr |= MSR_DE;
1305 else 1304 else
1306 /* Make sure the IDM bit is off */ 1305 /* Make sure the IDM bit is off */
1307 current->thread.dbcr0 &= ~DBCR0_IDM; 1306 current->thread.dbcr0 &= ~DBCR0_IDM;
1308#endif
1309 } 1307 }
1310 1308
1311 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1309 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cdbc07cec1..0cb137a9b03 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -44,6 +44,7 @@
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/cputhreads.h> 45#include <asm/cputhreads.h>
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/hvcall.h>
47#include <linux/gfp.h> 48#include <linux/gfp.h>
48#include <linux/sched.h> 49#include <linux/sched.h>
49#include <linux/vmalloc.h> 50#include <linux/vmalloc.h>
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bc4d50dec78..3c791e1eb67 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -151,16 +151,14 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
151#ifdef CONFIG_PPC_BOOK3S_64 151#ifdef CONFIG_PPC_BOOK3S_64
152 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 152 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
153 kvmppc_mmu_book3s_64_init(vcpu); 153 kvmppc_mmu_book3s_64_init(vcpu);
154 if (!to_book3s(vcpu)->hior_sregs) 154 to_book3s(vcpu)->hior = 0xfff00000;
155 to_book3s(vcpu)->hior = 0xfff00000;
156 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; 155 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
157 vcpu->arch.cpu_type = KVM_CPU_3S_64; 156 vcpu->arch.cpu_type = KVM_CPU_3S_64;
158 } else 157 } else
159#endif 158#endif
160 { 159 {
161 kvmppc_mmu_book3s_32_init(vcpu); 160 kvmppc_mmu_book3s_32_init(vcpu);
162 if (!to_book3s(vcpu)->hior_sregs) 161 to_book3s(vcpu)->hior = 0;
163 to_book3s(vcpu)->hior = 0;
164 to_book3s(vcpu)->msr_mask = 0xffffffffULL; 162 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
165 vcpu->arch.cpu_type = KVM_CPU_3S_32; 163 vcpu->arch.cpu_type = KVM_CPU_3S_32;
166 } 164 }
@@ -797,9 +795,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
797 } 795 }
798 } 796 }
799 797
800 if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
801 sregs->u.s.hior = to_book3s(vcpu)->hior;
802
803 return 0; 798 return 0;
804} 799}
805 800
@@ -836,11 +831,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
836 /* Flush the MMU after messing with the segments */ 831 /* Flush the MMU after messing with the segments */
837 kvmppc_mmu_pte_flush(vcpu, 0, 0); 832 kvmppc_mmu_pte_flush(vcpu, 0, 0);
838 833
839 if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
840 to_book3s(vcpu)->hior_sregs = true;
841 to_book3s(vcpu)->hior = sregs->u.s.hior;
842 }
843
844 return 0; 834 return 0;
845} 835}
846 836
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index efbf9ad8720..607fbdf24b8 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
208 case KVM_CAP_PPC_BOOKE_SREGS: 208 case KVM_CAP_PPC_BOOKE_SREGS:
209#else 209#else
210 case KVM_CAP_PPC_SEGSTATE: 210 case KVM_CAP_PPC_SEGSTATE:
211 case KVM_CAP_PPC_HIOR:
212 case KVM_CAP_PPC_PAPR: 211 case KVM_CAP_PPC_PAPR:
213#endif 212#endif
214 case KVM_CAP_PPC_UNSET_IRQ: 213 case KVM_CAP_PPC_UNSET_IRQ:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d017139..7a8a7487cee 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <asm/cputable.h> 19#include <asm/cputable.h>
20#include <asm/code-patching.h> 20#include <asm/code-patching.h>
21#include <asm/page.h>
22#include <asm/sections.h>
21 23
22 24
23struct fixup_entry { 25struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
128 } 130 }
129} 131}
130 132
133void do_final_fixups(void)
134{
135#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
136 int *src, *dest;
137 unsigned long length;
138
139 if (PHYSICAL_START == 0)
140 return;
141
142 src = (int *)(KERNELBASE + PHYSICAL_START);
143 dest = (int *)KERNELBASE;
144 length = (__end_interrupts - _stext) / sizeof(int);
145
146 while (length--) {
147 patch_instruction(dest, *src);
148 src++;
149 dest++;
150 }
151#endif
152}
153
131#ifdef CONFIG_FTR_FIXUP_SELFTEST 154#ifdef CONFIG_FTR_FIXUP_SELFTEST
132 155
133#define check(x) \ 156#define check(x) \
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303a..8558b572e55 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
15#include <linux/of_fdt.h> 15#include <linux/of_fdt.h>
16#include <linux/memblock.h> 16#include <linux/memblock.h>
17#include <linux/bootmem.h> 17#include <linux/bootmem.h>
18#include <linux/moduleparam.h>
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include <asm/tlb.h> 21#include <asm/tlb.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 45023e26aea..d7946be298b 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,7 @@ config P3060_QDS
203 select PPC_E500MC 203 select PPC_E500MC
204 select PHYS_64BIT 204 select PHYS_64BIT
205 select SWIOTLB 205 select SWIOTLB
206 select MPC8xxx_GPIO 206 select GPIO_MPC8XXX
207 select HAS_RAPIDIO 207 select HAS_RAPIDIO
208 select PPC_EPAPR_HV_PIC 208 select PPC_EPAPR_HV_PIC
209 help 209 help
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
index 01dcf44871e..081cf4ac188 100644
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
70 .power_save = e500_idle, 70 .power_save = e500_idle,
71}; 71};
72 72
73machine_device_initcall(p3060_qds, declare_of_platform_devices); 73machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
74 74
75#ifdef CONFIG_SWIOTLB 75#ifdef CONFIG_SWIOTLB
76machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier); 76machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e4588721ef3..3fe6d927ad7 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -347,7 +347,7 @@ config SIMPLE_GPIO
347 347
348config MCU_MPC8349EMITX 348config MCU_MPC8349EMITX
349 bool "MPC8349E-mITX MCU driver" 349 bool "MPC8349E-mITX MCU driver"
350 depends on I2C && PPC_83xx 350 depends on I2C=y && PPC_83xx
351 select GENERIC_GPIO 351 select GENERIC_GPIO
352 select ARCH_REQUIRE_GPIOLIB 352 select ARCH_REQUIRE_GPIOLIB
353 help 353 help
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 404bc52b780..1d6f4f478fe 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -88,6 +88,7 @@ struct ps3_private {
88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN))); 88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
89 u64 ppe_id; 89 u64 ppe_id;
90 u64 thread_id; 90 u64 thread_id;
91 unsigned long ipi_mask;
91}; 92};
92 93
93static DEFINE_PER_CPU(struct ps3_private, ps3_private); 94static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
144static void ps3_chip_eoi(struct irq_data *d) 145static void ps3_chip_eoi(struct irq_data *d)
145{ 146{
146 const struct ps3_private *pd = irq_data_get_irq_chip_data(d); 147 const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
147 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); 148
149 /* non-IPIs are EOIed here. */
150
151 if (!test_bit(63 - d->irq, &pd->ipi_mask))
152 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
148} 153}
149 154
150/** 155/**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
691 cpu, virq, pd->bmp.ipi_debug_brk_mask); 696 cpu, virq, pd->bmp.ipi_debug_brk_mask);
692} 697}
693 698
699void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
700{
701 struct ps3_private *pd = &per_cpu(ps3_private, cpu);
702
703 set_bit(63 - virq, &pd->ipi_mask);
704
705 DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
706 cpu, virq, pd->ipi_mask);
707}
708
694static unsigned int ps3_get_irq(void) 709static unsigned int ps3_get_irq(void)
695{ 710{
696 struct ps3_private *pd = &__get_cpu_var(ps3_private); 711 struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
720 BUG(); 735 BUG();
721 } 736 }
722#endif 737#endif
738
739 /* IPIs are EOIed here. */
740
741 if (test_bit(63 - plug, &pd->ipi_mask))
742 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
743
723 return plug; 744 return plug;
724} 745}
725 746
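
The interrupt.c changes above split end-of-interrupt handling for the PS3:
each CPU's ps3_private now carries an ipi_mask with one bit per IPI virq
(bit 63 - virq), ps3_chip_eoi() skips the lv1 EOI for IPIs, and
ps3_get_irq() issues it right after reading the plug instead. Collected
from the hunks above, the bookkeeping amounts to (sketch only):

	/* registration (ps3_register_ipi_irq): remember this virq is an IPI */
	set_bit(63 - virq, &pd->ipi_mask);

	/* ps3_chip_eoi(): only non-IPIs are EOIed here */
	if (!test_bit(63 - d->irq, &pd->ipi_mask))
		lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);

	/* ps3_get_irq(): IPIs are EOIed here, once the plug has been read */
	if (test_bit(63 - plug, &pd->ipi_mask))
		lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
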
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 9a196a88eda..1a633ed0fe9 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
43void ps3_init_IRQ(void); 43void ps3_init_IRQ(void);
44void ps3_shutdown_IRQ(int cpu); 44void ps3_shutdown_IRQ(int cpu);
45void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq); 45void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
46void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
46 47
47/* smp */ 48/* smp */
48 49
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 4c44794faac..efc1cd8c034 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -59,46 +59,49 @@ static void ps3_smp_message_pass(int cpu, int msg)
59 59
60static int ps3_smp_probe(void) 60static int ps3_smp_probe(void)
61{ 61{
62 return 2; 62 int cpu;
63}
64 63
65static void __init ps3_smp_setup_cpu(int cpu) 64 for (cpu = 0; cpu < 2; cpu++) {
66{ 65 int result;
67 int result; 66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
68 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); 67 int i;
69 int i;
70 68
71 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); 69 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
72 70
73 /* 71 /*
74 * Check assumptions on ps3_ipi_virqs[] indexing. If this 72 * Check assumptions on ps3_ipi_virqs[] indexing. If this
75 * check fails, then a different mapping of PPC_MSG_ 73 * check fails, then a different mapping of PPC_MSG_
76 * to index needs to be setup. 74 * to index needs to be setup.
77 */ 75 */
78 76
79 BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); 77 BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
80 BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); 78 BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
81 BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); 79 BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
82 BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); 80 BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
83 81
84 for (i = 0; i < MSG_COUNT; i++) { 82 for (i = 0; i < MSG_COUNT; i++) {
85 result = ps3_event_receive_port_setup(cpu, &virqs[i]); 83 result = ps3_event_receive_port_setup(cpu, &virqs[i]);
86 84
87 if (result) 85 if (result)
88 continue; 86 continue;
89 87
90 DBG("%s:%d: (%d, %d) => virq %u\n", 88 DBG("%s:%d: (%d, %d) => virq %u\n",
91 __func__, __LINE__, cpu, i, virqs[i]); 89 __func__, __LINE__, cpu, i, virqs[i]);
92 90
93 result = smp_request_message_ipi(virqs[i], i); 91 result = smp_request_message_ipi(virqs[i], i);
94 92
95 if (result) 93 if (result)
96 virqs[i] = NO_IRQ; 94 virqs[i] = NO_IRQ;
97 } 95 else
96 ps3_register_ipi_irq(cpu, virqs[i]);
97 }
98 98
99 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]); 99 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
100 100
101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu); 101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
102 }
103
104 return 2;
102} 105}
103 106
104void ps3_smp_cleanup_cpu(int cpu) 107void ps3_smp_cleanup_cpu(int cpu)
@@ -121,7 +124,6 @@ static struct smp_ops_t ps3_smp_ops = {
121 .probe = ps3_smp_probe, 124 .probe = ps3_smp_probe,
122 .message_pass = ps3_smp_message_pass, 125 .message_pass = ps3_smp_message_pass,
123 .kick_cpu = smp_generic_kick_cpu, 126 .kick_cpu = smp_generic_kick_cpu,
124 .setup_cpu = ps3_smp_setup_cpu,
125}; 127};
126 128
127void smp_init_ps3(void) 129void smp_init_ps3(void)
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index af1a5df46b3..b6731e4a664 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
280 280
281 if (!ehv_pic->irqhost) { 281 if (!ehv_pic->irqhost) {
282 of_node_put(np); 282 of_node_put(np);
283 kfree(ehv_pic);
283 return; 284 return;
284 } 285 }
285 286
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index c4d96fa32ba..d5c3c90ee69 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
328err: 328err:
329 iounmap(fsl_lbc_ctrl_dev->regs); 329 iounmap(fsl_lbc_ctrl_dev->regs);
330 kfree(fsl_lbc_ctrl_dev); 330 kfree(fsl_lbc_ctrl_dev);
331 fsl_lbc_ctrl_dev = NULL;
331 return ret; 332 return ret;
332} 333}
333 334
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3363fbc964f..ceb09cbd232 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
216 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says 216 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
217 that the BRG divisor must be even if you're not using divide-by-16 217 that the BRG divisor must be even if you're not using divide-by-16
218 mode. */ 218 mode. */
219 if (!div16 && (divisor & 1)) 219 if (!div16 && (divisor & 1) && (divisor > 3))
220 divisor++; 220 divisor++;
221 221
222 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | 222 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
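
The qe_setbrg() change narrows the QE_General4 workaround: odd divisors are
still rounded up to the next even value when divide-by-16 mode is off, but
only once the divisor is larger than 3, so divisors of 1 and 3 are now left
alone. A minimal sketch of the adjusted rule (adjust_brg_divisor() is a
made-up helper for this note):

	static unsigned int adjust_brg_divisor(unsigned int divisor, int div16)
	{
		/* Errata QE_General4: the BRG divisor must be even unless
		 * divide-by-16 mode is used; after this patch divisors of
		 * 1 and 3 are exempt from the round-up. */
		if (!div16 && (divisor & 1) && (divisor > 3))
			divisor++;              /* e.g. 5 -> 6, but 3 stays 3 */
		return divisor;
	}
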
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a9fbd43395f..373679b3744 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -572,6 +572,7 @@ config KEXEC
572config CRASH_DUMP 572config CRASH_DUMP
573 bool "kernel crash dumps" 573 bool "kernel crash dumps"
574 depends on 64BIT 574 depends on 64BIT
575 select KEXEC
575 help 576 help
576 Generate crash dump after being started by kexec. 577 Generate crash dump after being started by kexec.
577 Crash dump kernels are loaded in the main kernel with kexec-tools 578 Crash dump kernels are loaded in the main kernel with kexec-tools
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 49676771bd6..ffd1ac255f1 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
368 368
369 if (facility_mask & CRYPT_S390_MSA && !test_facility(17)) 369 if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
370 return 0; 370 return 0;
371 if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76)) 371
372 if (facility_mask & CRYPT_S390_MSA3 &&
373 (!test_facility(2) || !test_facility(76)))
372 return 0; 374 return 0;
373 if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) 375 if (facility_mask & CRYPT_S390_MSA4 &&
376 (!test_facility(2) || !test_facility(77)))
374 return 0; 377 return 0;
375 378
376 switch (func & CRYPT_S390_OP_MASK) { 379 switch (func & CRYPT_S390_OP_MASK) {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 24e18473d92..b0c235cb6ad 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -47,7 +47,7 @@ struct sca_block {
47#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) 47#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
48#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) 48#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
49 49
50#define CPUSTAT_HOST 0x80000000 50#define CPUSTAT_STOPPED 0x80000000
51#define CPUSTAT_WAIT 0x10000000 51#define CPUSTAT_WAIT 0x10000000
52#define CPUSTAT_ECALL_PEND 0x08000000 52#define CPUSTAT_ECALL_PEND 0x08000000
53#define CPUSTAT_STOP_INT 0x04000000 53#define CPUSTAT_STOP_INT 0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
139 u32 instruction_stfl; 139 u32 instruction_stfl;
140 u32 instruction_tprot; 140 u32 instruction_tprot;
141 u32 instruction_sigp_sense; 141 u32 instruction_sigp_sense;
142 u32 instruction_sigp_sense_running;
142 u32 instruction_sigp_external_call; 143 u32 instruction_sigp_external_call;
143 u32 instruction_sigp_emergency; 144 u32 instruction_sigp_emergency;
144 u32 instruction_sigp_stop; 145 u32 instruction_sigp_stop;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 34ede0ea85a..524d23b8610 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -593,6 +593,8 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
593 unsigned long address, bits; 593 unsigned long address, bits;
594 unsigned char skey; 594 unsigned char skey;
595 595
596 if (!pte_present(*ptep))
597 return pgste;
596 address = pte_val(*ptep) & PAGE_MASK; 598 address = pte_val(*ptep) & PAGE_MASK;
597 skey = page_get_storage_key(address); 599 skey = page_get_storage_key(address);
598 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 600 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
625#ifdef CONFIG_PGSTE 627#ifdef CONFIG_PGSTE
626 int young; 628 int young;
627 629
630 if (!pte_present(*ptep))
631 return pgste;
628 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); 632 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
629 /* Transfer page referenced bit to pte software bit (host view) */ 633 /* Transfer page referenced bit to pte software bit (host view) */
630 if (young || (pgste_val(pgste) & RCP_HR_BIT)) 634 if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
638 642
639} 643}
640 644
641static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste) 645static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
642{ 646{
643#ifdef CONFIG_PGSTE 647#ifdef CONFIG_PGSTE
644 unsigned long address; 648 unsigned long address;
645 unsigned long okey, nkey; 649 unsigned long okey, nkey;
646 650
647 address = pte_val(*ptep) & PAGE_MASK; 651 if (!pte_present(entry))
652 return;
653 address = pte_val(entry) & PAGE_MASK;
648 okey = nkey = page_get_storage_key(address); 654 okey = nkey = page_get_storage_key(address);
649 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); 655 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
650 /* Set page access key and fetch protection bit from pgste */ 656 /* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
712 718
713 if (mm_has_pgste(mm)) { 719 if (mm_has_pgste(mm)) {
714 pgste = pgste_get_lock(ptep); 720 pgste = pgste_get_lock(ptep);
715 pgste_set_pte(ptep, pgste); 721 pgste_set_pte(ptep, pgste, entry);
716 *ptep = entry; 722 *ptep = entry;
717 pgste_set_unlock(ptep, pgste); 723 pgste_set_unlock(ptep, pgste);
718 } else 724 } else
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 5a099714df0..097183c7040 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
82#define MACHINE_FLAG_LPAR (1UL << 12) 82#define MACHINE_FLAG_LPAR (1UL << 12)
83#define MACHINE_FLAG_SPP (1UL << 13) 83#define MACHINE_FLAG_SPP (1UL << 13)
84#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 84#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
85#define MACHINE_FLAG_STCKF (1UL << 15)
85 86
86#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 87#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
87#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 88#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
100#define MACHINE_HAS_PFMF (0) 101#define MACHINE_HAS_PFMF (0)
101#define MACHINE_HAS_SPP (0) 102#define MACHINE_HAS_SPP (0)
102#define MACHINE_HAS_TOPOLOGY (0) 103#define MACHINE_HAS_TOPOLOGY (0)
104#define MACHINE_HAS_STCKF (0)
103#else /* __s390x__ */ 105#else /* __s390x__ */
104#define MACHINE_HAS_IEEE (1) 106#define MACHINE_HAS_IEEE (1)
105#define MACHINE_HAS_CSP (1) 107#define MACHINE_HAS_CSP (1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 113#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 114#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 115#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
116#define MACHINE_HAS_STCKF (S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
114#endif /* __s390x__ */ 117#endif /* __s390x__ */
115 118
116#define ZFCPDUMP_HSA_SIZE (32UL<<20) 119#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index d610bef9c5e..c447a27a7fd 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
90{ 90{
91 unsigned long long clk; 91 unsigned long long clk;
92 92
93 if (test_facility(25)) 93 if (MACHINE_HAS_STCKF)
94 asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); 94 asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
95 else 95 else
96 clk = get_clock(); 96 clk = get_clock();
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 404bdb9671b..58de4c91c33 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
277#define __NR_clock_adjtime 337 277#define __NR_clock_adjtime 337
278#define __NR_syncfs 338 278#define __NR_syncfs 338
279#define __NR_setns 339 279#define __NR_setns 339
280#define NR_syscalls 340 280#define __NR_process_vm_readv 340
281#define __NR_process_vm_writev 341
282#define NR_syscalls 342
281 283
282/* 284/*
283 * There are some system calls that are not present on 64 bit, some 285 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 5006a1d9f5d..18c51df9fe0 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
1627 lgfr %r2,%r2 # int 1627 lgfr %r2,%r2 # int
1628 lgfr %r3,%r3 # int 1628 lgfr %r3,%r3 # int
1629 jg sys_setns 1629 jg sys_setns
1630
1631ENTRY(compat_sys_process_vm_readv_wrapper)
1632 lgfr %r2,%r2 # compat_pid_t
1633 llgtr %r3,%r3 # struct compat_iovec __user *
1634 llgfr %r4,%r4 # unsigned long
1635 llgtr %r5,%r5 # struct compat_iovec __user *
1636 llgfr %r6,%r6 # unsigned long
1637 llgf %r0,164(%r15) # unsigned long
1638 stg %r0,160(%r15)
1639 jg sys_process_vm_readv
1640
1641ENTRY(compat_sys_process_vm_writev_wrapper)
1642 lgfr %r2,%r2 # compat_pid_t
1643 llgtr %r3,%r3 # struct compat_iovec __user *
1644 llgfr %r4,%r4 # unsigned long
1645 llgtr %r5,%r5 # struct compat_iovec __user *
1646 llgfr %r6,%r6 # unsigned long
1647 llgf %r0,164(%r15) # unsigned long
1648 stg %r0,160(%r15)
1649 jg sys_process_vm_writev
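The two wrappers above sign- or zero-extend the 31-bit register arguments and copy the sixth argument (the flags word) from the caller's stack at 164(%r15) into the 64-bit argument slot at 160(%r15) before branching to the common handlers; together with the new unistd.h numbers and the syscalls.S entries further down, this wires process_vm_readv/process_vm_writev up for 31-bit programs. A rough userspace illustration, assuming a libc that already exposes the call (glibc 2.15 or later; otherwise syscall(__NR_process_vm_readv, ...) with the numbers above behaves the same):

    /* Userspace sketch, not part of the patch: read 64 bytes from
     * another process at remote_addr via the newly wired-up syscall. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static int peek_remote(pid_t pid, void *remote_addr)
    {
    	char buf[64];
    	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
    	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };
    	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);

    	if (n < 0) {
    		perror("process_vm_readv");
    		return -1;
    	}
    	printf("copied %zd bytes from pid %d\n", n, (int)pid);
    	return 0;
    }
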
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 37394b3413e..c9ffe002519 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
390 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 390 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
391 if (test_facility(40)) 391 if (test_facility(40))
392 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 392 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
393 if (test_facility(25))
394 S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
393#endif 395#endif
394} 396}
395 397
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ac6bfa2786..e58a462949b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
211 211
212 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 212 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
213 return; 213 return;
214 if (OLDMEM_BASE)
215 return;
214 if (console_devno != -1) 216 if (console_devno != -1)
215 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", 217 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
216 ipl_info.data.fcp.dev_id.devno, console_devno); 218 ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
482 484
483 485
484#ifdef CONFIG_ZFCPDUMP 486#ifdef CONFIG_ZFCPDUMP
485 if (ipl_info.type == IPL_TYPE_FCP_DUMP) { 487 if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
486 memory_end = ZFCPDUMP_HSA_SIZE; 488 memory_end = ZFCPDUMP_HSA_SIZE;
487 memory_end_set = 1; 489 memory_end_set = 1;
488 } 490 }
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 73eb08c874f..bcab2f04ba5 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper) 348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper) 349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) 350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 77b8942b9a1..fdb5b8cb260 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
68 return mask; 68 return mask;
69} 69}
70 70
71static void add_cpus_to_mask(struct topology_cpu *tl_cpu, 71static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
72 struct mask_info *book, struct mask_info *core) 72 struct mask_info *book,
73 struct mask_info *core,
74 int z10)
73{ 75{
74 unsigned int cpu; 76 unsigned int cpu;
75 77
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
88 cpu_book_id[lcpu] = book->id; 90 cpu_book_id[lcpu] = book->id;
89#endif 91#endif
90 cpumask_set_cpu(lcpu, &core->mask); 92 cpumask_set_cpu(lcpu, &core->mask);
91 cpu_core_id[lcpu] = core->id; 93 if (z10) {
94 cpu_core_id[lcpu] = rcpu;
95 core = core->next;
96 } else {
97 cpu_core_id[lcpu] = core->id;
98 }
92 smp_cpu_polarization[lcpu] = tl_cpu->pp; 99 smp_cpu_polarization[lcpu] = tl_cpu->pp;
93 } 100 }
94 } 101 }
102 return core;
95} 103}
96 104
97static void clear_masks(void) 105static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
123{ 131{
124#ifdef CONFIG_SCHED_BOOK 132#ifdef CONFIG_SCHED_BOOK
125 struct mask_info *book = &book_info; 133 struct mask_info *book = &book_info;
134 struct cpuid cpu_id;
126#else 135#else
127 struct mask_info *book = NULL; 136 struct mask_info *book = NULL;
128#endif 137#endif
129 struct mask_info *core = &core_info; 138 struct mask_info *core = &core_info;
130 union topology_entry *tle, *end; 139 union topology_entry *tle, *end;
140 int z10 = 0;
131 141
132 142#ifdef CONFIG_SCHED_BOOK
143 get_cpu_id(&cpu_id);
144 z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
145#endif
133 spin_lock_irq(&topology_lock); 146 spin_lock_irq(&topology_lock);
134 clear_masks(); 147 clear_masks();
135 tle = info->tle; 148 tle = info->tle;
136 end = (union topology_entry *)((unsigned long)info + info->length); 149 end = (union topology_entry *)((unsigned long)info + info->length);
137 while (tle < end) { 150 while (tle < end) {
151#ifdef CONFIG_SCHED_BOOK
152 if (z10) {
153 switch (tle->nl) {
154 case 1:
155 book = book->next;
156 book->id = tle->container.id;
157 break;
158 case 0:
159 core = add_cpus_to_mask(&tle->cpu, book, core, z10);
160 break;
161 default:
162 clear_masks();
163 goto out;
164 }
165 tle = next_tle(tle);
166 continue;
167 }
168#endif
138 switch (tle->nl) { 169 switch (tle->nl) {
139#ifdef CONFIG_SCHED_BOOK 170#ifdef CONFIG_SCHED_BOOK
140 case 2: 171 case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
147 core->id = tle->container.id; 178 core->id = tle->container.id;
148 break; 179 break;
149 case 0: 180 case 0:
150 add_cpus_to_mask(&tle->cpu, book, core); 181 add_cpus_to_mask(&tle->cpu, book, core, z10);
151 break; 182 break;
152 default: 183 default:
153 clear_masks(); 184 clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
328 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 359 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
329 printk(" %d", info->mag[i]); 360 printk(" %d", info->mag[i]);
330 printk(" / %d\n", info->mnest); 361 printk(" / %d\n", info->mnest);
331 alloc_masks(info, &core_info, 2); 362 alloc_masks(info, &core_info, 1);
332#ifdef CONFIG_SCHED_BOOK 363#ifdef CONFIG_SCHED_BOOK
333 alloc_masks(info, &book_info, 3); 364 alloc_masks(info, &book_info, 2);
334#endif 365#endif
335} 366}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 56fe6bc81fe..e4c79ebb40e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -43,6 +43,8 @@ SECTIONS
43 43
44 NOTES :text :note 44 NOTES :text :note
45 45
46 .dummy : { *(.dummy) } :data
47
46 RODATA 48 RODATA
47 49
48#ifdef CONFIG_SHARED_KERNEL 50#ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 87cedd61be0..8943e82cd4d 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
70 return -EOPNOTSUPP; 70 return -EOPNOTSUPP;
71 } 71 }
72 72
73 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 73 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
74 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; 74 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
75 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; 75 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
76 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; 76 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c7c51898984..02434543eab 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
132 int rc = 0; 132 int rc = 0;
133 133
134 vcpu->stat.exit_stop_request++; 134 vcpu->stat.exit_stop_request++;
135 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
136 spin_lock_bh(&vcpu->arch.local_int.lock); 135 spin_lock_bh(&vcpu->arch.local_int.lock);
137 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { 136 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
138 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; 137 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
149 } 148 }
150 149
151 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { 150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
151 atomic_set_mask(CPUSTAT_STOPPED,
152 &vcpu->arch.sie_block->cpuflags);
152 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; 153 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
153 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); 154 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
154 rc = -EOPNOTSUPP; 155 rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87c16705b38..278ee009ce6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
252 offsetof(struct _lowcore, restart_psw), sizeof(psw_t)); 252 offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
253 if (rc == -EFAULT) 253 if (rc == -EFAULT)
254 exception = 1; 254 exception = 1;
255 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
255 break; 256 break;
256 257
257 case KVM_S390_PROGRAM_INT: 258 case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0bd3bea1e4c..d1c44573245 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) }, 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
68 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
69 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
70 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
127 switch (ext) { 128 switch (ext) {
128 case KVM_CAP_S390_PSW: 129 case KVM_CAP_S390_PSW:
129 case KVM_CAP_S390_GMAP: 130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
130 r = 1; 132 r = 1;
131 break; 133 break;
132 default: 134 default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
270 restore_fp_regs(&vcpu->arch.guest_fpregs); 272 restore_fp_regs(&vcpu->arch.guest_fpregs);
271 restore_access_regs(vcpu->arch.guest_acrs); 273 restore_access_regs(vcpu->arch.guest_acrs);
272 gmap_enable(vcpu->arch.gmap); 274 gmap_enable(vcpu->arch.gmap);
275 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
273} 276}
274 277
275void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 278void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
276{ 279{
280 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
277 gmap_disable(vcpu->arch.gmap); 281 gmap_disable(vcpu->arch.gmap);
278 save_fp_regs(&vcpu->arch.guest_fpregs); 282 save_fp_regs(&vcpu->arch.guest_fpregs);
279 save_access_regs(vcpu->arch.guest_acrs); 283 save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
301 305
302int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 306int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
303{ 307{
304 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM); 308 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
309 CPUSTAT_SM |
310 CPUSTAT_STOPPED);
305 vcpu->arch.sie_block->ecb = 6; 311 vcpu->arch.sie_block->ecb = 6;
306 vcpu->arch.sie_block->eca = 0xC1002001U; 312 vcpu->arch.sie_block->eca = 0xC1002001U;
307 vcpu->arch.sie_block->fac = (int) (long) facilities; 313 vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
428{ 434{
429 int rc = 0; 435 int rc = 0;
430 436
431 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) 437 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
432 rc = -EBUSY; 438 rc = -EBUSY;
433 else { 439 else {
434 vcpu->run->psw_mask = psw.mask; 440 vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
501 if (vcpu->sigset_active) 507 if (vcpu->sigset_active)
502 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 508 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
503 509
504 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 510 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
505 511
506 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); 512 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
507 513
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 39162636108..d0263895992 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
336 u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0; 336 u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
337 u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0; 337 u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
338 struct vm_area_struct *vma; 338 struct vm_area_struct *vma;
339 unsigned long user_address;
339 340
340 vcpu->stat.instruction_tprot++; 341 vcpu->stat.instruction_tprot++;
341 342
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
349 return -EOPNOTSUPP; 350 return -EOPNOTSUPP;
350 351
351 352
353 /* we must resolve the address without holding the mmap semaphore.
354 * This is ok since the userspace hypervisor is not supposed to change
355 * the mapping while the guest queries the memory. Otherwise the guest
356 * might crash or get wrong info anyway. */
357 user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
358
352 down_read(&current->mm->mmap_sem); 359 down_read(&current->mm->mmap_sem);
353 vma = find_vma(current->mm, 360 vma = find_vma(current->mm, user_address);
354 (unsigned long) __guestaddr_to_user(vcpu, address1));
355 if (!vma) { 361 if (!vma) {
356 up_read(&current->mm->mmap_sem); 362 up_read(&current->mm->mmap_sem);
357 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 363 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f815118835f..0a7941d74bc 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -31,9 +31,11 @@
31#define SIGP_SET_PREFIX 0x0d 31#define SIGP_SET_PREFIX 0x0d
32#define SIGP_STORE_STATUS_ADDR 0x0e 32#define SIGP_STORE_STATUS_ADDR 0x0e
33#define SIGP_SET_ARCH 0x12 33#define SIGP_SET_ARCH 0x12
34#define SIGP_SENSE_RUNNING 0x15
34 35
35/* cpu status bits */ 36/* cpu status bits */
36#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL 37#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
38#define SIGP_STAT_NOT_RUNNING 0x00000400UL
37#define SIGP_STAT_INCORRECT_STATE 0x00000200UL 39#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
38#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL 40#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
39#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL 41#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
57 spin_lock(&fi->lock); 59 spin_lock(&fi->lock);
58 if (fi->local_int[cpu_addr] == NULL) 60 if (fi->local_int[cpu_addr] == NULL)
59 rc = 3; /* not operational */ 61 rc = 3; /* not operational */
60 else if (atomic_read(fi->local_int[cpu_addr]->cpuflags) 62 else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
61 & CPUSTAT_RUNNING) { 63 & CPUSTAT_STOPPED)) {
62 *reg &= 0xffffffff00000000UL; 64 *reg &= 0xffffffff00000000UL;
63 rc = 1; /* status stored */ 65 rc = 1; /* status stored */
64 } else { 66 } else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
251 253
252 spin_lock_bh(&li->lock); 254 spin_lock_bh(&li->lock);
253 /* cpu must be in stopped state */ 255 /* cpu must be in stopped state */
254 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { 256 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
255 rc = 1; /* incorrect state */ 257 rc = 1; /* incorrect state */
256 *reg &= SIGP_STAT_INCORRECT_STATE; 258 *reg &= SIGP_STAT_INCORRECT_STATE;
257 kfree(inti); 259 kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
275 return rc; 277 return rc;
276} 278}
277 279
280static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
281 unsigned long *reg)
282{
283 int rc;
284 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
285
286 if (cpu_addr >= KVM_MAX_VCPUS)
287 return 3; /* not operational */
288
289 spin_lock(&fi->lock);
290 if (fi->local_int[cpu_addr] == NULL)
291 rc = 3; /* not operational */
292 else {
293 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
294 & CPUSTAT_RUNNING) {
295 /* running */
296 rc = 1;
297 } else {
298 /* not running */
299 *reg &= 0xffffffff00000000UL;
300 *reg |= SIGP_STAT_NOT_RUNNING;
301 rc = 0;
302 }
303 }
304 spin_unlock(&fi->lock);
305
306 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
307 rc);
308
309 return rc;
310}
311
278int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) 312int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
279{ 313{
280 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 314 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
331 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, 365 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
332 &vcpu->arch.guest_gprs[r1]); 366 &vcpu->arch.guest_gprs[r1]);
333 break; 367 break;
368 case SIGP_SENSE_RUNNING:
369 vcpu->stat.instruction_sigp_sense_running++;
370 rc = __sigp_sense_running(vcpu, cpu_addr,
371 &vcpu->arch.guest_gprs[r1]);
372 break;
334 case SIGP_RESTART: 373 case SIGP_RESTART:
335 vcpu->stat.instruction_sigp_restart++; 374 vcpu->stat.instruction_sigp_restart++;
336 /* user space must know about restart */ 375 /* user space must know about restart */
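Across the KVM hunks above, the meaning of the SIE cpuflags is split: the architected stopped/operating state is now carried by a dedicated CPUSTAT_STOPPED bit (set at vcpu creation and on SIGP STOP or an IPL-triggered reset, cleared when a restart interrupt is delivered and on entry to KVM_RUN), while CPUSTAT_RUNNING is set in kvm_arch_vcpu_load() and cleared in kvm_arch_vcpu_put(), so it now means "currently backed by a host cpu", which is what the new SIGP SENSE RUNNING handler reports. A minimal sketch of the resulting transitions; the helper names are invented, the atomic_*_mask() calls are the ones used verbatim above:

    /* Sketch only: the helpers are hypothetical wrappers around the
     * flag updates introduced by this series. */
    static void vcpu_mark_stopped(struct kvm_vcpu *vcpu)	/* SIGP STOP, IPL reset */
    {
    	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
    }

    static void vcpu_mark_operating(struct kvm_vcpu *vcpu)	/* restart, KVM_RUN entry */
    {
    	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
    }

    static int vcpu_backed_by_host_cpu(struct kvm_vcpu *vcpu)	/* SIGP SENSE RUNNING */
    {
    	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING;
    }
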
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1766def5bc3..a9a301866b3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
587 } else { 587 } else {
588 /* Completion interrupt was faster than initial 588 /* Completion interrupt was faster than initial
589 * interrupt. Set pfault_wait to -1 so the initial 589 * interrupt. Set pfault_wait to -1 so the initial
590 * interrupt doesn't put the task to sleep. */ 590 * interrupt doesn't put the task to sleep.
591 tsk->thread.pfault_wait = -1; 591 * If the task is not running, ignore the completion
592 * interrupt since it must be a leftover of a PFAULT
593 * CANCEL operation which didn't remove all pending
594 * completion interrupts. */
595 if (tsk->state == TASK_RUNNING)
596 tsk->thread.pfault_wait = -1;
592 } 597 }
593 put_task_struct(tsk); 598 put_task_struct(tsk);
594 } else { 599 } else {
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 5b31a8e8982..a790cc65747 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
431#define kern_addr_valid(addr) \ 431#define kern_addr_valid(addr) \
432 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap)) 432 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
433 433
434extern int io_remap_pfn_range(struct vm_area_struct *vma,
435 unsigned long from, unsigned long pfn,
436 unsigned long size, pgprot_t prot);
437
438/* 434/*
439 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 435 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
440 * its high 4 bits. These macros/functions put it there or get it from there. 436 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
443#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 439#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
444#define GET_PFN(pfn) (pfn & 0x0fffffffUL) 440#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
445 441
442extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
443 unsigned long, pgprot_t);
444
445static inline int io_remap_pfn_range(struct vm_area_struct *vma,
446 unsigned long from, unsigned long pfn,
447 unsigned long size, pgprot_t prot)
448{
449 unsigned long long offset, space, phys_base;
450
451 offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
452 space = GET_IOSPACE(pfn);
453 phys_base = offset | (space << 32ULL);
454
455 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
456}
457
446#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 458#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
447#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 459#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
448({ \ 460({ \
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index adf89329af5..38ebb2c6013 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
757 757
758extern int page_in_phys_avail(unsigned long paddr); 758extern int page_in_phys_avail(unsigned long paddr);
759 759
760extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
761 unsigned long pfn,
762 unsigned long size, pgprot_t prot);
763
764/* 760/*
765 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 761 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
766 * its high 4 bits. These macros/functions put it there or get it from there. 762 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
769#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 765#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
770#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL) 766#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
771 767
768extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
769 unsigned long, pgprot_t);
770
771static inline int io_remap_pfn_range(struct vm_area_struct *vma,
772 unsigned long from, unsigned long pfn,
773 unsigned long size, pgprot_t prot)
774{
775 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
776 int space = GET_IOSPACE(pfn);
777 unsigned long phys_base;
778
779 phys_base = offset | (((unsigned long) space) << 32UL);
780
781 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
782}
783
772#include <asm-generic/pgtable.h> 784#include <asm-generic/pgtable.h>
773 785
774/* We provide our own get_unmapped_area to cope with VA holes and 786/* We provide our own get_unmapped_area to cope with VA holes and
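Both pgtable headers now implement io_remap_pfn_range() inline on top of the ordinary remap_pfn_range(): the encoded pfn carries the I/O space number in its top 4 bits (GET_IOSPACE) and the frame within that space in the remaining bits (GET_PFN); the inline rebuilds the physical base as offset | (space << 32) and hands the resulting plain pfn to remap_pfn_range(), which is what allows the per-arch generic_32.c/generic_64.c page-table walkers below to be deleted. A small standalone illustration of the sparc64 arithmetic (space and frame values are made up; PAGE_SHIFT of 13 assumes sparc64's 8 KB pages and an LP64 host):

    /* Standalone sketch of the pfn decoding used by the inline above. */
    #include <stdio.h>

    #define BITS_PER_LONG	64
    #define PAGE_SHIFT	13
    #define GET_IOSPACE(pfn)	((pfn) >> (BITS_PER_LONG - 4))
    #define GET_PFN(pfn)	((pfn) & 0x0fffffffffffffffUL)

    int main(void)
    {
    	unsigned long space = 0x8UL;		/* hypothetical iospace */
    	unsigned long frame = 0x12345UL;	/* hypothetical frame   */
    	unsigned long pfn   = (space << (BITS_PER_LONG - 4)) | frame;

    	unsigned long offset    = GET_PFN(pfn) << PAGE_SHIFT;
    	unsigned long phys_base = offset | (GET_IOSPACE(pfn) << 32UL);

    	/* remap_pfn_range() would then be called with this pfn: */
    	printf("plain pfn = %#lx\n", phys_base >> PAGE_SHIFT);
    	return 0;
    }
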
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index e27f8ea8656..0c218e4c088 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
42extern void fpload(unsigned long *fpregs, unsigned long *fsr); 42extern void fpload(unsigned long *fpregs, unsigned long *fsr);
43 43
44#else /* CONFIG_SPARC32 */ 44#else /* CONFIG_SPARC32 */
45
46#include <asm/trap_block.h>
47
45struct popc_3insn_patch_entry { 48struct popc_3insn_patch_entry {
46 unsigned int addr; 49 unsigned int addr;
47 unsigned int insns[3]; 50 unsigned int insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
57 __popc_6insn_patch_end; 60 __popc_6insn_patch_end;
58 61
59extern void __init per_cpu_patch(void); 62extern void __init per_cpu_patch(void);
63extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
64 struct sun4v_1insn_patch_entry *);
65extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
66 struct sun4v_2insn_patch_entry *);
60extern void __init sun4v_patch(void); 67extern void __init sun4v_patch(void);
61extern void __init boot_cpu_id_too_large(int cpu); 68extern void __init boot_cpu_id_too_large(int cpu);
62extern unsigned int dcache_parity_tl1_occurred; 69extern unsigned int dcache_parity_tl1_occurred;
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index da0c6c70ccb..e5519870c3d 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -17,6 +17,8 @@
17#include <asm/processor.h> 17#include <asm/processor.h>
18#include <asm/spitfire.h> 18#include <asm/spitfire.h>
19 19
20#include "entry.h"
21
20#ifdef CONFIG_SPARC64 22#ifdef CONFIG_SPARC64
21 23
22#include <linux/jump_label.h> 24#include <linux/jump_label.h>
@@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
203} 205}
204 206
205#ifdef CONFIG_SPARC64 207#ifdef CONFIG_SPARC64
208static void do_patch_sections(const Elf_Ehdr *hdr,
209 const Elf_Shdr *sechdrs)
210{
211 const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
212 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
213
214 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
215 if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
216 sun4v_1insn = s;
217 if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
218 sun4v_2insn = s;
219 }
220
221 if (sun4v_1insn && tlb_type == hypervisor) {
222 void *p = (void *) sun4v_1insn->sh_addr;
223 sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
224 }
225 if (sun4v_2insn && tlb_type == hypervisor) {
226 void *p = (void *) sun4v_2insn->sh_addr;
227 sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
228 }
229}
230
206int module_finalize(const Elf_Ehdr *hdr, 231int module_finalize(const Elf_Ehdr *hdr,
207 const Elf_Shdr *sechdrs, 232 const Elf_Shdr *sechdrs,
208 struct module *me) 233 struct module *me)
@@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
210 /* make jump label nops */ 235 /* make jump label nops */
211 jump_label_apply_nops(me); 236 jump_label_apply_nops(me);
212 237
238 do_patch_sections(hdr, sechdrs);
239
213 /* Cheetah's I-cache is fully coherent. */ 240 /* Cheetah's I-cache is fully coherent. */
214 if (tlb_type == spitfire) { 241 if (tlb_type == spitfire) {
215 unsigned long va; 242 unsigned long va;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index c965595aa7e..a854a1c240f 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
234 } 234 }
235} 235}
236 236
237void __init sun4v_patch(void) 237void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
238 struct sun4v_1insn_patch_entry *end)
238{ 239{
239 extern void sun4v_hvapi_init(void); 240 while (start < end) {
240 struct sun4v_1insn_patch_entry *p1; 241 unsigned long addr = start->addr;
241 struct sun4v_2insn_patch_entry *p2;
242
243 if (tlb_type != hypervisor)
244 return;
245 242
246 p1 = &__sun4v_1insn_patch; 243 *(unsigned int *) (addr + 0) = start->insn;
247 while (p1 < &__sun4v_1insn_patch_end) {
248 unsigned long addr = p1->addr;
249
250 *(unsigned int *) (addr + 0) = p1->insn;
251 wmb(); 244 wmb();
252 __asm__ __volatile__("flush %0" : : "r" (addr + 0)); 245 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
253 246
254 p1++; 247 start++;
255 } 248 }
249}
256 250
257 p2 = &__sun4v_2insn_patch; 251void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
258 while (p2 < &__sun4v_2insn_patch_end) { 252 struct sun4v_2insn_patch_entry *end)
259 unsigned long addr = p2->addr; 253{
254 while (start < end) {
255 unsigned long addr = start->addr;
260 256
261 *(unsigned int *) (addr + 0) = p2->insns[0]; 257 *(unsigned int *) (addr + 0) = start->insns[0];
262 wmb(); 258 wmb();
263 __asm__ __volatile__("flush %0" : : "r" (addr + 0)); 259 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
264 260
265 *(unsigned int *) (addr + 4) = p2->insns[1]; 261 *(unsigned int *) (addr + 4) = start->insns[1];
266 wmb(); 262 wmb();
267 __asm__ __volatile__("flush %0" : : "r" (addr + 4)); 263 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
268 264
269 p2++; 265 start++;
270 } 266 }
267}
268
269void __init sun4v_patch(void)
270{
271 extern void sun4v_hvapi_init(void);
272
273 if (tlb_type != hypervisor)
274 return;
275
276 sun4v_patch_1insn_range(&__sun4v_1insn_patch,
277 &__sun4v_1insn_patch_end);
278
279 sun4v_patch_2insn_range(&__sun4v_2insn_patch,
280 &__sun4v_2insn_patch_end);
271 281
272 sun4v_hvapi_init(); 282 sun4v_hvapi_init();
273} 283}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 2caa556db86..023b8860dc9 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
822 * want to handle. Thus you cannot kill init even with a SIGKILL even by 822 * want to handle. Thus you cannot kill init even with a SIGKILL even by
823 * mistake. 823 * mistake.
824 */ 824 */
825void do_signal32(sigset_t *oldset, struct pt_regs * regs, 825void do_signal32(sigset_t *oldset, struct pt_regs * regs)
826 int restart_syscall, unsigned long orig_i0)
827{ 826{
828 struct k_sigaction ka; 827 struct k_sigaction ka;
828 unsigned long orig_i0;
829 int restart_syscall;
829 siginfo_t info; 830 siginfo_t info;
830 int signr; 831 int signr;
831 832
832 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 833 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
833 834
834 /* If the debugger messes with the program counter, it clears 835 restart_syscall = 0;
835 * the "in syscall" bit, directing us to not perform a syscall 836 orig_i0 = 0;
836 * restart. 837 if (pt_regs_is_syscall(regs) &&
837 */ 838 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
838 if (restart_syscall && !pt_regs_is_syscall(regs)) 839 restart_syscall = 1;
839 restart_syscall = 0; 840 orig_i0 = regs->u_regs[UREG_G6];
841 }
840 842
841 if (signr > 0) { 843 if (signr > 0) {
842 if (restart_syscall) 844 if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 8ce247ac04c..d54c6e53aba 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
519 siginfo_t info; 519 siginfo_t info;
520 int signr; 520 int signr;
521 521
522 /* It's a lot of work and synchronization to add a new ptrace
523 * register for GDB to save and restore in order to get
524 * orig_i0 correct for syscall restarts when debugging.
525 *
526 * Although it should be the case that most of the global
527 * registers are volatile across a system call, glibc already
528 * depends upon the fact that we preserve them. So we can't
529 * just use any global register to save away the orig_i0 value.
530 *
531 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
532 * preserved across a system call trap by various pieces of
533 * code in glibc.
534 *
535 * %g7 is used as the "thread register". %g6 is not used in
536 * any fixed manner. %g6 is used as a scratch register and
537 * a compiler temporary, but its value is never used across
538 * a system call. Therefore %g6 is usable for orig_i0 storage.
539 */
522 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) 540 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
523 restart_syscall = 1; 541 regs->u_regs[UREG_G6] = orig_i0;
524 else
525 restart_syscall = 0;
526 542
527 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 543 if (test_thread_flag(TIF_RESTORE_SIGMASK))
528 oldset = &current->saved_sigmask; 544 oldset = &current->saved_sigmask;
@@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
535 * the software "in syscall" bit, directing us to not perform 551 * the software "in syscall" bit, directing us to not perform
536 * a syscall restart. 552 * a syscall restart.
537 */ 553 */
538 if (restart_syscall && !pt_regs_is_syscall(regs)) 554 restart_syscall = 0;
539 restart_syscall = 0; 555 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
556 restart_syscall = 1;
557 orig_i0 = regs->u_regs[UREG_G6];
558 }
559
540 560
541 if (signr > 0) { 561 if (signr > 0) {
542 if (restart_syscall) 562 if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index a2b81598d90..f0836cd0e2f 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
529 siginfo_t info; 529 siginfo_t info;
530 int signr; 530 int signr;
531 531
532 /* It's a lot of work and synchronization to add a new ptrace
533 * register for GDB to save and restore in order to get
534 * orig_i0 correct for syscall restarts when debugging.
535 *
536 * Although it should be the case that most of the global
537 * registers are volatile across a system call, glibc already
538 * depends upon the fact that we preserve them. So we can't
539 * just use any global register to save away the orig_i0 value.
540 *
541 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
542 * preserved across a system call trap by various pieces of
543 * code in glibc.
544 *
545 * %g7 is used as the "thread register". %g6 is not used in
546 * any fixed manner. %g6 is used as a scratch register and
547 * a compiler temporary, but its value is never used across
548 * a system call. Therefore %g6 is usable for orig_i0 storage.
549 */
532 if (pt_regs_is_syscall(regs) && 550 if (pt_regs_is_syscall(regs) &&
533 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { 551 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
534 restart_syscall = 1; 552 regs->u_regs[UREG_G6] = orig_i0;
535 } else
536 restart_syscall = 0;
537 553
538 if (current_thread_info()->status & TS_RESTORE_SIGMASK) 554 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
539 oldset = &current->saved_sigmask; 555 oldset = &current->saved_sigmask;
@@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
542 558
543#ifdef CONFIG_COMPAT 559#ifdef CONFIG_COMPAT
544 if (test_thread_flag(TIF_32BIT)) { 560 if (test_thread_flag(TIF_32BIT)) {
545 extern void do_signal32(sigset_t *, struct pt_regs *, 561 extern void do_signal32(sigset_t *, struct pt_regs *);
546 int restart_syscall, 562 do_signal32(oldset, regs);
547 unsigned long orig_i0);
548 do_signal32(oldset, regs, restart_syscall, orig_i0);
549 return; 563 return;
550 } 564 }
551#endif 565#endif
552 566
553 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 567 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
554 568
555 /* If the debugger messes with the program counter, it clears 569 restart_syscall = 0;
556 * the software "in syscall" bit, directing us to not perform 570 if (pt_regs_is_syscall(regs) &&
557 * a syscall restart. 571 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
558 */ 572 restart_syscall = 1;
559 if (restart_syscall && !pt_regs_is_syscall(regs)) 573 orig_i0 = regs->u_regs[UREG_G6];
560 restart_syscall = 0; 574 }
561 575
562 if (signr > 0) { 576 if (signr > 0) {
563 if (restart_syscall) 577 if (restart_syscall)
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index e7dc508c38e..b19570d41a3 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -2,6 +2,7 @@
2#include <linux/types.h> 2#include <linux/types.h>
3#include <linux/thread_info.h> 3#include <linux/thread_info.h>
4#include <linux/uaccess.h> 4#include <linux/uaccess.h>
5#include <linux/errno.h>
5 6
6#include <asm/sigcontext.h> 7#include <asm/sigcontext.h>
7#include <asm/fpumacro.h> 8#include <asm/fpumacro.h>
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index e3cda21b5ee..301421c1129 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8obj-y += fault_$(BITS).o 8obj-y += fault_$(BITS).o
9obj-y += init_$(BITS).o 9obj-y += init_$(BITS).o
10obj-$(CONFIG_SPARC32) += loadmmu.o 10obj-$(CONFIG_SPARC32) += loadmmu.o
11obj-y += generic_$(BITS).o
12obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o 11obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
13obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o 12obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
14obj-$(CONFIG_SPARC_LEON)+= leon_mm.o 13obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644
index 6ca39a60a19..00000000000
--- a/arch/sparc/mm/generic_32.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/pagemap.h>
12#include <linux/export.h>
13
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/cacheflush.h>
18#include <asm/tlbflush.h>
19
20/* Remap IO memory, the same way as remap_pfn_range(), but use
21 * the obio memory space.
22 *
23 * They use a pgprot that sets PAGE_IO and does not check the
24 * mem_map table as this is independent of normal memory.
25 */
26static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
27 unsigned long offset, pgprot_t prot, int space)
28{
29 unsigned long end;
30
31 address &= ~PMD_MASK;
32 end = address + size;
33 if (end > PMD_SIZE)
34 end = PMD_SIZE;
35 do {
36 set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
37 address += PAGE_SIZE;
38 offset += PAGE_SIZE;
39 pte++;
40 } while (address < end);
41}
42
43static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
44 unsigned long offset, pgprot_t prot, int space)
45{
46 unsigned long end;
47
48 address &= ~PGDIR_MASK;
49 end = address + size;
50 if (end > PGDIR_SIZE)
51 end = PGDIR_SIZE;
52 offset -= address;
53 do {
54 pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
55 if (!pte)
56 return -ENOMEM;
57 io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
58 address = (address + PMD_SIZE) & PMD_MASK;
59 pmd++;
60 } while (address < end);
61 return 0;
62}
63
64int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
65 unsigned long pfn, unsigned long size, pgprot_t prot)
66{
67 int error = 0;
68 pgd_t * dir;
69 unsigned long beg = from;
70 unsigned long end = from + size;
71 struct mm_struct *mm = vma->vm_mm;
72 int space = GET_IOSPACE(pfn);
73 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
74
75 /* See comment in mm/memory.c remap_pfn_range */
76 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
77 vma->vm_pgoff = (offset >> PAGE_SHIFT) |
78 ((unsigned long)space << 28UL);
79
80 offset -= from;
81 dir = pgd_offset(mm, from);
82 flush_cache_range(vma, beg, end);
83
84 while (from < end) {
85 pmd_t *pmd = pmd_alloc(mm, dir, from);
86 error = -ENOMEM;
87 if (!pmd)
88 break;
89 error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
90 if (error)
91 break;
92 from = (from + PGDIR_SIZE) & PGDIR_MASK;
93 dir++;
94 }
95
96 flush_tlb_range(vma, beg, end);
97 return error;
98}
99EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644
index 9b357ddae39..00000000000
--- a/arch/sparc/mm/generic_64.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/export.h>
12#include <linux/pagemap.h>
13
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/tlbflush.h>
18
19/* Remap IO memory, the same way as remap_pfn_range(), but use
20 * the obio memory space.
21 *
22 * They use a pgprot that sets PAGE_IO and does not check the
23 * mem_map table as this is independent of normal memory.
24 */
25static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
26 unsigned long address,
27 unsigned long size,
28 unsigned long offset, pgprot_t prot,
29 int space)
30{
31 unsigned long end;
32
33 /* clear hack bit that was used as a write_combine side-effect flag */
34 offset &= ~0x1UL;
35 address &= ~PMD_MASK;
36 end = address + size;
37 if (end > PMD_SIZE)
38 end = PMD_SIZE;
39 do {
40 pte_t entry;
41 unsigned long curend = address + PAGE_SIZE;
42
43 entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
44 if (!(address & 0xffff)) {
45 if (PAGE_SIZE < (4 * 1024 * 1024) &&
46 !(address & 0x3fffff) &&
47 !(offset & 0x3ffffe) &&
48 end >= address + 0x400000) {
49 entry = mk_pte_io(offset, prot, space,
50 4 * 1024 * 1024);
51 curend = address + 0x400000;
52 offset += 0x400000;
53 } else if (PAGE_SIZE < (512 * 1024) &&
54 !(address & 0x7ffff) &&
55 !(offset & 0x7fffe) &&
56 end >= address + 0x80000) {
57 entry = mk_pte_io(offset, prot, space,
58 512 * 1024 * 1024);
59 curend = address + 0x80000;
60 offset += 0x80000;
61 } else if (PAGE_SIZE < (64 * 1024) &&
62 !(offset & 0xfffe) &&
63 end >= address + 0x10000) {
64 entry = mk_pte_io(offset, prot, space,
65 64 * 1024);
66 curend = address + 0x10000;
67 offset += 0x10000;
68 } else
69 offset += PAGE_SIZE;
70 } else
71 offset += PAGE_SIZE;
72
73 if (pte_write(entry))
74 entry = pte_mkdirty(entry);
75 do {
76 BUG_ON(!pte_none(*pte));
77 set_pte_at(mm, address, pte, entry);
78 address += PAGE_SIZE;
79 pte_val(entry) += PAGE_SIZE;
80 pte++;
81 } while (address < curend);
82 } while (address < end);
83}
84
85static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
86 unsigned long offset, pgprot_t prot, int space)
87{
88 unsigned long end;
89
90 address &= ~PGDIR_MASK;
91 end = address + size;
92 if (end > PGDIR_SIZE)
93 end = PGDIR_SIZE;
94 offset -= address;
95 do {
96 pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
97 if (!pte)
98 return -ENOMEM;
99 io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
100 pte_unmap(pte);
101 address = (address + PMD_SIZE) & PMD_MASK;
102 pmd++;
103 } while (address < end);
104 return 0;
105}
106
107static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
108 unsigned long offset, pgprot_t prot, int space)
109{
110 unsigned long end;
111
112 address &= ~PUD_MASK;
113 end = address + size;
114 if (end > PUD_SIZE)
115 end = PUD_SIZE;
116 offset -= address;
117 do {
118 pmd_t *pmd = pmd_alloc(mm, pud, address);
119 if (!pud)
120 return -ENOMEM;
121 io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
122 address = (address + PUD_SIZE) & PUD_MASK;
123 pud++;
124 } while (address < end);
125 return 0;
126}
127
128int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
129 unsigned long pfn, unsigned long size, pgprot_t prot)
130{
131 int error = 0;
132 pgd_t * dir;
133 unsigned long beg = from;
134 unsigned long end = from + size;
135 struct mm_struct *mm = vma->vm_mm;
136 int space = GET_IOSPACE(pfn);
137 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
138 unsigned long phys_base;
139
140 phys_base = offset | (((unsigned long) space) << 32UL);
141
142 /* See comment in mm/memory.c remap_pfn_range */
143 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
144 vma->vm_pgoff = phys_base >> PAGE_SHIFT;
145
146 offset -= from;
147 dir = pgd_offset(mm, from);
148 flush_cache_range(vma, beg, end);
149
150 while (from < end) {
151 pud_t *pud = pud_alloc(mm, dir, from);
152 error = -ENOMEM;
153 if (!pud)
154 break;
155 error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
156 if (error)
157 break;
158 from = (from + PGDIR_SIZE) & PGDIR_MASK;
159 dir++;
160 }
161
162 flush_tlb_range(vma, beg, end);
163 return error;
164}
165EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index e57dcce9bfd..942ed6174f1 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -237,13 +237,13 @@ menu "PKUnity NetBook-0916 Features"
237 237
238config I2C_BATTERY_BQ27200 238config I2C_BATTERY_BQ27200
239 tristate "I2C Battery BQ27200 Support" 239 tristate "I2C Battery BQ27200 Support"
240 select PUV3_I2C 240 select I2C_PUV3
241 select POWER_SUPPLY 241 select POWER_SUPPLY
242 select BATTERY_BQ27x00 242 select BATTERY_BQ27x00
243 243
244config I2C_EEPROM_AT24 244config I2C_EEPROM_AT24
245 tristate "I2C EEPROMs AT24 support" 245 tristate "I2C EEPROMs AT24 support"
246 select PUV3_I2C 246 select I2C_PUV3
247 select MISC_DEVICES 247 select MISC_DEVICES
248 select EEPROM_AT24 248 select EEPROM_AT24
249 249
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index ae2ec334c3c..1a362623984 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -44,18 +44,4 @@ config DEBUG_OCD
44 Say Y here if you want the debug print routines to direct their 44 Say Y here if you want the debug print routines to direct their
45 output to the UniCore On-Chip-Debugger channel using CP #1. 45 output to the UniCore On-Chip-Debugger channel using CP #1.
46 46
47config DEBUG_OCD_BREAKPOINT
48 bool "Breakpoint support via On-Chip-Debugger"
49 depends on DEBUG_OCD
50
51config DEBUG_UART
52 int "Kernel low-level debugging messages via serial port"
53 depends on DEBUG_LL
54 range 0 1
55 default "0"
56 help
57 Choice for UART for kernel low-level using PKUnity UARTS,
58 should be between zero and one. The port must have been
59 initialised by the boot-loader before use.
60
61endmenu 47endmenu
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index b0954a2d23c..950a9afa38f 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -10,8 +10,8 @@
10# Copyright (C) 2001~2010 GUAN Xue-tao 10# Copyright (C) 2001~2010 GUAN Xue-tao
11# 11#
12 12
13EXTRA_CFLAGS := -fpic -fno-builtin 13ccflags-y := -fpic -fno-builtin
14EXTRA_AFLAGS := -Wa,-march=all 14asflags-y := -Wa,-march=all
15 15
16OBJS := misc.o 16OBJS := misc.o
17 17
diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h
index 1628a632899..401f597bc38 100644
--- a/arch/unicore32/include/asm/bitops.h
+++ b/arch/unicore32/include/asm/bitops.h
@@ -13,12 +13,6 @@
13#ifndef __UNICORE_BITOPS_H__ 13#ifndef __UNICORE_BITOPS_H__
14#define __UNICORE_BITOPS_H__ 14#define __UNICORE_BITOPS_H__
15 15
16#define find_next_bit __uc32_find_next_bit
17#define find_next_zero_bit __uc32_find_next_zero_bit
18
19#define find_first_bit __uc32_find_first_bit
20#define find_first_zero_bit __uc32_find_first_zero_bit
21
22#define _ASM_GENERIC_BITOPS_FLS_H_ 16#define _ASM_GENERIC_BITOPS_FLS_H_
23#define _ASM_GENERIC_BITOPS___FLS_H_ 17#define _ASM_GENERIC_BITOPS___FLS_H_
24#define _ASM_GENERIC_BITOPS_FFS_H_ 18#define _ASM_GENERIC_BITOPS_FFS_H_
@@ -44,4 +38,10 @@ static inline int fls(int x)
44 38
45#include <asm-generic/bitops.h> 39#include <asm-generic/bitops.h>
46 40
41/* following definitions: to avoid using the generic code in lib/find_*.c */
42#define find_next_bit find_next_bit
43#define find_next_zero_bit find_next_zero_bit
44#define find_first_bit find_first_bit
45#define find_first_zero_bit find_first_zero_bit
46
47#endif /* __UNICORE_BITOPS_H__ */ 47#endif /* __UNICORE_BITOPS_H__ */
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index e11cb078657..f0d780a51f9 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -53,7 +53,6 @@ struct thread_struct {
53#define start_thread(regs, pc, sp) \ 53#define start_thread(regs, pc, sp) \
54({ \ 54({ \
55 unsigned long *stack = (unsigned long *)sp; \ 55 unsigned long *stack = (unsigned long *)sp; \
56 set_fs(USER_DS); \
57 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 56 memset(regs->uregs, 0, sizeof(regs->uregs)); \
58 regs->UCreg_asr = USER_MODE; \ 57 regs->UCreg_asr = USER_MODE; \
59 regs->UCreg_pc = pc & ~1; /* pc */ \ 58 regs->UCreg_pc = pc & ~1; /* pc */ \
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
index a8970809428..d98bd812cae 100644
--- a/arch/unicore32/kernel/ksyms.c
+++ b/arch/unicore32/kernel/ksyms.c
@@ -24,8 +24,8 @@
24 24
25#include "ksyms.h" 25#include "ksyms.h"
26 26
27EXPORT_SYMBOL(__uc32_find_next_zero_bit); 27EXPORT_SYMBOL(find_next_zero_bit);
28EXPORT_SYMBOL(__uc32_find_next_bit); 28EXPORT_SYMBOL(find_next_bit);
29 29
30EXPORT_SYMBOL(__backtrace); 30EXPORT_SYMBOL(__backtrace);
31 31
diff --git a/arch/unicore32/lib/findbit.S b/arch/unicore32/lib/findbit.S
index c360ce905d8..c77746247d3 100644
--- a/arch/unicore32/lib/findbit.S
+++ b/arch/unicore32/lib/findbit.S
@@ -17,7 +17,7 @@
17 * Purpose : Find a 'zero' bit 17 * Purpose : Find a 'zero' bit
18 * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit); 18 * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
19 */ 19 */
20__uc32_find_first_zero_bit: 20ENTRY(find_first_zero_bit)
21 cxor.a r1, #0 21 cxor.a r1, #0
22 beq 3f 22 beq 3f
23 mov r2, #0 23 mov r2, #0
@@ -29,13 +29,14 @@ __uc32_find_first_zero_bit:
29 bub 1b 29 bub 1b
303: mov r0, r1 @ no free bits 303: mov r0, r1 @ no free bits
31 mov pc, lr 31 mov pc, lr
32ENDPROC(find_first_zero_bit)
32 33
33/* 34/*
34 * Purpose : Find next 'zero' bit 35 * Purpose : Find next 'zero' bit
35 * Prototype: int find_next_zero_bit 36 * Prototype: int find_next_zero_bit
36 * (void *addr, unsigned int maxbit, int offset) 37 * (void *addr, unsigned int maxbit, int offset)
37 */ 38 */
38ENTRY(__uc32_find_next_zero_bit) 39ENTRY(find_next_zero_bit)
39 cxor.a r1, #0 40 cxor.a r1, #0
40 beq 3b 41 beq 3b
41 and.a ip, r2, #7 42 and.a ip, r2, #7
@@ -47,14 +48,14 @@ ENTRY(__uc32_find_next_zero_bit)
47 or r2, r2, #7 @ if zero, then no bits here 48 or r2, r2, #7 @ if zero, then no bits here
48 add r2, r2, #1 @ align bit pointer 49 add r2, r2, #1 @ align bit pointer
49 b 2b @ loop for next bit 50 b 2b @ loop for next bit
50ENDPROC(__uc32_find_next_zero_bit) 51ENDPROC(find_next_zero_bit)
51 52
52/* 53/*
53 * Purpose : Find a 'one' bit 54 * Purpose : Find a 'one' bit
54 * Prototype: int find_first_bit 55 * Prototype: int find_first_bit
55 * (const unsigned long *addr, unsigned int maxbit); 56 * (const unsigned long *addr, unsigned int maxbit);
56 */ 57 */
57__uc32_find_first_bit: 58ENTRY(find_first_bit)
58 cxor.a r1, #0 59 cxor.a r1, #0
59 beq 3f 60 beq 3f
60 mov r2, #0 61 mov r2, #0
@@ -66,13 +67,14 @@ __uc32_find_first_bit:
66 bub 1b 67 bub 1b
673: mov r0, r1 @ no free bits 683: mov r0, r1 @ no free bits
68 mov pc, lr 69 mov pc, lr
70ENDPROC(find_first_bit)
69 71
70/* 72/*
71 * Purpose : Find next 'one' bit 73 * Purpose : Find next 'one' bit
72 * Prototype: int find_next_zero_bit 74 * Prototype: int find_next_zero_bit
73 * (void *addr, unsigned int maxbit, int offset) 75 * (void *addr, unsigned int maxbit, int offset)
74 */ 76 */
75ENTRY(__uc32_find_next_bit) 77ENTRY(find_next_bit)
76 cxor.a r1, #0 78 cxor.a r1, #0
77 beq 3b 79 beq 3b
78 and.a ip, r2, #7 80 and.a ip, r2, #7
@@ -83,7 +85,7 @@ ENTRY(__uc32_find_next_bit)
83 or r2, r2, #7 @ if zero, then no bits here 85 or r2, r2, #7 @ if zero, then no bits here
84 add r2, r2, #1 @ align bit pointer 86 add r2, r2, #1 @ align bit pointer
85 b 2b @ loop for next bit 87 b 2b @ loop for next bit
86ENDPROC(__uc32_find_next_bit) 88ENDPROC(find_next_bit)
87 89
88/* 90/*
89 * One or more bits in the LSB of r3 are assumed to be set. 91 * One or more bits in the LSB of r3 are assumed to be set.
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 9b7273cb219..1a6c09af048 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -49,6 +49,7 @@ extern unsigned int apic_verbosity;
49extern int local_apic_timer_c2_ok; 49extern int local_apic_timer_c2_ok;
50 50
51extern int disable_apic; 51extern int disable_apic;
52extern unsigned int lapic_timer_frequency;
52 53
53#ifdef CONFIG_SMP 54#ifdef CONFIG_SMP
54extern void __inquire_remote_apic(int apicid); 55extern void __inquire_remote_apic(int apicid);
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index 72a8b52e7df..a01e7ec7d23 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -17,7 +17,7 @@
17#define NMI_REASON_CLEAR_IOCHK 0x08 17#define NMI_REASON_CLEAR_IOCHK 0x08
18#define NMI_REASON_CLEAR_MASK 0x0f 18#define NMI_REASON_CLEAR_MASK 0x0f
19 19
20static inline unsigned char get_nmi_reason(void) 20static inline unsigned char default_get_nmi_reason(void)
21{ 21{
22 return inb(NMI_REASON_PORT); 22 return inb(NMI_REASON_PORT);
23} 23}
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c9321f34e55..0e8ae57d365 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -201,7 +201,10 @@ int mce_notify_irq(void);
201void mce_notify_process(void); 201void mce_notify_process(void);
202 202
203DECLARE_PER_CPU(struct mce, injectm); 203DECLARE_PER_CPU(struct mce, injectm);
204extern struct file_operations mce_chrdev_ops; 204
205extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
206 const char __user *ubuf,
207 size_t usize, loff_t *off));
205 208
206/* 209/*
207 * Exception handler 210 * Exception handler
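With mce_chrdev_ops no longer exported, an injector such as mce-inject hooks writes to /dev/mcelog through the new registration call instead of patching the file_operations directly. A minimal sketch of a module using it; the handler body is a made-up stub:

    /* Sketch only: register_mce_write_callback() and its signature come
     * from the declaration above; everything else is hypothetical. */
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <asm/mce.h>

    static ssize_t my_mce_write(struct file *filp, const char __user *ubuf,
    			    size_t usize, loff_t *off)
    {
    	return usize;			/* pretend the record was consumed */
    }

    static int __init my_mce_init(void)
    {
    	register_mce_write_callback(my_mce_write);
    	return 0;
    }
    module_init(my_mce_init);
    MODULE_LICENSE("GPL");
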
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 719f00b28ff..e6283129c82 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -44,6 +44,13 @@ enum mrst_timer_options {
44 44
45extern enum mrst_timer_options mrst_timer_options; 45extern enum mrst_timer_options mrst_timer_options;
46 46
47/*
48 * Penwell uses spread spectrum clock, so the freq number is not exactly
49 * the same as reported by MSR based on SDM.
50 */
51#define PENWELL_FSB_FREQ_83SKU 83200
52#define PENWELL_FSB_FREQ_100SKU 99840
53
47#define SFI_MTMR_MAX_NUM 8 54#define SFI_MTMR_MAX_NUM 8
48#define SFI_MRTC_MAX 8 55#define SFI_MRTC_MAX 8
49 56
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index d3d859035af..1971e652d24 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -152,6 +152,7 @@ struct x86_cpuinit_ops {
152/** 152/**
153 * struct x86_platform_ops - platform specific runtime functions 153 * struct x86_platform_ops - platform specific runtime functions
154 * @calibrate_tsc: calibrate TSC 154 * @calibrate_tsc: calibrate TSC
155 * @wallclock_init: init the wallclock device
155 * @get_wallclock: get time from HW clock like RTC etc. 156 * @get_wallclock: get time from HW clock like RTC etc.
156 * @set_wallclock: set time back to HW clock 157 * @set_wallclock: set time back to HW clock
157 * @is_untracked_pat_range exclude from PAT logic 158 * @is_untracked_pat_range exclude from PAT logic
@@ -160,11 +161,13 @@ struct x86_cpuinit_ops {
160 */ 161 */
161struct x86_platform_ops { 162struct x86_platform_ops {
162 unsigned long (*calibrate_tsc)(void); 163 unsigned long (*calibrate_tsc)(void);
164 void (*wallclock_init)(void);
163 unsigned long (*get_wallclock)(void); 165 unsigned long (*get_wallclock)(void);
164 int (*set_wallclock)(unsigned long nowtime); 166 int (*set_wallclock)(unsigned long nowtime);
165 void (*iommu_shutdown)(void); 167 void (*iommu_shutdown)(void);
166 bool (*is_untracked_pat_range)(u64 start, u64 end); 168 bool (*is_untracked_pat_range)(u64 start, u64 end);
167 void (*nmi_init)(void); 169 void (*nmi_init)(void);
170 unsigned char (*get_nmi_reason)(void);
168 int (*i8042_detect)(void); 171 int (*i8042_detect)(void);
169}; 172};
170 173
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c6382281624..1f84794f075 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -738,5 +738,5 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
738 738
739 atomic_set(&stop_machine_first, 1); 739 atomic_set(&stop_machine_first, 1);
740 wrote_text = 0; 740 wrote_text = 0;
741 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 741 __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
742} 742}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a2fd72e0ab3..f98d84caf94 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -186,7 +186,7 @@ static struct resource lapic_resource = {
186 .flags = IORESOURCE_MEM | IORESOURCE_BUSY, 186 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
187}; 187};
188 188
189static unsigned int calibration_result; 189unsigned int lapic_timer_frequency = 0;
190 190
191static void apic_pm_activate(void); 191static void apic_pm_activate(void);
192 192
@@ -454,7 +454,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
454 switch (mode) { 454 switch (mode) {
455 case CLOCK_EVT_MODE_PERIODIC: 455 case CLOCK_EVT_MODE_PERIODIC:
456 case CLOCK_EVT_MODE_ONESHOT: 456 case CLOCK_EVT_MODE_ONESHOT:
457 __setup_APIC_LVTT(calibration_result, 457 __setup_APIC_LVTT(lapic_timer_frequency,
458 mode != CLOCK_EVT_MODE_PERIODIC, 1); 458 mode != CLOCK_EVT_MODE_PERIODIC, 1);
459 break; 459 break;
460 case CLOCK_EVT_MODE_UNUSED: 460 case CLOCK_EVT_MODE_UNUSED:
@@ -638,6 +638,25 @@ static int __init calibrate_APIC_clock(void)
638 long delta, deltatsc; 638 long delta, deltatsc;
639 int pm_referenced = 0; 639 int pm_referenced = 0;
640 640
641 /**
642 * check if lapic timer has already been calibrated by platform
 643 * specific routine, such as tsc calibration code. If so, we just fill
644 * in the clockevent structure and return.
645 */
646
647 if (lapic_timer_frequency) {
648 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
649 lapic_timer_frequency);
650 lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
651 TICK_NSEC, lapic_clockevent.shift);
652 lapic_clockevent.max_delta_ns =
653 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
654 lapic_clockevent.min_delta_ns =
655 clockevent_delta2ns(0xF, &lapic_clockevent);
656 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
657 return 0;
658 }
659
641 local_irq_disable(); 660 local_irq_disable();
642 661
643 /* Replace the global interrupt handler */ 662 /* Replace the global interrupt handler */
@@ -679,12 +698,12 @@ static int __init calibrate_APIC_clock(void)
679 lapic_clockevent.min_delta_ns = 698 lapic_clockevent.min_delta_ns =
680 clockevent_delta2ns(0xF, &lapic_clockevent); 699 clockevent_delta2ns(0xF, &lapic_clockevent);
681 700
682 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; 701 lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
683 702
684 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); 703 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
685 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult); 704 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
686 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", 705 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
687 calibration_result); 706 lapic_timer_frequency);
688 707
689 if (cpu_has_tsc) { 708 if (cpu_has_tsc) {
690 apic_printk(APIC_VERBOSE, "..... CPU clock speed is " 709 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
@@ -695,13 +714,13 @@ static int __init calibrate_APIC_clock(void)
695 714
696 apic_printk(APIC_VERBOSE, "..... host bus clock speed is " 715 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
697 "%u.%04u MHz.\n", 716 "%u.%04u MHz.\n",
698 calibration_result / (1000000 / HZ), 717 lapic_timer_frequency / (1000000 / HZ),
699 calibration_result % (1000000 / HZ)); 718 lapic_timer_frequency % (1000000 / HZ));
700 719
701 /* 720 /*
702 * Do a sanity check on the APIC calibration result 721 * Do a sanity check on the APIC calibration result
703 */ 722 */
704 if (calibration_result < (1000000 / HZ)) { 723 if (lapic_timer_frequency < (1000000 / HZ)) {
705 local_irq_enable(); 724 local_irq_enable();
706 pr_warning("APIC frequency too slow, disabling apic timer\n"); 725 pr_warning("APIC frequency too slow, disabling apic timer\n");
707 return -1; 726 return -1;
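
The lapic_timer_frequency export above lets platform code seed the LAPIC timer rate before calibrate_APIC_clock() runs; when the variable is already non-zero the calibration loop is skipped and the clockevent is filled in directly. A minimal sketch of a platform calibration hook using this, assuming a 100 MHz bus clock and a 16x core multiplier (names and numbers illustrative):

	static unsigned long __init example_calibrate_tsc(void)
	{
		unsigned long fsb_khz = 100000;	/* assumed bus clock, in kHz */

		/* the LAPIC timer counts bus clocks; store one jiffy's worth */
		lapic_timer_frequency = fsb_khz * 1000 / HZ;

		return 16 * fsb_khz;		/* assumed TSC frequency, in kHz */
	}

	/* wired up from platform early setup, e.g.:
	 *	x86_platform.calibrate_tsc = example_calibrate_tsc;
	 */
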
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3c31fa98af6..6d939d7847e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -193,10 +193,8 @@ int __init arch_early_irq_init(void)
193 struct irq_cfg *cfg; 193 struct irq_cfg *cfg;
194 int count, node, i; 194 int count, node, i;
195 195
196 if (!legacy_pic->nr_legacy_irqs) { 196 if (!legacy_pic->nr_legacy_irqs)
197 nr_irqs_gsi = 0;
198 io_apic_irqs = ~0UL; 197 io_apic_irqs = ~0UL;
199 }
200 198
201 for (i = 0; i < nr_ioapics; i++) { 199 for (i = 0; i < nr_ioapics; i++) {
202 ioapics[i].saved_registers = 200 ioapics[i].saved_registers =
@@ -1696,6 +1694,7 @@ __apicdebuginit(void) print_IO_APICs(void)
1696 int ioapic_idx; 1694 int ioapic_idx;
1697 struct irq_cfg *cfg; 1695 struct irq_cfg *cfg;
1698 unsigned int irq; 1696 unsigned int irq;
1697 struct irq_chip *chip;
1699 1698
1700 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1699 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1701 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1700 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
@@ -1716,6 +1715,10 @@ __apicdebuginit(void) print_IO_APICs(void)
1716 for_each_active_irq(irq) { 1715 for_each_active_irq(irq) {
1717 struct irq_pin_list *entry; 1716 struct irq_pin_list *entry;
1718 1717
1718 chip = irq_get_chip(irq);
1719 if (chip != &ioapic_chip)
1720 continue;
1721
1719 cfg = irq_get_chip_data(irq); 1722 cfg = irq_get_chip_data(irq);
1720 if (!cfg) 1723 if (!cfg)
1721 continue; 1724 continue;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 6199232161c..319882ef848 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -208,7 +208,7 @@ static int inject_init(void)
208 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) 208 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
209 return -ENOMEM; 209 return -ENOMEM;
210 printk(KERN_INFO "Machine check injector initialized\n"); 210 printk(KERN_INFO "Machine check injector initialized\n");
211 mce_chrdev_ops.write = mce_write; 211 register_mce_write_callback(mce_write);
212 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, 212 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
213 "mce_notify"); 213 "mce_notify");
214 return 0; 214 return 0;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 362056aefeb..2af127d4c3d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1634,16 +1634,35 @@ static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1634 } 1634 }
1635} 1635}
1636 1636
1637/* Modified in mce-inject.c, so not static or const */ 1637static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1638struct file_operations mce_chrdev_ops = { 1638 size_t usize, loff_t *off);
1639
1640void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1641 const char __user *ubuf,
1642 size_t usize, loff_t *off))
1643{
1644 mce_write = fn;
1645}
1646EXPORT_SYMBOL_GPL(register_mce_write_callback);
1647
1648ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1649 size_t usize, loff_t *off)
1650{
1651 if (mce_write)
1652 return mce_write(filp, ubuf, usize, off);
1653 else
1654 return -EINVAL;
1655}
1656
1657static const struct file_operations mce_chrdev_ops = {
1639 .open = mce_chrdev_open, 1658 .open = mce_chrdev_open,
1640 .release = mce_chrdev_release, 1659 .release = mce_chrdev_release,
1641 .read = mce_chrdev_read, 1660 .read = mce_chrdev_read,
1661 .write = mce_chrdev_write,
1642 .poll = mce_chrdev_poll, 1662 .poll = mce_chrdev_poll,
1643 .unlocked_ioctl = mce_chrdev_ioctl, 1663 .unlocked_ioctl = mce_chrdev_ioctl,
1644 .llseek = no_llseek, 1664 .llseek = no_llseek,
1645}; 1665};
1646EXPORT_SYMBOL_GPL(mce_chrdev_ops);
1647 1666
1648static struct miscdevice mce_chrdev_device = { 1667static struct miscdevice mce_chrdev_device = {
1649 MISC_MCELOG_MINOR, 1668 MISC_MCELOG_MINOR,
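
With mce_chrdev_ops made static above, the injection module no longer pokes the file_operations directly; it hands a write handler to register_mce_write_callback() and mce_chrdev_write() dispatches to it (or returns -EINVAL when nothing is registered). A sketch of a consumer, with a hypothetical handler:

	static ssize_t my_mce_write(struct file *filp, const char __user *ubuf,
				    size_t usize, loff_t *off)
	{
		/* copy a struct mce from ubuf and queue it for injection */
		return usize;
	}

	static int __init my_inject_init(void)
	{
		register_mce_write_callback(my_mce_write);
		return 0;
	}
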
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188e29a..44842d756b2 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
74 struct pvclock_vcpu_time_info *src; 74 struct pvclock_vcpu_time_info *src;
75 cycle_t ret; 75 cycle_t ret;
76 76
77 src = &get_cpu_var(hv_clock); 77 preempt_disable_notrace();
78 src = &__get_cpu_var(hv_clock);
78 ret = pvclock_clocksource_read(src); 79 ret = pvclock_clocksource_read(src);
79 put_cpu_var(hv_clock); 80 preempt_enable_notrace();
80 return ret; 81 return ret;
81} 82}
82 83
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index b9c8628974a..e88f37b58dd 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -29,6 +29,7 @@
29#include <asm/traps.h> 29#include <asm/traps.h>
30#include <asm/mach_traps.h> 30#include <asm/mach_traps.h>
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/x86_init.h>
32 33
33#define NMI_MAX_NAMELEN 16 34#define NMI_MAX_NAMELEN 16
34struct nmiaction { 35struct nmiaction {
@@ -348,7 +349,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
348 349
349 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ 350 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
350 raw_spin_lock(&nmi_reason_lock); 351 raw_spin_lock(&nmi_reason_lock);
351 reason = get_nmi_reason(); 352 reason = x86_platform.get_nmi_reason();
352 353
353 if (reason & NMI_REASON_MASK) { 354 if (reason & NMI_REASON_MASK) {
354 if (reason & NMI_REASON_SERR) 355 if (reason & NMI_REASON_SERR)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index afaf38447ef..cf0ef986cb6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1045,6 +1045,8 @@ void __init setup_arch(char **cmdline_p)
1045 1045
1046 x86_init.timers.wallclock_init(); 1046 x86_init.timers.wallclock_init();
1047 1047
1048 x86_platform.wallclock_init();
1049
1048 mcheck_init(); 1050 mcheck_init();
1049 1051
1050 arch_init_ideal_nops(); 1052 arch_init_ideal_nops();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 6f164bd5e14..c1d6cd54939 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -21,12 +21,14 @@
21#include <asm/pat.h> 21#include <asm/pat.h>
22#include <asm/tsc.h> 22#include <asm/tsc.h>
23#include <asm/iommu.h> 23#include <asm/iommu.h>
24#include <asm/mach_traps.h>
24 25
25void __cpuinit x86_init_noop(void) { } 26void __cpuinit x86_init_noop(void) { }
26void __init x86_init_uint_noop(unsigned int unused) { } 27void __init x86_init_uint_noop(unsigned int unused) { }
27void __init x86_init_pgd_noop(pgd_t *unused) { } 28void __init x86_init_pgd_noop(pgd_t *unused) { }
28int __init iommu_init_noop(void) { return 0; } 29int __init iommu_init_noop(void) { return 0; }
29void iommu_shutdown_noop(void) { } 30void iommu_shutdown_noop(void) { }
31void wallclock_init_noop(void) { }
30 32
31/* 33/*
32 * The platform setup functions are preset with the default functions 34 * The platform setup functions are preset with the default functions
@@ -97,11 +99,13 @@ static int default_i8042_detect(void) { return 1; };
97 99
98struct x86_platform_ops x86_platform = { 100struct x86_platform_ops x86_platform = {
99 .calibrate_tsc = native_calibrate_tsc, 101 .calibrate_tsc = native_calibrate_tsc,
102 .wallclock_init = wallclock_init_noop,
100 .get_wallclock = mach_get_cmos_time, 103 .get_wallclock = mach_get_cmos_time,
101 .set_wallclock = mach_set_rtc_mmss, 104 .set_wallclock = mach_set_rtc_mmss,
102 .iommu_shutdown = iommu_shutdown_noop, 105 .iommu_shutdown = iommu_shutdown_noop,
103 .is_untracked_pat_range = is_ISA_range, 106 .is_untracked_pat_range = is_ISA_range,
104 .nmi_init = default_nmi_init, 107 .nmi_init = default_nmi_init,
108 .get_nmi_reason = default_get_nmi_reason,
105 .i8042_detect = default_i8042_detect 109 .i8042_detect = default_i8042_detect
106}; 110};
107 111
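
With wallclock_init and get_nmi_reason added to x86_platform_ops above, a platform that lacks port 0x61 or a CMOS RTC overrides the relevant pointers during early setup instead of patching common code; the mrst hunk further down does exactly this. A sketch of such an override (function names hypothetical):

	static unsigned char example_get_nmi_reason(void)
	{
		return 0;	/* no port 0x61: report no external NMI source */
	}

	static void __init example_wallclock_init(void)
	{
		/* register the platform's RTC/wallclock device here */
	}

	void __init example_platform_early_setup(void)
	{
		x86_platform.get_nmi_reason = example_get_nmi_reason;
		x86_platform.wallclock_init = example_wallclock_init;
	}
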
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0d6bd9ad44..579a0b51696 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -39,6 +39,7 @@
39#include <asm/mce.h> 39#include <asm/mce.h>
40#include <asm/i387.h> 40#include <asm/i387.h>
41#include <asm/xcr.h> 41#include <asm/xcr.h>
42#include <asm/perf_event.h>
42 43
43#include "trace.h" 44#include "trace.h"
44 45
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
118static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 119static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
119module_param(ple_window, int, S_IRUGO); 120module_param(ple_window, int, S_IRUGO);
120 121
121#define NR_AUTOLOAD_MSRS 1 122#define NR_AUTOLOAD_MSRS 8
122#define VMCS02_POOL_SIZE 1 123#define VMCS02_POOL_SIZE 1
123 124
124struct vmcs { 125struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
622static unsigned long *vmx_msr_bitmap_longmode; 623static unsigned long *vmx_msr_bitmap_longmode;
623 624
624static bool cpu_has_load_ia32_efer; 625static bool cpu_has_load_ia32_efer;
626static bool cpu_has_load_perf_global_ctrl;
625 627
626static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); 628static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
627static DEFINE_SPINLOCK(vmx_vpid_lock); 629static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1191 vmcs_write32(EXCEPTION_BITMAP, eb); 1193 vmcs_write32(EXCEPTION_BITMAP, eb);
1192} 1194}
1193 1195
1196static void clear_atomic_switch_msr_special(unsigned long entry,
1197 unsigned long exit)
1198{
1199 vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
1200 vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
1201}
1202
1194static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) 1203static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1195{ 1204{
1196 unsigned i; 1205 unsigned i;
1197 struct msr_autoload *m = &vmx->msr_autoload; 1206 struct msr_autoload *m = &vmx->msr_autoload;
1198 1207
1199 if (msr == MSR_EFER && cpu_has_load_ia32_efer) { 1208 switch (msr) {
1200 vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); 1209 case MSR_EFER:
1201 vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); 1210 if (cpu_has_load_ia32_efer) {
1202 return; 1211 clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1212 VM_EXIT_LOAD_IA32_EFER);
1213 return;
1214 }
1215 break;
1216 case MSR_CORE_PERF_GLOBAL_CTRL:
1217 if (cpu_has_load_perf_global_ctrl) {
1218 clear_atomic_switch_msr_special(
1219 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1220 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1221 return;
1222 }
1223 break;
1203 } 1224 }
1204 1225
1205 for (i = 0; i < m->nr; ++i) 1226 for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1215 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); 1236 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1216} 1237}
1217 1238
1239static void add_atomic_switch_msr_special(unsigned long entry,
1240 unsigned long exit, unsigned long guest_val_vmcs,
1241 unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
1242{
1243 vmcs_write64(guest_val_vmcs, guest_val);
1244 vmcs_write64(host_val_vmcs, host_val);
1245 vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
1246 vmcs_set_bits(VM_EXIT_CONTROLS, exit);
1247}
1248
1218static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, 1249static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1219 u64 guest_val, u64 host_val) 1250 u64 guest_val, u64 host_val)
1220{ 1251{
1221 unsigned i; 1252 unsigned i;
1222 struct msr_autoload *m = &vmx->msr_autoload; 1253 struct msr_autoload *m = &vmx->msr_autoload;
1223 1254
1224 if (msr == MSR_EFER && cpu_has_load_ia32_efer) { 1255 switch (msr) {
1225 vmcs_write64(GUEST_IA32_EFER, guest_val); 1256 case MSR_EFER:
1226 vmcs_write64(HOST_IA32_EFER, host_val); 1257 if (cpu_has_load_ia32_efer) {
1227 vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); 1258 add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1228 vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); 1259 VM_EXIT_LOAD_IA32_EFER,
1229 return; 1260 GUEST_IA32_EFER,
1261 HOST_IA32_EFER,
1262 guest_val, host_val);
1263 return;
1264 }
1265 break;
1266 case MSR_CORE_PERF_GLOBAL_CTRL:
1267 if (cpu_has_load_perf_global_ctrl) {
1268 add_atomic_switch_msr_special(
1269 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1270 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1271 GUEST_IA32_PERF_GLOBAL_CTRL,
1272 HOST_IA32_PERF_GLOBAL_CTRL,
1273 guest_val, host_val);
1274 return;
1275 }
1276 break;
1230 } 1277 }
1231 1278
1232 for (i = 0; i < m->nr; ++i) 1279 for (i = 0; i < m->nr; ++i)
1233 if (m->guest[i].index == msr) 1280 if (m->guest[i].index == msr)
1234 break; 1281 break;
1235 1282
1236 if (i == m->nr) { 1283 if (i == NR_AUTOLOAD_MSRS) {
 1284 printk_once(KERN_WARNING"Not enough msr switch entries. "
1285 "Can't add msr %x\n", msr);
1286 return;
1287 } else if (i == m->nr) {
1237 ++m->nr; 1288 ++m->nr;
1238 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); 1289 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1239 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); 1290 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2455 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, 2506 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2456 VM_EXIT_LOAD_IA32_EFER); 2507 VM_EXIT_LOAD_IA32_EFER);
2457 2508
2509 cpu_has_load_perf_global_ctrl =
2510 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2511 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2512 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2513 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2514
2515 /*
2516 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
 2517 * but due to errata below it can't be used. Workaround is to use
2518 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2519 *
2520 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2521 *
2522 * AAK155 (model 26)
2523 * AAP115 (model 30)
2524 * AAT100 (model 37)
2525 * BC86,AAY89,BD102 (model 44)
2526 * BA97 (model 46)
2527 *
2528 */
2529 if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2530 switch (boot_cpu_data.x86_model) {
2531 case 26:
2532 case 30:
2533 case 37:
2534 case 44:
2535 case 46:
2536 cpu_has_load_perf_global_ctrl = false;
2537 printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2538 "does not work properly. Using workaround\n");
2539 break;
2540 default:
2541 break;
2542 }
2543 }
2544
2458 return 0; 2545 return 0;
2459} 2546}
2460 2547
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
5968 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6055 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
5969} 6056}
5970 6057
6058static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
6059{
6060 int i, nr_msrs;
6061 struct perf_guest_switch_msr *msrs;
6062
6063 msrs = perf_guest_get_msrs(&nr_msrs);
6064
6065 if (!msrs)
6066 return;
6067
6068 for (i = 0; i < nr_msrs; i++)
6069 if (msrs[i].host == msrs[i].guest)
6070 clear_atomic_switch_msr(vmx, msrs[i].msr);
6071 else
6072 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
6073 msrs[i].host);
6074}
6075
5971#ifdef CONFIG_X86_64 6076#ifdef CONFIG_X86_64
5972#define R "r" 6077#define R "r"
5973#define Q "q" 6078#define Q "q"
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6017 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6122 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6018 vmx_set_interrupt_shadow(vcpu, 0); 6123 vmx_set_interrupt_shadow(vcpu, 0);
6019 6124
6125 atomic_switch_perf_msrs(vmx);
6126
6020 vmx->__launched = vmx->loaded_vmcs->launched; 6127 vmx->__launched = vmx->loaded_vmcs->launched;
6021 asm( 6128 asm(
6022 /* Store host registers */ 6129 /* Store host registers */
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 541020df0da..b1489a06a49 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -187,11 +187,34 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
187static unsigned long __init mrst_calibrate_tsc(void) 187static unsigned long __init mrst_calibrate_tsc(void)
188{ 188{
189 unsigned long flags, fast_calibrate; 189 unsigned long flags, fast_calibrate;
190 190 if (__mrst_cpu_chip == MRST_CPU_CHIP_PENWELL) {
191 local_irq_save(flags); 191 u32 lo, hi, ratio, fsb;
192 fast_calibrate = apbt_quick_calibrate(); 192
193 local_irq_restore(flags); 193 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
194 194 pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
195 ratio = (hi >> 8) & 0x1f;
196 pr_debug("ratio is %d\n", ratio);
197 if (!ratio) {
 198 pr_err("read a zero ratio, which should not happen!\n");
199 pr_err("force tsc ratio to 16 ...\n");
200 ratio = 16;
201 }
202 rdmsr(MSR_FSB_FREQ, lo, hi);
203 if ((lo & 0x7) == 0x7)
204 fsb = PENWELL_FSB_FREQ_83SKU;
205 else
206 fsb = PENWELL_FSB_FREQ_100SKU;
207 fast_calibrate = ratio * fsb;
208 pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
209 lapic_timer_frequency = fsb * 1000 / HZ;
210 /* mark tsc clocksource as reliable */
211 set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
212 } else {
213 local_irq_save(flags);
214 fast_calibrate = apbt_quick_calibrate();
215 local_irq_restore(flags);
216 }
217
195 if (fast_calibrate) 218 if (fast_calibrate)
196 return fast_calibrate; 219 return fast_calibrate;
197 220
@@ -254,6 +277,17 @@ static void mrst_reboot(void)
254} 277}
255 278
256/* 279/*
280 * Moorestown does not have external NMI source nor port 0x61 to report
281 * NMI status. The possible NMI sources are from pmu as a result of NMI
282 * watchdog or lock debug. Reading io port 0x61 results in 0xff which
 283 * would mislead the NMI handler.
284 */
285static unsigned char mrst_get_nmi_reason(void)
286{
287 return 0;
288}
289
290/*
257 * Moorestown specific x86_init function overrides and early setup 291 * Moorestown specific x86_init function overrides and early setup
258 * calls. 292 * calls.
259 */ 293 */
@@ -274,6 +308,8 @@ void __init x86_mrst_early_setup(void)
274 x86_platform.calibrate_tsc = mrst_calibrate_tsc; 308 x86_platform.calibrate_tsc = mrst_calibrate_tsc;
275 x86_platform.i8042_detect = mrst_i8042_detect; 309 x86_platform.i8042_detect = mrst_i8042_detect;
276 x86_init.timers.wallclock_init = mrst_rtc_init; 310 x86_init.timers.wallclock_init = mrst_rtc_init;
311 x86_platform.get_nmi_reason = mrst_get_nmi_reason;
312
277 x86_init.pci.init = pci_mrst_init; 313 x86_init.pci.init = pci_mrst_init;
278 x86_init.pci.fixup_irqs = x86_init_noop; 314 x86_init.pci.fixup_irqs = x86_init_noop;
279 315
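
The Penwell branch above derives the TSC rate from MSRs instead of the APB timer: the ratio comes from bits [12:8] of IA32_PERF_STATUS and the bus clock from the low three bits of MSR_FSB_FREQ, mapped to the 83.200 or 99.840 MHz SKU constants added earlier. A worked example, assuming the 100 MHz SKU, a ratio of 16 and HZ=100:

	fsb                   = 99840 kHz
	fast_calibrate        = ratio * fsb      = 16 * 99840     = 1597440 kHz  (~1.6 GHz TSC)
	lapic_timer_frequency = fsb * 1000 / HZ  = 99840000 / 100 = 998400 bus clocks per jiffy
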
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 118c143a9cb..2c32df6fe23 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -11,7 +11,7 @@
11#endif 11#endif
12 12
13#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP) 13#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
14#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_IP) 14#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
15#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP) 15#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
16 16
17#define ARCH_IS_STACKGROW(address) \ 17#define ARCH_IS_STACKGROW(address) \
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index da8afd576a6..1f928659c33 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1356,7 +1356,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1356 int cpu = (long)hcpu; 1356 int cpu = (long)hcpu;
1357 switch (action) { 1357 switch (action) {
1358 case CPU_UP_PREPARE: 1358 case CPU_UP_PREPARE:
1359 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1359 xen_vcpu_setup(cpu);
1360 if (xen_have_vector_callback) 1360 if (xen_have_vector_callback)
1361 xen_init_lock_cpu(cpu); 1361 xen_init_lock_cpu(cpu);
1362 break; 1362 break;
@@ -1386,7 +1386,6 @@ static void __init xen_hvm_guest_init(void)
1386 xen_hvm_smp_init(); 1386 xen_hvm_smp_init();
1387 register_cpu_notifier(&xen_hvm_cpu_notifier); 1387 register_cpu_notifier(&xen_hvm_cpu_notifier);
1388 xen_unplug_emulated_devices(); 1388 xen_unplug_emulated_devices();
1389 have_vcpu_info_placement = 0;
1390 x86_init.irqs.intr_init = xen_init_IRQ; 1389 x86_init.irqs.intr_init = xen_init_IRQ;
1391 xen_hvm_init_time_ops(); 1390 xen_hvm_init_time_ops();
1392 xen_hvm_init_mmu_ops(); 1391 xen_hvm_init_mmu_ops();
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 6bbfd7ac5e8..5a40d24ba33 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
71 71
72 if (shared == NULL) { 72 if (shared == NULL) {
73 struct vm_struct *area = 73 struct vm_struct *area =
74 alloc_vm_area(PAGE_SIZE * max_nr_gframes); 74 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
75 BUG_ON(area == NULL); 75 BUG_ON(area == NULL);
76 shared = area->addr; 76 shared = area->addr;
77 *__shared = shared; 77 *__shared = shared;
diff --git a/block/blk-core.c b/block/blk-core.c
index f43c8a5840a..ea70e6c80cd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1379,15 +1379,19 @@ get_rq:
1379 */ 1379 */
1380 if (list_empty(&plug->list)) 1380 if (list_empty(&plug->list))
1381 trace_block_plug(q); 1381 trace_block_plug(q);
1382 else if (!plug->should_sort) { 1382 else {
1383 struct request *__rq; 1383 if (!plug->should_sort) {
1384 struct request *__rq;
1384 1385
1385 __rq = list_entry_rq(plug->list.prev); 1386 __rq = list_entry_rq(plug->list.prev);
1386 if (__rq->q != q) 1387 if (__rq->q != q)
1387 plug->should_sort = 1; 1388 plug->should_sort = 1;
1389 }
1390 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1391 blk_flush_plug_list(plug, false);
1392 trace_block_plug(q);
1393 }
1388 } 1394 }
1389 if (request_count >= BLK_MAX_REQUEST_COUNT)
1390 blk_flush_plug_list(plug, false);
1391 list_add_tail(&req->queuelist, &plug->list); 1395 list_add_tail(&req->queuelist, &plug->list);
1392 drive_stat_acct(req, 1); 1396 drive_stat_acct(req, 1);
1393 } else { 1397 } else {
diff --git a/block/blk-map.c b/block/blk-map.c
index e663ac2d8e6..164cd005970 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -204,10 +204,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
204 if (!iov[i].iov_len) 204 if (!iov[i].iov_len)
205 return -EINVAL; 205 return -EINVAL;
206 206
207 if (uaddr & queue_dma_alignment(q)) { 207 /*
208 * Keep going so we check length of all segments
209 */
210 if (uaddr & queue_dma_alignment(q))
208 unaligned = 1; 211 unaligned = 1;
209 break;
210 }
211 } 212 }
212 213
213 if (unaligned || (q->dma_pad_mask & len) || map_data) 214 if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/genhd.c b/block/genhd.c
index 9253839714f..02e9fca8082 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -19,7 +19,6 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/log2.h> 21#include <linux/log2.h>
22#include <linux/ctype.h>
23 22
24#include "blk.h" 23#include "blk.h"
25 24
@@ -916,74 +915,6 @@ static int __init genhd_device_init(void)
916 915
917subsys_initcall(genhd_device_init); 916subsys_initcall(genhd_device_init);
918 917
919static ssize_t alias_show(struct device *dev,
920 struct device_attribute *attr, char *buf)
921{
922 struct gendisk *disk = dev_to_disk(dev);
923 ssize_t ret = 0;
924
925 if (disk->alias)
926 ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
927 return ret;
928}
929
930static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
931 const char *buf, size_t count)
932{
933 struct gendisk *disk = dev_to_disk(dev);
934 char *alias;
935 char *envp[] = { NULL, NULL };
936 unsigned char c;
937 int i;
938 ssize_t ret = count;
939
940 if (!count)
941 return -EINVAL;
942
943 if (count >= ALIAS_LEN) {
944 printk(KERN_ERR "alias: alias is too long\n");
945 return -EINVAL;
946 }
947
948 /* Validation check */
949 for (i = 0; i < count; i++) {
950 c = buf[i];
951 if (i == count - 1 && c == '\n')
952 break;
953 if (!isalnum(c) && c != '_' && c != '-') {
954 printk(KERN_ERR "alias: invalid alias\n");
955 return -EINVAL;
956 }
957 }
958
959 if (disk->alias) {
960 printk(KERN_INFO "alias: %s is already assigned (%s)\n",
961 disk->disk_name, disk->alias);
962 return -EINVAL;
963 }
964
965 alias = kasprintf(GFP_KERNEL, "%s", buf);
966 if (!alias)
967 return -ENOMEM;
968
969 if (alias[count - 1] == '\n')
970 alias[count - 1] = '\0';
971
972 envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
973 if (!envp[0]) {
974 kfree(alias);
975 return -ENOMEM;
976 }
977
978 disk->alias = alias;
979 printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
980
981 kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
982
983 kfree(envp[0]);
984 return ret;
985}
986
987static ssize_t disk_range_show(struct device *dev, 918static ssize_t disk_range_show(struct device *dev,
988 struct device_attribute *attr, char *buf) 919 struct device_attribute *attr, char *buf)
989{ 920{
@@ -1043,7 +974,6 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
1043 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); 974 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1044} 975}
1045 976
1046static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
1047static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 977static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
1048static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); 978static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
1049static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 979static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
@@ -1066,7 +996,6 @@ static struct device_attribute dev_attr_fail_timeout =
1066#endif 996#endif
1067 997
1068static struct attribute *disk_attrs[] = { 998static struct attribute *disk_attrs[] = {
1069 &dev_attr_alias.attr,
1070 &dev_attr_range.attr, 999 &dev_attr_range.attr,
1071 &dev_attr_ext_range.attr, 1000 &dev_attr_ext_range.attr,
1072 &dev_attr_removable.attr, 1001 &dev_attr_removable.attr,
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca..631b9477b99 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
932static int erst_open_pstore(struct pstore_info *psi); 932static int erst_open_pstore(struct pstore_info *psi);
933static int erst_close_pstore(struct pstore_info *psi); 933static int erst_close_pstore(struct pstore_info *psi);
934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
935 struct timespec *time, struct pstore_info *psi); 935 struct timespec *time, char **buf,
936 struct pstore_info *psi);
936static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part, 937static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
937 size_t size, struct pstore_info *psi); 938 size_t size, struct pstore_info *psi);
938static int erst_clearer(enum pstore_type_id type, u64 id, 939static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
986} 987}
987 988
988static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 989static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
989 struct timespec *time, struct pstore_info *psi) 990 struct timespec *time, char **buf,
991 struct pstore_info *psi)
990{ 992{
991 int rc; 993 int rc;
992 ssize_t len = 0; 994 ssize_t len = 0;
993 u64 record_id; 995 u64 record_id;
994 struct cper_pstore_record *rcd = (struct cper_pstore_record *) 996 struct cper_pstore_record *rcd;
995 (erst_info.buf - sizeof(*rcd)); 997 size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
996 998
997 if (erst_disable) 999 if (erst_disable)
998 return -ENODEV; 1000 return -ENODEV;
999 1001
1002 rcd = kmalloc(rcd_len, GFP_KERNEL);
1003 if (!rcd) {
1004 rc = -ENOMEM;
1005 goto out;
1006 }
1000skip: 1007skip:
1001 rc = erst_get_record_id_next(&reader_pos, &record_id); 1008 rc = erst_get_record_id_next(&reader_pos, &record_id);
1002 if (rc) 1009 if (rc)
@@ -1004,22 +1011,27 @@ skip:
1004 1011
1005 /* no more record */ 1012 /* no more record */
1006 if (record_id == APEI_ERST_INVALID_RECORD_ID) { 1013 if (record_id == APEI_ERST_INVALID_RECORD_ID) {
1007 rc = -1; 1014 rc = -EINVAL;
1008 goto out; 1015 goto out;
1009 } 1016 }
1010 1017
1011 len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) + 1018 len = erst_read(record_id, &rcd->hdr, rcd_len);
1012 erst_info.bufsize);
1013 /* The record may be cleared by others, try read next record */ 1019 /* The record may be cleared by others, try read next record */
1014 if (len == -ENOENT) 1020 if (len == -ENOENT)
1015 goto skip; 1021 goto skip;
1016 else if (len < 0) { 1022 else if (len < sizeof(*rcd)) {
1017 rc = -1; 1023 rc = -EIO;
1018 goto out; 1024 goto out;
1019 } 1025 }
1020 if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) 1026 if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
1021 goto skip; 1027 goto skip;
1022 1028
1029 *buf = kmalloc(len, GFP_KERNEL);
1030 if (*buf == NULL) {
1031 rc = -ENOMEM;
1032 goto out;
1033 }
1034 memcpy(*buf, rcd->data, len - sizeof(*rcd));
1023 *id = record_id; 1035 *id = record_id;
1024 if (uuid_le_cmp(rcd->sec_hdr.section_type, 1036 if (uuid_le_cmp(rcd->sec_hdr.section_type,
1025 CPER_SECTION_TYPE_DMESG) == 0) 1037 CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
1037 time->tv_nsec = 0; 1049 time->tv_nsec = 0;
1038 1050
1039out: 1051out:
1052 kfree(rcd);
1040 return (rc < 0) ? rc : (len - sizeof(*rcd)); 1053 return (rc < 0) ? rc : (len - sizeof(*rcd));
1041} 1054}
1042 1055
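
erst_reader() above now allocates its own scratch record and returns the payload through a caller-owned *buf, so the pstore core no longer reads into the shared erst_info.buf. A sketch of the resulting calling convention, assuming the pstore_info read hook carries the new five-argument signature (names hypothetical):

	static void example_drain_one(struct pstore_info *psi)
	{
		enum pstore_type_id type;
		struct timespec time;
		char *buf = NULL;
		ssize_t len;
		u64 id;

		len = psi->read(&id, &type, &time, &buf, psi);
		if (len > 0) {
			/* buf holds len bytes of payload, allocated by the reader */
			kfree(buf);
		}
	}
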
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ec555951176..43b875810d1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev)
67 struct device *dev = &pdev->dev; 67 struct device *dev = &pdev->dev;
68 struct ahci_platform_data *pdata = dev_get_platdata(dev); 68 struct ahci_platform_data *pdata = dev_get_platdata(dev);
69 const struct platform_device_id *id = platform_get_device_id(pdev); 69 const struct platform_device_id *id = platform_get_device_id(pdev);
70 struct ata_port_info pi = ahci_port_info[id->driver_data]; 70 struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
71 const struct ata_port_info *ppi[] = { &pi, NULL }; 71 const struct ata_port_info *ppi[] = { &pi, NULL };
72 struct ahci_host_priv *hpriv; 72 struct ahci_host_priv *hpriv;
73 struct ata_host *host; 73 struct ata_host *host;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a..4cadfa28f94 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
2533 if (rc) 2533 if (rc)
2534 goto out; 2534 goto out;
2535 2535
2536#ifdef CONFIG_ATA_BMDMA
2536 if (bmdma) 2537 if (bmdma)
2537 /* prepare and activate BMDMA host */ 2538 /* prepare and activate BMDMA host */
2538 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); 2539 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2539 else 2540 else
2541#endif
2540 /* prepare and activate SFF host */ 2542 /* prepare and activate SFF host */
2541 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2543 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2542 if (rc) 2544 if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
2544 host->private_data = host_priv; 2546 host->private_data = host_priv;
2545 host->flags |= hflags; 2547 host->flags |= hflags;
2546 2548
2549#ifdef CONFIG_ATA_BMDMA
2547 if (bmdma) { 2550 if (bmdma) {
2548 pci_set_master(pdev); 2551 pci_set_master(pdev);
2549 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); 2552 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2550 } else 2553 } else
2554#endif
2551 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2555 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2552out: 2556out:
2553 if (rc == 0) 2557 if (rc == 0)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 82c865452c7..d8b3d89db04 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
22#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/async.h> 24#include <linux/async.h>
25#include <linux/pm_runtime.h>
25 26
26#include "base.h" 27#include "base.h"
27#include "power/power.h" 28#include "power/power.h"
@@ -1742,6 +1743,8 @@ void device_shutdown(void)
1742 */ 1743 */
1743 list_del_init(&dev->kobj.entry); 1744 list_del_init(&dev->kobj.entry);
1744 spin_unlock(&devices_kset->list_lock); 1745 spin_unlock(&devices_kset->list_lock);
1746 /* Disable all device's runtime power management */
1747 pm_runtime_disable(dev);
1745 1748
1746 if (dev->bus && dev->bus->shutdown) { 1749 if (dev->bus && dev->bus->shutdown) {
1747 dev_dbg(dev, "shutdown\n"); 1750 dev_dbg(dev, "shutdown\n");
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da..5693ecee9a4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
127 nid, K(node_page_state(nid, NR_WRITEBACK)), 127 nid, K(node_page_state(nid, NR_WRITEBACK)),
128 nid, K(node_page_state(nid, NR_FILE_PAGES)), 128 nid, K(node_page_state(nid, NR_FILE_PAGES)),
129 nid, K(node_page_state(nid, NR_FILE_MAPPED)), 129 nid, K(node_page_state(nid, NR_FILE_MAPPED)),
130 nid, K(node_page_state(nid, NR_ANON_PAGES)
131#ifdef CONFIG_TRANSPARENT_HUGEPAGE 130#ifdef CONFIG_TRANSPARENT_HUGEPAGE
131 nid, K(node_page_state(nid, NR_ANON_PAGES)
132 + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * 132 + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
133 HPAGE_PMD_NR 133 HPAGE_PMD_NR),
134#else
135 nid, K(node_page_state(nid, NR_ANON_PAGES)),
134#endif 136#endif
135 ),
136 nid, K(node_page_state(nid, NR_SHMEM)), 137 nid, K(node_page_state(nid, NR_SHMEM)),
137 nid, node_page_state(nid, NR_KERNEL_STACK) * 138 nid, node_page_state(nid, NR_KERNEL_STACK) *
138 THREAD_SIZE / 1024, 139 THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
143 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + 144 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
144 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 145 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
145 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), 146 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
146 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
147#ifdef CONFIG_TRANSPARENT_HUGEPAGE 147#ifdef CONFIG_TRANSPARENT_HUGEPAGE
148 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
148 , nid, 149 , nid,
149 K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * 150 K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
150 HPAGE_PMD_NR) 151 HPAGE_PMD_NR));
152#else
153 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
151#endif 154#endif
152 );
153 n += hugetlb_report_node_meminfo(nid, buf + n); 155 n += hugetlb_report_node_meminfo(nid, buf + n);
154 return n; 156 return n;
155} 157}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5f0f85d5c57..428e55e012d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
229 229
230 list_for_each_entry_reverse(ce, &psd->clock_list, node) { 230 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
231 if (ce->status < PCE_STATUS_ERROR) { 231 if (ce->status < PCE_STATUS_ERROR) {
232 clk_disable(ce->clk); 232 if (ce->status == PCE_STATUS_ENABLED)
233 clk_disable(ce->clk);
233 ce->status = PCE_STATUS_ACQUIRED; 234 ce->status = PCE_STATUS_ACQUIRED;
234 } 235 }
235 } 236 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7fa098464da..c3d2dfcf438 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
920 End: 920 End:
921 if (!error) { 921 if (!error) {
922 dev->power.is_suspended = true; 922 dev->power.is_suspended = true;
923 if (dev->power.wakeup_path && dev->parent) 923 if (dev->power.wakeup_path
924 && dev->parent && !dev->parent->power.ignore_children)
924 dev->parent->power.wakeup_path = true; 925 dev->parent->power.wakeup_path = true;
925 } 926 }
926 927
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 30a94eadc20..86de6c50fc4 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
212 if (!dev || !req) /*guard against callers passing in null */ 212 if (!dev || !req) /*guard against callers passing in null */
213 return -EINVAL; 213 return -EINVAL;
214 214
215 if (dev_pm_qos_request_active(req)) { 215 if (WARN(dev_pm_qos_request_active(req),
216 WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " 216 "%s() called for already added request\n", __func__))
217 "added request\n");
218 return -EINVAL; 217 return -EINVAL;
219 }
220 218
221 req->dev = dev; 219 req->dev = dev;
222 220
@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
271 if (!req) /*guard against callers passing in null */ 269 if (!req) /*guard against callers passing in null */
272 return -EINVAL; 270 return -EINVAL;
273 271
274 if (!dev_pm_qos_request_active(req)) { 272 if (WARN(!dev_pm_qos_request_active(req),
275 WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " 273 "%s() called for unknown object\n", __func__))
276 "unknown object\n");
277 return -EINVAL; 274 return -EINVAL;
278 }
279 275
280 mutex_lock(&dev_pm_qos_mtx); 276 mutex_lock(&dev_pm_qos_mtx);
281 277
@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
312 if (!req) /*guard against callers passing in null */ 308 if (!req) /*guard against callers passing in null */
313 return -EINVAL; 309 return -EINVAL;
314 310
315 if (!dev_pm_qos_request_active(req)) { 311 if (WARN(!dev_pm_qos_request_active(req),
316 WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " 312 "%s() called for unknown object\n", __func__))
317 "unknown object\n");
318 return -EINVAL; 313 return -EINVAL;
319 }
320 314
321 mutex_lock(&dev_pm_qos_mtx); 315 mutex_lock(&dev_pm_qos_mtx);
322 316
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 486f94ef24d..8004ac30a7a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/pci-aspm.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
@@ -4319,6 +4320,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
4319 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 4320 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
4320 return -ENODEV; 4321 return -ENODEV;
4321 } 4322 }
4323
4324 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
4325 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
4326
4322 err = pci_enable_device(h->pdev); 4327 err = pci_enable_device(h->pdev);
4323 if (err) { 4328 if (err) {
4324 dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); 4329 dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -5158,6 +5163,7 @@ reinit_after_soft_reset:
5158 h->cciss_max_sectors = 8192; 5163 h->cciss_max_sectors = 8192;
5159 5164
5160 rebuild_lun_table(h, 1, 0); 5165 rebuild_lun_table(h, 1, 0);
5166 cciss_engage_scsi(h);
5161 h->busy_initializing = 0; 5167 h->busy_initializing = 0;
5162 return 1; 5168 return 1;
5163 5169
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 951a4e33b92..e820b68d2f6 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1720/* If no tape support, then these become defined out of existence */ 1720/* If no tape support, then these become defined out of existence */
1721 1721
1722#define cciss_scsi_setup(cntl_num) 1722#define cciss_scsi_setup(cntl_num)
1723#define cciss_engage_scsi(h)
1723 1724
1724#endif /* CONFIG_CISS_SCSI_TAPE */ 1725#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3d806820280..68b205a9338 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
161 &xor_funcs 161 &xor_funcs
162}; 162};
163 163
164static loff_t get_loop_size(struct loop_device *lo, struct file *file) 164static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
165{ 165{
166 loff_t size, offset, loopsize; 166 loff_t size, loopsize;
167 167
168 /* Compute loopsize in bytes */ 168 /* Compute loopsize in bytes */
169 size = i_size_read(file->f_mapping->host); 169 size = i_size_read(file->f_mapping->host);
170 offset = lo->lo_offset;
171 loopsize = size - offset; 170 loopsize = size - offset;
 172 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) 171 /* offset is beyond i_size, weird but possible */
173 loopsize = lo->lo_sizelimit; 172 if (loopsize < 0)
173 return 0;
174 174
175 if (sizelimit > 0 && sizelimit < loopsize)
176 loopsize = sizelimit;
175 /* 177 /*
176 * Unfortunately, if we want to do I/O on the device, 178 * Unfortunately, if we want to do I/O on the device,
177 * the number of 512-byte sectors has to fit into a sector_t. 179 * the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
179 return loopsize >> 9; 181 return loopsize >> 9;
180} 182}
181 183
184static loff_t get_loop_size(struct loop_device *lo, struct file *file)
185{
186 return get_size(lo->lo_offset, lo->lo_sizelimit, file);
187}
188
182static int 189static int
183figure_loop_size(struct loop_device *lo) 190figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
184{ 191{
185 loff_t size = get_loop_size(lo, lo->lo_backing_file); 192 loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
186 sector_t x = (sector_t)size; 193 sector_t x = (sector_t)size;
187 194
188 if (unlikely((loff_t)x != size)) 195 if (unlikely((loff_t)x != size))
189 return -EFBIG; 196 return -EFBIG;
190 197 if (lo->lo_offset != offset)
198 lo->lo_offset = offset;
199 if (lo->lo_sizelimit != sizelimit)
200 lo->lo_sizelimit = sizelimit;
191 set_capacity(lo->lo_disk, x); 201 set_capacity(lo->lo_disk, x);
192 return 0; 202 return 0;
193} 203}
194 204
195static inline int 205static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
372 382
373 if (retval < 0) 383 if (retval < 0)
374 return retval; 384 return retval;
375 385 if (retval != bvec->bv_len)
386 return -EIO;
376 return 0; 387 return 0;
377} 388}
378 389
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1058 1069
1059 if (lo->lo_offset != info->lo_offset || 1070 if (lo->lo_offset != info->lo_offset ||
1060 lo->lo_sizelimit != info->lo_sizelimit) { 1071 lo->lo_sizelimit != info->lo_sizelimit) {
1061 lo->lo_offset = info->lo_offset; 1072 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
1062 lo->lo_sizelimit = info->lo_sizelimit;
1063 if (figure_loop_size(lo))
1064 return -EFBIG; 1073 return -EFBIG;
1065 } 1074 }
1066 loop_config_discard(lo); 1075 loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1246 err = -ENXIO; 1255 err = -ENXIO;
1247 if (unlikely(lo->lo_state != Lo_bound)) 1256 if (unlikely(lo->lo_state != Lo_bound))
1248 goto out; 1257 goto out;
1249 err = figure_loop_size(lo); 1258 err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
1250 if (unlikely(err)) 1259 if (unlikely(err))
1251 goto out; 1260 goto out;
1252 sec = get_capacity(lo->lo_disk); 1261 sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1284 goto out_unlocked; 1293 goto out_unlocked;
1285 break; 1294 break;
1286 case LOOP_SET_STATUS: 1295 case LOOP_SET_STATUS:
1287 err = loop_set_status_old(lo, (struct loop_info __user *) arg); 1296 err = -EPERM;
1297 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1298 err = loop_set_status_old(lo,
1299 (struct loop_info __user *)arg);
1288 break; 1300 break;
1289 case LOOP_GET_STATUS: 1301 case LOOP_GET_STATUS:
1290 err = loop_get_status_old(lo, (struct loop_info __user *) arg); 1302 err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1291 break; 1303 break;
1292 case LOOP_SET_STATUS64: 1304 case LOOP_SET_STATUS64:
1293 err = loop_set_status64(lo, (struct loop_info64 __user *) arg); 1305 err = -EPERM;
1306 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1307 err = loop_set_status64(lo,
1308 (struct loop_info64 __user *) arg);
1294 break; 1309 break;
1295 case LOOP_GET_STATUS64: 1310 case LOOP_GET_STATUS64:
1296 err = loop_get_status64(lo, (struct loop_info64 __user *) arg); 1311 err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
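
get_size() above clamps a negative loopsize (offset beyond i_size) to zero, and figure_loop_size() now takes offset and sizelimit explicitly, updating the loop device only after the sector count fits in a sector_t. A worked example of the computation, assuming a 1 GiB backing file:

	i_size   = 1073741824 bytes, offset = 4096, sizelimit = 0
	loopsize = 1073741824 - 4096 = 1073737728 bytes
	capacity = loopsize >> 9     = 2097144 sectors of 512 bytes
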
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6b9a2000d56..a79fb4f7ff6 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
630 if (dev->status & 0x10) 630 if (dev->status & 0x10)
631 return -ETIME; 631 return -ETIME;
632 632
633 memset(&hdr, 0, sizeof(hdr));
633 hdr.magic = PG_MAGIC; 634 hdr.magic = PG_MAGIC;
634 hdr.dlen = dev->dlen; 635 hdr.dlen = dev->dlen;
635 copy = 0; 636 copy = 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f9b726091ad..fe4ebc375b3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -100,6 +100,9 @@ static struct usb_device_id btusb_table[] = {
100 /* Canyon CN-BTU1 with HID interfaces */ 100 /* Canyon CN-BTU1 with HID interfaces */
101 { USB_DEVICE(0x0c10, 0x0000) }, 101 { USB_DEVICE(0x0c10, 0x0000) },
102 102
103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x413c, 0x8197) },
105
103 { } /* Terminating entry */ 106 { } /* Terminating entry */
104}; 107};
105 108
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 63e19ba56bb..6035ab8d5ef 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes)
941 if (!arch_get_random_long(&v)) 941 if (!arch_get_random_long(&v))
942 break; 942 break;
943 943
944 memcpy(buf, &v, chunk); 944 memcpy(p, &v, chunk);
945 p += chunk; 945 p += chunk;
946 nbytes -= chunk; 946 nbytes -= chunk;
947 } 947 }
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f2144..dcd8babae9e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
343 else 343 else
344 op.config |= CFG_MID_FRAG; 344 op.config |= CFG_MID_FRAG;
345 345
346 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); 346 if (first_block) {
347 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); 347 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
348 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); 348 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
349 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); 349 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
350 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); 350 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
351 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
352 }
351 } 353 }
352 354
353 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 355 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 643b055ed3c..8f049103708 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,36 +1,29 @@
1config ARCH_HAS_DEVFREQ
2 bool
3 depends on ARCH_HAS_OPP
4 help
5 Denotes that the architecture supports DEVFREQ. If the architecture
6 supports multiple OPP entries per device and the frequency of the
7 devices with OPPs may be altered dynamically, the architecture
8 supports DEVFREQ.
9
10menuconfig PM_DEVFREQ 1menuconfig PM_DEVFREQ
11 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" 2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
12 depends on PM_OPP && ARCH_HAS_DEVFREQ
13 help 3 help
14 With OPP support, a device may have a list of frequencies and 4 A device may have a list of frequencies and voltages available.
15 voltages available. DEVFREQ, a generic DVFS framework can be 5 devfreq, a generic DVFS framework can be registered for a device
16 registered for a device with OPP support in order to let the 6 in order to let the governor provided to devfreq choose an
17 governor provided to DEVFREQ choose an operating frequency 7 operating frequency based on the device driver's policy.
18 based on the OPP's list and the policy given with DEVFREQ.
19 8
20 Each device may have its own governor and policy. DEVFREQ can 9 Each device may have its own governor and policy. Devfreq can
21 reevaluate the device state periodically and/or based on the 10 reevaluate the device state periodically and/or based on the
22 OPP list changes (each frequency/voltage pair in OPP may be 11 notification to "nb", a notifier block, of devfreq.
23 disabled or enabled).
24 12
25 Like some CPUs with CPUFREQ, a device may have multiple clocks. 13 Like some CPUs with CPUfreq, a device may have multiple clocks.
26 However, because the clock frequencies of a single device are 14 However, because the clock frequencies of a single device are
27 determined by the single device's state, an instance of DEVFREQ 15 determined by the single device's state, an instance of devfreq
28 is attached to a single device and returns a "representative" 16 is attached to a single device and returns a "representative"
29 clock frequency from the OPP of the device, which is also attached 17 clock frequency of the device, which is also attached
30 to a device by 1-to-1. The device registering DEVFREQ takes the 18 to a device by 1-to-1. The device registering devfreq takes the
 31 responsibility to "interpret" the frequency listed in OPP and 19 responsibility to "interpret" the representative frequency and
32 to set its every clock accordingly with the "target" callback 20 to set its every clock accordingly with the "target" callback
33 given to DEVFREQ. 21 given to devfreq.
22
23 When OPP is used with the devfreq device, it is recommended to
24 register devfreq's nb to the OPP's notifier head. If OPP is
25 used with the devfreq device, you may use OPP helper
26 functions defined in devfreq.h.
34 27
35if PM_DEVFREQ 28if PM_DEVFREQ
36 29
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5d15b812377..59d24e9cb8c 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,9 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/stat.h>
19#include <linux/opp.h> 21#include <linux/opp.h>
20#include <linux/devfreq.h> 22#include <linux/devfreq.h>
21#include <linux/workqueue.h> 23#include <linux/workqueue.h>
@@ -416,10 +418,14 @@ out:
416 */ 418 */
417int devfreq_remove_device(struct devfreq *devfreq) 419int devfreq_remove_device(struct devfreq *devfreq)
418{ 420{
421 bool central_polling;
422
419 if (!devfreq) 423 if (!devfreq)
420 return -EINVAL; 424 return -EINVAL;
421 425
422 if (!devfreq->governor->no_central_polling) { 426 central_polling = !devfreq->governor->no_central_polling;
427
428 if (central_polling) {
423 mutex_lock(&devfreq_list_lock); 429 mutex_lock(&devfreq_list_lock);
424 while (wait_remove_device == devfreq) { 430 while (wait_remove_device == devfreq) {
425 mutex_unlock(&devfreq_list_lock); 431 mutex_unlock(&devfreq_list_lock);
@@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq)
431 mutex_lock(&devfreq->lock); 437 mutex_lock(&devfreq->lock);
432 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ 438 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
433 439
434 if (!devfreq->governor->no_central_polling) 440 if (central_polling)
435 mutex_unlock(&devfreq_list_lock); 441 mutex_unlock(&devfreq_list_lock);
436 442
437 return 0; 443 return 0;
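
The devfreq_remove_device() change above caches the governor's no_central_polling flag in a local before _remove_devfreq() can free the devfreq object, so the later unlock decision no longer reads freed memory. A minimal sketch of that pattern, using a hypothetical struct widget and a pthread mutex rather than the kernel locking API:

/*
 * Minimal sketch of the "cache the flag before teardown" pattern used
 * above; struct widget, widget_destroy() and list_lock are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct widget {
	bool needs_list_lock;
	/* ... */
};

static void widget_destroy(struct widget *w)
{
	free(w);		/* after this, w must not be touched */
}

static int widget_remove(struct widget *w)
{
	bool needs_lock;

	if (!w)
		return -1;

	/* Read the flag once, while the object is still valid. */
	needs_lock = w->needs_list_lock;

	if (needs_lock)
		pthread_mutex_lock(&list_lock);

	widget_destroy(w);	/* may free w */

	/* Decide from the cached value, not from freed memory. */
	if (needs_lock)
		pthread_mutex_unlock(&list_lock);

	return 0;
}

int main(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->needs_list_lock = true;
	return widget_remove(w);
}
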
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9c..73464a62adf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
1128 { .compatible = "fsl,p1020-memory-controller", }, 1128 { .compatible = "fsl,p1020-memory-controller", },
1129 { .compatible = "fsl,p1021-memory-controller", }, 1129 { .compatible = "fsl,p1021-memory-controller", },
1130 { .compatible = "fsl,p2020-memory-controller", }, 1130 { .compatible = "fsl,p2020-memory-controller", },
1131 { .compatible = "fsl,p4080-memory-controller", }, 1131 { .compatible = "fsl,qoriq-memory-controller", },
1132 {}, 1132 {},
1133}; 1133};
1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); 1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index bcb1126e3d0..153980be4ee 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str)
585} 585}
586 586
587/** 587/**
588 * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 588 * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
589 * @str: Case sensitive Name 589 * @str: Case sensitive Name
590 */ 590 */
591int dmi_name_in_vendors(const char *str) 591int dmi_name_in_vendors(const char *str)
592{ 592{
593 static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR, 593 static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
594 DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR,
595 DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE };
596 int i; 594 int i;
597 for (i = 0; fields[i] != DMI_NONE; i++) { 595 for (i = 0; fields[i] != DMI_NONE; i++) {
598 int f = fields[i]; 596 int f = fields[i];
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87f..b0a81173a26 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
457} 457}
458 458
459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
460 struct timespec *timespec, struct pstore_info *psi) 460 struct timespec *timespec,
461 char **buf, struct pstore_info *psi)
461{ 462{
462 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 463 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
463 struct efivars *efivars = psi->data; 464 struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
478 timespec->tv_nsec = 0; 479 timespec->tv_nsec = 0;
479 get_var_data_locked(efivars, &efivars->walk_entry->var); 480 get_var_data_locked(efivars, &efivars->walk_entry->var);
480 size = efivars->walk_entry->var.DataSize; 481 size = efivars->walk_entry->var.DataSize;
481 memcpy(psi->buf, efivars->walk_entry->var.Data, size); 482 *buf = kmalloc(size, GFP_KERNEL);
483 if (*buf == NULL)
484 return -ENOMEM;
485 memcpy(*buf, efivars->walk_entry->var.Data,
486 size);
482 efivars->walk_entry = list_entry(efivars->walk_entry->list.next, 487 efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
483 struct efivar_entry, list); 488 struct efivar_entry, list);
484 return size; 489 return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
576} 581}
577 582
578static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 583static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
579 struct timespec *time, struct pstore_info *psi) 584 struct timespec *timespec,
585 char **buf, struct pstore_info *psi)
580{ 586{
581 return -1; 587 return -1;
582} 588}
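
The new efi_pstore_read() prototype hands back a freshly allocated record through char **buf instead of copying into the fixed psi->buf, leaving the caller to free it. A small userspace sketch of that callee-allocates convention — read_record() and its contents are hypothetical, not the pstore API:

/*
 * Userspace sketch of the callee-allocates pattern introduced above;
 * read_record() and the record contents are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Returns the record size, or -1 on allocation failure.
 * On success *buf points to memory the caller must free(). */
static ssize_t read_record(char **buf)
{
	const char record[] = "oops: example crash record";
	size_t size = sizeof(record);

	*buf = malloc(size);
	if (*buf == NULL)
		return -1;
	memcpy(*buf, record, size);
	return (ssize_t)size;
}

int main(void)
{
	char *buf = NULL;
	ssize_t n = read_record(&buf);

	if (n < 0)
		return 1;
	printf("read %zd bytes: %s\n", n, buf);
	free(buf);	/* ownership is with the caller */
	return 0;
}
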
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 147df8ae79d..d3f3e8f5456 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
546 * Translate OpenFirmware node properties into platform_data 546 * Translate OpenFirmware node properties into platform_data
547 * WARNING: This is DEPRECATED and will be removed eventually! 547 * WARNING: This is DEPRECATED and will be removed eventually!
548 */ 548 */
549void 549static void
550pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 550pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
551{ 551{
552 struct device_node *node; 552 struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
574 *invert = *val; 574 *invert = *val;
575} 575}
576#else 576#else
577void 577static void
578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
579{ 579{
580 *gpio_base = -1; 580 *gpio_base = -1;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 405c63b9d53..8323fc38984 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
1873 } 1873 }
1874 1874
1875 if (num_clips && clips_ptr) { 1875 if (num_clips && clips_ptr) {
1876 if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
1877 ret = -EINVAL;
1878 goto out_err1;
1879 }
1876 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 1880 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
1877 if (!clips) { 1881 if (!clips) {
1878 ret = -ENOMEM; 1882 ret = -ENOMEM;
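
The added check rejects out-of-range num_clips values before num_clips * sizeof(*clips) feeds the allocation, so a user-controlled count can no longer oversize it. A standalone sketch of the same idea, with MAX_CLIPS and struct clip_rect as made-up stand-ins:

/*
 * Sketch of the bounds check added above: validate a user-supplied
 * element count before it feeds a size calculation. MAX_CLIPS and
 * struct clip_rect are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_CLIPS 256

struct clip_rect {
	uint16_t x1, y1, x2, y2;
};

static struct clip_rect *alloc_clips(int num_clips)
{
	if (num_clips <= 0 || num_clips > MAX_CLIPS) {
		errno = EINVAL;		/* reject before the multiplication */
		return NULL;
	}
	return calloc((size_t)num_clips, sizeof(struct clip_rect));
}

int main(void)
{
	struct clip_rect *clips = alloc_clips(4);

	if (!clips)
		return 1;
	/* ... fill and use the clip rectangles ... */
	free(clips);
	return 0;
}
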
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 68b756253f9..44a5d0ad8b7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
110 /* Prevent vblank irq processing while disabling vblank irqs, 110 /* Prevent vblank irq processing while disabling vblank irqs,
111 * so no updates of timestamps or count can happen after we've 111 * so no updates of timestamps or count can happen after we've
112 * disabled. Needed to prevent races in case of delayed irq's. 112 * disabled. Needed to prevent races in case of delayed irq's.
113 * Disable preemption, so vblank_time_lock is held as short as
114 * possible, even under a kernel with PREEMPT_RT patches.
115 */ 113 */
116 preempt_disable();
117 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 114 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
118 115
119 dev->driver->disable_vblank(dev, crtc); 116 dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
164 clear_vblank_timestamps(dev, crtc); 161 clear_vblank_timestamps(dev, crtc);
165 162
166 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 163 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
167 preempt_enable();
168} 164}
169 165
170static void vblank_disable_fn(unsigned long arg) 166static void vblank_disable_fn(unsigned long arg)
@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
889 spin_lock_irqsave(&dev->vbl_lock, irqflags); 885 spin_lock_irqsave(&dev->vbl_lock, irqflags);
890 /* Going from 0->1 means we have to enable interrupts again */ 886 /* Going from 0->1 means we have to enable interrupts again */
891 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 887 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
892 /* Disable preemption while holding vblank_time_lock. Do
893 * it explicitely to guard against PREEMPT_RT kernel.
894 */
895 preempt_disable();
896 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 888 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
897 if (!dev->vblank_enabled[crtc]) { 889 if (!dev->vblank_enabled[crtc]) {
898 /* Enable vblank irqs under vblank_time_lock protection. 890 /* Enable vblank irqs under vblank_time_lock protection.
@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
912 } 904 }
913 } 905 }
914 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 906 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
915 preempt_enable();
916 } else { 907 } else {
917 if (!dev->vblank_enabled[crtc]) { 908 if (!dev->vblank_enabled[crtc]) {
918 atomic_dec(&dev->vblank_refcount[crtc]); 909 atomic_dec(&dev->vblank_refcount[crtc]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc..2bb07bca511 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
27#include "drm.h" 27#include "drm.h"
28 28
29#include "exynos_drm_drv.h" 29#include "exynos_drm_drv.h"
30#include "exynos_drm_gem.h"
30#include "exynos_drm_buf.h" 31#include "exynos_drm_buf.h"
31 32
32static DEFINE_MUTEX(exynos_drm_buf_lock);
33
34static int lowlevel_buffer_allocate(struct drm_device *dev, 33static int lowlevel_buffer_allocate(struct drm_device *dev,
35 struct exynos_drm_buf_entry *entry) 34 struct exynos_drm_gem_buf *buffer)
36{ 35{
37 DRM_DEBUG_KMS("%s\n", __FILE__); 36 DRM_DEBUG_KMS("%s\n", __FILE__);
38 37
39 entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, 38 buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
40 (dma_addr_t *)&entry->paddr, GFP_KERNEL); 39 &buffer->dma_addr, GFP_KERNEL);
41 if (!entry->paddr) { 40 if (!buffer->kvaddr) {
42 DRM_ERROR("failed to allocate buffer.\n"); 41 DRM_ERROR("failed to allocate buffer.\n");
43 return -ENOMEM; 42 return -ENOMEM;
44 } 43 }
45 44
46 DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", 45 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
47 (unsigned int)entry->vaddr, entry->paddr, entry->size); 46 (unsigned long)buffer->kvaddr,
47 (unsigned long)buffer->dma_addr,
48 buffer->size);
48 49
49 return 0; 50 return 0;
50} 51}
51 52
52static void lowlevel_buffer_deallocate(struct drm_device *dev, 53static void lowlevel_buffer_deallocate(struct drm_device *dev,
53 struct exynos_drm_buf_entry *entry) 54 struct exynos_drm_gem_buf *buffer)
54{ 55{
55 DRM_DEBUG_KMS("%s.\n", __FILE__); 56 DRM_DEBUG_KMS("%s.\n", __FILE__);
56 57
57 if (entry->paddr && entry->vaddr && entry->size) 58 if (buffer->dma_addr && buffer->size)
58 dma_free_writecombine(dev->dev, entry->size, entry->vaddr, 59 dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
59 entry->paddr); 60 (dma_addr_t)buffer->dma_addr);
60 else 61 else
61 DRM_DEBUG_KMS("entry data is null.\n"); 62 DRM_DEBUG_KMS("buffer data are invalid.\n");
62} 63}
63 64
64struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 65struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
65 unsigned int size) 66 unsigned int size)
66{ 67{
67 struct exynos_drm_buf_entry *entry; 68 struct exynos_drm_gem_buf *buffer;
68 69
69 DRM_DEBUG_KMS("%s.\n", __FILE__); 70 DRM_DEBUG_KMS("%s.\n", __FILE__);
71 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
70 72
71 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 73 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
72 if (!entry) { 74 if (!buffer) {
73 DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); 75 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
74 return ERR_PTR(-ENOMEM); 76 return ERR_PTR(-ENOMEM);
75 } 77 }
76 78
77 entry->size = size; 79 buffer->size = size;
78 80
79 /* 81 /*
80 * allocate memory region with size and set the memory information 82 * allocate memory region with size and set the memory information
81 * to vaddr and paddr of a entry object. 83 * to vaddr and dma_addr of a buffer object.
82 */ 84 */
83 if (lowlevel_buffer_allocate(dev, entry) < 0) { 85 if (lowlevel_buffer_allocate(dev, buffer) < 0) {
84 kfree(entry); 86 kfree(buffer);
85 entry = NULL; 87 buffer = NULL;
86 return ERR_PTR(-ENOMEM); 88 return ERR_PTR(-ENOMEM);
87 } 89 }
88 90
89 return entry; 91 return buffer;
90} 92}
91 93
92void exynos_drm_buf_destroy(struct drm_device *dev, 94void exynos_drm_buf_destroy(struct drm_device *dev,
93 struct exynos_drm_buf_entry *entry) 95 struct exynos_drm_gem_buf *buffer)
94{ 96{
95 DRM_DEBUG_KMS("%s.\n", __FILE__); 97 DRM_DEBUG_KMS("%s.\n", __FILE__);
96 98
97 if (!entry) { 99 if (!buffer) {
98 DRM_DEBUG_KMS("entry is null.\n"); 100 DRM_DEBUG_KMS("buffer is null.\n");
99 return; 101 return;
100 } 102 }
101 103
102 lowlevel_buffer_deallocate(dev, entry); 104 lowlevel_buffer_deallocate(dev, buffer);
103 105
104 kfree(entry); 106 kfree(buffer);
105 entry = NULL; 107 buffer = NULL;
106} 108}
107 109
108MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 110MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
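
exynos_drm_buf_create() above allocates the tracking structure, then the backing memory, and unwinds the partial object on failure; exynos_drm_buf_destroy() reverses both steps. A simplified userspace sketch of that create/unwind/destroy pairing — gem_buf, hw_alloc() and hw_free() are stand-ins for the real DMA helpers:

/*
 * Userspace sketch of the create/unwind/destroy pairing used by
 * exynos_drm_buf_create()/exynos_drm_buf_destroy(); gem_buf, hw_alloc()
 * and hw_free() are hypothetical stand-ins for the DMA allocation helpers.
 */
#include <stdio.h>
#include <stdlib.h>

struct gem_buf {
	void	*kvaddr;
	size_t	 size;
};

static int hw_alloc(struct gem_buf *buffer)
{
	buffer->kvaddr = malloc(buffer->size);	/* stands in for dma_alloc_* */
	return buffer->kvaddr ? 0 : -1;
}

static void hw_free(struct gem_buf *buffer)
{
	free(buffer->kvaddr);			/* stands in for dma_free_* */
}

static struct gem_buf *buf_create(size_t size)
{
	struct gem_buf *buffer = calloc(1, sizeof(*buffer));

	if (!buffer)
		return NULL;

	buffer->size = size;
	if (hw_alloc(buffer) < 0) {
		free(buffer);			/* unwind the partial object */
		return NULL;
	}
	return buffer;
}

static void buf_destroy(struct gem_buf *buffer)
{
	if (!buffer)
		return;
	hw_free(buffer);
	free(buffer);
}

int main(void)
{
	struct gem_buf *buffer = buf_create(4096);

	if (!buffer)
		return 1;
	printf("allocated %zu bytes at %p\n", buffer->size, buffer->kvaddr);
	buf_destroy(buffer);
	return 0;
}
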
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01..6e91f9caa5d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
26#ifndef _EXYNOS_DRM_BUF_H_ 26#ifndef _EXYNOS_DRM_BUF_H_
27#define _EXYNOS_DRM_BUF_H_ 27#define _EXYNOS_DRM_BUF_H_
28 28
29/*
30 * exynos drm buffer entry structure.
31 *
32 * @paddr: physical address of allocated memory.
33 * @vaddr: kernel virtual address of allocated memory.
34 * @size: size of allocated memory.
35 */
36struct exynos_drm_buf_entry {
37 dma_addr_t paddr;
38 void __iomem *vaddr;
39 unsigned int size;
40};
41
42/* allocate physical memory. */ 29/* allocate physical memory. */
43struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 30struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
44 unsigned int size); 31 unsigned int size);
45 32
46/* get physical memory information of a drm framebuffer. */ 33/* get memory information of a drm framebuffer. */
47struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); 34struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
48 35
49/* remove allocated physical memory. */ 36/* remove allocated physical memory. */
50void exynos_drm_buf_destroy(struct drm_device *dev, 37void exynos_drm_buf_destroy(struct drm_device *dev,
51 struct exynos_drm_buf_entry *entry); 38 struct exynos_drm_gem_buf *buffer);
52 39
53#endif 40#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e76872..d620b078425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
37 37
38struct exynos_drm_connector { 38struct exynos_drm_connector {
39 struct drm_connector drm_connector; 39 struct drm_connector drm_connector;
40 uint32_t encoder_id;
41 struct exynos_drm_manager *manager;
40}; 42};
41 43
42/* convert exynos_video_timings to drm_display_mode */ 44/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
47 DRM_DEBUG_KMS("%s\n", __FILE__); 49 DRM_DEBUG_KMS("%s\n", __FILE__);
48 50
49 mode->clock = timing->pixclock / 1000; 51 mode->clock = timing->pixclock / 1000;
52 mode->vrefresh = timing->refresh;
50 53
51 mode->hdisplay = timing->xres; 54 mode->hdisplay = timing->xres;
52 mode->hsync_start = mode->hdisplay + timing->left_margin; 55 mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
57 mode->vsync_start = mode->vdisplay + timing->upper_margin; 60 mode->vsync_start = mode->vdisplay + timing->upper_margin;
58 mode->vsync_end = mode->vsync_start + timing->vsync_len; 61 mode->vsync_end = mode->vsync_start + timing->vsync_len;
59 mode->vtotal = mode->vsync_end + timing->lower_margin; 62 mode->vtotal = mode->vsync_end + timing->lower_margin;
63
64 if (timing->vmode & FB_VMODE_INTERLACED)
65 mode->flags |= DRM_MODE_FLAG_INTERLACE;
66
67 if (timing->vmode & FB_VMODE_DOUBLE)
68 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
60} 69}
61 70
62/* convert drm_display_mode to exynos_video_timings */ 71/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
69 memset(timing, 0, sizeof(*timing)); 78 memset(timing, 0, sizeof(*timing));
70 79
71 timing->pixclock = mode->clock * 1000; 80 timing->pixclock = mode->clock * 1000;
72 timing->refresh = mode->vrefresh; 81 timing->refresh = drm_mode_vrefresh(mode);
73 82
74 timing->xres = mode->hdisplay; 83 timing->xres = mode->hdisplay;
75 timing->left_margin = mode->hsync_start - mode->hdisplay; 84 timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
92 101
93static int exynos_drm_connector_get_modes(struct drm_connector *connector) 102static int exynos_drm_connector_get_modes(struct drm_connector *connector)
94{ 103{
95 struct exynos_drm_manager *manager = 104 struct exynos_drm_connector *exynos_connector =
96 exynos_drm_get_manager(connector->encoder); 105 to_exynos_connector(connector);
97 struct exynos_drm_display *display = manager->display; 106 struct exynos_drm_manager *manager = exynos_connector->manager;
107 struct exynos_drm_display_ops *display_ops = manager->display_ops;
98 unsigned int count; 108 unsigned int count;
99 109
100 DRM_DEBUG_KMS("%s\n", __FILE__); 110 DRM_DEBUG_KMS("%s\n", __FILE__);
101 111
102 if (!display) { 112 if (!display_ops) {
103 DRM_DEBUG_KMS("display is null.\n"); 113 DRM_DEBUG_KMS("display_ops is null.\n");
104 return 0; 114 return 0;
105 } 115 }
106 116
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
112 * P.S. in case of lcd panel, count is always 1 if success 122 * P.S. in case of lcd panel, count is always 1 if success
113 * because lcd panel has only one mode. 123 * because lcd panel has only one mode.
114 */ 124 */
115 if (display->get_edid) { 125 if (display_ops->get_edid) {
116 int ret; 126 int ret;
117 void *edid; 127 void *edid;
118 128
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
122 return 0; 132 return 0;
123 } 133 }
124 134
125 ret = display->get_edid(manager->dev, connector, 135 ret = display_ops->get_edid(manager->dev, connector,
126 edid, MAX_EDID); 136 edid, MAX_EDID);
127 if (ret < 0) { 137 if (ret < 0) {
128 DRM_ERROR("failed to get edid data.\n"); 138 DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
140 struct drm_display_mode *mode = drm_mode_create(connector->dev); 150 struct drm_display_mode *mode = drm_mode_create(connector->dev);
141 struct fb_videomode *timing; 151 struct fb_videomode *timing;
142 152
143 if (display->get_timing) 153 if (display_ops->get_timing)
144 timing = display->get_timing(manager->dev); 154 timing = display_ops->get_timing(manager->dev);
145 else { 155 else {
146 drm_mode_destroy(connector->dev, mode); 156 drm_mode_destroy(connector->dev, mode);
147 return 0; 157 return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
162static int exynos_drm_connector_mode_valid(struct drm_connector *connector, 172static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
163 struct drm_display_mode *mode) 173 struct drm_display_mode *mode)
164{ 174{
165 struct exynos_drm_manager *manager = 175 struct exynos_drm_connector *exynos_connector =
166 exynos_drm_get_manager(connector->encoder); 176 to_exynos_connector(connector);
167 struct exynos_drm_display *display = manager->display; 177 struct exynos_drm_manager *manager = exynos_connector->manager;
178 struct exynos_drm_display_ops *display_ops = manager->display_ops;
168 struct fb_videomode timing; 179 struct fb_videomode timing;
169 int ret = MODE_BAD; 180 int ret = MODE_BAD;
170 181
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
172 183
173 convert_to_video_timing(&timing, mode); 184 convert_to_video_timing(&timing, mode);
174 185
175 if (display && display->check_timing) 186 if (display_ops && display_ops->check_timing)
176 if (!display->check_timing(manager->dev, (void *)&timing)) 187 if (!display_ops->check_timing(manager->dev, (void *)&timing))
177 ret = MODE_OK; 188 ret = MODE_OK;
178 189
179 return ret; 190 return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
181 192
182struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) 193struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
183{ 194{
195 struct drm_device *dev = connector->dev;
196 struct exynos_drm_connector *exynos_connector =
197 to_exynos_connector(connector);
198 struct drm_mode_object *obj;
199 struct drm_encoder *encoder;
200
184 DRM_DEBUG_KMS("%s\n", __FILE__); 201 DRM_DEBUG_KMS("%s\n", __FILE__);
185 202
186 return connector->encoder; 203 obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
204 DRM_MODE_OBJECT_ENCODER);
205 if (!obj) {
206 DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
207 exynos_connector->encoder_id);
208 return NULL;
209 }
210
211 encoder = obj_to_encoder(obj);
212
213 return encoder;
187} 214}
188 215
189static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { 216static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
196static enum drm_connector_status 223static enum drm_connector_status
197exynos_drm_connector_detect(struct drm_connector *connector, bool force) 224exynos_drm_connector_detect(struct drm_connector *connector, bool force)
198{ 225{
199 struct exynos_drm_manager *manager = 226 struct exynos_drm_connector *exynos_connector =
200 exynos_drm_get_manager(connector->encoder); 227 to_exynos_connector(connector);
201 struct exynos_drm_display *display = manager->display; 228 struct exynos_drm_manager *manager = exynos_connector->manager;
229 struct exynos_drm_display_ops *display_ops =
230 manager->display_ops;
202 enum drm_connector_status status = connector_status_disconnected; 231 enum drm_connector_status status = connector_status_disconnected;
203 232
204 DRM_DEBUG_KMS("%s\n", __FILE__); 233 DRM_DEBUG_KMS("%s\n", __FILE__);
205 234
206 if (display && display->is_connected) { 235 if (display_ops && display_ops->is_connected) {
207 if (display->is_connected(manager->dev)) 236 if (display_ops->is_connected(manager->dev))
208 status = connector_status_connected; 237 status = connector_status_connected;
209 else 238 else
210 status = connector_status_disconnected; 239 status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
251 280
252 connector = &exynos_connector->drm_connector; 281 connector = &exynos_connector->drm_connector;
253 282
254 switch (manager->display->type) { 283 switch (manager->display_ops->type) {
255 case EXYNOS_DISPLAY_TYPE_HDMI: 284 case EXYNOS_DISPLAY_TYPE_HDMI:
256 type = DRM_MODE_CONNECTOR_HDMIA; 285 type = DRM_MODE_CONNECTOR_HDMIA;
286 connector->interlace_allowed = true;
287 connector->polled = DRM_CONNECTOR_POLL_HPD;
257 break; 288 break;
258 default: 289 default:
259 type = DRM_MODE_CONNECTOR_Unknown; 290 type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
267 if (err) 298 if (err)
268 goto err_connector; 299 goto err_connector;
269 300
301 exynos_connector->encoder_id = encoder->base.id;
302 exynos_connector->manager = manager;
270 connector->encoder = encoder; 303 connector->encoder = encoder;
304
271 err = drm_mode_connector_attach_encoder(connector, encoder); 305 err = drm_mode_connector_attach_encoder(connector, encoder);
272 if (err) { 306 if (err) {
273 DRM_ERROR("failed to attach a connector to a encoder\n"); 307 DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb..ee43cc22085 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31 31
32#include "exynos_drm_crtc.h"
32#include "exynos_drm_drv.h" 33#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 34#include "exynos_drm_fb.h"
34#include "exynos_drm_encoder.h" 35#include "exynos_drm_encoder.h"
36#include "exynos_drm_gem.h"
35#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
36 38
37#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ 39#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
38 drm_crtc) 40 drm_crtc)
39 41
40/* 42/*
41 * Exynos specific crtc postion structure.
42 *
43 * @fb_x: offset x on a framebuffer to be displyed
44 * - the unit is screen coordinates.
45 * @fb_y: offset y on a framebuffer to be displayed
46 * - the unit is screen coordinates.
47 * @crtc_x: offset x on hardware screen.
48 * @crtc_y: offset y on hardware screen.
49 * @crtc_w: width of hardware screen.
50 * @crtc_h: height of hardware screen.
51 */
52struct exynos_drm_crtc_pos {
53 unsigned int fb_x;
54 unsigned int fb_y;
55 unsigned int crtc_x;
56 unsigned int crtc_y;
57 unsigned int crtc_w;
58 unsigned int crtc_h;
59};
60
61/*
62 * Exynos specific crtc structure. 43 * Exynos specific crtc structure.
63 * 44 *
64 * @drm_crtc: crtc object. 45 * @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
85 66
86 exynos_drm_fn_encoder(crtc, overlay, 67 exynos_drm_fn_encoder(crtc, overlay,
87 exynos_drm_encoder_crtc_mode_set); 68 exynos_drm_encoder_crtc_mode_set);
88 exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); 69 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
70 exynos_drm_encoder_crtc_commit);
89} 71}
90 72
91static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, 73int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
92 struct drm_framebuffer *fb, 74 struct drm_framebuffer *fb,
93 struct drm_display_mode *mode, 75 struct drm_display_mode *mode,
94 struct exynos_drm_crtc_pos *pos) 76 struct exynos_drm_crtc_pos *pos)
95{ 77{
96 struct exynos_drm_buf_entry *entry; 78 struct exynos_drm_gem_buf *buffer;
97 unsigned int actual_w; 79 unsigned int actual_w;
98 unsigned int actual_h; 80 unsigned int actual_h;
99 81
100 entry = exynos_drm_fb_get_buf(fb); 82 buffer = exynos_drm_fb_get_buf(fb);
101 if (!entry) { 83 if (!buffer) {
102 DRM_LOG_KMS("entry is null.\n"); 84 DRM_LOG_KMS("buffer is null.\n");
103 return -EFAULT; 85 return -EFAULT;
104 } 86 }
105 87
106 overlay->paddr = entry->paddr; 88 overlay->dma_addr = buffer->dma_addr;
107 overlay->vaddr = entry->vaddr; 89 overlay->vaddr = buffer->kvaddr;
108 90
109 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 91 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
110 (unsigned long)overlay->vaddr, 92 (unsigned long)overlay->vaddr,
111 (unsigned long)overlay->paddr); 93 (unsigned long)overlay->dma_addr);
112 94
113 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); 95 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
114 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); 96 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
171 153
172static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 154static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
173{ 155{
174 DRM_DEBUG_KMS("%s\n", __FILE__); 156 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
175 157
176 /* TODO */ 158 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
159
160 switch (mode) {
161 case DRM_MODE_DPMS_ON:
162 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
163 exynos_drm_encoder_crtc_commit);
164 break;
165 case DRM_MODE_DPMS_STANDBY:
166 case DRM_MODE_DPMS_SUSPEND:
167 case DRM_MODE_DPMS_OFF:
168 /* TODO */
169 exynos_drm_fn_encoder(crtc, NULL,
170 exynos_drm_encoder_crtc_disable);
171 break;
172 default:
173 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
174 break;
175 }
177} 176}
178 177
179static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) 178static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
185 184
186static void exynos_drm_crtc_commit(struct drm_crtc *crtc) 185static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
187{ 186{
187 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
188
188 DRM_DEBUG_KMS("%s\n", __FILE__); 189 DRM_DEBUG_KMS("%s\n", __FILE__);
189 190
190 /* drm framework doesn't check NULL. */ 191 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
192 exynos_drm_encoder_crtc_commit);
191} 193}
192 194
193static bool 195static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2..25f72a62cb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
37 37
38/*
39 * Exynos specific crtc position structure.
40 *
41 * @fb_x: offset x on a framebuffer to be displayed
42 * - the unit is screen coordinates.
43 * @fb_y: offset y on a framebuffer to be displayed
44 * - the unit is screen coordinates.
45 * @crtc_x: offset x on hardware screen.
46 * @crtc_y: offset y on hardware screen.
47 * @crtc_w: width of hardware screen.
48 * @crtc_h: height of hardware screen.
49 */
50struct exynos_drm_crtc_pos {
51 unsigned int fb_x;
52 unsigned int fb_y;
53 unsigned int crtc_x;
54 unsigned int crtc_y;
55 unsigned int crtc_w;
56 unsigned int crtc_h;
57};
58
59int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
60 struct drm_framebuffer *fb,
61 struct drm_display_mode *mode,
62 struct exynos_drm_crtc_pos *pos);
38#endif 63#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c1..53e2216de61 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
27 27
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30#include "drm_crtc_helper.h"
30 31
31#include <drm/exynos_drm.h> 32#include <drm/exynos_drm.h>
32 33
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
61 62
62 drm_mode_config_init(dev); 63 drm_mode_config_init(dev);
63 64
65 /* init kms poll for handling hpd */
66 drm_kms_helper_poll_init(dev);
67
64 exynos_drm_mode_config_init(dev); 68 exynos_drm_mode_config_init(dev);
65 69
66 /* 70 /*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
116 exynos_drm_fbdev_fini(dev); 120 exynos_drm_fbdev_fini(dev);
117 exynos_drm_device_unregister(dev); 121 exynos_drm_device_unregister(dev);
118 drm_vblank_cleanup(dev); 122 drm_vblank_cleanup(dev);
123 drm_kms_helper_poll_fini(dev);
119 drm_mode_config_cleanup(dev); 124 drm_mode_config_cleanup(dev);
120 kfree(dev->dev_private); 125 kfree(dev->dev_private);
121 126
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae7..5e02e6ecc2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
29#ifndef _EXYNOS_DRM_DRV_H_ 29#ifndef _EXYNOS_DRM_DRV_H_
30#define _EXYNOS_DRM_DRV_H_ 30#define _EXYNOS_DRM_DRV_H_
31 31
32#include <linux/module.h>
32#include "drm.h" 33#include "drm.h"
33 34
34#define MAX_CRTC 2 35#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
79 * @scan_flag: interlace or progressive way. 80 * @scan_flag: interlace or progressive way.
80 * (it could be DRM_MODE_FLAG_*) 81 * (it could be DRM_MODE_FLAG_*)
81 * @bpp: pixel size.(in bit) 82 * @bpp: pixel size.(in bit)
82 * @paddr: bus(accessed by dma) physical memory address to this overlay 83 * @dma_addr: bus(accessed by dma) address to the memory region allocated
83 * and this is physically continuous. 84 * for an overlay.
84 * @vaddr: virtual memory address to this overlay. 85 * @vaddr: virtual memory address to this overlay.
85 * @default_win: a window to be enabled. 86 * @default_win: a window to be enabled.
86 * @color_key: color key on or off. 87 * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
108 unsigned int scan_flag; 109 unsigned int scan_flag;
109 unsigned int bpp; 110 unsigned int bpp;
110 unsigned int pitch; 111 unsigned int pitch;
111 dma_addr_t paddr; 112 dma_addr_t dma_addr;
112 void __iomem *vaddr; 113 void __iomem *vaddr;
113 114
114 bool default_win; 115 bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
130 * @check_timing: check if timing is valid or not. 131 * @check_timing: check if timing is valid or not.
131 * @power_on: display device on or off. 132 * @power_on: display device on or off.
132 */ 133 */
133struct exynos_drm_display { 134struct exynos_drm_display_ops {
134 enum exynos_drm_output_type type; 135 enum exynos_drm_output_type type;
135 bool (*is_connected)(struct device *dev); 136 bool (*is_connected)(struct device *dev);
136 int (*get_edid)(struct device *dev, struct drm_connector *connector, 137 int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
146 * @mode_set: convert drm_display_mode to hw specific display mode and 147 * @mode_set: convert drm_display_mode to hw specific display mode and
147 * would be called by encoder->mode_set(). 148 * would be called by encoder->mode_set().
148 * @commit: set current hw specific display mode to hw. 149 * @commit: set current hw specific display mode to hw.
150 * @disable: disable hardware specific display mode.
149 * @enable_vblank: specific driver callback for enabling vblank interrupt. 151 * @enable_vblank: specific driver callback for enabling vblank interrupt.
150 * @disable_vblank: specific driver callback for disabling vblank interrupt. 152 * @disable_vblank: specific driver callback for disabling vblank interrupt.
151 */ 153 */
152struct exynos_drm_manager_ops { 154struct exynos_drm_manager_ops {
153 void (*mode_set)(struct device *subdrv_dev, void *mode); 155 void (*mode_set)(struct device *subdrv_dev, void *mode);
154 void (*commit)(struct device *subdrv_dev); 156 void (*commit)(struct device *subdrv_dev);
157 void (*disable)(struct device *subdrv_dev);
155 int (*enable_vblank)(struct device *subdrv_dev); 158 int (*enable_vblank)(struct device *subdrv_dev);
156 void (*disable_vblank)(struct device *subdrv_dev); 159 void (*disable_vblank)(struct device *subdrv_dev);
157}; 160};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
178 int pipe; 181 int pipe;
179 struct exynos_drm_manager_ops *ops; 182 struct exynos_drm_manager_ops *ops;
180 struct exynos_drm_overlay_ops *overlay_ops; 183 struct exynos_drm_overlay_ops *overlay_ops;
181 struct exynos_drm_display *display; 184 struct exynos_drm_display_ops *display_ops;
182}; 185};
183 186
184/* 187/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67..153061415ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
53 struct drm_device *dev = encoder->dev; 53 struct drm_device *dev = encoder->dev;
54 struct drm_connector *connector; 54 struct drm_connector *connector;
55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
56 struct exynos_drm_manager_ops *manager_ops = manager->ops;
56 57
57 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); 58 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
58 59
60 switch (mode) {
61 case DRM_MODE_DPMS_ON:
62 if (manager_ops && manager_ops->commit)
63 manager_ops->commit(manager->dev);
64 break;
65 case DRM_MODE_DPMS_STANDBY:
66 case DRM_MODE_DPMS_SUSPEND:
67 case DRM_MODE_DPMS_OFF:
68 /* TODO */
69 if (manager_ops && manager_ops->disable)
70 manager_ops->disable(manager->dev);
71 break;
72 default:
73 DRM_ERROR("unspecified mode %d\n", mode);
74 break;
75 }
76
59 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 77 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
60 if (connector->encoder == encoder) { 78 if (connector->encoder == encoder) {
61 struct exynos_drm_display *display = manager->display; 79 struct exynos_drm_display_ops *display_ops =
80 manager->display_ops;
62 81
63 if (display && display->power_on) 82 DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
64 display->power_on(manager->dev, mode); 83 connector->base.id, mode);
84 if (display_ops && display_ops->power_on)
85 display_ops->power_on(manager->dev, mode);
65 } 86 }
66 } 87 }
67} 88}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
116{ 137{
117 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 138 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
118 struct exynos_drm_manager_ops *manager_ops = manager->ops; 139 struct exynos_drm_manager_ops *manager_ops = manager->ops;
119 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
120 140
121 DRM_DEBUG_KMS("%s\n", __FILE__); 141 DRM_DEBUG_KMS("%s\n", __FILE__);
122 142
123 if (manager_ops && manager_ops->commit) 143 if (manager_ops && manager_ops->commit)
124 manager_ops->commit(manager->dev); 144 manager_ops->commit(manager->dev);
125
126 if (overlay_ops && overlay_ops->commit)
127 overlay_ops->commit(manager->dev);
128} 145}
129 146
130static struct drm_crtc * 147static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
208{ 225{
209 struct drm_device *dev = crtc->dev; 226 struct drm_device *dev = crtc->dev;
210 struct drm_encoder *encoder; 227 struct drm_encoder *encoder;
228 struct exynos_drm_private *private = dev->dev_private;
229 struct exynos_drm_manager *manager;
211 230
212 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 231 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
213 if (encoder->crtc != crtc) 232 /*
214 continue; 233 * if crtc is detached from encoder, check pipe,
234 * otherwise check crtc attached to encoder
235 */
236 if (!encoder->crtc) {
237 manager = to_exynos_encoder(encoder)->manager;
238 if (manager->pipe < 0 ||
239 private->crtc[manager->pipe] != crtc)
240 continue;
241 } else {
242 if (encoder->crtc != crtc)
243 continue;
244 }
215 245
216 fn(encoder, data); 246 fn(encoder, data);
217 } 247 }
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
250 struct exynos_drm_manager *manager = 280 struct exynos_drm_manager *manager =
251 to_exynos_encoder(encoder)->manager; 281 to_exynos_encoder(encoder)->manager;
252 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 282 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
283 int crtc = *(int *)data;
284
285 DRM_DEBUG_KMS("%s\n", __FILE__);
286
287 /*
288 * when crtc is detached from encoder, this pipe is used
289 * to select manager operation
290 */
291 manager->pipe = crtc;
253 292
254 overlay_ops->commit(manager->dev); 293 if (overlay_ops && overlay_ops->commit)
294 overlay_ops->commit(manager->dev);
255} 295}
256 296
257void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) 297void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
261 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 301 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
262 struct exynos_drm_overlay *overlay = data; 302 struct exynos_drm_overlay *overlay = data;
263 303
264 overlay_ops->mode_set(manager->dev, overlay); 304 if (overlay_ops && overlay_ops->mode_set)
305 overlay_ops->mode_set(manager->dev, overlay);
306}
307
308void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
309{
310 struct exynos_drm_manager *manager =
311 to_exynos_encoder(encoder)->manager;
312 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
313
314 DRM_DEBUG_KMS("\n");
315
316 if (overlay_ops && overlay_ops->disable)
317 overlay_ops->disable(manager->dev);
318
319 /*
320 * crtc is already detached from encoder and last
321 * function for detaching is properly done, so
322 * clear pipe from manager to prevent repeated call
323 */
324 if (!encoder->crtc)
325 manager->pipe = -1;
265} 326}
266 327
267MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 328MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a..a22acfbf0e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); 41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); 42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); 43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
44void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
44 45
45#endif 46#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd524..5bf4a1ac7f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc.h" 30#include "drm_crtc.h"
31#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32#include "drm_fb_helper.h"
32 33
34#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
34#include "exynos_drm_buf.h" 36#include "exynos_drm_buf.h"
35#include "exynos_drm_gem.h" 37#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
41 * 43 *
42 * @fb: drm framebuffer object. 44 * @fb: drm framebuffer object.
43 * @exynos_gem_obj: exynos specific gem object containing a gem object. 45 * @exynos_gem_obj: exynos specific gem object containing a gem object.
44 * @entry: pointer to exynos drm buffer entry object. 46 * @buffer: pointer to exynos_drm_gem_buffer object.
45 * - containing only the information to physically continuous memory 47 * - contain the memory information to memory region allocated
46 * region allocated at default framebuffer creation. 48 * at default framebuffer creation.
47 */ 49 */
48struct exynos_drm_fb { 50struct exynos_drm_fb {
49 struct drm_framebuffer fb; 51 struct drm_framebuffer fb;
50 struct exynos_drm_gem_obj *exynos_gem_obj; 52 struct exynos_drm_gem_obj *exynos_gem_obj;
51 struct exynos_drm_buf_entry *entry; 53 struct exynos_drm_gem_buf *buffer;
52}; 54};
53 55
54static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 56static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
63 * default framebuffer has no gem object so 65 * default framebuffer has no gem object so
64 * a buffer of the default framebuffer should be released at here. 66 * a buffer of the default framebuffer should be released at here.
65 */ 67 */
66 if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) 68 if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
67 exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); 69 exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
68 70
69 kfree(exynos_fb); 71 kfree(exynos_fb);
70 exynos_fb = NULL; 72 exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
143 */ 145 */
144 if (!mode_cmd->handle) { 146 if (!mode_cmd->handle) {
145 if (!file_priv) { 147 if (!file_priv) {
146 struct exynos_drm_buf_entry *entry; 148 struct exynos_drm_gem_buf *buffer;
147 149
148 /* 150 /*
149 * in case that file_priv is NULL, it allocates 151 * in case that file_priv is NULL, it allocates
150 * only buffer and this buffer would be used 152 * only buffer and this buffer would be used
151 * for default framebuffer. 153 * for default framebuffer.
152 */ 154 */
153 entry = exynos_drm_buf_create(dev, size); 155 buffer = exynos_drm_buf_create(dev, size);
154 if (IS_ERR(entry)) { 156 if (IS_ERR(buffer)) {
155 ret = PTR_ERR(entry); 157 ret = PTR_ERR(buffer);
156 goto err_buffer; 158 goto err_buffer;
157 } 159 }
158 160
159 exynos_fb->entry = entry; 161 exynos_fb->buffer = buffer;
160 162
161 DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", 163 DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
162 (unsigned long)entry->paddr, size); 164 (unsigned long)buffer->dma_addr, size);
163 165
164 goto out; 166 goto out;
165 } else { 167 } else {
166 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, 168 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
167 size, 169 &mode_cmd->handle,
168 &mode_cmd->handle); 170 size);
169 if (IS_ERR(exynos_gem_obj)) { 171 if (IS_ERR(exynos_gem_obj)) {
170 ret = PTR_ERR(exynos_gem_obj); 172 ret = PTR_ERR(exynos_gem_obj);
171 goto err_buffer; 173 goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
189 * so that default framebuffer has no its own gem object, 191 * so that default framebuffer has no its own gem object,
190 * only its own buffer object. 192 * only its own buffer object.
191 */ 193 */
192 exynos_fb->entry = exynos_gem_obj->entry; 194 exynos_fb->buffer = exynos_gem_obj->buffer;
193 195
194 DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", 196 DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
195 (unsigned long)exynos_fb->entry->paddr, size, 197 (unsigned long)exynos_fb->buffer->dma_addr, size,
196 (unsigned int)&exynos_gem_obj->base); 198 (unsigned int)&exynos_gem_obj->base);
197 199
198out: 200out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
220 return exynos_drm_fb_init(file_priv, dev, mode_cmd); 222 return exynos_drm_fb_init(file_priv, dev, mode_cmd);
221} 223}
222 224
223struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) 225struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
224{ 226{
225 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 227 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
226 struct exynos_drm_buf_entry *entry; 228 struct exynos_drm_gem_buf *buffer;
227 229
228 DRM_DEBUG_KMS("%s\n", __FILE__); 230 DRM_DEBUG_KMS("%s\n", __FILE__);
229 231
230 entry = exynos_fb->entry; 232 buffer = exynos_fb->buffer;
231 if (!entry) 233 if (!buffer)
232 return NULL; 234 return NULL;
233 235
234 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 236 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
235 (unsigned long)entry->vaddr, 237 (unsigned long)buffer->kvaddr,
236 (unsigned long)entry->paddr); 238 (unsigned long)buffer->dma_addr);
237 239
238 return entry; 240 return buffer;
241}
242
243static void exynos_drm_output_poll_changed(struct drm_device *dev)
244{
245 struct exynos_drm_private *private = dev->dev_private;
246 struct drm_fb_helper *fb_helper = private->fb_helper;
247
248 if (fb_helper)
249 drm_fb_helper_hotplug_event(fb_helper);
239} 250}
240 251
241static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 252static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
242 .fb_create = exynos_drm_fb_create, 253 .fb_create = exynos_drm_fb_create,
254 .output_poll_changed = exynos_drm_output_poll_changed,
243}; 255};
244 256
245void exynos_drm_mode_config_init(struct drm_device *dev) 257void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a771..836f4100818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
33 33
34#include "exynos_drm_drv.h" 34#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h"
36#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
37 38
38#define MAX_CONNECTOR 4 39#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
85}; 86};
86 87
87static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, 88static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
88 struct drm_framebuffer *fb, 89 struct drm_framebuffer *fb)
89 unsigned int fb_width,
90 unsigned int fb_height)
91{ 90{
92 struct fb_info *fbi = helper->fbdev; 91 struct fb_info *fbi = helper->fbdev;
93 struct drm_device *dev = helper->dev; 92 struct drm_device *dev = helper->dev;
94 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); 93 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
95 struct exynos_drm_buf_entry *entry; 94 struct exynos_drm_gem_buf *buffer;
96 unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); 95 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
97 unsigned long offset; 96 unsigned long offset;
98 97
99 DRM_DEBUG_KMS("%s\n", __FILE__); 98 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
101 exynos_fb->fb = fb; 100 exynos_fb->fb = fb;
102 101
103 drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); 102 drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
104 drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); 103 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
105 104
106 entry = exynos_drm_fb_get_buf(fb); 105 buffer = exynos_drm_fb_get_buf(fb);
107 if (!entry) { 106 if (!buffer) {
108 DRM_LOG_KMS("entry is null.\n"); 107 DRM_LOG_KMS("buffer is null.\n");
109 return -EFAULT; 108 return -EFAULT;
110 } 109 }
111 110
112 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 111 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
113 offset += fbi->var.yoffset * fb->pitch; 112 offset += fbi->var.yoffset * fb->pitch;
114 113
115 dev->mode_config.fb_base = entry->paddr; 114 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
116 fbi->screen_base = entry->vaddr + offset; 115 fbi->screen_base = buffer->kvaddr + offset;
117 fbi->fix.smem_start = entry->paddr + offset; 116 fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
118 fbi->screen_size = size; 117 fbi->screen_size = size;
119 fbi->fix.smem_len = size; 118 fbi->fix.smem_len = size;
120 119
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
171 goto out; 170 goto out;
172 } 171 }
173 172
174 ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 173 ret = exynos_drm_fbdev_update(helper, helper->fb);
175 sizes->fb_height);
176 if (ret < 0) 174 if (ret < 0)
177 fb_dealloc_cmap(&fbi->cmap); 175 fb_dealloc_cmap(&fbi->cmap);
178 176
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
235 } 233 }
236 234
237 helper->fb = exynos_fbdev->fb; 235 helper->fb = exynos_fbdev->fb;
238 return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 236 return exynos_drm_fbdev_update(helper, helper->fb);
239 sizes->fb_height);
240} 237}
241 238
242static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, 239static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
405 fb_helper = private->fb_helper; 402 fb_helper = private->fb_helper;
406 403
407 if (fb_helper) { 404 if (fb_helper) {
405 struct list_head temp_list;
406
407 INIT_LIST_HEAD(&temp_list);
408
409 /*
410 * fb_helper is reinitialized but the kernel fb is reused,
411 * so kernel_fb_list needs to be backed up and restored
412 */
413 if (!list_empty(&fb_helper->kernel_fb_list))
414 list_replace_init(&fb_helper->kernel_fb_list,
415 &temp_list);
416
408 drm_fb_helper_fini(fb_helper); 417 drm_fb_helper_fini(fb_helper);
409 418
410 ret = drm_fb_helper_init(dev, fb_helper, 419 ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
414 return ret; 423 return ret;
415 } 424 }
416 425
426 if (!list_empty(&temp_list))
427 list_replace(&temp_list, &fb_helper->kernel_fb_list);
428
417 ret = drm_fb_helper_single_add_all_connectors(fb_helper); 429 ret = drm_fb_helper_single_add_all_connectors(fb_helper);
418 if (ret < 0) { 430 if (ret < 0) {
419 DRM_ERROR("failed to add fb helper to connectors\n"); 431 DRM_ERROR("failed to add fb helper to connectors\n");
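
The fbdev hunk above moves kernel_fb_list aside before drm_fb_helper_fini() and puts it back after re-initialization, because the kernel fb itself is reused. A toy sketch of preserving one member across a reset of its containing object — struct helper and helper_reinit() are hypothetical stand-ins, not the drm_fb_helper API:

/*
 * Sketch of the backup/restore idea in the fbdev reinit hunk above:
 * one member (here `entries`) must survive while the rest of the object
 * is reinitialized. struct helper and helper_reinit() are hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct helper {
	int	config;
	char	entries[32];	/* stands in for kernel_fb_list */
};

static void helper_reinit(struct helper *h)
{
	memset(h, 0, sizeof(*h));	/* wipes everything, entries included */
	h->config = 42;
}

int main(void)
{
	struct helper h = { .config = 7 };
	char saved[sizeof(h.entries)];

	snprintf(h.entries, sizeof(h.entries), "kernel fb");

	/* Back up the member that must survive ... */
	memcpy(saved, h.entries, sizeof(saved));

	helper_reinit(&h);

	/* ... and restore it once reinitialization is done. */
	memcpy(h.entries, saved, sizeof(h.entries));

	printf("config=%d entries=\"%s\"\n", h.config, h.entries);
	return 0;
}
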
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9..db3b3d9e731 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
64 unsigned int fb_width; 64 unsigned int fb_width;
65 unsigned int fb_height; 65 unsigned int fb_height;
66 unsigned int bpp; 66 unsigned int bpp;
67 dma_addr_t paddr; 67 dma_addr_t dma_addr;
68 void __iomem *vaddr; 68 void __iomem *vaddr;
69 unsigned int buf_offsize; 69 unsigned int buf_offsize;
70 unsigned int line_size; /* bytes */ 70 unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
124 return 0; 124 return 0;
125} 125}
126 126
127static struct exynos_drm_display fimd_display = { 127static struct exynos_drm_display_ops fimd_display_ops = {
128 .type = EXYNOS_DISPLAY_TYPE_LCD, 128 .type = EXYNOS_DISPLAY_TYPE_LCD,
129 .is_connected = fimd_display_is_connected, 129 .is_connected = fimd_display_is_connected,
130 .get_timing = fimd_get_timing, 130 .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
177 writel(val, ctx->regs + VIDCON0); 177 writel(val, ctx->regs + VIDCON0);
178} 178}
179 179
180static void fimd_disable(struct device *dev)
181{
182 struct fimd_context *ctx = get_fimd_context(dev);
183 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
184 struct drm_device *drm_dev = subdrv->drm_dev;
185 struct exynos_drm_manager *manager = &subdrv->manager;
186 u32 val;
187
188 DRM_DEBUG_KMS("%s\n", __FILE__);
189
190 /* fimd dma off */
191 val = readl(ctx->regs + VIDCON0);
192 val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
193 writel(val, ctx->regs + VIDCON0);
194
195 /*
	196	 * if vblank is still enabled while dma is being turned off,
	197	 * disable the vsync interrupt here.
198 */
199 if (drm_dev->vblank_enabled[manager->pipe] &&
200 atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
201 drm_vblank_put(drm_dev, manager->pipe);
202
203 /*
	204	 * if vblank_disable_allowed is 0 then disable the
	205	 * vsync interrupt right now; otherwise the vsync interrupt
	206	 * will be disabled by the drm timer once the current process
	207	 * gives up ownership of the vblank event.
208 */
209 if (!drm_dev->vblank_disable_allowed)
210 drm_vblank_off(drm_dev, manager->pipe);
211 }
212}
213
180static int fimd_enable_vblank(struct device *dev) 214static int fimd_enable_vblank(struct device *dev)
181{ 215{
182 struct fimd_context *ctx = get_fimd_context(dev); 216 struct fimd_context *ctx = get_fimd_context(dev);
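A simplified model of the reference-counted vsync enable/disable scheme that fimd_disable() relies on: drm_vblank_put() only drops a reference, and the interrupt is switched off either immediately (when deferred disable is not allowed) or later by a timer. The types and helpers below are made-up stand-ins for the DRM vblank machinery:

#include <stdio.h>
#include <stdbool.h>

struct vblank {
    int refcount;
    bool irq_enabled;
    bool disable_allowed;   /* like drm_dev->vblank_disable_allowed */
};

static void vblank_get(struct vblank *v)
{
    if (v->refcount++ == 0) {
        v->irq_enabled = true;
        printf("vsync irq enabled\n");
    }
}

static void vblank_hw_off(struct vblank *v)
{
    v->irq_enabled = false;
    printf("vsync irq disabled\n");
}

static void vblank_put(struct vblank *v)
{
    if (--v->refcount == 0 && !v->disable_allowed)
        vblank_hw_off(v);   /* disable right now */
    /* otherwise a timer turns it off some time later */
}

static void vblank_timer(struct vblank *v)
{
    if (v->refcount == 0 && v->irq_enabled)
        vblank_hw_off(v);   /* deferred disable path */
}

int main(void)
{
    struct vblank v = { .disable_allowed = false };

    vblank_get(&v);
    vblank_put(&v);     /* immediate disable, as in fimd_disable() */

    v.disable_allowed = true;
    vblank_get(&v);
    vblank_put(&v);     /* stays enabled until ... */
    vblank_timer(&v);   /* ... the timer runs */
    return 0;
}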
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
220 254
221static struct exynos_drm_manager_ops fimd_manager_ops = { 255static struct exynos_drm_manager_ops fimd_manager_ops = {
222 .commit = fimd_commit, 256 .commit = fimd_commit,
257 .disable = fimd_disable,
223 .enable_vblank = fimd_enable_vblank, 258 .enable_vblank = fimd_enable_vblank,
224 .disable_vblank = fimd_disable_vblank, 259 .disable_vblank = fimd_disable_vblank,
225}; 260};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
251 win_data->ovl_height = overlay->crtc_height; 286 win_data->ovl_height = overlay->crtc_height;
252 win_data->fb_width = overlay->fb_width; 287 win_data->fb_width = overlay->fb_width;
253 win_data->fb_height = overlay->fb_height; 288 win_data->fb_height = overlay->fb_height;
254 win_data->paddr = overlay->paddr + offset; 289 win_data->dma_addr = overlay->dma_addr + offset;
255 win_data->vaddr = overlay->vaddr + offset; 290 win_data->vaddr = overlay->vaddr + offset;
256 win_data->bpp = overlay->bpp; 291 win_data->bpp = overlay->bpp;
257 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 292 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
263 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 298 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
264 win_data->ovl_width, win_data->ovl_height); 299 win_data->ovl_width, win_data->ovl_height);
265 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 300 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
266 (unsigned long)win_data->paddr, 301 (unsigned long)win_data->dma_addr,
267 (unsigned long)win_data->vaddr); 302 (unsigned long)win_data->vaddr);
268 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 303 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
269 overlay->fb_width, overlay->crtc_width); 304 overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
376 writel(val, ctx->regs + SHADOWCON); 411 writel(val, ctx->regs + SHADOWCON);
377 412
378 /* buffer start address */ 413 /* buffer start address */
379 val = win_data->paddr; 414 val = (unsigned long)win_data->dma_addr;
380 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 415 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
381 416
382 /* buffer end address */ 417 /* buffer end address */
383 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 418 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
384 val = win_data->paddr + size; 419 val = (unsigned long)(win_data->dma_addr + size);
385 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 420 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
386 421
387 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 422 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
388 (unsigned long)win_data->paddr, val, size); 423 (unsigned long)win_data->dma_addr, val, size);
389 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 424 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
390 win_data->ovl_width, win_data->ovl_height); 425 win_data->ovl_width, win_data->ovl_height);
391 426
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
447static void fimd_win_disable(struct device *dev) 482static void fimd_win_disable(struct device *dev)
448{ 483{
449 struct fimd_context *ctx = get_fimd_context(dev); 484 struct fimd_context *ctx = get_fimd_context(dev);
450 struct fimd_win_data *win_data;
451 int win = ctx->default_win; 485 int win = ctx->default_win;
452 u32 val; 486 u32 val;
453 487
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
456 if (win < 0 || win > WINDOWS_NR) 490 if (win < 0 || win > WINDOWS_NR)
457 return; 491 return;
458 492
459 win_data = &ctx->win_data[win];
460
461 /* protect windows */ 493 /* protect windows */
462 val = readl(ctx->regs + SHADOWCON); 494 val = readl(ctx->regs + SHADOWCON);
463 val |= SHADOWCON_WINx_PROTECT(win); 495 val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
528 /* VSYNC interrupt */ 560 /* VSYNC interrupt */
529 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); 561 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
530 562
563 /*
	564	 * when vblank_disable_allowed is 1, manager->pipe can still be -1
	565	 * here because the disable callback does not turn the vsync
	566	 * interrupt off immediately, so a vsync interrupt can arrive at
	567	 * this point. the vsync interrupt will be disabled by the timer
	568	 * handler later.
569 */
570 if (manager->pipe == -1)
571 return IRQ_HANDLED;
572
531 drm_handle_vblank(drm_dev, manager->pipe); 573 drm_handle_vblank(drm_dev, manager->pipe);
532 fimd_finish_pageflip(drm_dev, manager->pipe); 574 fimd_finish_pageflip(drm_dev, manager->pipe);
533 575
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
548 */ 590 */
549 drm_dev->irq_enabled = 1; 591 drm_dev->irq_enabled = 1;
550 592
551 /*
552 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
553 * by drm timer once a current process gives up ownership of
554 * vblank event.(drm_vblank_put function was called)
555 */
556 drm_dev->vblank_disable_allowed = 1;
557
558 return 0; 593 return 0;
559} 594}
560 595
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
731 subdrv->manager.pipe = -1; 766 subdrv->manager.pipe = -1;
732 subdrv->manager.ops = &fimd_manager_ops; 767 subdrv->manager.ops = &fimd_manager_ops;
733 subdrv->manager.overlay_ops = &fimd_overlay_ops; 768 subdrv->manager.overlay_ops = &fimd_overlay_ops;
734 subdrv->manager.display = &fimd_display; 769 subdrv->manager.display_ops = &fimd_display_ops;
735 subdrv->manager.dev = dev; 770 subdrv->manager.dev = dev;
736 771
737 platform_set_drvdata(pdev, ctx); 772 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906e..aba0fe47f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; 62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
63} 63}
64 64
65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 65static struct exynos_drm_gem_obj
66 struct drm_device *dev, unsigned int size, 66 *exynos_drm_gem_init(struct drm_device *drm_dev,
67 unsigned int *handle) 67 struct drm_file *file_priv, unsigned int *handle,
68 unsigned int size)
68{ 69{
69 struct exynos_drm_gem_obj *exynos_gem_obj; 70 struct exynos_drm_gem_obj *exynos_gem_obj;
70 struct exynos_drm_buf_entry *entry;
71 struct drm_gem_object *obj; 71 struct drm_gem_object *obj;
72 int ret; 72 int ret;
73 73
74 DRM_DEBUG_KMS("%s\n", __FILE__);
75
76 size = roundup(size, PAGE_SIZE);
77
78 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 74 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
79 if (!exynos_gem_obj) { 75 if (!exynos_gem_obj) {
80 DRM_ERROR("failed to allocate exynos gem object.\n"); 76 DRM_ERROR("failed to allocate exynos gem object.\n");
81 return ERR_PTR(-ENOMEM); 77 return ERR_PTR(-ENOMEM);
82 } 78 }
83 79
84 /* allocate the new buffer object and memory region. */
85 entry = exynos_drm_buf_create(dev, size);
86 if (!entry) {
87 kfree(exynos_gem_obj);
88 return ERR_PTR(-ENOMEM);
89 }
90
91 exynos_gem_obj->entry = entry;
92
93 obj = &exynos_gem_obj->base; 80 obj = &exynos_gem_obj->base;
94 81
95 ret = drm_gem_object_init(dev, obj, size); 82 ret = drm_gem_object_init(drm_dev, obj, size);
96 if (ret < 0) { 83 if (ret < 0) {
97 DRM_ERROR("failed to initailize gem object.\n"); 84 DRM_ERROR("failed to initialize gem object.\n");
98 goto err_obj_init; 85 ret = -EINVAL;
86 goto err_object_init;
99 } 87 }
100 88
101 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 89 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
127err_create_mmap_offset: 115err_create_mmap_offset:
128 drm_gem_object_release(obj); 116 drm_gem_object_release(obj);
129 117
130err_obj_init: 118err_object_init:
131 exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
132
133 kfree(exynos_gem_obj); 119 kfree(exynos_gem_obj);
134 120
135 return ERR_PTR(ret); 121 return ERR_PTR(ret);
136} 122}
137 123
124struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
125 struct drm_file *file_priv,
126 unsigned int *handle, unsigned long size)
127{
128
129 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
130 struct exynos_drm_gem_buf *buffer;
131
132 size = roundup(size, PAGE_SIZE);
133
134 DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
135
136 buffer = exynos_drm_buf_create(dev, size);
137 if (IS_ERR(buffer)) {
138 return ERR_CAST(buffer);
139 }
140
141 exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
142 if (IS_ERR(exynos_gem_obj)) {
143 exynos_drm_buf_destroy(dev, buffer);
144 return exynos_gem_obj;
145 }
146
147 exynos_gem_obj->buffer = buffer;
148
149 return exynos_gem_obj;
150}
151
138int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 152int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv) 153 struct drm_file *file_priv)
140{ 154{
141 struct drm_exynos_gem_create *args = data; 155 struct drm_exynos_gem_create *args = data;
142 struct exynos_drm_gem_obj *exynos_gem_obj; 156 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
143 157
144 DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); 158 DRM_DEBUG_KMS("%s\n", __FILE__);
145 159
146 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 160 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
147 &args->handle); 161 &args->handle, args->size);
148 if (IS_ERR(exynos_gem_obj)) 162 if (IS_ERR(exynos_gem_obj))
149 return PTR_ERR(exynos_gem_obj); 163 return PTR_ERR(exynos_gem_obj);
150 164
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
175{ 189{
176 struct drm_gem_object *obj = filp->private_data; 190 struct drm_gem_object *obj = filp->private_data;
177 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 191 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
178 struct exynos_drm_buf_entry *entry; 192 struct exynos_drm_gem_buf *buffer;
179 unsigned long pfn, vm_size; 193 unsigned long pfn, vm_size;
180 194
181 DRM_DEBUG_KMS("%s\n", __FILE__); 195 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
187 201
188 vm_size = vma->vm_end - vma->vm_start; 202 vm_size = vma->vm_end - vma->vm_start;
189 /* 203 /*
 190	 * a entry contains information to physically continuous memory 204	 * a buffer contains information about physically contiguous memory
191 * allocated by user request or at framebuffer creation. 205 * allocated by user request or at framebuffer creation.
192 */ 206 */
193 entry = exynos_gem_obj->entry; 207 buffer = exynos_gem_obj->buffer;
194 208
195 /* check if user-requested size is valid. */ 209 /* check if user-requested size is valid. */
196 if (vm_size > entry->size) 210 if (vm_size > buffer->size)
197 return -EINVAL; 211 return -EINVAL;
198 212
199 /* 213 /*
200 * get page frame number to physical memory to be mapped 214 * get page frame number to physical memory to be mapped
201 * to user space. 215 * to user space.
202 */ 216 */
203 pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; 217 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
204 218
205 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); 219 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
206 220
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
281 295
282 exynos_gem_obj = to_exynos_gem_obj(gem_obj); 296 exynos_gem_obj = to_exynos_gem_obj(gem_obj);
283 297
284 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); 298 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
285 299
286 kfree(exynos_gem_obj); 300 kfree(exynos_gem_obj);
287} 301}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
302 args->pitch = args->width * args->bpp >> 3; 316 args->pitch = args->width * args->bpp >> 3;
303 args->size = args->pitch * args->height; 317 args->size = args->pitch * args->height;
304 318
305 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 319 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
306 &args->handle); 320 args->size);
307 if (IS_ERR(exynos_gem_obj)) 321 if (IS_ERR(exynos_gem_obj))
308 return PTR_ERR(exynos_gem_obj); 322 return PTR_ERR(exynos_gem_obj);
309 323
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
360 374
361 mutex_lock(&dev->struct_mutex); 375 mutex_lock(&dev->struct_mutex);
362 376
363 pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; 377 pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
378 PAGE_SHIFT) + page_offset;
364 379
365 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); 380 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
366 381
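A userspace sketch of the ERR_PTR()/IS_ERR()/ERR_CAST() convention that the reworked exynos_drm_gem_create() builds on: errors travel encoded inside the returned pointer, and a helper that composes two allocations unwinds the first when the second fails. The macros and structures are simplified stand-ins for the kernel ones:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct buffer { unsigned long size; };
struct gem_obj { struct buffer *buffer; };

static struct buffer *buf_create(unsigned long size)
{
    struct buffer *b = malloc(sizeof(*b));
    if (!b)
        return ERR_PTR(-ENOMEM);
    b->size = size;
    return b;
}

static struct gem_obj *gem_create(unsigned long size)
{
    struct buffer *buffer;
    struct gem_obj *obj;

    buffer = buf_create(size);
    if (IS_ERR(buffer))
        return (struct gem_obj *)buffer;    /* like ERR_CAST(buffer) */

    obj = malloc(sizeof(*obj));
    if (!obj) {
        free(buffer);                       /* unwind the first step */
        return ERR_PTR(-ENOMEM);
    }

    obj->buffer = buffer;
    return obj;
}

int main(void)
{
    struct gem_obj *obj = gem_create(4096);

    if (IS_ERR(obj)) {
        printf("gem_create failed: %ld\n", PTR_ERR(obj));
        return 1;
    }
    printf("created object with %lu byte buffer\n", obj->buffer->size);
    free(obj->buffer);
    free(obj);
    return 0;
}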
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277..ef8797334e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
30 struct exynos_drm_gem_obj, base) 30 struct exynos_drm_gem_obj, base)
31 31
32/* 32/*
33 * exynos drm gem buffer structure.
34 *
	35	 * @kvaddr: kernel virtual address of the allocated memory region.
	36	 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
	37	 *	- this address could be a physical address without an IOMMU and
	38	 *	a device address with an IOMMU.
39 * @size: size of allocated memory region.
40 */
41struct exynos_drm_gem_buf {
42 void __iomem *kvaddr;
43 dma_addr_t dma_addr;
44 unsigned long size;
45};
46
47/*
33 * exynos drm buffer structure. 48 * exynos drm buffer structure.
34 * 49 *
35 * @base: a gem object. 50 * @base: a gem object.
36 * - a new handle to this gem object would be created 51 * - a new handle to this gem object would be created
37 * by drm_gem_handle_create(). 52 * by drm_gem_handle_create().
 38	 * @entry: pointer to exynos drm buffer entry object. 53	 * @buffer: a pointer to an exynos_drm_gem_buf object.
 39	 *	- containing the information to physically 54	 *	- contains the information about the memory region allocated
55 * by user request or at framebuffer creation.
40 * continuous memory region allocated by user request 56 * continuous memory region allocated by user request
41 * or at framebuffer creation. 57 * or at framebuffer creation.
42 * 58 *
@@ -45,13 +61,13 @@
45 */ 61 */
46struct exynos_drm_gem_obj { 62struct exynos_drm_gem_obj {
47 struct drm_gem_object base; 63 struct drm_gem_object base;
48 struct exynos_drm_buf_entry *entry; 64 struct exynos_drm_gem_buf *buffer;
49}; 65};
50 66
51/* create a new buffer and get a new gem handle. */ 67/* create a new buffer and get a new gem handle. */
52struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 68struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
53 struct drm_device *dev, unsigned int size, 69 struct drm_file *file_priv,
54 unsigned int *handle); 70 unsigned int *handle, unsigned long size);
55 71
56/* 72/*
57 * request gem object creation and buffer allocation as the size 73 * request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f40f1ce1d8..d09a6e02dc9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
636 struct drm_device *dev = node->minor->dev; 636 struct drm_device *dev = node->minor->dev;
637 drm_i915_private_t *dev_priv = dev->dev_private; 637 drm_i915_private_t *dev_priv = dev->dev_private;
638 struct intel_ring_buffer *ring; 638 struct intel_ring_buffer *ring;
639 int ret;
639 640
640 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 641 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
641 if (ring->size == 0) 642 if (ring->size == 0)
642 return 0; 643 return 0;
643 644
645 ret = mutex_lock_interruptible(&dev->struct_mutex);
646 if (ret)
647 return ret;
648
644 seq_printf(m, "Ring %s:\n", ring->name); 649 seq_printf(m, "Ring %s:\n", ring->name);
645 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 650 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
646 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 651 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
654 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 659 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
655 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 660 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
656 661
662 mutex_unlock(&dev->struct_mutex);
663
657 return 0; 664 return 0;
658} 665}
659 666
@@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
842 struct drm_info_node *node = (struct drm_info_node *) m->private; 849 struct drm_info_node *node = (struct drm_info_node *) m->private;
843 struct drm_device *dev = node->minor->dev; 850 struct drm_device *dev = node->minor->dev;
844 drm_i915_private_t *dev_priv = dev->dev_private; 851 drm_i915_private_t *dev_priv = dev->dev_private;
845 u16 crstanddelay = I915_READ16(CRSTANDVID); 852 u16 crstanddelay;
853 int ret;
854
855 ret = mutex_lock_interruptible(&dev->struct_mutex);
856 if (ret)
857 return ret;
858
859 crstanddelay = I915_READ16(CRSTANDVID);
860
861 mutex_unlock(&dev->struct_mutex);
846 862
847 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 863 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
848 864
@@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
940 struct drm_device *dev = node->minor->dev; 956 struct drm_device *dev = node->minor->dev;
941 drm_i915_private_t *dev_priv = dev->dev_private; 957 drm_i915_private_t *dev_priv = dev->dev_private;
942 u32 delayfreq; 958 u32 delayfreq;
943 int i; 959 int ret, i;
960
961 ret = mutex_lock_interruptible(&dev->struct_mutex);
962 if (ret)
963 return ret;
944 964
945 for (i = 0; i < 16; i++) { 965 for (i = 0; i < 16; i++) {
946 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 966 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
948 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 968 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
949 } 969 }
950 970
971 mutex_unlock(&dev->struct_mutex);
972
951 return 0; 973 return 0;
952} 974}
953 975
@@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
962 struct drm_device *dev = node->minor->dev; 984 struct drm_device *dev = node->minor->dev;
963 drm_i915_private_t *dev_priv = dev->dev_private; 985 drm_i915_private_t *dev_priv = dev->dev_private;
964 u32 inttoext; 986 u32 inttoext;
965 int i; 987 int ret, i;
988
989 ret = mutex_lock_interruptible(&dev->struct_mutex);
990 if (ret)
991 return ret;
966 992
967 for (i = 1; i <= 32; i++) { 993 for (i = 1; i <= 32; i++) {
968 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 994 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
969 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 995 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
970 } 996 }
971 997
998 mutex_unlock(&dev->struct_mutex);
999
972 return 0; 1000 return 0;
973} 1001}
974 1002
@@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
977 struct drm_info_node *node = (struct drm_info_node *) m->private; 1005 struct drm_info_node *node = (struct drm_info_node *) m->private;
978 struct drm_device *dev = node->minor->dev; 1006 struct drm_device *dev = node->minor->dev;
979 drm_i915_private_t *dev_priv = dev->dev_private; 1007 drm_i915_private_t *dev_priv = dev->dev_private;
980 u32 rgvmodectl = I915_READ(MEMMODECTL); 1008 u32 rgvmodectl, rstdbyctl;
981 u32 rstdbyctl = I915_READ(RSTDBYCTL); 1009 u16 crstandvid;
982 u16 crstandvid = I915_READ16(CRSTANDVID); 1010 int ret;
1011
1012 ret = mutex_lock_interruptible(&dev->struct_mutex);
1013 if (ret)
1014 return ret;
1015
1016 rgvmodectl = I915_READ(MEMMODECTL);
1017 rstdbyctl = I915_READ(RSTDBYCTL);
1018 crstandvid = I915_READ16(CRSTANDVID);
1019
1020 mutex_unlock(&dev->struct_mutex);
983 1021
984 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1022 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
985 "yes" : "no"); 1023 "yes" : "no");
@@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1205 struct drm_info_node *node = (struct drm_info_node *) m->private;
1168 struct drm_device *dev = node->minor->dev; 1206 struct drm_device *dev = node->minor->dev;
1169 drm_i915_private_t *dev_priv = dev->dev_private; 1207 drm_i915_private_t *dev_priv = dev->dev_private;
1208 int ret;
1209
1210 ret = mutex_lock_interruptible(&dev->struct_mutex);
1211 if (ret)
1212 return ret;
1170 1213
1171 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1214 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1172 1215
1216 mutex_unlock(&dev->struct_mutex);
1217
1173 return 0; 1218 return 0;
1174} 1219}
1175 1220
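A sketch of the locking pattern added to the debugfs handlers above: take the device mutex with an interruptible lock, bail out with the error if the wait was interrupted, read the registers only under the lock, then unlock before formatting output. The lock and register read below are invented stand-ins, not the real i915 primitives:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct fake_dev {
    bool locked;
    bool signal_pending;    /* pretend a signal can interrupt the lock */
    unsigned int reg;
};

static int fake_mutex_lock_interruptible(struct fake_dev *dev)
{
    if (dev->signal_pending)
        return -EINTR;      /* caller must give up without the lock */
    dev->locked = true;
    return 0;
}

static void fake_mutex_unlock(struct fake_dev *dev) { dev->locked = false; }

static unsigned int read_reg(struct fake_dev *dev) { return dev->reg; }

static int debugfs_show(struct fake_dev *dev)
{
    unsigned int val;
    int ret;

    ret = fake_mutex_lock_interruptible(dev);
    if (ret)
        return ret;         /* propagate -EINTR, do not touch hardware */

    val = read_reg(dev);    /* registers are only read under the lock */

    fake_mutex_unlock(dev);

    printf("reg = 0x%08x\n", val);
    return 0;
}

int main(void)
{
    struct fake_dev dev = { .reg = 0x1234 };

    debugfs_show(&dev);
    dev.signal_pending = true;
    printf("interrupted: %d\n", debugfs_show(&dev));
    return 0;
}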
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9c2cfe45da..15bfa9145d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68MODULE_PARM_DESC(i915_enable_rc6, 68MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6 (default: true)"); 69 "Enable power-saving render C-state 6 (default: true)");
70 70
71unsigned int i915_enable_fbc __read_mostly = -1; 71int i915_enable_fbc __read_mostly = -1;
72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
73MODULE_PARM_DESC(i915_enable_fbc, 73MODULE_PARM_DESC(i915_enable_fbc,
74 "Enable frame buffer compression for power savings " 74 "Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83unsigned int i915_panel_use_ssc __read_mostly = -1; 83int i915_panel_use_ssc __read_mostly = -1;
84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85MODULE_PARM_DESC(lvds_use_ssc, 85MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
107extern int intel_agp_enabled; 107extern int intel_agp_enabled;
108 108
109#define INTEL_VGA_DEVICE(id, info) { \ 109#define INTEL_VGA_DEVICE(id, info) { \
110 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 110 .class = PCI_BASE_CLASS_DISPLAY << 16, \
111 .class_mask = 0xff0000, \ 111 .class_mask = 0xff0000, \
112 .vendor = 0x8086, \ 112 .vendor = 0x8086, \
113 .device = id, \ 113 .device = id, \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74..4a9c1b97980 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,9 @@ struct drm_i915_master_private {
126 struct _drm_i915_sarea *sarea_priv; 126 struct _drm_i915_sarea *sarea_priv;
127}; 127};
128#define I915_FENCE_REG_NONE -1 128#define I915_FENCE_REG_NONE -1
129#define I915_MAX_NUM_FENCES 16
130/* 16 fences + sign bit for FENCE_REG_NONE */
131#define I915_MAX_NUM_FENCE_BITS 5
129 132
130struct drm_i915_fence_reg { 133struct drm_i915_fence_reg {
131 struct list_head lru_list; 134 struct list_head lru_list;
@@ -168,7 +171,7 @@ struct drm_i915_error_state {
168 u32 instdone1; 171 u32 instdone1;
169 u32 seqno; 172 u32 seqno;
170 u64 bbaddr; 173 u64 bbaddr;
171 u64 fence[16]; 174 u64 fence[I915_MAX_NUM_FENCES];
172 struct timeval time; 175 struct timeval time;
173 struct drm_i915_error_object { 176 struct drm_i915_error_object {
174 int page_count; 177 int page_count;
@@ -182,7 +185,7 @@ struct drm_i915_error_state {
182 u32 gtt_offset; 185 u32 gtt_offset;
183 u32 read_domains; 186 u32 read_domains;
184 u32 write_domain; 187 u32 write_domain;
185 s32 fence_reg:5; 188 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
186 s32 pinned:2; 189 s32 pinned:2;
187 u32 tiling:2; 190 u32 tiling:2;
188 u32 dirty:1; 191 u32 dirty:1;
@@ -375,7 +378,7 @@ typedef struct drm_i915_private {
375 struct notifier_block lid_notifier; 378 struct notifier_block lid_notifier;
376 379
377 int crt_ddc_pin; 380 int crt_ddc_pin;
378 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 381 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
379 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 382 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
380 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 383 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
381 384
@@ -506,7 +509,7 @@ typedef struct drm_i915_private {
506 u8 saveAR[21]; 509 u8 saveAR[21];
507 u8 saveDACMASK; 510 u8 saveDACMASK;
508 u8 saveCR[37]; 511 u8 saveCR[37];
509 uint64_t saveFENCE[16]; 512 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
510 u32 saveCURACNTR; 513 u32 saveCURACNTR;
511 u32 saveCURAPOS; 514 u32 saveCURAPOS;
512 u32 saveCURABASE; 515 u32 saveCURABASE;
@@ -777,10 +780,8 @@ struct drm_i915_gem_object {
777 * Fence register bits (if any) for this object. Will be set 780 * Fence register bits (if any) for this object. Will be set
778 * as needed when mapped into the GTT. 781 * as needed when mapped into the GTT.
779 * Protected by dev->struct_mutex. 782 * Protected by dev->struct_mutex.
780 *
781 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
782 */ 783 */
783 signed int fence_reg:5; 784 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
784 785
785 /** 786 /**
786 * Advice: are the backing pages purgeable? 787 * Advice: are the backing pages purgeable?
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly;
999extern unsigned int i915_powersave __read_mostly; 1000extern unsigned int i915_powersave __read_mostly;
1000extern unsigned int i915_semaphores __read_mostly; 1001extern unsigned int i915_semaphores __read_mostly;
1001extern unsigned int i915_lvds_downclock __read_mostly; 1002extern unsigned int i915_lvds_downclock __read_mostly;
1002extern unsigned int i915_panel_use_ssc __read_mostly; 1003extern int i915_panel_use_ssc __read_mostly;
1003extern int i915_vbt_sdvo_panel_type __read_mostly; 1004extern int i915_vbt_sdvo_panel_type __read_mostly;
1004extern unsigned int i915_enable_rc6 __read_mostly; 1005extern unsigned int i915_enable_rc6 __read_mostly;
1005extern unsigned int i915_enable_fbc __read_mostly; 1006extern int i915_enable_fbc __read_mostly;
1006extern bool i915_enable_hangcheck __read_mostly; 1007extern bool i915_enable_hangcheck __read_mostly;
1007 1008
1008extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1009extern int i915_suspend(struct drm_device *dev, pm_message_t state);
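A small example of why I915_MAX_NUM_FENCE_BITS is 5: a signed 5-bit bitfield spans -16..15, which covers the 16 fence registers plus the -1 sentinel used for I915_FENCE_REG_NONE, whereas a 4-bit signed field would stop at 7:

#include <stdio.h>

#define FENCE_REG_NONE -1
#define MAX_NUM_FENCES 16

struct obj {
    signed int fence_reg:5;     /* 16 fences + sign bit for FENCE_REG_NONE */
};

int main(void)
{
    struct obj o;
    int i;

    o.fence_reg = FENCE_REG_NONE;
    printf("none -> %d\n", o.fence_reg);        /* prints -1 */

    for (i = 0; i < MAX_NUM_FENCES; i++) {
        o.fence_reg = i;
        if (o.fence_reg != i)
            printf("fence %d does not fit!\n", i);
    }
    o.fence_reg = MAX_NUM_FENCES - 1;
    printf("max  -> %d\n", o.fence_reg);        /* prints 15 */
    return 0;
}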
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d18b07adcff..8359dc77704 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1745 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1746 int i; 1746 int i;
1747 1747
1748 for (i = 0; i < 16; i++) { 1748 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1750 struct drm_i915_gem_object *obj = reg->obj; 1750 struct drm_i915_gem_object *obj = reg->obj;
1751 1751
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3512 * so emit a request to do so. 3512 * so emit a request to do so.
3513 */ 3513 */
3514 request = kzalloc(sizeof(*request), GFP_KERNEL); 3514 request = kzalloc(sizeof(*request), GFP_KERNEL);
3515 if (request) 3515 if (request) {
3516 ret = i915_add_request(obj->ring, NULL, request); 3516 ret = i915_add_request(obj->ring, NULL, request);
3517 else 3517 if (ret)
3518 kfree(request);
3519 } else
3518 ret = -ENOMEM; 3520 ret = -ENOMEM;
3519 } 3521 }
3520 3522
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3613 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3615 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3614 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3616 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3615 3617
3616 if (IS_GEN6(dev)) { 3618 if (IS_GEN6(dev) || IS_GEN7(dev)) {
3617 /* On Gen6, we can have the GPU use the LLC (the CPU 3619 /* On Gen6, we can have the GPU use the LLC (the CPU
3618 * cache) for about a 10% performance improvement 3620 * cache) for about a 10% performance improvement
3619 * compared to uncached. Graphics requests other than 3621 * compared to uncached. Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
3877 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3879 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3878 for (i = 0; i < I915_NUM_RINGS; i++) 3880 for (i = 0; i < I915_NUM_RINGS; i++)
3879 init_ring_lists(&dev_priv->ring[i]); 3881 init_ring_lists(&dev_priv->ring[i]);
3880 for (i = 0; i < 16; i++) 3882 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3881 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3883 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3882 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3884 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3883 i915_gem_retire_work_handler); 3885 i915_gem_retire_work_handler);
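A sketch of the ownership rule behind the i915_gem_busy_ioctl() fix above: the callee takes ownership of the request only when it succeeds, so the caller must free the allocation itself on failure. add_request() and the queue below are invented stand-ins for the ring's request handling:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct request { int seqno; };

static struct request *queue;   /* stand-in for the ring's request list */

static int add_request(struct request *req, int should_fail)
{
    if (should_fail)
        return -EIO;            /* ownership stays with the caller */
    req->seqno = 42;
    queue = req;                /* success: the "ring" now owns req */
    return 0;
}

static int flush_busy_object(int should_fail)
{
    struct request *request;
    int ret;

    request = calloc(1, sizeof(*request));
    if (request) {
        ret = add_request(request, should_fail);
        if (ret)
            free(request);      /* the fix: don't leak on failure */
    } else {
        ret = -ENOMEM;
    }
    return ret;
}

int main(void)
{
    printf("success path: %d\n", flush_busy_object(0));
    free(queue);                /* the ring releases it later */
    printf("failure path: %d\n", flush_busy_object(1));
    return 0;
}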
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ee2729fe5c..b40004b5597 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
824 824
825 /* Fences */ 825 /* Fences */
826 switch (INTEL_INFO(dev)->gen) { 826 switch (INTEL_INFO(dev)->gen) {
827 case 7:
827 case 6: 828 case 6:
828 for (i = 0; i < 16; i++) 829 for (i = 0; i < 16; i++)
829 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 830 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a09416e611..b080cc82400 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
1553 */ 1553 */
1554#define PP_READY (1 << 30) 1554#define PP_READY (1 << 30)
1555#define PP_SEQUENCE_NONE (0 << 28) 1555#define PP_SEQUENCE_NONE (0 << 28)
1556#define PP_SEQUENCE_ON (1 << 28) 1556#define PP_SEQUENCE_POWER_UP (1 << 28)
1557#define PP_SEQUENCE_OFF (2 << 28) 1557#define PP_SEQUENCE_POWER_DOWN (2 << 28)
1558#define PP_SEQUENCE_MASK 0x30000000 1558#define PP_SEQUENCE_MASK (3 << 28)
1559#define PP_SEQUENCE_SHIFT 28
1559#define PP_CYCLE_DELAY_ACTIVE (1 << 27) 1560#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1560#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1561#define PP_SEQUENCE_STATE_MASK 0x0000000f 1561#define PP_SEQUENCE_STATE_MASK 0x0000000f
1562#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
1563#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
1564#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
1565#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
1566#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
1567#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
1568#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
1569#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
1570#define PP_SEQUENCE_STATE_RESET (0xf << 0)
1562#define PP_CONTROL 0x61204 1571#define PP_CONTROL 0x61204
1563#define POWER_TARGET_ON (1 << 0) 1572#define POWER_TARGET_ON (1 << 0)
1564#define PP_ON_DELAYS 0x61208 1573#define PP_ON_DELAYS 0x61208
@@ -3444,6 +3453,10 @@
3444#define GT_FIFO_FREE_ENTRIES 0x120008 3453#define GT_FIFO_FREE_ENTRIES 0x120008
3445#define GT_FIFO_NUM_RESERVED_ENTRIES 20 3454#define GT_FIFO_NUM_RESERVED_ENTRIES 20
3446 3455
3456#define GEN6_UCGCTL2 0x9404
3457# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
3458# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
3459
3447#define GEN6_RPNSWREQ 0xA008 3460#define GEN6_RPNSWREQ 0xA008
3448#define GEN6_TURBO_DISABLE (1<<31) 3461#define GEN6_TURBO_DISABLE (1<<31)
3449#define GEN6_FREQUENCY(x) ((x)<<25) 3462#define GEN6_FREQUENCY(x) ((x)<<25)
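A short example of decoding a panel-power status word with the reworked PP_SEQUENCE_* defines above: the 2-bit sequence field sits at bits 29:28 and the 4-bit sequencer state at bits 3:0. The status value used here is made up for illustration:

#include <stdio.h>

#define PP_SEQUENCE_NONE            (0 << 28)
#define PP_SEQUENCE_POWER_UP        (1 << 28)
#define PP_SEQUENCE_POWER_DOWN      (2 << 28)
#define PP_SEQUENCE_MASK            (3 << 28)
#define PP_SEQUENCE_STATE_MASK      0x0000000f
#define PP_SEQUENCE_STATE_ON_IDLE   (0x8 << 0)

int main(void)
{
    /* made-up status value: sequencer idle, panel in the ON_IDLE state */
    unsigned int status = PP_SEQUENCE_NONE | PP_SEQUENCE_STATE_ON_IDLE;

    switch (status & PP_SEQUENCE_MASK) {
    case PP_SEQUENCE_NONE:       printf("sequencer idle\n"); break;
    case PP_SEQUENCE_POWER_UP:   printf("powering up\n");    break;
    case PP_SEQUENCE_POWER_DOWN: printf("powering down\n");  break;
    }

    if ((status & PP_SEQUENCE_STATE_MASK) == PP_SEQUENCE_STATE_ON_IDLE)
        printf("panel state: on/idle\n");
    return 0;
}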
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d7665..7886e4fb60e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
370 370
371 /* Fences */ 371 /* Fences */
372 switch (INTEL_INFO(dev)->gen) { 372 switch (INTEL_INFO(dev)->gen) {
373 case 7:
373 case 6: 374 case 6:
374 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
375 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 405
405 /* Fences */ 406 /* Fences */
406 switch (INTEL_INFO(dev)->gen) { 407 switch (INTEL_INFO(dev)->gen) {
408 case 7:
407 case 6: 409 case 6:
408 for (i = 0; i < 16; i++) 410 for (i = 0; i < 16; i++)
409 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 411 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 981b1f1c04d..e77a863a383 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2933 2933
2934 /* For PCH DP, enable TRANS_DP_CTL */ 2934 /* For PCH DP, enable TRANS_DP_CTL */
2935 if (HAS_PCH_CPT(dev) && 2935 if (HAS_PCH_CPT(dev) &&
2936 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2936 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2937 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2937 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2938 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2938 reg = TRANS_DP_CTL(pipe); 2939 reg = TRANS_DP_CTL(pipe);
2939 temp = I915_READ(reg); 2940 temp = I915_READ(reg);
@@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4711 lvds_bpc = 6; 4712 lvds_bpc = 6;
4712 4713
4713 if (lvds_bpc < display_bpc) { 4714 if (lvds_bpc < display_bpc) {
4714 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 4715 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4715 display_bpc = lvds_bpc; 4716 display_bpc = lvds_bpc;
4716 } 4717 }
4717 continue; 4718 continue;
@@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4722 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 4723 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4723 4724
4724 if (edp_bpc < display_bpc) { 4725 if (edp_bpc < display_bpc) {
4725 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 4726 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4726 display_bpc = edp_bpc; 4727 display_bpc = edp_bpc;
4727 } 4728 }
4728 continue; 4729 continue;
@@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4737 /* Don't use an invalid EDID bpc value */ 4738 /* Don't use an invalid EDID bpc value */
4738 if (connector->display_info.bpc && 4739 if (connector->display_info.bpc &&
4739 connector->display_info.bpc < display_bpc) { 4740 connector->display_info.bpc < display_bpc) {
4740 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4741 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4741 display_bpc = connector->display_info.bpc; 4742 display_bpc = connector->display_info.bpc;
4742 } 4743 }
4743 } 4744 }
@@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4748 */ 4749 */
4749 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 4750 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4750 if (display_bpc > 8 && display_bpc < 12) { 4751 if (display_bpc > 8 && display_bpc < 12) {
4751 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); 4752 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4752 display_bpc = 12; 4753 display_bpc = 12;
4753 } else { 4754 } else {
4754 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); 4755 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4755 display_bpc = 8; 4756 display_bpc = 8;
4756 } 4757 }
4757 } 4758 }
@@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4789 4790
4790 display_bpc = min(display_bpc, bpc); 4791 display_bpc = min(display_bpc, bpc);
4791 4792
4792 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4793 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4793 bpc, display_bpc); 4794 bpc, display_bpc);
4794 4795
4795 *pipe_bpp = display_bpc * 3; 4796 *pipe_bpp = display_bpc * 3;
4796 4797
@@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5671 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5672 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5672 if ((is_lvds && dev_priv->lvds_dither) || dither) { 5673 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5673 pipeconf |= PIPECONF_DITHER_EN; 5674 pipeconf |= PIPECONF_DITHER_EN;
5674 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5675 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5675 } 5676 }
5676 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5677 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5677 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5678 intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
8148 I915_WRITE(WM2_LP_ILK, 0); 8149 I915_WRITE(WM2_LP_ILK, 0);
8149 I915_WRITE(WM1_LP_ILK, 0); 8150 I915_WRITE(WM1_LP_ILK, 0);
8150 8151
8152 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8153 * gating disable must be set. Failure to set it results in
8154 * flickering pixels due to Z write ordering failures after
8155 * some amount of runtime in the Mesa "fire" demo, and Unigine
8156 * Sanctuary and Tropics, and apparently anything else with
8157 * alpha test or pixel discard.
8158 *
8159 * According to the spec, bit 11 (RCCUNIT) must also be set,
8160 * but we didn't debug actual testcases to find it out.
8161 */
8162 I915_WRITE(GEN6_UCGCTL2,
8163 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8164 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8165
8151 /* 8166 /*
8152 * According to the spec the following bits should be 8167 * According to the spec the following bits should be
8153 * set in order to enable memory self-refresh and fbc: 8168 * set in order to enable memory self-refresh and fbc:
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227..4d0358fad93 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61 uint8_t train_set[4]; 61 uint8_t train_set[4];
62 uint8_t link_status[DP_LINK_STATUS_SIZE];
63 int panel_power_up_delay; 62 int panel_power_up_delay;
64 int panel_power_down_delay; 63 int panel_power_down_delay;
65 int panel_power_cycle_delay; 64 int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
68 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 67 struct drm_display_mode *panel_fixed_mode; /* for eDP */
69 struct delayed_work panel_vdd_work; 68 struct delayed_work panel_vdd_work;
70 bool want_panel_vdd; 69 bool want_panel_vdd;
71 unsigned long panel_off_jiffies;
72}; 70};
73 71
74/** 72/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
157static int 155static int
158intel_dp_max_lane_count(struct intel_dp *intel_dp) 156intel_dp_max_lane_count(struct intel_dp *intel_dp)
159{ 157{
160 int max_lane_count = 4; 158 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
161 159 switch (max_lane_count) {
162 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 160 case 1: case 2: case 4:
163 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 161 break;
164 switch (max_lane_count) { 162 default:
165 case 1: case 2: case 4: 163 max_lane_count = 4;
166 break;
167 default:
168 max_lane_count = 4;
169 }
170 } 164 }
171 return max_lane_count; 165 return max_lane_count;
172} 166}
@@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
768 continue; 762 continue;
769 763
770 intel_dp = enc_to_intel_dp(encoder); 764 intel_dp = enc_to_intel_dp(encoder);
771 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 765 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
766 intel_dp->base.type == INTEL_OUTPUT_EDP)
767 {
772 lane_count = intel_dp->lane_count; 768 lane_count = intel_dp->lane_count;
773 break; 769 break;
774 } else if (is_edp(intel_dp)) {
775 lane_count = dev_priv->edp.lanes;
776 break;
777 } 770 }
778 } 771 }
779 772
@@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
810 struct drm_display_mode *adjusted_mode) 803 struct drm_display_mode *adjusted_mode)
811{ 804{
812 struct drm_device *dev = encoder->dev; 805 struct drm_device *dev = encoder->dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
813 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 807 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
814 struct drm_crtc *crtc = intel_dp->base.base.crtc; 808 struct drm_crtc *crtc = intel_dp->base.base.crtc;
815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 809 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
822 ironlake_edp_pll_off(encoder); 816 ironlake_edp_pll_off(encoder);
823 } 817 }
824 818
825 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 819 /*
826 intel_dp->DP |= intel_dp->color_range; 820 * There are three kinds of DP registers:
821 *
822 * IBX PCH
823 * CPU
824 * CPT PCH
825 *
826 * IBX PCH and CPU are the same for almost everything,
827 * except that the CPU DP PLL is configured in this
828 * register
829 *
830 * CPT PCH is quite different, having many bits moved
831 * to the TRANS_DP_CTL register instead. That
832 * configuration happens (oddly) in ironlake_pch_enable
833 */
827 834
828 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 835 /* Preserve the BIOS-computed detected bit. This is
829 intel_dp->DP |= DP_SYNC_HS_HIGH; 836 * supposed to be read-only.
830 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 837 */
831 intel_dp->DP |= DP_SYNC_VS_HIGH; 838 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
839 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
832 840
833 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 841 /* Handle DP bits in common between all three register formats */
834 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 842
835 else 843 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
836 intel_dp->DP |= DP_LINK_TRAIN_OFF;
837 844
838 switch (intel_dp->lane_count) { 845 switch (intel_dp->lane_count) {
839 case 1: 846 case 1:
@@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 859 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
853 intel_write_eld(encoder, adjusted_mode); 860 intel_write_eld(encoder, adjusted_mode);
854 } 861 }
855
856 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 862 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
857 intel_dp->link_configuration[0] = intel_dp->link_bw; 863 intel_dp->link_configuration[0] = intel_dp->link_bw;
858 intel_dp->link_configuration[1] = intel_dp->lane_count; 864 intel_dp->link_configuration[1] = intel_dp->lane_count;
859 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 865 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
860
861 /* 866 /*
862 * Check for DPCD version > 1.1 and enhanced framing support 867 * Check for DPCD version > 1.1 and enhanced framing support
863 */ 868 */
864 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 869 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
865 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 870 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
866 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 871 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
867 intel_dp->DP |= DP_ENHANCED_FRAMING;
868 } 872 }
869 873
870 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 874 /* Split out the IBX/CPU vs CPT settings */
871 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
872 intel_dp->DP |= DP_PIPEB_SELECT;
873 875
874 if (is_cpu_edp(intel_dp)) { 876 if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
875 /* don't miss out required setting for eDP */ 877 intel_dp->DP |= intel_dp->color_range;
876 intel_dp->DP |= DP_PLL_ENABLE; 878
877 if (adjusted_mode->clock < 200000) 879 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
878 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 880 intel_dp->DP |= DP_SYNC_HS_HIGH;
879 else 881 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
880 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 882 intel_dp->DP |= DP_SYNC_VS_HIGH;
883 intel_dp->DP |= DP_LINK_TRAIN_OFF;
884
885 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
886 intel_dp->DP |= DP_ENHANCED_FRAMING;
887
888 if (intel_crtc->pipe == 1)
889 intel_dp->DP |= DP_PIPEB_SELECT;
890
891 if (is_cpu_edp(intel_dp)) {
892 /* don't miss out required setting for eDP */
893 intel_dp->DP |= DP_PLL_ENABLE;
894 if (adjusted_mode->clock < 200000)
895 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
896 else
897 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
898 }
899 } else {
900 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
881 } 901 }
882} 902}
883 903
884static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 904#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
905#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
906
907#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
908#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
909
910#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
911#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
912
913static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
914 u32 mask,
915 u32 value)
885{ 916{
886 unsigned long off_time; 917 struct drm_device *dev = intel_dp->base.base.dev;
887 unsigned long delay; 918 struct drm_i915_private *dev_priv = dev->dev_private;
888 919
889 DRM_DEBUG_KMS("Wait for panel power off time\n"); 920 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
921 mask, value,
922 I915_READ(PCH_PP_STATUS),
923 I915_READ(PCH_PP_CONTROL));
890 924
891 if (ironlake_edp_have_panel_power(intel_dp) || 925 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
892 ironlake_edp_have_panel_vdd(intel_dp)) 926 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
893 { 927 I915_READ(PCH_PP_STATUS),
894 DRM_DEBUG_KMS("Panel still on, no delay needed\n"); 928 I915_READ(PCH_PP_CONTROL));
895 return;
896 } 929 }
930}
897 931
898 off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); 932static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
899 if (time_after(jiffies, off_time)) { 933{
900 DRM_DEBUG_KMS("Time already passed"); 934 DRM_DEBUG_KMS("Wait for panel power on\n");
901 return; 935 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
902 } 936}
903 delay = jiffies_to_msecs(off_time - jiffies); 937
904 if (delay > intel_dp->panel_power_down_delay) 938static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
905 delay = intel_dp->panel_power_down_delay; 939{
906 DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); 940 DRM_DEBUG_KMS("Wait for panel power off time\n");
907 msleep(delay); 941 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
942}
943
944static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
945{
946 DRM_DEBUG_KMS("Wait for panel power cycle\n");
947 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
948}
949
950
951/* Read the current pp_control value, unlocking the register if it
952 * is locked
953 */
954
955static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
956{
957 u32 control = I915_READ(PCH_PP_CONTROL);
958
959 control &= ~PANEL_UNLOCK_MASK;
960 control |= PANEL_UNLOCK_REGS;
961 return control;
908} 962}
909 963
910static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 964static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
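A userspace sketch of the (status & mask) == value polling idiom behind ironlake_wait_panel_status(): keep reading a status word until the masked bits match the expected value or a poll budget runs out. The fake register below stands in for PCH_PP_STATUS, and a real driver would sleep between polls rather than spin:

#include <stdio.h>
#include <stdbool.h>

static unsigned int fake_status;

static unsigned int read_status(void)
{
    /* pretend the hardware finishes its power sequence after a while */
    static int calls;
    if (++calls > 3)
        fake_status = 0x80000008;   /* made-up "on and idle" pattern */
    return fake_status;
}

static bool wait_for_status(unsigned int mask, unsigned int value,
                            int poll_budget)
{
    int i;

    for (i = 0; i < poll_budget; i++) {
        if ((read_status() & mask) == value)
            return true;
        /* a real driver would sleep/udelay here between polls */
    }
    return false;
}

int main(void)
{
    const unsigned int mask  = 0x8000000f;
    const unsigned int value = 0x80000008;

    if (wait_for_status(mask, value, 10))
        printf("panel reached the expected state\n");
    else
        printf("timeout: status 0x%08x\n", fake_status);
    return 0;
}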
@@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
921 "eDP VDD already requested on\n"); 975 "eDP VDD already requested on\n");
922 976
923 intel_dp->want_panel_vdd = true; 977 intel_dp->want_panel_vdd = true;
978
924 if (ironlake_edp_have_panel_vdd(intel_dp)) { 979 if (ironlake_edp_have_panel_vdd(intel_dp)) {
925 DRM_DEBUG_KMS("eDP VDD already on\n"); 980 DRM_DEBUG_KMS("eDP VDD already on\n");
926 return; 981 return;
927 } 982 }
928 983
929 ironlake_wait_panel_off(intel_dp); 984 if (!ironlake_edp_have_panel_power(intel_dp))
930 pp = I915_READ(PCH_PP_CONTROL); 985 ironlake_wait_panel_power_cycle(intel_dp);
931 pp &= ~PANEL_UNLOCK_MASK; 986
932 pp |= PANEL_UNLOCK_REGS; 987 pp = ironlake_get_pp_control(dev_priv);
933 pp |= EDP_FORCE_VDD; 988 pp |= EDP_FORCE_VDD;
934 I915_WRITE(PCH_PP_CONTROL, pp); 989 I915_WRITE(PCH_PP_CONTROL, pp);
935 POSTING_READ(PCH_PP_CONTROL); 990 POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
952 u32 pp; 1007 u32 pp;
953 1008
954 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1009 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
955 pp = I915_READ(PCH_PP_CONTROL); 1010 pp = ironlake_get_pp_control(dev_priv);
956 pp &= ~PANEL_UNLOCK_MASK;
957 pp |= PANEL_UNLOCK_REGS;
958 pp &= ~EDP_FORCE_VDD; 1011 pp &= ~EDP_FORCE_VDD;
959 I915_WRITE(PCH_PP_CONTROL, pp); 1012 I915_WRITE(PCH_PP_CONTROL, pp);
960 POSTING_READ(PCH_PP_CONTROL); 1013 POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
962 /* Make sure sequencer is idle before allowing subsequent activity */ 1015 /* Make sure sequencer is idle before allowing subsequent activity */
963 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1016 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
964 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1017 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
965 intel_dp->panel_off_jiffies = jiffies; 1018
1019 msleep(intel_dp->panel_power_down_delay);
966 } 1020 }
967} 1021}
968 1022
@@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
972 struct intel_dp, panel_vdd_work); 1026 struct intel_dp, panel_vdd_work);
973 struct drm_device *dev = intel_dp->base.base.dev; 1027 struct drm_device *dev = intel_dp->base.base.dev;
974 1028
975 mutex_lock(&dev->struct_mutex); 1029 mutex_lock(&dev->mode_config.mutex);
976 ironlake_panel_vdd_off_sync(intel_dp); 1030 ironlake_panel_vdd_off_sync(intel_dp);
977 mutex_unlock(&dev->struct_mutex); 1031 mutex_unlock(&dev->mode_config.mutex);
978} 1032}
979 1033
980static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1034static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
984 1038
985 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1039 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
986 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1040 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
987 1041
988 intel_dp->want_panel_vdd = false; 1042 intel_dp->want_panel_vdd = false;
989 1043
990 if (sync) { 1044 if (sync) {
@@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1000 } 1054 }
1001} 1055}
1002 1056
1003/* Returns true if the panel was already on when called */
1004static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1057static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1005{ 1058{
1006 struct drm_device *dev = intel_dp->base.base.dev; 1059 struct drm_device *dev = intel_dp->base.base.dev;
1007 struct drm_i915_private *dev_priv = dev->dev_private; 1060 struct drm_i915_private *dev_priv = dev->dev_private;
1008 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; 1061 u32 pp;
1009 1062
1010 if (!is_edp(intel_dp)) 1063 if (!is_edp(intel_dp))
1011 return; 1064 return;
1012 if (ironlake_edp_have_panel_power(intel_dp)) 1065
1066 DRM_DEBUG_KMS("Turn eDP power on\n");
1067
1068 if (ironlake_edp_have_panel_power(intel_dp)) {
1069 DRM_DEBUG_KMS("eDP power already on\n");
1013 return; 1070 return;
1071 }
1014 1072
1015 ironlake_wait_panel_off(intel_dp); 1073 ironlake_wait_panel_power_cycle(intel_dp);
1016 pp = I915_READ(PCH_PP_CONTROL);
1017 pp &= ~PANEL_UNLOCK_MASK;
1018 pp |= PANEL_UNLOCK_REGS;
1019 1074
1075 pp = ironlake_get_pp_control(dev_priv);
1020 if (IS_GEN5(dev)) { 1076 if (IS_GEN5(dev)) {
1021 /* ILK workaround: disable reset around power sequence */ 1077 /* ILK workaround: disable reset around power sequence */
1022 pp &= ~PANEL_POWER_RESET; 1078 pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1025 } 1081 }
1026 1082
1027 pp |= POWER_TARGET_ON; 1083 pp |= POWER_TARGET_ON;
1084 if (!IS_GEN5(dev))
1085 pp |= PANEL_POWER_RESET;
1086
1028 I915_WRITE(PCH_PP_CONTROL, pp); 1087 I915_WRITE(PCH_PP_CONTROL, pp);
1029 POSTING_READ(PCH_PP_CONTROL); 1088 POSTING_READ(PCH_PP_CONTROL);
1030 1089
1031 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 1090 ironlake_wait_panel_on(intel_dp);
1032 5000))
1033 DRM_ERROR("panel on wait timed out: 0x%08x\n",
1034 I915_READ(PCH_PP_STATUS));
1035 1091
1036 if (IS_GEN5(dev)) { 1092 if (IS_GEN5(dev)) {
1037 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1093 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1040 } 1096 }
1041} 1097}
1042 1098
1043static void ironlake_edp_panel_off(struct drm_encoder *encoder) 1099static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1044{ 1100{
1045 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1101 struct drm_device *dev = intel_dp->base.base.dev;
1046 struct drm_device *dev = encoder->dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private; 1102 struct drm_i915_private *dev_priv = dev->dev_private;
1048 u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | 1103 u32 pp;
1049 PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
1050 1104
1051 if (!is_edp(intel_dp)) 1105 if (!is_edp(intel_dp))
1052 return; 1106 return;
1053 pp = I915_READ(PCH_PP_CONTROL);
1054 pp &= ~PANEL_UNLOCK_MASK;
1055 pp |= PANEL_UNLOCK_REGS;
1056 1107
1057 if (IS_GEN5(dev)) { 1108 DRM_DEBUG_KMS("Turn eDP power off\n");
1058 /* ILK workaround: disable reset around power sequence */
1059 pp &= ~PANEL_POWER_RESET;
1060 I915_WRITE(PCH_PP_CONTROL, pp);
1061 POSTING_READ(PCH_PP_CONTROL);
1062 }
1063 1109
1064 intel_dp->panel_off_jiffies = jiffies; 1110 WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
1065 1111
1066 if (IS_GEN5(dev)) { 1112 pp = ironlake_get_pp_control(dev_priv);
1067 pp &= ~POWER_TARGET_ON; 1113 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1068 I915_WRITE(PCH_PP_CONTROL, pp); 1114 I915_WRITE(PCH_PP_CONTROL, pp);
1069 POSTING_READ(PCH_PP_CONTROL); 1115 POSTING_READ(PCH_PP_CONTROL);
1070 pp &= ~POWER_TARGET_ON;
1071 I915_WRITE(PCH_PP_CONTROL, pp);
1072 POSTING_READ(PCH_PP_CONTROL);
1073 msleep(intel_dp->panel_power_cycle_delay);
1074
1075 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
1076 DRM_ERROR("panel off wait timed out: 0x%08x\n",
1077 I915_READ(PCH_PP_STATUS));
1078 1116
1079 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1117 ironlake_wait_panel_off(intel_dp);
1080 I915_WRITE(PCH_PP_CONTROL, pp);
1081 POSTING_READ(PCH_PP_CONTROL);
1082 }
1083} 1118}
1084 1119
1085static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1120static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1099 * allowing it to appear. 1134 * allowing it to appear.
1100 */ 1135 */
1101 msleep(intel_dp->backlight_on_delay); 1136 msleep(intel_dp->backlight_on_delay);
1102 pp = I915_READ(PCH_PP_CONTROL); 1137 pp = ironlake_get_pp_control(dev_priv);
1103 pp &= ~PANEL_UNLOCK_MASK;
1104 pp |= PANEL_UNLOCK_REGS;
1105 pp |= EDP_BLC_ENABLE; 1138 pp |= EDP_BLC_ENABLE;
1106 I915_WRITE(PCH_PP_CONTROL, pp); 1139 I915_WRITE(PCH_PP_CONTROL, pp);
1107 POSTING_READ(PCH_PP_CONTROL); 1140 POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1117 return; 1150 return;
1118 1151
1119 DRM_DEBUG_KMS("\n"); 1152 DRM_DEBUG_KMS("\n");
1120 pp = I915_READ(PCH_PP_CONTROL); 1153 pp = ironlake_get_pp_control(dev_priv);
1121 pp &= ~PANEL_UNLOCK_MASK;
1122 pp |= PANEL_UNLOCK_REGS;
1123 pp &= ~EDP_BLC_ENABLE; 1154 pp &= ~EDP_BLC_ENABLE;
1124 I915_WRITE(PCH_PP_CONTROL, pp); 1155 I915_WRITE(PCH_PP_CONTROL, pp);
1125 POSTING_READ(PCH_PP_CONTROL); 1156 POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
1187{ 1218{
1188 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1219 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1189 1220
1221 ironlake_edp_backlight_off(intel_dp);
1222 ironlake_edp_panel_off(intel_dp);
1223
1190 /* Wake up the sink first */ 1224 /* Wake up the sink first */
1191 ironlake_edp_panel_vdd_on(intel_dp); 1225 ironlake_edp_panel_vdd_on(intel_dp);
1192 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1226 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1227 intel_dp_link_down(intel_dp);
1193 ironlake_edp_panel_vdd_off(intel_dp, false); 1228 ironlake_edp_panel_vdd_off(intel_dp, false);
1194 1229
1195 /* Make sure the panel is off before trying to 1230 /* Make sure the panel is off before trying to
1196 * change the mode 1231 * change the mode
1197 */ 1232 */
1198 ironlake_edp_backlight_off(intel_dp);
1199 intel_dp_link_down(intel_dp);
1200 ironlake_edp_panel_off(encoder);
1201} 1233}
1202 1234
1203static void intel_dp_commit(struct drm_encoder *encoder) 1235static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1211 intel_dp_start_link_train(intel_dp); 1243 intel_dp_start_link_train(intel_dp);
1212 ironlake_edp_panel_on(intel_dp); 1244 ironlake_edp_panel_on(intel_dp);
1213 ironlake_edp_panel_vdd_off(intel_dp, true); 1245 ironlake_edp_panel_vdd_off(intel_dp, true);
1214
1215 intel_dp_complete_link_train(intel_dp); 1246 intel_dp_complete_link_train(intel_dp);
1216 ironlake_edp_backlight_on(intel_dp); 1247 ironlake_edp_backlight_on(intel_dp);
1217 1248
@@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1230 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1261 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1231 1262
1232 if (mode != DRM_MODE_DPMS_ON) { 1263 if (mode != DRM_MODE_DPMS_ON) {
1264 ironlake_edp_backlight_off(intel_dp);
1265 ironlake_edp_panel_off(intel_dp);
1266
1233 ironlake_edp_panel_vdd_on(intel_dp); 1267 ironlake_edp_panel_vdd_on(intel_dp);
1234 if (is_edp(intel_dp))
1235 ironlake_edp_backlight_off(intel_dp);
1236 intel_dp_sink_dpms(intel_dp, mode); 1268 intel_dp_sink_dpms(intel_dp, mode);
1237 intel_dp_link_down(intel_dp); 1269 intel_dp_link_down(intel_dp);
1238 ironlake_edp_panel_off(encoder);
1239 if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
1240 ironlake_edp_pll_off(encoder);
1241 ironlake_edp_panel_vdd_off(intel_dp, false); 1270 ironlake_edp_panel_vdd_off(intel_dp, false);
1271
1272 if (is_cpu_edp(intel_dp))
1273 ironlake_edp_pll_off(encoder);
1242 } else { 1274 } else {
1275 if (is_cpu_edp(intel_dp))
1276 ironlake_edp_pll_on(encoder);
1277
1243 ironlake_edp_panel_vdd_on(intel_dp); 1278 ironlake_edp_panel_vdd_on(intel_dp);
1244 intel_dp_sink_dpms(intel_dp, mode); 1279 intel_dp_sink_dpms(intel_dp, mode);
1245 if (!(dp_reg & DP_PORT_EN)) { 1280 if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1247 ironlake_edp_panel_on(intel_dp); 1282 ironlake_edp_panel_on(intel_dp);
1248 ironlake_edp_panel_vdd_off(intel_dp, true); 1283 ironlake_edp_panel_vdd_off(intel_dp, true);
1249 intel_dp_complete_link_train(intel_dp); 1284 intel_dp_complete_link_train(intel_dp);
1250 ironlake_edp_backlight_on(intel_dp);
1251 } else 1285 } else
1252 ironlake_edp_panel_vdd_off(intel_dp, false); 1286 ironlake_edp_panel_vdd_off(intel_dp, false);
1253 ironlake_edp_backlight_on(intel_dp); 1287 ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1285 * link status information 1319 * link status information
1286 */ 1320 */
1287static bool 1321static bool
1288intel_dp_get_link_status(struct intel_dp *intel_dp) 1322intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1289{ 1323{
1290 return intel_dp_aux_native_read_retry(intel_dp, 1324 return intel_dp_aux_native_read_retry(intel_dp,
1291 DP_LANE0_1_STATUS, 1325 DP_LANE0_1_STATUS,
1292 intel_dp->link_status, 1326 link_status,
1293 DP_LINK_STATUS_SIZE); 1327 DP_LINK_STATUS_SIZE);
1294} 1328}
1295 1329
@@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1301} 1335}
1302 1336
1303static uint8_t 1337static uint8_t
1304intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 1338intel_get_adjust_request_voltage(uint8_t adjust_request[2],
1305 int lane) 1339 int lane)
1306{ 1340{
1307 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1308 int s = ((lane & 1) ? 1341 int s = ((lane & 1) ?
1309 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1342 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1310 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1343 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1311 uint8_t l = intel_dp_link_status(link_status, i); 1344 uint8_t l = adjust_request[lane>>1];
1312 1345
1313 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1346 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1314} 1347}
1315 1348
1316static uint8_t 1349static uint8_t
1317intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 1350intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
1318 int lane) 1351 int lane)
1319{ 1352{
1320 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1321 int s = ((lane & 1) ? 1353 int s = ((lane & 1) ?
1322 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1354 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1323 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1355 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1324 uint8_t l = intel_dp_link_status(link_status, i); 1356 uint8_t l = adjust_request[lane>>1];
1325 1357
1326 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1358 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1327} 1359}
@@ -1344,6 +1376,7 @@ static char *link_train_names[] = {
1344 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1376 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1345 */ 1377 */
1346#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 1378#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
1379#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
1347 1380
1348static uint8_t 1381static uint8_t
1349intel_dp_pre_emphasis_max(uint8_t voltage_swing) 1382intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1362} 1395}
1363 1396
1364static void 1397static void
1365intel_get_adjust_train(struct intel_dp *intel_dp) 1398intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1366{ 1399{
1400 struct drm_device *dev = intel_dp->base.base.dev;
1367 uint8_t v = 0; 1401 uint8_t v = 0;
1368 uint8_t p = 0; 1402 uint8_t p = 0;
1369 int lane; 1403 int lane;
1404 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1405 int voltage_max;
1370 1406
1371 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1407 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1372 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); 1408 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
1373 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); 1409 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
1374 1410
1375 if (this_v > v) 1411 if (this_v > v)
1376 v = this_v; 1412 v = this_v;
@@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
1378 p = this_p; 1414 p = this_p;
1379 } 1415 }
1380 1416
1381 if (v >= I830_DP_VOLTAGE_MAX) 1417 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1382 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 1418 voltage_max = I830_DP_VOLTAGE_MAX_CPT;
1419 else
1420 voltage_max = I830_DP_VOLTAGE_MAX;
1421 if (v >= voltage_max)
1422 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1383 1423
1384 if (p >= intel_dp_pre_emphasis_max(v)) 1424 if (p >= intel_dp_pre_emphasis_max(v))
1385 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1425 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
@@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
1389} 1429}
1390 1430
1391static uint32_t 1431static uint32_t
1392intel_dp_signal_levels(uint8_t train_set, int lane_count) 1432intel_dp_signal_levels(uint8_t train_set)
1393{ 1433{
1394 uint32_t signal_levels = 0; 1434 uint32_t signal_levels = 0;
1395 1435
@@ -1458,9 +1498,8 @@ static uint8_t
1458intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1498intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1459 int lane) 1499 int lane)
1460{ 1500{
1461 int i = DP_LANE0_1_STATUS + (lane >> 1);
1462 int s = (lane & 1) * 4; 1501 int s = (lane & 1) * 4;
1463 uint8_t l = intel_dp_link_status(link_status, i); 1502 uint8_t l = link_status[lane>>1];
1464 1503
1465 return (l >> s) & 0xf; 1504 return (l >> s) & 0xf;
1466} 1505}
@@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1485 DP_LANE_CHANNEL_EQ_DONE|\ 1524 DP_LANE_CHANNEL_EQ_DONE|\
1486 DP_LANE_SYMBOL_LOCKED) 1525 DP_LANE_SYMBOL_LOCKED)
1487static bool 1526static bool
1488intel_channel_eq_ok(struct intel_dp *intel_dp) 1527intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1489{ 1528{
1490 uint8_t lane_align; 1529 uint8_t lane_align;
1491 uint8_t lane_status; 1530 uint8_t lane_status;
1492 int lane; 1531 int lane;
1493 1532
1494 lane_align = intel_dp_link_status(intel_dp->link_status, 1533 lane_align = intel_dp_link_status(link_status,
1495 DP_LANE_ALIGN_STATUS_UPDATED); 1534 DP_LANE_ALIGN_STATUS_UPDATED);
1496 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1535 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1497 return false; 1536 return false;
1498 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1537 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1499 lane_status = intel_get_lane_status(intel_dp->link_status, lane); 1538 lane_status = intel_get_lane_status(link_status, lane);
1500 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1539 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1501 return false; 1540 return false;
1502 } 1541 }
@@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1521 1560
1522 ret = intel_dp_aux_native_write(intel_dp, 1561 ret = intel_dp_aux_native_write(intel_dp,
1523 DP_TRAINING_LANE0_SET, 1562 DP_TRAINING_LANE0_SET,
1524 intel_dp->train_set, 4); 1563 intel_dp->train_set,
1525 if (ret != 4) 1564 intel_dp->lane_count);
1565 if (ret != intel_dp->lane_count)
1526 return false; 1566 return false;
1527 1567
1528 return true; 1568 return true;
@@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1538 int i; 1578 int i;
1539 uint8_t voltage; 1579 uint8_t voltage;
1540 bool clock_recovery = false; 1580 bool clock_recovery = false;
1541 int tries; 1581 int voltage_tries, loop_tries;
1542 u32 reg; 1582 u32 reg;
1543 uint32_t DP = intel_dp->DP; 1583 uint32_t DP = intel_dp->DP;
1544 1584
@@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1565 DP &= ~DP_LINK_TRAIN_MASK; 1605 DP &= ~DP_LINK_TRAIN_MASK;
1566 memset(intel_dp->train_set, 0, 4); 1606 memset(intel_dp->train_set, 0, 4);
1567 voltage = 0xff; 1607 voltage = 0xff;
1568 tries = 0; 1608 voltage_tries = 0;
1609 loop_tries = 0;
1569 clock_recovery = false; 1610 clock_recovery = false;
1570 for (;;) { 1611 for (;;) {
1571 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1612 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1613 uint8_t link_status[DP_LINK_STATUS_SIZE];
1572 uint32_t signal_levels; 1614 uint32_t signal_levels;
1573 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1615
1616 if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1574 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1617 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1575 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1618 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1576 } else { 1619 } else {
1577 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1620 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1621 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1578 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1622 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1579 } 1623 }
1580 1624
@@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1590 /* Set training pattern 1 */ 1634 /* Set training pattern 1 */
1591 1635
1592 udelay(100); 1636 udelay(100);
1593 if (!intel_dp_get_link_status(intel_dp)) 1637 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1638 DRM_ERROR("failed to get link status\n");
1594 break; 1639 break;
1640 }
1595 1641
1596 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1642 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1643 DRM_DEBUG_KMS("clock recovery OK\n");
1597 clock_recovery = true; 1644 clock_recovery = true;
1598 break; 1645 break;
1599 } 1646 }
@@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1602 for (i = 0; i < intel_dp->lane_count; i++) 1649 for (i = 0; i < intel_dp->lane_count; i++)
1603 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1650 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1604 break; 1651 break;
1605 if (i == intel_dp->lane_count) 1652 if (i == intel_dp->lane_count) {
1606 break; 1653 ++loop_tries;
1654 if (loop_tries == 5) {
1655 DRM_DEBUG_KMS("too many full retries, give up\n");
1656 break;
1657 }
1658 memset(intel_dp->train_set, 0, 4);
1659 voltage_tries = 0;
1660 continue;
1661 }
1607 1662
1608 /* Check to see if we've tried the same voltage 5 times */ 1663 /* Check to see if we've tried the same voltage 5 times */
1609 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1664 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1610 ++tries; 1665 ++voltage_tries;
1611 if (tries == 5) 1666 if (voltage_tries == 5) {
1667 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1612 break; 1668 break;
1669 }
1613 } else 1670 } else
1614 tries = 0; 1671 voltage_tries = 0;
1615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1672 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1616 1673
1617 /* Compute new intel_dp->train_set as requested by target */ 1674 /* Compute new intel_dp->train_set as requested by target */
1618 intel_get_adjust_train(intel_dp); 1675 intel_get_adjust_train(intel_dp, link_status);
1619 } 1676 }
1620 1677
1621 intel_dp->DP = DP; 1678 intel_dp->DP = DP;
@@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1638 for (;;) { 1695 for (;;) {
1639 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1696 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1640 uint32_t signal_levels; 1697 uint32_t signal_levels;
1698 uint8_t link_status[DP_LINK_STATUS_SIZE];
1641 1699
1642 if (cr_tries > 5) { 1700 if (cr_tries > 5) {
1643 DRM_ERROR("failed to train DP, aborting\n"); 1701 DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1645 break; 1703 break;
1646 } 1704 }
1647 1705
1648 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1706 if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1649 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1707 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1650 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1708 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1651 } else { 1709 } else {
1652 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1710 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1653 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1711 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1654 } 1712 }
1655 1713
@@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1665 break; 1723 break;
1666 1724
1667 udelay(400); 1725 udelay(400);
1668 if (!intel_dp_get_link_status(intel_dp)) 1726 if (!intel_dp_get_link_status(intel_dp, link_status))
1669 break; 1727 break;
1670 1728
1671 /* Make sure clock is still ok */ 1729 /* Make sure clock is still ok */
1672 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1730 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1673 intel_dp_start_link_train(intel_dp); 1731 intel_dp_start_link_train(intel_dp);
1674 cr_tries++; 1732 cr_tries++;
1675 continue; 1733 continue;
1676 } 1734 }
1677 1735
1678 if (intel_channel_eq_ok(intel_dp)) { 1736 if (intel_channel_eq_ok(intel_dp, link_status)) {
1679 channel_eq = true; 1737 channel_eq = true;
1680 break; 1738 break;
1681 } 1739 }
@@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1690 } 1748 }
1691 1749
1692 /* Compute new intel_dp->train_set as requested by target */ 1750 /* Compute new intel_dp->train_set as requested by target */
1693 intel_get_adjust_train(intel_dp); 1751 intel_get_adjust_train(intel_dp, link_status);
1694 ++tries; 1752 ++tries;
1695 } 1753 }
1696 1754
@@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1735 1793
1736 msleep(17); 1794 msleep(17);
1737 1795
1738 if (is_edp(intel_dp)) 1796 if (is_edp(intel_dp)) {
1739 DP |= DP_LINK_TRAIN_OFF; 1797 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1798 DP |= DP_LINK_TRAIN_OFF_CPT;
1799 else
1800 DP |= DP_LINK_TRAIN_OFF;
1801 }
1740 1802
1741 if (!HAS_PCH_CPT(dev) && 1803 if (!HAS_PCH_CPT(dev) &&
1742 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1804 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1884,7 @@ static void
1822intel_dp_check_link_status(struct intel_dp *intel_dp) 1884intel_dp_check_link_status(struct intel_dp *intel_dp)
1823{ 1885{
1824 u8 sink_irq_vector; 1886 u8 sink_irq_vector;
1887 u8 link_status[DP_LINK_STATUS_SIZE];
1825 1888
1826 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1889 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
1827 return; 1890 return;
@@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1830 return; 1893 return;
1831 1894
1832 /* Try to read receiver status if the link appears to be up */ 1895 /* Try to read receiver status if the link appears to be up */
1833 if (!intel_dp_get_link_status(intel_dp)) { 1896 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1834 intel_dp_link_down(intel_dp); 1897 intel_dp_link_down(intel_dp);
1835 return; 1898 return;
1836 } 1899 }
@@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1855 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 1918 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
1856 } 1919 }
1857 1920
1858 if (!intel_channel_eq_ok(intel_dp)) { 1921 if (!intel_channel_eq_ok(intel_dp, link_status)) {
1859 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 1922 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
1860 drm_get_encoder_name(&intel_dp->base.base)); 1923 drm_get_encoder_name(&intel_dp->base.base));
1861 intel_dp_start_link_train(intel_dp); 1924 intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
2179 continue; 2242 continue;
2180 2243
2181 intel_dp = enc_to_intel_dp(encoder); 2244 intel_dp = enc_to_intel_dp(encoder);
2182 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) 2245 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2246 intel_dp->base.type == INTEL_OUTPUT_EDP)
2183 return intel_dp->output_reg; 2247 return intel_dp->output_reg;
2184 } 2248 }
2185 2249
@@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2321 2385
2322 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2386 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2323 PANEL_LIGHT_ON_DELAY_SHIFT; 2387 PANEL_LIGHT_ON_DELAY_SHIFT;
2324 2388
2325 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2389 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2326 PANEL_LIGHT_OFF_DELAY_SHIFT; 2390 PANEL_LIGHT_OFF_DELAY_SHIFT;
2327 2391
@@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2418 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2355 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2419 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2356 2420
2357 intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
2358
2359 ironlake_edp_panel_vdd_on(intel_dp); 2421 ironlake_edp_panel_vdd_on(intel_dp);
2360 ret = intel_dp_get_dpcd(intel_dp); 2422 ret = intel_dp_get_dpcd(intel_dp);
2361 ironlake_edp_panel_vdd_off(intel_dp, false); 2423 ironlake_edp_panel_vdd_off(intel_dp, false);
2424
2362 if (ret) { 2425 if (ret) {
2363 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2426 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2364 dev_priv->no_aux_handshake = 2427 dev_priv->no_aux_handshake =
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbee..21f60b7d69a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
326static int intel_panel_get_brightness(struct backlight_device *bd) 326static int intel_panel_get_brightness(struct backlight_device *bd)
327{ 327{
328 struct drm_device *dev = bl_get_data(bd); 328 struct drm_device *dev = bl_get_data(bd);
329 return intel_panel_get_backlight(dev); 329 struct drm_i915_private *dev_priv = dev->dev_private;
330 return dev_priv->backlight_level;
330} 331}
331 332
332static const struct backlight_ops intel_panel_bl_ops = { 333static const struct backlight_ops intel_panel_bl_ops = {
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea757..38e1bda73d3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -480,21 +480,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
480 } 480 }
481 break; 481 break;
482 case DB_Z_INFO: 482 case DB_Z_INFO:
483 r = evergreen_cs_packet_next_reloc(p, &reloc);
484 if (r) {
485 dev_warn(p->dev, "bad SET_CONTEXT_REG "
486 "0x%04X\n", reg);
487 return -EINVAL;
488 }
489 track->db_z_info = radeon_get_ib_value(p, idx); 483 track->db_z_info = radeon_get_ib_value(p, idx);
490 ib[idx] &= ~Z_ARRAY_MODE(0xf); 484 if (!p->keep_tiling_flags) {
491 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 485 r = evergreen_cs_packet_next_reloc(p, &reloc);
492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 486 if (r) {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 487 dev_warn(p->dev, "bad SET_CONTEXT_REG "
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 488 "0x%04X\n", reg);
495 } else { 489 return -EINVAL;
496 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 490 }
497 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 491 ib[idx] &= ~Z_ARRAY_MODE(0xf);
492 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
493 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
494 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
495 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
496 } else {
497 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
498 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
499 }
498 } 500 }
499 break; 501 break;
500 case DB_STENCIL_INFO: 502 case DB_STENCIL_INFO:
@@ -607,40 +609,44 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
607 case CB_COLOR5_INFO: 609 case CB_COLOR5_INFO:
608 case CB_COLOR6_INFO: 610 case CB_COLOR6_INFO:
609 case CB_COLOR7_INFO: 611 case CB_COLOR7_INFO:
610 r = evergreen_cs_packet_next_reloc(p, &reloc);
611 if (r) {
612 dev_warn(p->dev, "bad SET_CONTEXT_REG "
613 "0x%04X\n", reg);
614 return -EINVAL;
615 }
616 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 612 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
617 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 613 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
618 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 614 if (!p->keep_tiling_flags) {
619 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 615 r = evergreen_cs_packet_next_reloc(p, &reloc);
620 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 616 if (r) {
621 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 617 dev_warn(p->dev, "bad SET_CONTEXT_REG "
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 618 "0x%04X\n", reg);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 619 return -EINVAL;
620 }
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
627 }
624 } 628 }
625 break; 629 break;
626 case CB_COLOR8_INFO: 630 case CB_COLOR8_INFO:
627 case CB_COLOR9_INFO: 631 case CB_COLOR9_INFO:
628 case CB_COLOR10_INFO: 632 case CB_COLOR10_INFO:
629 case CB_COLOR11_INFO: 633 case CB_COLOR11_INFO:
630 r = evergreen_cs_packet_next_reloc(p, &reloc);
631 if (r) {
632 dev_warn(p->dev, "bad SET_CONTEXT_REG "
633 "0x%04X\n", reg);
634 return -EINVAL;
635 }
636 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 634 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
637 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 635 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
638 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 636 if (!p->keep_tiling_flags) {
639 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 637 r = evergreen_cs_packet_next_reloc(p, &reloc);
640 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 638 if (r) {
641 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 639 dev_warn(p->dev, "bad SET_CONTEXT_REG "
642 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 640 "0x%04X\n", reg);
643 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 641 return -EINVAL;
642 }
643 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
644 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
645 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
646 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
647 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
648 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
649 }
644 } 650 }
645 break; 651 break;
646 case CB_COLOR0_PITCH: 652 case CB_COLOR0_PITCH:
@@ -1311,10 +1317,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1311 return -EINVAL; 1317 return -EINVAL;
1312 } 1318 }
1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1319 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1314 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1320 if (!p->keep_tiling_flags) {
1315 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1321 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1316 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1322 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1317 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1323 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1324 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
1325 }
1318 texture = reloc->robj; 1326 texture = reloc->robj;
1319 /* tex mip base */ 1327 /* tex mip base */
1320 r = evergreen_cs_packet_next_reloc(p, &reloc); 1328 r = evergreen_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652..c93bc64707e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 701 return r;
702 } 702 }
703 703
704 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 704 if (p->keep_tiling_flags) {
705 tile_flags |= R300_TXO_MACRO_TILE; 705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 tile_flags |= R300_TXO_MICRO_TILE; 707 } else {
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 708 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 709 tile_flags |= R300_TXO_MACRO_TILE;
710 710 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tile_flags |= R300_TXO_MICRO_TILE;
712 tmp |= tile_flags; 712 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
713 ib[idx] = tmp; 713 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
714
715 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
716 tmp |= tile_flags;
717 ib[idx] = tmp;
718 }
714 track->textures[i].robj = reloc->robj; 719 track->textures[i].robj = reloc->robj;
715 track->tex_dirty = true; 720 track->tex_dirty = true;
716 break; 721 break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
760 /* RB3D_COLORPITCH1 */ 765 /* RB3D_COLORPITCH1 */
761 /* RB3D_COLORPITCH2 */ 766 /* RB3D_COLORPITCH2 */
762 /* RB3D_COLORPITCH3 */ 767 /* RB3D_COLORPITCH3 */
763 r = r100_cs_packet_next_reloc(p, &reloc); 768 if (!p->keep_tiling_flags) {
764 if (r) { 769 r = r100_cs_packet_next_reloc(p, &reloc);
765 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 770 if (r) {
766 idx, reg); 771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
767 r100_cs_dump_packet(p, pkt); 772 idx, reg);
768 return r; 773 r100_cs_dump_packet(p, pkt);
769 } 774 return r;
775 }
770 776
771 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
772 tile_flags |= R300_COLOR_TILE_ENABLE; 778 tile_flags |= R300_COLOR_TILE_ENABLE;
773 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
774 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
775 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
776 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
777 783
778 tmp = idx_value & ~(0x7 << 16); 784 tmp = idx_value & ~(0x7 << 16);
779 tmp |= tile_flags; 785 tmp |= tile_flags;
780 ib[idx] = tmp; 786 ib[idx] = tmp;
787 }
781 i = (reg - 0x4E38) >> 2; 788 i = (reg - 0x4E38) >> 2;
782 track->cb[i].pitch = idx_value & 0x3FFE; 789 track->cb[i].pitch = idx_value & 0x3FFE;
783 switch (((idx_value >> 21) & 0xF)) { 790 switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
843 break; 850 break;
844 case 0x4F24: 851 case 0x4F24:
845 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
846 r = r100_cs_packet_next_reloc(p, &reloc); 853 if (!p->keep_tiling_flags) {
847 if (r) { 854 r = r100_cs_packet_next_reloc(p, &reloc);
848 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 855 if (r) {
849 idx, reg); 856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
850 r100_cs_dump_packet(p, pkt); 857 idx, reg);
851 return r; 858 r100_cs_dump_packet(p, pkt);
852 } 859 return r;
853 860 }
854 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
855 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
856 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
857 tile_flags |= R300_DEPTHMICROTILE_TILED;
858 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
859 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
860 861
861 tmp = idx_value & ~(0x7 << 16); 862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
862 tmp |= tile_flags; 863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
863 ib[idx] = tmp; 864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
864 868
869 tmp = idx_value & ~(0x7 << 16);
870 tmp |= tile_flags;
871 ib[idx] = tmp;
872 }
865 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb.pitch = idx_value & 0x3FFC;
866 track->zb_dirty = true; 874 track->zb_dirty = true;
867 break; 875 break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c155..cb1acffd243 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
941 track->db_depth_control = radeon_get_ib_value(p, idx); 941 track->db_depth_control = radeon_get_ib_value(p, idx);
942 break; 942 break;
943 case R_028010_DB_DEPTH_INFO: 943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) { 944 if (!p->keep_tiling_flags &&
945 r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc); 946 r = r600_cs_packet_next_reloc(p, &reloc);
946 if (r) { 947 if (r) {
947 dev_warn(p->dev, "bad SET_CONTEXT_REG " 948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO: 995 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) { 996 if (!p->keep_tiling_flags &&
997 r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc); 998 r = r600_cs_packet_next_reloc(p, &reloc);
997 if (r) { 999 if (r) {
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1000 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1291 mip_offset <<= 8; 1293 mip_offset <<= 8;
1292 1294
1293 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO) 1296 if (!p->keep_tiling_flags) {
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 if (tiling_flags & RADEON_TILING_MACRO)
1296 else if (tiling_flags & RADEON_TILING_MICRO) 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 else if (tiling_flags & RADEON_TILING_MICRO)
1300 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1301 }
1298 word1 = radeon_get_ib_value(p, idx + 1); 1302 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1; 1303 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1304 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1621 return -EINVAL; 1625 return -EINVAL;
1622 } 1626 }
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1628 if (!p->keep_tiling_flags) {
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1632 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1633 }
1628 texture = reloc->robj; 1634 texture = reloc->robj;
1629 /* tex mip base */ 1635 /* tex mip base */
1630 r = r600_cs_packet_next_reloc(p, &reloc); 1636 r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fc5a1d642cb..8227e76b5c7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
611 struct radeon_ib *ib; 611 struct radeon_ib *ib;
612 void *track; 612 void *track;
613 unsigned family; 613 unsigned family;
614 int parser_error; 614 int parser_error;
615 bool keep_tiling_flags;
615}; 616};
616 617
617extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 618extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d2d179267af..d24baf30efc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
63}; 63};
64 64
65static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
66 ATOM_GPIO_I2C_ASSIGMENT *gpio,
67 u8 index)
68{
69 /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
70 if ((rdev->family == CHIP_R420) ||
71 (rdev->family == CHIP_R423) ||
72 (rdev->family == CHIP_RV410)) {
73 if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
74 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
75 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
76 gpio->ucClkMaskShift = 0x19;
77 gpio->ucDataMaskShift = 0x18;
78 }
79 }
80
81 /* some evergreen boards have bad data for this entry */
82 if (ASIC_IS_DCE4(rdev)) {
83 if ((index == 7) &&
84 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
85 (gpio->sucI2cId.ucAccess == 0)) {
86 gpio->sucI2cId.ucAccess = 0x97;
87 gpio->ucDataMaskShift = 8;
88 gpio->ucDataEnShift = 8;
89 gpio->ucDataY_Shift = 8;
90 gpio->ucDataA_Shift = 8;
91 }
92 }
93
94 /* some DCE3 boards have bad data for this entry */
95 if (ASIC_IS_DCE3(rdev)) {
96 if ((index == 4) &&
97 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
98 (gpio->sucI2cId.ucAccess == 0x94))
99 gpio->sucI2cId.ucAccess = 0x14;
100 }
101}
102
103static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
104{
105 struct radeon_i2c_bus_rec i2c;
106
107 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
108
109 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
110 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
111 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
112 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
113 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
114 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
115 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
116 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
117 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
118 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
119 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
120 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
121 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
122 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
123 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
124 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
125
126 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
127 i2c.hw_capable = true;
128 else
129 i2c.hw_capable = false;
130
131 if (gpio->sucI2cId.ucAccess == 0xa0)
132 i2c.mm_i2c = true;
133 else
134 i2c.mm_i2c = false;
135
136 i2c.i2c_id = gpio->sucI2cId.ucAccess;
137
138 if (i2c.mask_clk_reg)
139 i2c.valid = true;
140 else
141 i2c.valid = false;
142
143 return i2c;
144}
145
65static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 146static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
66 uint8_t id) 147 uint8_t id)
67{ 148{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
85 for (i = 0; i < num_indices; i++) { 166 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 167 gpio = &i2c_info->asGPIO_Info[i];
87 168
88 /* some evergreen boards have bad data for this entry */ 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108 170
109 if (gpio->sucI2cId.ucAccess == id) { 171 if (gpio->sucI2cId.ucAccess == id) {
110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
112 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
113 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
114 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
115 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
116 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
117 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
118 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
119 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
120 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
121 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
122 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
123 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
124 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
125 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
126
127 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
128 i2c.hw_capable = true;
129 else
130 i2c.hw_capable = false;
131
132 if (gpio->sucI2cId.ucAccess == 0xa0)
133 i2c.mm_i2c = true;
134 else
135 i2c.mm_i2c = false;
136
137 i2c.i2c_id = gpio->sucI2cId.ucAccess;
138
139 if (i2c.mask_clk_reg)
140 i2c.valid = true;
141 break; 173 break;
142 } 174 }
143 } 175 }
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
157 int i, num_indices; 189 int i, num_indices;
158 char stmp[32]; 190 char stmp[32];
159 191
160 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
161
162 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 192 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
163 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 193 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
164 194
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
167 197
168 for (i = 0; i < num_indices; i++) { 198 for (i = 0; i < num_indices; i++) {
169 gpio = &i2c_info->asGPIO_Info[i]; 199 gpio = &i2c_info->asGPIO_Info[i];
170 i2c.valid = false;
171
172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) &&
175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8;
179 gpio->ucDataEnShift = 8;
180 gpio->ucDataY_Shift = 8;
181 gpio->ucDataA_Shift = 8;
182 }
183 }
184
185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192 200
193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
196 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
197 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
198 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
199 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
200 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
201 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
202 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
203 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
204 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
205 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
206 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
207 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
208 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
209
210 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
211 i2c.hw_capable = true;
212 else
213 i2c.hw_capable = false;
214 202
215 if (gpio->sucI2cId.ucAccess == 0xa0) 203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
216 i2c.mm_i2c = true;
217 else
218 i2c.mm_i2c = false;
219 204
220 i2c.i2c_id = gpio->sucI2cId.ucAccess; 205 if (i2c.valid) {
221
222 if (i2c.mask_clk_reg) {
223 i2c.valid = true;
224 sprintf(stmp, "0x%x", i2c.i2c_id); 206 sprintf(stmp, "0x%x", i2c.i2c_id);
225 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
226 } 208 }
@@ -1996,14 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1996 return state_index; 1978 return state_index;
1997 /* last mode is usually default, array is low to high */ 1979 /* last mode is usually default, array is low to high */
1998 for (i = 0; i < num_modes; i++) { 1980 for (i = 0; i < num_modes; i++) {
1981 rdev->pm.power_state[state_index].clock_info =
1982 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
1983 if (!rdev->pm.power_state[state_index].clock_info)
1984 return state_index;
1985 rdev->pm.power_state[state_index].num_clock_modes = 1;
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1986 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 1987 switch (frev) {
2001 case 1: 1988 case 1:
2002 rdev->pm.power_state[state_index].clock_info =
2003 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2004 if (!rdev->pm.power_state[state_index].clock_info)
2005 return state_index;
2006 rdev->pm.power_state[state_index].num_clock_modes = 1;
2007 rdev->pm.power_state[state_index].clock_info[0].mclk = 1989 rdev->pm.power_state[state_index].clock_info[0].mclk =
2008 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 1990 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
2009 rdev->pm.power_state[state_index].clock_info[0].sclk = 1991 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2039,11 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2039 state_index++; 2021 state_index++;
2040 break; 2022 break;
2041 case 2: 2023 case 2:
2042 rdev->pm.power_state[state_index].clock_info =
2043 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2044 if (!rdev->pm.power_state[state_index].clock_info)
2045 return state_index;
2046 rdev->pm.power_state[state_index].num_clock_modes = 1;
2047 rdev->pm.power_state[state_index].clock_info[0].mclk = 2024 rdev->pm.power_state[state_index].clock_info[0].mclk =
2048 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2025 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
2049 rdev->pm.power_state[state_index].clock_info[0].sclk = 2026 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2080,11 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2080 state_index++; 2057 state_index++;
2081 break; 2058 break;
2082 case 3: 2059 case 3:
2083 rdev->pm.power_state[state_index].clock_info =
2084 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2085 if (!rdev->pm.power_state[state_index].clock_info)
2086 return state_index;
2087 rdev->pm.power_state[state_index].num_clock_modes = 1;
2088 rdev->pm.power_state[state_index].clock_info[0].mclk = 2060 rdev->pm.power_state[state_index].clock_info[0].mclk =
2089 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2061 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
2090 rdev->pm.power_state[state_index].clock_info[0].sclk = 2062 rdev->pm.power_state[state_index].clock_info[0].sclk =
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ccaa243c144..29afd71e084 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{ 93{
94 struct drm_radeon_cs *cs = data; 94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 95 uint64_t *chunk_array_ptr;
96 unsigned size, i; 96 unsigned size, i, flags = 0;
97 97
98 if (!cs->num_chunks) { 98 if (!cs->num_chunks) {
99 return 0; 99 return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
140 if (p->chunks[i].length_dw == 0) 140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
144 !p->chunks[i].length_dw) {
145 return -EINVAL;
146 }
143 147
144 p->chunks[i].length_dw = user_chunk.length_dw; 148 p->chunks[i].length_dw = user_chunk.length_dw;
145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
155 p->chunks[i].user_ptr, size)) { 159 p->chunks[i].user_ptr, size)) {
156 return -EFAULT; 160 return -EFAULT;
157 } 161 }
162 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
163 flags = p->chunks[i].kdata[0];
164 }
158 } else { 165 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 166 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 167 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
174 p->chunks[p->chunk_ib_idx].length_dw); 181 p->chunks[p->chunk_ib_idx].length_dw);
175 return -EINVAL; 182 return -EINVAL;
176 } 183 }
184
185 p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
177 return 0; 186 return 0;
178} 187}
179 188
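Note: the new RADEON_CHUNK_ID_FLAGS chunk is how userspace reaches the keep_tiling_flags checks added to the evergreen/r300/r600 CS checkers above. The first dword of the chunk is copied into flags, and RADEON_CS_KEEP_TILING_FLAGS makes the parser leave the tiling bits in the command stream as submitted instead of patching them from the relocations. Below is a reduced sketch of that extraction, using a simplified chunk layout and assumed constant values rather than the real drm_radeon_cs structures.

	/*
	 * Sketch of the flags-chunk handling; the structure and constants are
	 * simplified stand-ins, not the radeon_drm.h definitions.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define SKETCH_CHUNK_ID_FLAGS		3		/* assumed id */
	#define SKETCH_CS_KEEP_TILING_FLAGS	(1u << 0)	/* assumed bit */

	struct cs_chunk_sketch {
		uint32_t chunk_id;
		uint32_t length_dw;
		const uint32_t *data;
	};

	/* Returns whether the submission asked the checker to keep tiling bits. */
	static bool cs_keep_tiling_flags(const struct cs_chunk_sketch *chunks,
					 size_t num_chunks)
	{
		uint32_t flags = 0;
		size_t i;

		for (i = 0; i < num_chunks; i++) {
			if (chunks[i].chunk_id != SKETCH_CHUNK_ID_FLAGS)
				continue;
			if (!chunks[i].length_dw)	/* the real parser returns -EINVAL here */
				break;
			flags = chunks[i].data[0];	/* first dword carries the flags */
		}
		return (flags & SKETCH_CS_KEEP_TILING_FLAGS) != 0;
	}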
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e90948..71499fc3daf 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
54 * 2.10.0 - fusion 2D tiling 54 * 2.10.0 - fusion 2D tiling
55 * 2.11.0 - backend map, initial compute support for the CS checker 55 * 2.11.0 - backend map, initial compute support for the CS checker
56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
56 */ 57 */
57#define KMS_DRIVER_MAJOR 2 58#define KMS_DRIVER_MAJOR 2
58#define KMS_DRIVER_MINOR 11 59#define KMS_DRIVER_MINOR 12
59#define KMS_DRIVER_PATCHLEVEL 0 60#define KMS_DRIVER_PATCHLEVEL 0
60int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
61int radeon_driver_unload_kms(struct drm_device *dev); 62int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b64678fc..0bb0f5f713e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -574,10 +574,16 @@ retry:
574 return ret; 574 return ret;
575 575
576 spin_lock(&glob->lru_lock); 576 spin_lock(&glob->lru_lock);
577
578 if (unlikely(list_empty(&bo->ddestroy))) {
579 spin_unlock(&glob->lru_lock);
580 return 0;
581 }
582
577 ret = ttm_bo_reserve_locked(bo, interruptible, 583 ret = ttm_bo_reserve_locked(bo, interruptible,
578 no_wait_reserve, false, 0); 584 no_wait_reserve, false, 0);
579 585
580 if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 586 if (unlikely(ret != 0)) {
581 spin_unlock(&glob->lru_lock); 587 spin_unlock(&glob->lru_lock);
582 return ret; 588 return ret;
583 } 589 }
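Note: the ttm_bo change makes the delayed-destroy path re-check list_empty(&bo->ddestroy) right after re-taking lru_lock. If another thread already took the buffer off the delayed-destroy list while the lock was dropped for the wait, the function returns without reserving it, and only a genuine reserve failure is treated as an error. A generic sketch of that revalidate-after-relock pattern, with a pthread mutex standing in for the spinlock and hypothetical names throughout:

	#include <pthread.h>
	#include <stdbool.h>

	struct delayed_item {
		bool on_list;	/* stand-in for !list_empty(&bo->ddestroy) */
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * wait_unlocked() may block and runs without list_lock held, so the
	 * item's membership on the delayed list must be re-checked afterwards.
	 */
	static int cleanup_item(struct delayed_item *item,
				int (*wait_unlocked)(struct delayed_item *),
				int (*reserve_locked)(struct delayed_item *))
	{
		int ret = wait_unlocked(item);
		if (ret)
			return ret;

		pthread_mutex_lock(&list_lock);

		if (!item->on_list) {			/* someone else finished it */
			pthread_mutex_unlock(&list_lock);
			return 0;
		}

		ret = reserve_locked(item);		/* only now is failure an error */
		if (ret) {
			pthread_mutex_unlock(&list_lock);
			return ret;
		}

		/* ... destroy the item and drop it from the list ... */
		item->on_list = false;
		pthread_mutex_unlock(&list_lock);
		return 0;
	}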
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c72f1c0b5e6..111d956d8e7 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
465 while (new_bus) { 465 while (new_bus) {
466 new_bridge = new_bus->self; 466 new_bridge = new_bus->self;
467 467
468 if (new_bridge) { 468 /* go through list of devices already registered */
469 /* go through list of devices already registered */ 469 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) { 470 bus = same_bridge_vgadev->pdev->bus;
471 bus = same_bridge_vgadev->pdev->bus; 471 bridge = bus->self;
472 bridge = bus->self; 472
473 473 /* see if they share a bridge with this device */
474 /* see if they share a bridge with this device */ 474 if (new_bridge == bridge) {

475 if (new_bridge == bridge) { 475 /* if their direct parent bridge is the same
476 /* if their direct parent bridge is the same 476 as any bridge of this device then it can't be used
477 as any bridge of this device then it can't be used 477 for that device */
478 for that device */ 478 same_bridge_vgadev->bridge_has_one_vga = false;
479 same_bridge_vgadev->bridge_has_one_vga = false; 479 }
480 }
481 480
482 /* now iterate the previous devices bridge hierarchy */ 481 /* now iterate the previous devices bridge hierarchy */
483 /* if the new devices parent bridge is in the other devices 482 /* if the new devices parent bridge is in the other devices
484 hierarchy then we can't use it to control this device */ 483 hierarchy then we can't use it to control this device */
485 while (bus) { 484 while (bus) {
486 bridge = bus->self; 485 bridge = bus->self;
487 if (bridge) { 486 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self) 487 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false; 488 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 } 489 }
490 bus = bus->parent;
493 } 491 }
494 } 492 }
495 new_bus = new_bus->parent; 493 new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 uc = &priv->cards[i]; 991 uc = &priv->cards[i];
994 } 992 }
995 993
996 if (!uc) 994 if (!uc) {
997 return -EINVAL; 995 ret_val = -EINVAL;
996 goto done;
997 }
998 998
999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
1000 return -EINVAL; 1000 ret_val = -EINVAL;
1001 goto done;
1002 }
1001 1003
1002 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1004 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
1003 return -EINVAL; 1005 ret_val = -EINVAL;
1006 goto done;
1007 }
1004 1008
1005 vga_put(pdev, io_state); 1009 vga_put(pdev, io_state);
1006 1010
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9ec854ae118..91be41f6080 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -315,7 +315,7 @@ config SENSORS_DS1621
315 315
316config SENSORS_EXYNOS4_TMU 316config SENSORS_EXYNOS4_TMU
317 tristate "Temperature sensor on Samsung EXYNOS4" 317 tristate "Temperature sensor on Samsung EXYNOS4"
318 depends on EXYNOS4_DEV_TMU 318 depends on ARCH_EXYNOS4
319 help 319 help
320 If you say yes here you get support for TMU (Thermal Managment 320 If you say yes here you get support for TMU (Thermal Managment
321 Unit) on SAMSUNG EXYNOS4 series of SoC. 321 Unit) on SAMSUNG EXYNOS4 series of SoC.
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e8537..5d760f3d21c 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
160static struct spi_driver ad7314_driver = { 160static struct spi_driver ad7314_driver = {
161 .driver = { 161 .driver = {
162 .name = "ad7314", 162 .name = "ad7314",
163 .bus = &spi_bus_type,
164 .owner = THIS_MODULE, 163 .owner = THIS_MODULE,
165 }, 164 },
166 .probe = ad7314_probe, 165 .probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e18..04450f8bf5d 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
227static struct spi_driver ads7871_driver = { 227static struct spi_driver ads7871_driver = {
228 .driver = { 228 .driver = {
229 .name = DEVICE_NAME, 229 .name = DEVICE_NAME,
230 .bus = &spi_bus_type,
231 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
232 }, 231 },
233 232
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f..f2359a0093b 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
506 .resume = exynos4_tmu_resume, 506 .resume = exynos4_tmu_resume,
507}; 507};
508 508
509static int __init exynos4_tmu_driver_init(void) 509module_platform_driver(exynos4_tmu_driver);
510{
511 return platform_driver_register(&exynos4_tmu_driver);
512}
513module_init(exynos4_tmu_driver_init);
514
515static void __exit exynos4_tmu_driver_exit(void)
516{
517 platform_driver_unregister(&exynos4_tmu_driver);
518}
519module_exit(exynos4_tmu_driver_exit);
520 510
521MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); 511MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
522MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); 512MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
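
The hwmon conversions above and below replace each driver's hand-written init/exit pair with the module_platform_driver() helper. As a rough sketch (paraphrasing the helper in include/linux/platform_device.h of this era, not a verbatim copy), the macro generates the same boilerplate the hunks delete:

#define module_platform_driver(__platform_driver)			\
static int __init __platform_driver##_init(void)			\
{									\
	return platform_driver_register(&(__platform_driver));		\
}									\
module_init(__platform_driver##_init);					\
static void __exit __platform_driver##_exit(void)			\
{									\
	platform_driver_unregister(&(__platform_driver));		\
}									\
module_exit(__platform_driver##_exit);

A single module_platform_driver(exynos4_tmu_driver); line therefore registers the driver at module load and unregisters it at unload, exactly as the removed functions did.
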
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743a..9ba38f318ff 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
539 }, 539 },
540}; 540};
541 541
542static int __init gpio_fan_init(void) 542module_platform_driver(gpio_fan_driver);
543{
544 return platform_driver_register(&gpio_fan_driver);
545}
546
547static void __exit gpio_fan_exit(void)
548{
549 platform_driver_unregister(&gpio_fan_driver);
550}
551
552module_init(gpio_fan_init);
553module_exit(gpio_fan_exit);
554 543
555MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); 544MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
556MODULE_DESCRIPTION("GPIO FAN driver"); 545MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d4340..7a48b1eb423 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
212 }, 212 },
213}; 213};
214 214
215static int __init jz4740_hwmon_init(void) 215module_platform_driver(jz4740_hwmon_driver);
216{
217 return platform_driver_register(&jz4740_hwmon_driver);
218}
219module_init(jz4740_hwmon_init);
220
221static void __exit jz4740_hwmon_exit(void)
222{
223 platform_driver_unregister(&jz4740_hwmon_driver);
224}
225module_exit(jz4740_hwmon_exit);
226 216
227MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); 217MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
228MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 218MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dce..9b382ec2c3b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
432 .id_table = ntc_thermistor_id, 432 .id_table = ntc_thermistor_id,
433}; 433};
434 434
435static int __init ntc_thermistor_init(void) 435module_platform_driver(ntc_thermistor_driver);
436{
437 return platform_driver_register(&ntc_thermistor_driver);
438}
439
440module_init(ntc_thermistor_init);
441
442static void __exit ntc_thermistor_cleanup(void)
443{
444 platform_driver_unregister(&ntc_thermistor_driver);
445}
446
447module_exit(ntc_thermistor_cleanup);
448 436
449MODULE_DESCRIPTION("NTC Thermistor Driver"); 437MODULE_DESCRIPTION("NTC Thermistor Driver");
450MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 438MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752..f6c26d19f52 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
393 .remove = __devexit_p(s3c_hwmon_remove), 393 .remove = __devexit_p(s3c_hwmon_remove),
394}; 394};
395 395
396static int __init s3c_hwmon_init(void) 396module_platform_driver(s3c_hwmon_driver);
397{
398 return platform_driver_register(&s3c_hwmon_driver);
399}
400
401static void __exit s3c_hwmon_exit(void)
402{
403 platform_driver_unregister(&s3c_hwmon_driver);
404}
405
406module_init(s3c_hwmon_init);
407module_exit(s3c_hwmon_exit);
408 397
409MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 398MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
410MODULE_DESCRIPTION("S3C ADC HWMon driver"); 399MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c2..79b6dabe316 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
590 .remove = sch5627_remove, 590 .remove = sch5627_remove,
591}; 591};
592 592
593static int __init sch5627_init(void) 593module_platform_driver(sch5627_driver);
594{
595 return platform_driver_register(&sch5627_driver);
596}
597
598static void __exit sch5627_exit(void)
599{
600 platform_driver_unregister(&sch5627_driver);
601}
602 594
603MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); 595MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
604MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 596MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
605MODULE_LICENSE("GPL"); 597MODULE_LICENSE("GPL");
606
607module_init(sch5627_init);
608module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79f..9d5236fb09b 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
521 .remove = sch5636_remove, 521 .remove = sch5636_remove,
522}; 522};
523 523
524static int __init sch5636_init(void) 524module_platform_driver(sch5636_driver);
525{
526 return platform_driver_register(&sch5636_driver);
527}
528
529static void __exit sch5636_exit(void)
530{
531 platform_driver_unregister(&sch5636_driver);
532}
533 525
534MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); 526MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
535MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 527MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
536MODULE_LICENSE("GPL"); 528MODULE_LICENSE("GPL");
537
538module_init(sch5636_init);
539module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b16..0018c7dd009 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
136 }, 136 },
137}; 137};
138 138
139static int __init twl4030_madc_hwmon_init(void) 139module_platform_driver(twl4030_madc_hwmon_driver);
140{
141 return platform_driver_register(&twl4030_madc_hwmon_driver);
142}
143
144module_init(twl4030_madc_hwmon_init);
145
146static void __exit twl4030_madc_hwmon_exit(void)
147{
148 platform_driver_unregister(&twl4030_madc_hwmon_driver);
149}
150
151module_exit(twl4030_madc_hwmon_exit);
152 140
153MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); 141MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
154MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dc..b9a87e89bab 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
309 .remove = __devexit_p(env_remove), 309 .remove = __devexit_p(env_remove),
310}; 310};
311 311
312static int __init env_init(void) 312module_platform_driver(env_driver);
313{
314 return platform_driver_register(&env_driver);
315}
316
317static void __exit env_exit(void)
318{
319 platform_driver_unregister(&env_driver);
320}
321
322module_init(env_init);
323module_exit(env_exit);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a47..9b598ed2602 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
209 }, 209 },
210}; 210};
211 211
212static int __init wm831x_hwmon_init(void) 212module_platform_driver(wm831x_hwmon_driver);
213{
214 return platform_driver_register(&wm831x_hwmon_driver);
215}
216module_init(wm831x_hwmon_init);
217
218static void __exit wm831x_hwmon_exit(void)
219{
220 platform_driver_unregister(&wm831x_hwmon_driver);
221}
222module_exit(wm831x_hwmon_exit);
223 213
224MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 214MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
225MODULE_DESCRIPTION("WM831x Hardware Monitoring"); 215MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca8..3ff67edbdc4 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
133 }, 133 },
134}; 134};
135 135
136static int __init wm8350_hwmon_init(void) 136module_platform_driver(wm8350_hwmon_driver);
137{
138 return platform_driver_register(&wm8350_hwmon_driver);
139}
140module_init(wm8350_hwmon_init);
141
142static void __exit wm8350_hwmon_exit(void)
143{
144 platform_driver_unregister(&wm8350_hwmon_driver);
145}
146module_exit(wm8350_hwmon_exit);
147 137
148MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 138MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
149MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); 139MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 85584a547c2..525c7345fa0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
488 488
489 if (flags & I2C_M_TEN) { 489 if (flags & I2C_M_TEN) {
490 /* a ten bit address */ 490 /* a ten bit address */
491 addr = 0xf0 | ((msg->addr >> 7) & 0x03); 491 addr = 0xf0 | ((msg->addr >> 7) & 0x06);
492 bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); 492 bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
493 /* try extended address code...*/ 493 /* try extended address code...*/
494 ret = try_address(i2c_adap, addr, retries); 494 ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
498 return -ENXIO; 498 return -ENXIO;
499 } 499 }
500 /* the remaining 8 bit address */ 500 /* the remaining 8 bit address */
501 ret = i2c_outb(i2c_adap, msg->addr & 0x7f); 501 ret = i2c_outb(i2c_adap, msg->addr & 0xff);
502 if ((ret != 1) && !nak_ok) { 502 if ((ret != 1) && !nak_ok) {
503 /* the chip did not ack / xmission error occurred */ 503 /* the chip did not ack / xmission error occurred */
504 dev_err(&i2c_adap->dev, "died at 2nd address code\n"); 504 dev_err(&i2c_adap->dev, "died at 2nd address code\n");
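
The two one-byte fixes above follow directly from the 10-bit addressing format in the I2C specification: the first byte on the wire is 11110, then address bits A9:A8, then the R/W bit, and the second byte carries A7:A0. A minimal stand-alone illustration of that byte layout (the helper name is made up for this sketch):

#include <stdio.h>

/* Illustrative only: compose the two address bytes for a 10-bit I2C
 * write transfer.  'addr' is the full 10-bit address (0x000..0x3ff).
 */
static void ten_bit_addr_bytes(unsigned int addr, unsigned char out[2])
{
	/* 1111 0 A9 A8 W: A9:A8 land in bits 2:1, bit 0 stays 0 (write) */
	out[0] = 0xf0 | ((addr >> 7) & 0x06);
	/* second byte carries the low eight address bits A7:A0 */
	out[1] = addr & 0xff;
}

int main(void)
{
	unsigned char b[2];

	ten_bit_addr_bytes(0x2a5, b);
	printf("%02x %02x\n", b[0], b[1]);	/* prints "f4 a5" */
	return 0;
}

With the old masks the first byte picked up A8:A7 instead of A9:A8 and the second byte dropped A7, so any target with A7 or A9 set was addressed incorrectly.
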
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc..03b61577888 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
593 i2c->adap.algo_data = i2c; 593 i2c->adap.algo_data = i2c;
594 i2c->adap.dev.parent = &pdev->dev; 594 i2c->adap.dev.parent = &pdev->dev;
595 595
596 mfp_set_groupg(&pdev->dev); 596 mfp_set_groupg(&pdev->dev, NULL);
597 597
598 clk_get_rate(i2c->clk); 598 clk_get_rate(i2c->clk);
599 599
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 131079a3e29..1e5606185b4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
539 client->dev.type = &i2c_client_type; 539 client->dev.type = &i2c_client_type;
540 client->dev.of_node = info->of_node; 540 client->dev.of_node = info->of_node;
541 541
542 /* For 10-bit clients, add an arbitrary offset to avoid collisions */
542 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), 543 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
543 client->addr); 544 client->addr | ((client->flags & I2C_CLIENT_TEN)
545 ? 0xa000 : 0));
544 status = device_register(&client->dev); 546 status = device_register(&client->dev);
545 if (status) 547 if (status)
546 goto out_err; 548 goto out_err;
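
The i2c-core hunk above keeps 10-bit clients from colliding with 7-bit clients at the same numeric address by OR-ing an arbitrary 0xa000 offset into the printed address. A tiny user-space sketch of the resulting device names (flag value copied from include/linux/i2c.h):

#include <stdio.h>

#define I2C_CLIENT_TEN	0x10	/* client uses a ten-bit chip address */

static void print_name(int adap, unsigned short addr, unsigned short flags)
{
	char name[16];

	/* same formula i2c_new_device() now uses for the device name */
	snprintf(name, sizeof(name), "%d-%04x", adap,
		 addr | ((flags & I2C_CLIENT_TEN) ? 0xa000 : 0));
	printf("%s\n", name);
}

int main(void)
{
	print_name(0, 0x50, 0);			/* "0-0050": 7-bit client  */
	print_name(0, 0x50, I2C_CLIENT_TEN);	/* "0-a050": 10-bit client */
	return 0;
}
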
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c90ce50b619..57a45ce84b2 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
579 return 0; 579 return 0;
580} 580}
581 581
582int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, 582static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
583 void *data) 583 void *data)
584{ 584{
585 struct device *dev = data; 585 struct device *dev = data;
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 67cbcfa3512..847553fd8b9 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer 2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator 3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
4 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * CYPRESS CY82C693 chipset IDE controller 6 * CYPRESS CY82C693 chipset IDE controller
7 * 7 *
@@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
90 u8 time_16, time_8; 90 u8 time_16, time_8;
91 91
92 /* select primary or secondary channel */ 92 /* select primary or secondary channel */
93 if (hwif->index > 0) { /* drive is on the secondary channel */ 93 if (drive->dn > 1) { /* drive is on the secondary channel */
94 dev = pci_get_slot(dev->bus, dev->devfn+1); 94 dev = pci_get_slot(dev->bus, dev->devfn+1);
95 if (!dev) { 95 if (!dev) {
96 printk(KERN_ERR "%s: tune_drive: " 96 printk(KERN_ERR "%s: tune_drive: "
@@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); 141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); 142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
143 } 143 }
144 if (hwif->index > 0) 144 if (drive->dn > 1)
145 pci_dev_put(dev); 145 pci_dev_put(dev);
146} 146}
147 147
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4a697a238e2..8716066a2f2 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { 521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
522 d.init_dma = icside_dma_init; 522 d.init_dma = icside_dma_init;
523 d.port_ops = &icside_v6_port_ops; 523 d.port_ops = &icside_v6_port_ops;
524 } else
524 d.dma_ops = NULL; 525 d.dma_ops = NULL;
525 }
526 526
527 ret = ide_host_register(host, &d, hws); 527 ret = ide_host_register(host, &d, hws);
528 if (ret) 528 if (ret)
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index b59d04c7205..1892e81fb00 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -331,7 +331,7 @@ static const struct ide_port_ops ich_port_ops = {
331 .udma_mask = udma, \ 331 .udma_mask = udma, \
332 } 332 }
333 333
334#define DECLARE_ICH_DEV(udma) \ 334#define DECLARE_ICH_DEV(mwdma, udma) \
335 { \ 335 { \
336 .name = DRV_NAME, \ 336 .name = DRV_NAME, \
337 .init_chipset = init_chipset_ich, \ 337 .init_chipset = init_chipset_ich, \
@@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = {
340 .port_ops = &ich_port_ops, \ 340 .port_ops = &ich_port_ops, \
341 .pio_mask = ATA_PIO4, \ 341 .pio_mask = ATA_PIO4, \
342 .swdma_mask = ATA_SWDMA2_ONLY, \ 342 .swdma_mask = ATA_SWDMA2_ONLY, \
343 .mwdma_mask = ATA_MWDMA12_ONLY, \ 343 .mwdma_mask = mwdma, \
344 .udma_mask = udma, \ 344 .udma_mask = udma, \
345 } 345 }
346 346
@@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
362 /* 2: PIIX4 */ 362 /* 2: PIIX4 */
363 DECLARE_PIIX_DEV(ATA_UDMA2), 363 DECLARE_PIIX_DEV(ATA_UDMA2),
364 /* 3: ICH0 */ 364 /* 3: ICH0 */
365 DECLARE_ICH_DEV(ATA_UDMA2), 365 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
366 /* 4: ICH */ 366 /* 4: ICH */
367 DECLARE_ICH_DEV(ATA_UDMA4), 367 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
368 /* 5: PIIX4 */ 368 /* 5: PIIX4 */
369 DECLARE_PIIX_DEV(ATA_UDMA4), 369 DECLARE_PIIX_DEV(ATA_UDMA4),
370 /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ 370 /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
371 DECLARE_ICH_DEV(ATA_UDMA5), 371 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
372 /* 7: ICH7/7-R, no MWDMA1 */
373 DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
372}; 374};
373 375
374/** 376/**
@@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
438#endif 440#endif
439 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 }, 441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
440 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 }, 442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 }, 443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 }, 444 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 }, 445 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
444 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 }, 446 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
445 { 0, }, 447 { 0, },
446}; 448};
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index e53a1b78378..281c9142634 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = {
113}; 113};
114MODULE_DEVICE_TABLE(pci, triflex_pci_tbl); 114MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
115 115
116#ifdef CONFIG_PM
117static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
118{
119 /*
120 * We must not disable or powerdown the device.
121 * APM bios refuses to suspend if IDE is not accessible.
122 */
123 pci_save_state(dev);
124 return 0;
125}
126#else
127#define triflex_ide_pci_suspend NULL
128#endif
129
116static struct pci_driver triflex_pci_driver = { 130static struct pci_driver triflex_pci_driver = {
117 .name = "TRIFLEX_IDE", 131 .name = "TRIFLEX_IDE",
118 .id_table = triflex_pci_tbl, 132 .id_table = triflex_pci_tbl,
119 .probe = triflex_init_one, 133 .probe = triflex_init_one,
120 .remove = ide_pci_remove, 134 .remove = ide_pci_remove,
121 .suspend = ide_pci_suspend, 135 .suspend = triflex_ide_pci_suspend,
122 .resume = ide_pci_resume, 136 .resume = ide_pci_resume,
123}; 137};
124 138
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd7..e9cf51b1343 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
216 216
217 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); 217 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
218 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 218 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
219 rcu_read_lock();
219 neigh_event_send(dst_get_neighbour(&rt->dst), NULL); 220 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
221 rcu_read_unlock();
220 ret = -ENODATA; 222 ret = -ENODATA;
221 if (neigh) 223 if (neigh)
222 goto release; 224 goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
274 goto put; 276 goto put;
275 } 277 }
276 278
279 rcu_read_lock();
277 neigh = dst_get_neighbour(dst); 280 neigh = dst_get_neighbour(dst);
278 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 281 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
279 if (neigh) 282 if (neigh)
280 neigh_event_send(neigh, NULL); 283 neigh_event_send(neigh, NULL);
281 ret = -ENODATA; 284 ret = -ENODATA;
282 goto put; 285 } else {
286 ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
283 } 287 }
284 288 rcu_read_unlock();
285 ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
286put: 289put:
287 dst_release(dst); 290 dst_release(dst);
288 return ret; 291 return ret;
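
The addr.c hunks above and the cxgb3/cxgb4/nes/ipoib hunks that follow all apply the same rule: dst_get_neighbour() returns an RCU-protected pointer, so the lookup and every dereference of the result must sit inside an rcu_read_lock()/rcu_read_unlock() pair. A minimal sketch of the pattern, assuming kernel context and a hypothetical caller that only needs the hardware address while under the lock:

#include <linux/string.h>
#include <linux/errno.h>
#include <net/dst.h>
#include <net/neighbour.h>

/* Sketch only: copy the neighbour's hardware address under the RCU
 * read lock and never touch the pointer after rcu_read_unlock().
 */
static int copy_neigh_ha(struct dst_entry *dst, u8 *ha, int len)
{
	struct neighbour *neigh;
	int ret = 0;

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh && (neigh->nud_state & NUD_VALID))
		memcpy(ha, neigh->ha, len);
	else
		ret = -ENODATA;
	rcu_read_unlock();

	return ret;
}

As in the hunks above, the sketch takes no reference on the neighbour; it only reads it inside the RCU critical section.
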
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e60..c88b12beef2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1375 goto reject; 1375 goto reject;
1376 } 1376 }
1377 dst = &rt->dst; 1377 dst = &rt->dst;
1378 rcu_read_lock();
1378 neigh = dst_get_neighbour(dst); 1379 neigh = dst_get_neighbour(dst);
1379 l2t = t3_l2t_get(tdev, neigh, neigh->dev); 1380 l2t = t3_l2t_get(tdev, neigh, neigh->dev);
1381 rcu_read_unlock();
1380 if (!l2t) { 1382 if (!l2t) {
1381 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1383 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1382 __func__); 1384 __func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1946 } 1948 }
1947 ep->dst = &rt->dst; 1949 ep->dst = &rt->dst;
1948 1950
1951 rcu_read_lock();
1949 neigh = dst_get_neighbour(ep->dst); 1952 neigh = dst_get_neighbour(ep->dst);
1950 1953
1951 /* get a l2t entry */ 1954 /* get a l2t entry */
1952 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); 1955 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
1956 rcu_read_unlock();
1953 if (!ep->l2t) { 1957 if (!ep->l2t) {
1954 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1958 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1955 err = -ENOMEM; 1959 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c55..0747004313a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
543 mpa->private_data_size = htons(ep->plen); 543 mpa->private_data_size = htons(ep->plen);
544 mpa->revision = mpa_rev_to_use; 544 mpa->revision = mpa_rev_to_use;
545 if (mpa_rev_to_use == 1) 545 if (mpa_rev_to_use == 1) {
546 ep->tried_with_mpa_v1 = 1; 546 ep->tried_with_mpa_v1 = 1;
547 ep->retry_with_mpa_v1 = 0;
548 }
547 549
548 if (mpa_rev_to_use == 2) { 550 if (mpa_rev_to_use == 2) {
549 mpa->private_data_size += 551 mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1594 goto reject; 1596 goto reject;
1595 } 1597 }
1596 dst = &rt->dst; 1598 dst = &rt->dst;
1599 rcu_read_lock();
1597 neigh = dst_get_neighbour(dst); 1600 neigh = dst_get_neighbour(dst);
1598 if (neigh->dev->flags & IFF_LOOPBACK) { 1601 if (neigh->dev->flags & IFF_LOOPBACK) {
1599 pdev = ip_dev_find(&init_net, peer_ip); 1602 pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1620 rss_qid = dev->rdev.lldi.rxq_ids[ 1623 rss_qid = dev->rdev.lldi.rxq_ids[
1621 cxgb4_port_idx(neigh->dev) * step]; 1624 cxgb4_port_idx(neigh->dev) * step];
1622 } 1625 }
1626 rcu_read_unlock();
1623 if (!l2t) { 1627 if (!l2t) {
1624 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1628 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1625 __func__); 1629 __func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1820 } 1824 }
1821 ep->dst = &rt->dst; 1825 ep->dst = &rt->dst;
1822 1826
1827 rcu_read_lock();
1823 neigh = dst_get_neighbour(ep->dst); 1828 neigh = dst_get_neighbour(ep->dst);
1824 1829
1825 /* get a l2t entry */ 1830 /* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1856 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1861 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1857 cxgb4_port_idx(neigh->dev) * step]; 1862 cxgb4_port_idx(neigh->dev) * step];
1858 } 1863 }
1864 rcu_read_unlock();
1859 if (!ep->l2t) { 1865 if (!ep->l2t) {
1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1866 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1861 err = -ENOMEM; 1867 err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2301 } 2307 }
2302 ep->dst = &rt->dst; 2308 ep->dst = &rt->dst;
2303 2309
2310 rcu_read_lock();
2304 neigh = dst_get_neighbour(ep->dst); 2311 neigh = dst_get_neighbour(ep->dst);
2305 2312
2306 /* get a l2t entry */ 2313 /* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2339 ep->retry_with_mpa_v1 = 0; 2346 ep->retry_with_mpa_v1 = 0;
2340 ep->tried_with_mpa_v1 = 0; 2347 ep->tried_with_mpa_v1 = 0;
2341 } 2348 }
2349 rcu_read_unlock();
2342 if (!ep->l2t) { 2350 if (!ep->l2t) {
2343 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2351 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2344 err = -ENOMEM; 2352 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e..0f1607c8325 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
311 while (ptr != cq->sw_pidx) { 311 while (ptr != cq->sw_pidx) {
312 cqe = &cq->sw_queue[ptr]; 312 cqe = &cq->sw_queue[ptr];
313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && 313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
314 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq)) 314 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
315 (*count)++; 315 (*count)++;
316 if (++ptr == cq->size) 316 if (++ptr == cq->size)
317 ptr = 0; 317 ptr = 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a3..0a52d72371e 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1377 neigh_release(neigh); 1377 neigh_release(neigh);
1378 } 1378 }
1379 1379
1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) 1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
1381 rcu_read_lock();
1381 neigh_event_send(dst_get_neighbour(&rt->dst), NULL); 1382 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
1382 1383 rcu_read_unlock();
1384 }
1383 ip_rt_put(rt); 1385 ip_rt_put(rt);
1384 return rc; 1386 return rc;
1385} 1387}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95d..1d5895941e1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2307 SYM_LSB(IBCCtrlA_0, MaxPktLen); 2307 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ 2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2309 2309
2310 /* initially come up waiting for TS1, without sending anything. */
2311 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2312 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2313
2314 ppd->cpspec->ibcctrl_a = val;
2315 /* 2310 /*
2316 * Reset the PCS interface to the serdes (and also ibc, which is still 2311 * Reset the PCS interface to the serdes (and also ibc, which is still
2317 * in reset from above). Writes new value of ibcctrl_a as last step. 2312 * in reset from above). Writes new value of ibcctrl_a as last step.
2318 */ 2313 */
2319 qib_7322_mini_pcs_reset(ppd); 2314 qib_7322_mini_pcs_reset(ppd);
2320 qib_write_kreg(dd, kr_scratch, 0ULL);
2321 /* clear the linkinit cmds */
2322 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2323 2315
2324 if (!ppd->cpspec->ibcctrl_b) { 2316 if (!ppd->cpspec->ibcctrl_b) {
2325 unsigned lse = ppd->link_speed_enabled; 2317 unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2385 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); 2377 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386 set_vls(ppd); 2378 set_vls(ppd);
2387 2379
2380 /* initially come up DISABLED, without sending anything. */
2381 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2382 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2383 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2384 qib_write_kreg(dd, kr_scratch, 0ULL);
2385 /* clear the linkinit cmds */
2386 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2387
2388 /* be paranoid against later code motion, etc. */ 2388 /* be paranoid against later code motion, etc. */
2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); 2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); 2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5241 off */ 5241 off */
5242 if (ppd->dd->flags & QIB_HAS_QSFP) { 5242 if (ppd->dd->flags & QIB_HAS_QSFP) {
5243 qd->t_insert = get_jiffies_64(); 5243 qd->t_insert = get_jiffies_64();
5244 schedule_work(&qd->work); 5244 queue_work(ib_wq, &qd->work);
5245 } 5245 }
5246 spin_lock_irqsave(&ppd->sdma_lock, flags); 5246 spin_lock_irqsave(&ppd->sdma_lock, flags);
5247 if (__qib_sdma_running(ppd)) 5247 if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f..fa71b1e666c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 480 udelay(20); /* Generous RST dwell */
481 481
482 dd->f_gpio_mod(dd, mask, mask, mask); 482 dd->f_gpio_mod(dd, mask, mask, mask);
483 /* Spec says module can take up to two seconds! */
484 mask = QSFP_GPIO_MOD_PRS_N;
485 if (qd->ppd->hw_pidx)
486 mask <<= QSFP_GPIO_PORT2_SHIFT;
487
488 /* Do not try to wait here. Better to let event handle it */
489 if (!qib_qsfp_mod_present(qd->ppd))
490 goto bail;
491 /* We see a module, but it may be unwise to look yet. Just schedule */
492 qd->t_insert = get_jiffies_64();
493 queue_work(ib_wq, &qd->work);
494bail:
495 return; 483 return;
496} 484}
497 485
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997..4115be54ba3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
57 struct ib_pd *pd, struct ib_ah_attr *attr) 57 struct ib_pd *pd, struct ib_ah_attr *attr)
58{ 58{
59 struct ipoib_ah *ah; 59 struct ipoib_ah *ah;
60 struct ib_ah *vah;
60 61
61 ah = kmalloc(sizeof *ah, GFP_KERNEL); 62 ah = kmalloc(sizeof *ah, GFP_KERNEL);
62 if (!ah) 63 if (!ah)
63 return NULL; 64 return ERR_PTR(-ENOMEM);
64 65
65 ah->dev = dev; 66 ah->dev = dev;
66 ah->last_send = 0; 67 ah->last_send = 0;
67 kref_init(&ah->ref); 68 kref_init(&ah->ref);
68 69
69 ah->ah = ib_create_ah(pd, attr); 70 vah = ib_create_ah(pd, attr);
70 if (IS_ERR(ah->ah)) { 71 if (IS_ERR(vah)) {
71 kfree(ah); 72 kfree(ah);
72 ah = NULL; 73 ah = (struct ipoib_ah *)vah;
73 } else 74 } else {
75 ah->ah = vah;
74 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); 76 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
77 }
75 78
76 return ah; 79 return ah;
77} 80}
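
ipoib_create_ah() now reports failures through the pointer itself instead of returning NULL, which is why the call sites below switch to IS_ERR()/IS_ERR_OR_NULL(). A minimal sketch of that convention with made-up names (the helpers live in <linux/err.h>):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

/* producer: encode the errno in the returned pointer */
static struct foo *foo_create(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	f->x = 0;
	return f;
}

/* consumer: check with IS_ERR() and recover the errno with PTR_ERR() */
static int foo_use(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);
	kfree(f);
	return 0;
}
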
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b600023..83695b48b01 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
432 432
433 spin_lock_irqsave(&priv->lock, flags); 433 spin_lock_irqsave(&priv->lock, flags);
434 434
435 if (ah) { 435 if (!IS_ERR_OR_NULL(ah)) {
436 path->pathrec = *pathrec; 436 path->pathrec = *pathrec;
437 437
438 old_ah = path->ah; 438 old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
555 return 0; 555 return 0;
556} 556}
557 557
558/* called with rcu_read_lock */
558static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 559static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
559{ 560{
560 struct ipoib_dev_priv *priv = netdev_priv(dev); 561 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
636 spin_unlock_irqrestore(&priv->lock, flags); 637 spin_unlock_irqrestore(&priv->lock, flags);
637} 638}
638 639
640/* called with rcu_read_lock */
639static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 641static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
640{ 642{
641 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 643 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
720 struct neighbour *n = NULL; 722 struct neighbour *n = NULL;
721 unsigned long flags; 723 unsigned long flags;
722 724
725 rcu_read_lock();
723 if (likely(skb_dst(skb))) 726 if (likely(skb_dst(skb)))
724 n = dst_get_neighbour(skb_dst(skb)); 727 n = dst_get_neighbour(skb_dst(skb));
725 728
726 if (likely(n)) { 729 if (likely(n)) {
727 if (unlikely(!*to_ipoib_neigh(n))) { 730 if (unlikely(!*to_ipoib_neigh(n))) {
728 ipoib_path_lookup(skb, dev); 731 ipoib_path_lookup(skb, dev);
729 return NETDEV_TX_OK; 732 goto unlock;
730 } 733 }
731 734
732 neigh = *to_ipoib_neigh(n); 735 neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
749 ipoib_neigh_free(dev, neigh); 752 ipoib_neigh_free(dev, neigh);
750 spin_unlock_irqrestore(&priv->lock, flags); 753 spin_unlock_irqrestore(&priv->lock, flags);
751 ipoib_path_lookup(skb, dev); 754 ipoib_path_lookup(skb, dev);
752 return NETDEV_TX_OK; 755 goto unlock;
753 } 756 }
754 757
755 if (ipoib_cm_get(neigh)) { 758 if (ipoib_cm_get(neigh)) {
756 if (ipoib_cm_up(neigh)) { 759 if (ipoib_cm_up(neigh)) {
757 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 760 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
758 return NETDEV_TX_OK; 761 goto unlock;
759 } 762 }
760 } else if (neigh->ah) { 763 } else if (neigh->ah) {
761 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); 764 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
762 return NETDEV_TX_OK; 765 goto unlock;
763 } 766 }
764 767
765 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 768 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
793 phdr->hwaddr + 4); 796 phdr->hwaddr + 4);
794 dev_kfree_skb_any(skb); 797 dev_kfree_skb_any(skb);
795 ++dev->stats.tx_dropped; 798 ++dev->stats.tx_dropped;
796 return NETDEV_TX_OK; 799 goto unlock;
797 } 800 }
798 801
799 unicast_arp_send(skb, dev, phdr); 802 unicast_arp_send(skb, dev, phdr);
800 } 803 }
801 } 804 }
802 805unlock:
806 rcu_read_unlock();
803 return NETDEV_TX_OK; 807 return NETDEV_TX_OK;
804} 808}
805 809
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
837 dst = skb_dst(skb); 841 dst = skb_dst(skb);
838 n = NULL; 842 n = NULL;
839 if (dst) 843 if (dst)
840 n = dst_get_neighbour(dst); 844 n = dst_get_neighbour_raw(dst);
841 if ((!dst || !n) && daddr) { 845 if ((!dst || !n) && daddr) {
842 struct ipoib_pseudoheader *phdr = 846 struct ipoib_pseudoheader *phdr =
843 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 847 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a9768635..873bff97e69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
240 av.grh.dgid = mcast->mcmember.mgid; 240 av.grh.dgid = mcast->mcmember.mgid;
241 241
242 ah = ipoib_create_ah(dev, priv->pd, &av); 242 ah = ipoib_create_ah(dev, priv->pd, &av);
243 if (!ah) { 243 if (IS_ERR(ah)) {
244 ipoib_warn(priv, "ib_address_create failed\n"); 244 ipoib_warn(priv, "ib_address_create failed %ld\n",
245 -PTR_ERR(ah));
246 /* use original error */
247 return PTR_ERR(ah);
245 } else { 248 } else {
246 spin_lock_irq(&priv->lock); 249 spin_lock_irq(&priv->lock);
247 mcast->ah = ah; 250 mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
266 269
267 skb->dev = dev; 270 skb->dev = dev;
268 if (dst) 271 if (dst)
269 n = dst_get_neighbour(dst); 272 n = dst_get_neighbour_raw(dst);
270 if (!dst || !n) { 273 if (!dst || !n) {
271 /* put pseudoheader back on for next time */ 274 /* put pseudoheader back on for next time */
272 skb_push(skb, sizeof (struct ipoib_pseudoheader)); 275 skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
722 if (mcast && mcast->ah) { 725 if (mcast && mcast->ah) {
723 struct dst_entry *dst = skb_dst(skb); 726 struct dst_entry *dst = skb_dst(skb);
724 struct neighbour *n = NULL; 727 struct neighbour *n = NULL;
728
729 rcu_read_lock();
725 if (dst) 730 if (dst)
726 n = dst_get_neighbour(dst); 731 n = dst_get_neighbour(dst);
727 if (n && !*to_ipoib_neigh(n)) { 732 if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
734 list_add_tail(&neigh->list, &mcast->neigh_list); 739 list_add_tail(&neigh->list, &mcast->neigh_list);
735 } 740 }
736 } 741 }
737 742 rcu_read_unlock();
738 spin_unlock_irqrestore(&priv->lock, flags); 743 spin_unlock_irqrestore(&priv->lock, flags);
739 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 744 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
740 return; 745 return;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 09b93b11a27..e2a9867c19d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
1210 */ 1210 */
1211static int elantech_set_properties(struct elantech_data *etd) 1211static int elantech_set_properties(struct elantech_data *etd)
1212{ 1212{
1213 /* This represents the version of IC body. */
1213 int ver = (etd->fw_version & 0x0f0000) >> 16; 1214 int ver = (etd->fw_version & 0x0f0000) >> 16;
1214 1215
1216 /* Early version of Elan touchpads doesn't obey the rule. */
1215 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) 1217 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
1216 etd->hw_version = 1; 1218 etd->hw_version = 1;
1217 else if (etd->fw_version < 0x150600) 1219 else {
1218 etd->hw_version = 2; 1220 switch (ver) {
1219 else if (ver == 5) 1221 case 2:
1220 etd->hw_version = 3; 1222 case 4:
1221 else if (ver == 6) 1223 etd->hw_version = 2;
1222 etd->hw_version = 4; 1224 break;
1223 else 1225 case 5:
1224 return -1; 1226 etd->hw_version = 3;
1227 break;
1228 case 6:
1229 etd->hw_version = 4;
1230 break;
1231 default:
1232 return -1;
1233 }
1234 }
1225 1235
1226 /* 1236 /*
1227 * Turn on packet checking by default. 1237 * Turn on packet checking by default.
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 4b2a42f9f0b..d4d08bd9205 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/serio.h> 25#include <linux/serio.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27 28
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <plat/board-ams-delta.h> 30#include <plat/board-ams-delta.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index bb9f5d31f0d..b4cfc6c8be8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
432 }, 432 },
433 }, 433 },
434 {
435 /* Newer HP Pavilion dv4 models */
436 .matches = {
437 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
438 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
439 },
440 },
434 { } 441 { }
435}; 442};
436 443
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
560 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 567 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
561 }, 568 },
562 }, 569 },
570 {
571 /* Newer HP Pavilion dv4 models */
572 .matches = {
573 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
574 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
575 },
576 },
563 { } 577 { }
564}; 578};
565 579
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 661b692573e..6d5628bb060 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev,
270 del_timer_sync(&led_cdev->blink_timer); 270 del_timer_sync(&led_cdev->blink_timer);
271 271
272 if (led_cdev->blink_set && 272 if (led_cdev->blink_set &&
273 !led_cdev->blink_set(led_cdev, delay_on, delay_off)) { 273 !led_cdev->blink_set(led_cdev, delay_on, delay_off))
274 led_cdev->blink_delay_on = *delay_on;
275 led_cdev->blink_delay_off = *delay_off;
276 return; 274 return;
277 }
278 275
279 /* blink with 1 Hz as default if nothing specified */ 276 /* blink with 1 Hz as default if nothing specified */
280 if (!*delay_on && !*delay_off) 277 if (!*delay_on && !*delay_off)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593878d66d..5664696f2d3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -472,7 +472,7 @@ config BMP085
472 module will be called bmp085. 472 module will be called bmp085.
473 473
474config PCH_PHUB 474config PCH_PHUB
475 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB" 475 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
476 depends on PCI 476 depends on PCI
477 help 477 help
478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of 478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
480 processor. The Topcliff has MAC address and Option ROM data in SROM. 480 processor. The Topcliff has MAC address and Option ROM data in SROM.
481 This driver can access MAC address and Option ROM data in SROM. 481 This driver can access MAC address and Option ROM data in SROM.
482 482
483 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 483 This driver also can be used for LAPIS Semiconductor's IOH,
484 Output Hub), ML7213 and ML7223. 484 ML7213/ML7223/ML7831.
485 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 485 ML7213 which is for IVI(In-Vehicle Infotainment) use.
486 for MP(Media Phone) use. 486 ML7223 IOH is for MP(Media Phone) use.
487 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 487 ML7831 IOH is for general purpose use.
488 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 488 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
489 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
489 490
490 To compile this driver as a module, choose M here: the module will 491 To compile this driver as a module, choose M here: the module will
491 be called pch_phub. 492 be called pch_phub.
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index a662f5987b6..82b2cb77ae1 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -100,7 +100,7 @@ enum dpot_devid {
100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), 100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
102 BRDAC0, 7, 28), 102 BRDAC0, 7, 28),
103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
104 BRDAC0, 8, 29), 104 BRDAC0, 8, 29),
105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, 105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
106 BRDAC0 | BRDAC1, 8, 30), 106 BRDAC0 | BRDAC1, 8, 30),
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 7ce6065dc20..eb5cd28bc6d 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op)
945/* CTL-CPLD Version Register */ 945/* CTL-CPLD Version Register */
946#define CTL_CPLD_VERSION 0x2000 946#define CTL_CPLD_VERSION 0x2000
947 947
948static int fpga_of_probe(struct platform_device *op, 948static int fpga_of_probe(struct platform_device *op)
949 const struct of_device_id *match)
950{ 949{
951 struct device_node *of_node = op->dev.of_node; 950 struct device_node *of_node = op->dev.of_node;
952 struct device *this_device; 951 struct device *this_device;
@@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = {
1107 {}, 1106 {},
1108}; 1107};
1109 1108
1110static struct of_platform_driver fpga_of_driver = { 1109static struct platform_driver fpga_of_driver = {
1111 .probe = fpga_of_probe, 1110 .probe = fpga_of_probe,
1112 .remove = fpga_of_remove, 1111 .remove = fpga_of_remove,
1113 .driver = { 1112 .driver = {
@@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = {
1124static int __init fpga_init(void) 1123static int __init fpga_init(void)
1125{ 1124{
1126 led_trigger_register_simple("fpga", &ledtrig_fpga); 1125 led_trigger_register_simple("fpga", &ledtrig_fpga);
1127 return of_register_platform_driver(&fpga_of_driver); 1126 return platform_driver_register(&fpga_of_driver);
1128} 1127}
1129 1128
1130static void __exit fpga_exit(void) 1129static void __exit fpga_exit(void)
1131{ 1130{
1132 of_unregister_platform_driver(&fpga_of_driver); 1131 platform_driver_unregister(&fpga_of_driver);
1133 led_trigger_unregister_simple(ledtrig_fpga); 1132 led_trigger_unregister_simple(ledtrig_fpga);
1134} 1133}
1135 1134
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 3965821fef1..14e974b2a78 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data)
1249 return true; 1249 return true;
1250} 1250}
1251 1251
1252static int data_of_probe(struct platform_device *op, 1252static int data_of_probe(struct platform_device *op)
1253 const struct of_device_id *match)
1254{ 1253{
1255 struct device_node *of_node = op->dev.of_node; 1254 struct device_node *of_node = op->dev.of_node;
1256 struct device *this_device; 1255 struct device *this_device;
@@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = {
1401 {}, 1400 {},
1402}; 1401};
1403 1402
1404static struct of_platform_driver data_of_driver = { 1403static struct platform_driver data_of_driver = {
1405 .probe = data_of_probe, 1404 .probe = data_of_probe,
1406 .remove = data_of_remove, 1405 .remove = data_of_remove,
1407 .driver = { 1406 .driver = {
@@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = {
1417 1416
1418static int __init data_init(void) 1417static int __init data_init(void)
1419{ 1418{
1420 return of_register_platform_driver(&data_of_driver); 1419 return platform_driver_register(&data_of_driver);
1421} 1420}
1422 1421
1423static void __exit data_exit(void) 1422static void __exit data_exit(void)
1424{ 1423{
1425 of_unregister_platform_driver(&data_of_driver); 1424 platform_driver_unregister(&data_of_driver);
1426} 1425}
1427 1426
1428MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); 1427MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
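
Both carma hunks make the same conversion: struct of_platform_driver is gone, the driver registers as an ordinary platform_driver with an of_match_table, and probe() loses its struct of_device_id argument. A skeleton of the converted shape, with placeholder names and compatible string rather than the real carma ones:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_of_probe(struct platform_device *op)
{
	struct device_node *of_node = op->dev.of_node;

	/* match data, if needed, now comes from of_match_device() */
	(void)of_node;
	return 0;
}

static int example_of_remove(struct platform_device *op)
{
	return 0;
}

static struct of_device_id example_of_match[] = {
	{ .compatible = "example,device", },	/* placeholder */
	{},
};

static struct platform_driver example_of_driver = {
	.probe	= example_of_probe,
	.remove	= example_of_remove,
	.driver	= {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,
	},
};

module_platform_driver(example_of_driver);

The carma drivers keep their explicit init/exit functions because they also register an LED trigger, but the registration calls change to platform_driver_register()/platform_driver_unregister() exactly as shown in the hunks.
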
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 26cf12ca7f5..701edf65897 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -85,7 +85,7 @@ config EEPROM_93XX46
85 85
86config EEPROM_DIGSY_MTC_CFG 86config EEPROM_DIGSY_MTC_CFG
87 bool "DigsyMTC display configuration EEPROMs device" 87 bool "DigsyMTC display configuration EEPROMs device"
88 depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO 88 depends on GPIO_MPC5200 && SPI_GPIO
89 help 89 help
90 This option enables access to display configuration EEPROMs 90 This option enables access to display configuration EEPROMs
91 on digsy_mtc board. You have to additionally select Microwire 91 on digsy_mtc board. You have to additionally select Microwire
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index dee33addcae..10fc4785dba 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -41,10 +41,10 @@
41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset 41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
42 (Intel EG20T PCH)*/ 42 (Intel EG20T PCH)*/
43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address 43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
44 offset(OKI SEMICONDUCTOR ML7213) 44 offset(LAPIS Semicon ML7213)
45 */ 45 */
46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address 46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
47 offset(OKI SEMICONDUCTOR ML7223) 47 offset(LAPIS Semicon ML7223)
48 */ 48 */
49 49
50/* MAX number of INT_REDUCE_CONTROL registers */ 50/* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ 73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ 74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
75 75
76/* Macros for ML7831 */
77#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
78
76/* SROM ACCESS Macro */ 79/* SROM ACCESS Macro */
77#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) 80#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
78 81
@@ -115,6 +118,7 @@
115 * @pch_mac_start_address: MAC address area start address 118 * @pch_mac_start_address: MAC address area start address
116 * @pch_opt_rom_start_address: Option ROM start address 119 * @pch_opt_rom_start_address: Option ROM start address
117 * @ioh_type: Save IOH type 120 * @ioh_type: Save IOH type
121 * @pdev: pointer to pci device struct
118 */ 122 */
119struct pch_phub_reg { 123struct pch_phub_reg {
120 u32 phub_id_reg; 124 u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
136 u32 pch_mac_start_address; 140 u32 pch_mac_start_address;
137 u32 pch_opt_rom_start_address; 141 u32 pch_opt_rom_start_address;
138 int ioh_type; 142 int ioh_type;
143 struct pci_dev *pdev;
139}; 144};
140 145
141/* SROM SPEC for MAC address assignment offset */ 146/* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
471 int retval; 476 int retval;
472 int i; 477 int i;
473 478
474 if (chip->ioh_type == 1) /* EG20T */ 479 if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
475 retval = pch_phub_gbe_serial_rom_conf(chip); 480 retval = pch_phub_gbe_serial_rom_conf(chip);
476 else /* ML7223 */ 481 else /* ML7223 */
477 retval = pch_phub_gbe_serial_rom_conf_mp(chip); 482 retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
498 unsigned int orom_size; 503 unsigned int orom_size;
499 int ret; 504 int ret;
500 int err; 505 int err;
506 ssize_t rom_size;
501 507
502 struct pch_phub_reg *chip = 508 struct pch_phub_reg *chip =
503 dev_get_drvdata(container_of(kobj, struct device, kobj)); 509 dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
509 } 515 }
510 516
511 /* Get Rom signature */ 517 /* Get Rom signature */
518 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
519 if (!chip->pch_phub_extrom_base_address)
520 goto exrom_map_err;
521
512 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, 522 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
513 (unsigned char *)&rom_signature); 523 (unsigned char *)&rom_signature);
514 rom_signature &= 0xff; 524 rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
539 goto return_err; 549 goto return_err;
540 } 550 }
541return_ok: 551return_ok:
552 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
542 mutex_unlock(&pch_phub_mutex); 553 mutex_unlock(&pch_phub_mutex);
543 return addr_offset; 554 return addr_offset;
544 555
545return_err: 556return_err:
557 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
558exrom_map_err:
546 mutex_unlock(&pch_phub_mutex); 559 mutex_unlock(&pch_phub_mutex);
547return_err_nomutex: 560return_err_nomutex:
548 return err; 561 return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
555 int err; 568 int err;
556 unsigned int addr_offset; 569 unsigned int addr_offset;
557 int ret; 570 int ret;
571 ssize_t rom_size;
558 struct pch_phub_reg *chip = 572 struct pch_phub_reg *chip =
559 dev_get_drvdata(container_of(kobj, struct device, kobj)); 573 dev_get_drvdata(container_of(kobj, struct device, kobj));
560 574
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
571 goto return_ok; 585 goto return_ok;
572 } 586 }
573 587
588 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
589 if (!chip->pch_phub_extrom_base_address) {
590 err = -ENOMEM;
591 goto exrom_map_err;
592 }
593
574 for (addr_offset = 0; addr_offset < count; addr_offset++) { 594 for (addr_offset = 0; addr_offset < count; addr_offset++) {
575 if (PCH_PHUB_OROM_SIZE < off + addr_offset) 595 if (PCH_PHUB_OROM_SIZE < off + addr_offset)
576 goto return_ok; 596 goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
585 } 605 }
586 606
587return_ok: 607return_ok:
608 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
588 mutex_unlock(&pch_phub_mutex); 609 mutex_unlock(&pch_phub_mutex);
589 return addr_offset; 610 return addr_offset;
590 611
591return_err: 612return_err:
613 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
614
615exrom_map_err:
592 mutex_unlock(&pch_phub_mutex); 616 mutex_unlock(&pch_phub_mutex);
593 return err; 617 return err;
594} 618}
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
598{ 622{
599 u8 mac[8]; 623 u8 mac[8];
600 struct pch_phub_reg *chip = dev_get_drvdata(dev); 624 struct pch_phub_reg *chip = dev_get_drvdata(dev);
625 ssize_t rom_size;
626
627 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
628 if (!chip->pch_phub_extrom_base_address)
629 return -ENOMEM;
601 630
602 pch_phub_read_gbe_mac_addr(chip, mac); 631 pch_phub_read_gbe_mac_addr(chip, mac);
632 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
603 633
604 return sprintf(buf, "%pM\n", mac); 634 return sprintf(buf, "%pM\n", mac);
605} 635}
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
608 const char *buf, size_t count) 638 const char *buf, size_t count)
609{ 639{
610 u8 mac[6]; 640 u8 mac[6];
641 ssize_t rom_size;
611 struct pch_phub_reg *chip = dev_get_drvdata(dev); 642 struct pch_phub_reg *chip = dev_get_drvdata(dev);
612 643
613 if (count != 18) 644 if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
617 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], 648 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
618 (u32 *)&mac[4], (u32 *)&mac[5]); 649 (u32 *)&mac[4], (u32 *)&mac[5]);
619 650
651 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
652 if (!chip->pch_phub_extrom_base_address)
653 return -ENOMEM;
654
620 pch_phub_write_gbe_mac_addr(chip, mac); 655 pch_phub_write_gbe_mac_addr(chip, mac);
656 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
621 657
622 return count; 658 return count;
623} 659}
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
640 int retval; 676 int retval;
641 677
642 int ret; 678 int ret;
643 ssize_t rom_size;
644 struct pch_phub_reg *chip; 679 struct pch_phub_reg *chip;
645 680
646 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); 681 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
677 "in pch_phub_base_address variable is %p\n", __func__, 712 "in pch_phub_base_address variable is %p\n", __func__,
678 chip->pch_phub_base_address); 713 chip->pch_phub_base_address);
679 714
680 if (id->driver_data != 3) { 715 chip->pdev = pdev; /* Save pci device struct */
681 chip->pch_phub_extrom_base_address =\
682 pci_map_rom(pdev, &rom_size);
683 if (chip->pch_phub_extrom_base_address == 0) {
684 dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
685 ret = -ENOMEM;
686 goto err_pci_map;
687 }
688 dev_dbg(&pdev->dev, "%s : "
689 "pci_map_rom SUCCESS and value in "
690 "pch_phub_extrom_base_address variable is %p\n",
691 __func__, chip->pch_phub_extrom_base_address);
692 }
693 716
694 if (id->driver_data == 1) { /* EG20T PCH */ 717 if (id->driver_data == 1) { /* EG20T PCH */
695 const char *board_name; 718 const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
763 chip->pch_opt_rom_start_address =\ 786 chip->pch_opt_rom_start_address =\
764 PCH_PHUB_ROM_START_ADDR_ML7223; 787 PCH_PHUB_ROM_START_ADDR_ML7223;
765 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; 788 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
789 } else if (id->driver_data == 5) { /* ML7831 */
790 retval = sysfs_create_file(&pdev->dev.kobj,
791 &dev_attr_pch_mac.attr);
792 if (retval)
793 goto err_sysfs_create;
794
795 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
796 if (retval)
797 goto exit_bin_attr;
798
 799 /* set the prefetch value */
800 iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
801 /* set the interrupt delay value */
802 iowrite32(0x25, chip->pch_phub_base_address + 0x44);
803 chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
804 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
766 } 805 }
767 806
768 chip->ioh_type = id->driver_data; 807 chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
773 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 812 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
774 813
775err_sysfs_create: 814err_sysfs_create:
776 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
777err_pci_map:
778 pci_iounmap(pdev, chip->pch_phub_base_address); 815 pci_iounmap(pdev, chip->pch_phub_base_address);
779err_pci_iomap: 816err_pci_iomap:
780 pci_release_regions(pdev); 817 pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
792 829
793 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 830 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
794 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); 831 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
795 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
796 pci_iounmap(pdev, chip->pch_phub_base_address); 832 pci_iounmap(pdev, chip->pch_phub_base_address);
797 pci_release_regions(pdev); 833 pci_release_regions(pdev);
798 pci_disable_device(pdev); 834 pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
847 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, 883 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
848 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, 884 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
849 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, 885 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
886 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
850 { } 887 { }
851}; 888};
852MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); 889MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
873module_init(pch_phub_pci_init); 910module_init(pch_phub_pci_init);
874module_exit(pch_phub_pci_exit); 911module_exit(pch_phub_pci_exit);
875 912
876MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"); 913MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
877MODULE_LICENSE("GPL"); 914MODULE_LICENSE("GPL");
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index cfbddbef11d..43d073bc1d9 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
903} 903}
904module_exit(spear_pcie_gadget_exit); 904module_exit(spear_pcie_gadget_exit);
905 905
906MODULE_ALIAS("pcie-gadget-spear"); 906MODULE_ALIAS("platform:pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand"); 907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL"); 908MODULE_LICENSE("GPL");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 583f66cd5bb..654a5e94e0e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -245,6 +245,8 @@ source "drivers/net/ethernet/Kconfig"
245 245
246source "drivers/net/fddi/Kconfig" 246source "drivers/net/fddi/Kconfig"
247 247
248source "drivers/net/hippi/Kconfig"
249
248config NET_SB1000 250config NET_SB1000
249 tristate "General Instruments Surfboard 1000" 251 tristate "General Instruments Surfboard 1000"
250 depends on PNP 252 depends on PNP
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5a20804fdec..4ef7e2fd9fe 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d,
319 goto out; 319 goto out;
320 } 320 }
321 321
322 if (bond->slave_cnt > 0) {
323 pr_err("unable to update mode of %s because it has slaves.\n",
324 bond->dev->name);
325 ret = -EPERM;
326 goto out;
327 }
328
322 new_value = bond_parse_parm(buf, bond_mode_tbl); 329 new_value = bond_parse_parm(buf, bond_mode_tbl);
323 if (new_value < 0) { 330 if (new_value < 0) {
324 pr_err("%s: Ignoring invalid mode value %.*s.\n", 331 pr_err("%s: Ignoring invalid mode value %.*s.\n",
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6486ab8c8fc..2f6361e949f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10548,33 +10548,38 @@ do { \
10548 10548
10549int bnx2x_init_firmware(struct bnx2x *bp) 10549int bnx2x_init_firmware(struct bnx2x *bp)
10550{ 10550{
10551 const char *fw_file_name;
10552 struct bnx2x_fw_file_hdr *fw_hdr; 10551 struct bnx2x_fw_file_hdr *fw_hdr;
10553 int rc; 10552 int rc;
10554 10553
10555 if (CHIP_IS_E1(bp))
10556 fw_file_name = FW_FILE_NAME_E1;
10557 else if (CHIP_IS_E1H(bp))
10558 fw_file_name = FW_FILE_NAME_E1H;
10559 else if (!CHIP_IS_E1x(bp))
10560 fw_file_name = FW_FILE_NAME_E2;
10561 else {
10562 BNX2X_ERR("Unsupported chip revision\n");
10563 return -EINVAL;
10564 }
10565 10554
10566 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 10555 if (!bp->firmware) {
10556 const char *fw_file_name;
10567 10557
10568 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 10558 if (CHIP_IS_E1(bp))
10569 if (rc) { 10559 fw_file_name = FW_FILE_NAME_E1;
10570 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); 10560 else if (CHIP_IS_E1H(bp))
10571 goto request_firmware_exit; 10561 fw_file_name = FW_FILE_NAME_E1H;
10572 } 10562 else if (!CHIP_IS_E1x(bp))
10563 fw_file_name = FW_FILE_NAME_E2;
10564 else {
10565 BNX2X_ERR("Unsupported chip revision\n");
10566 return -EINVAL;
10567 }
10568 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
10573 10569
10574 rc = bnx2x_check_firmware(bp); 10570 rc = request_firmware(&bp->firmware, fw_file_name,
10575 if (rc) { 10571 &bp->pdev->dev);
10576 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 10572 if (rc) {
10577 goto request_firmware_exit; 10573 BNX2X_ERR("Can't load firmware file %s\n",
10574 fw_file_name);
10575 goto request_firmware_exit;
10576 }
10577
10578 rc = bnx2x_check_firmware(bp);
10579 if (rc) {
10580 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
10581 goto request_firmware_exit;
10582 }
10578 } 10583 }
10579 10584
10580 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 10585 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10630,6 +10635,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
10630 kfree(bp->init_ops); 10635 kfree(bp->init_ops);
10631 kfree(bp->init_data); 10636 kfree(bp->init_data);
10632 release_firmware(bp->firmware); 10637 release_firmware(bp->firmware);
10638 bp->firmware = NULL;
10633} 10639}
10634 10640
10635 10641
@@ -10925,6 +10931,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10925 if (bp->doorbells) 10931 if (bp->doorbells)
10926 iounmap(bp->doorbells); 10932 iounmap(bp->doorbells);
10927 10933
10934 bnx2x_release_firmware(bp);
10935
10928 bnx2x_free_mem_bp(bp); 10936 bnx2x_free_mem_bp(bp);
10929 10937
10930 free_netdev(dev); 10938 free_netdev(dev);
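
In bnx2x_init_firmware() above, the firmware image is now requested only while bp->firmware is still NULL, and bnx2x_release_firmware() resets the pointer so a later re-initialisation requests it again; the net effect is that the image is fetched once and reused across restarts until the device is removed. A reduced sketch of this request-once pattern, assuming the standard request_firmware()/release_firmware() API (struct my_dev and the my_* names are placeholders):

#include <linux/firmware.h>
#include <linux/device.h>

struct my_dev {
        struct device *dev;
        const struct firmware *fw;      /* NULL until the first successful request */
};

static int my_init_firmware(struct my_dev *md, const char *fw_name)
{
        int rc;

        if (!md->fw) {                  /* hit the filesystem only once */
                rc = request_firmware(&md->fw, fw_name, md->dev);
                if (rc)
                        return rc;
        }

        /* md->fw->data and md->fw->size can now be parsed on every init */
        return 0;
}

static void my_release_firmware(struct my_dev *md)
{
        release_firmware(md->fw);
        md->fw = NULL;                  /* so a later init requests the file again */
}
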
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0440425c83d..14517691f8d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5380,7 +5380,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5380 rc = drv->init_fw(bp); 5380 rc = drv->init_fw(bp);
5381 if (rc) { 5381 if (rc) {
5382 BNX2X_ERR("Error loading firmware\n"); 5382 BNX2X_ERR("Error loading firmware\n");
5383 goto fw_init_err; 5383 goto init_err;
5384 } 5384 }
5385 5385
5386 /* Handle the beginning of COMMON_XXX pases separatelly... */ 5386 /* Handle the beginning of COMMON_XXX pases separatelly... */
@@ -5388,25 +5388,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5389 rc = bnx2x_func_init_cmn_chip(bp, drv); 5389 rc = bnx2x_func_init_cmn_chip(bp, drv);
5390 if (rc) 5390 if (rc)
5391 goto init_hw_err; 5391 goto init_err;
5392 5392
5393 break; 5393 break;
5394 case FW_MSG_CODE_DRV_LOAD_COMMON: 5394 case FW_MSG_CODE_DRV_LOAD_COMMON:
5395 rc = bnx2x_func_init_cmn(bp, drv); 5395 rc = bnx2x_func_init_cmn(bp, drv);
5396 if (rc) 5396 if (rc)
5397 goto init_hw_err; 5397 goto init_err;
5398 5398
5399 break; 5399 break;
5400 case FW_MSG_CODE_DRV_LOAD_PORT: 5400 case FW_MSG_CODE_DRV_LOAD_PORT:
5401 rc = bnx2x_func_init_port(bp, drv); 5401 rc = bnx2x_func_init_port(bp, drv);
5402 if (rc) 5402 if (rc)
5403 goto init_hw_err; 5403 goto init_err;
5404 5404
5405 break; 5405 break;
5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5407 rc = bnx2x_func_init_func(bp, drv); 5407 rc = bnx2x_func_init_func(bp, drv);
5408 if (rc) 5408 if (rc)
5409 goto init_hw_err; 5409 goto init_err;
5410 5410
5411 break; 5411 break;
5412 default: 5412 default:
@@ -5414,10 +5414,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5414 rc = -EINVAL; 5414 rc = -EINVAL;
5415 } 5415 }
5416 5416
5417init_hw_err: 5417init_err:
5418 drv->release_fw(bp);
5419
5420fw_init_err:
5421 drv->gunzip_end(bp); 5418 drv->gunzip_end(bp);
5422 5419
5423 /* In case of success, complete the comand immediatelly: no ramrods 5420 /* In case of success, complete the comand immediatelly: no ramrods
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 98849a1fc74..b48378a41e4 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -7,6 +7,7 @@ config HAVE_NET_MACB
7 7
8config NET_ATMEL 8config NET_ATMEL
9 bool "Atmel devices" 9 bool "Atmel devices"
10 default y
10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) 11 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
11 ---help--- 12 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 13 If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6bb2b9506ca..0b3567ab812 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -34,6 +34,8 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/dma-mapping.h>
38#include <linux/module.h>
37 39
38#include <asm/checksum.h> 40#include <asm/checksum.h>
39 41
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fdc6c394c68..7803efa46eb 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.29" 53#define DRV_VERSION "1.30"
54 54
55/* 55/*
56 * The Yukon II chipset takes 64 bit command blocks (called list elements) 56 * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -68,7 +68,7 @@
68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) 68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) 69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
70#define TX_MAX_PENDING 1024 70#define TX_MAX_PENDING 1024
71#define TX_DEF_PENDING 127 71#define TX_DEF_PENDING 63
72 72
73#define TX_WATCHDOG (5 * HZ) 73#define TX_WATCHDOG (5 * HZ)
74#define NAPI_WEIGHT 64 74#define NAPI_WEIGHT 64
@@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
869 869
870 /* block receiver */ 870 /* block receiver */
871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
872 sky2_read32(hw, B0_CTST);
872} 873}
873 874
874static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) 875static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -1274,6 +1275,14 @@ static void rx_set_checksum(struct sky2_port *sky2)
1274 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1275 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1275} 1276}
1276 1277
1278/*
1279 * Fixed initial key as seed to RSS.
1280 */
1281static const uint32_t rss_init_key[10] = {
1282 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
1283 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
1284};
1285
1277/* Enable/disable receive hash calculation (RSS) */ 1286/* Enable/disable receive hash calculation (RSS) */
1278static void rx_set_rss(struct net_device *dev, u32 features) 1287static void rx_set_rss(struct net_device *dev, u32 features)
1279{ 1288{
@@ -1289,12 +1298,9 @@ static void rx_set_rss(struct net_device *dev, u32 features)
1289 1298
1290 /* Program RSS initial values */ 1299 /* Program RSS initial values */
1291 if (features & NETIF_F_RXHASH) { 1300 if (features & NETIF_F_RXHASH) {
1292 u32 key[nkeys];
1293
1294 get_random_bytes(key, nkeys * sizeof(u32));
1295 for (i = 0; i < nkeys; i++) 1301 for (i = 0; i < nkeys; i++)
1296 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), 1302 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1297 key[i]); 1303 rss_init_key[i]);
1298 1304
1299 /* Need to turn on (undocumented) flag to make hashing work */ 1305 /* Need to turn on (undocumented) flag to make hashing work */
1300 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), 1306 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1717,6 +1723,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1717 if (err) 1723 if (err)
1718 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 1724 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
1719 else { 1725 else {
1726 hw->flags |= SKY2_HW_IRQ_SETUP;
1727
1720 napi_enable(&hw->napi); 1728 napi_enable(&hw->napi);
1721 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 1729 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
1722 sky2_read32(hw, B0_IMSK); 1730 sky2_read32(hw, B0_IMSK);
@@ -1727,7 +1735,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1727 1735
1728 1736
1729/* Bring up network interface. */ 1737/* Bring up network interface. */
1730static int sky2_up(struct net_device *dev) 1738static int sky2_open(struct net_device *dev)
1731{ 1739{
1732 struct sky2_port *sky2 = netdev_priv(dev); 1740 struct sky2_port *sky2 = netdev_priv(dev);
1733 struct sky2_hw *hw = sky2->hw; 1741 struct sky2_hw *hw = sky2->hw;
@@ -1747,6 +1755,11 @@ static int sky2_up(struct net_device *dev)
1747 1755
1748 sky2_hw_up(sky2); 1756 sky2_hw_up(sky2);
1749 1757
1758 if (hw->chip_id == CHIP_ID_YUKON_OPT ||
1759 hw->chip_id == CHIP_ID_YUKON_PRM ||
1760 hw->chip_id == CHIP_ID_YUKON_OP_2)
1761 imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
1762
1750 /* Enable interrupts from phy/mac for port */ 1763 /* Enable interrupts from phy/mac for port */
1751 imask = sky2_read32(hw, B0_IMSK); 1764 imask = sky2_read32(hw, B0_IMSK);
1752 imask |= portirq_msk[port]; 1765 imask |= portirq_msk[port];
@@ -2040,6 +2053,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
2040 2053
2041 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2054 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2042 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2055 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2056
2057 sky2_read32(hw, B0_CTST);
2043} 2058}
2044 2059
2045static void sky2_hw_down(struct sky2_port *sky2) 2060static void sky2_hw_down(struct sky2_port *sky2)
@@ -2090,7 +2105,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
2090} 2105}
2091 2106
2092/* Network shutdown */ 2107/* Network shutdown */
2093static int sky2_down(struct net_device *dev) 2108static int sky2_close(struct net_device *dev)
2094{ 2109{
2095 struct sky2_port *sky2 = netdev_priv(dev); 2110 struct sky2_port *sky2 = netdev_priv(dev);
2096 struct sky2_hw *hw = sky2->hw; 2111 struct sky2_hw *hw = sky2->hw;
@@ -2101,15 +2116,22 @@ static int sky2_down(struct net_device *dev)
2101 2116
2102 netif_info(sky2, ifdown, dev, "disabling interface\n"); 2117 netif_info(sky2, ifdown, dev, "disabling interface\n");
2103 2118
2104 /* Disable port IRQ */
2105 sky2_write32(hw, B0_IMSK,
2106 sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
2107 sky2_read32(hw, B0_IMSK);
2108
2109 if (hw->ports == 1) { 2119 if (hw->ports == 1) {
2120 sky2_write32(hw, B0_IMSK, 0);
2121 sky2_read32(hw, B0_IMSK);
2122
2110 napi_disable(&hw->napi); 2123 napi_disable(&hw->napi);
2111 free_irq(hw->pdev->irq, hw); 2124 free_irq(hw->pdev->irq, hw);
2125 hw->flags &= ~SKY2_HW_IRQ_SETUP;
2112 } else { 2126 } else {
2127 u32 imask;
2128
2129 /* Disable port IRQ */
2130 imask = sky2_read32(hw, B0_IMSK);
2131 imask &= ~portirq_msk[sky2->port];
2132 sky2_write32(hw, B0_IMSK, imask);
2133 sky2_read32(hw, B0_IMSK);
2134
2113 synchronize_irq(hw->pdev->irq); 2135 synchronize_irq(hw->pdev->irq);
2114 napi_synchronize(&hw->napi); 2136 napi_synchronize(&hw->napi);
2115 } 2137 }
@@ -2587,7 +2609,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2587 if (netif_running(dev)) { 2609 if (netif_running(dev)) {
2588 sky2_tx_complete(sky2, last); 2610 sky2_tx_complete(sky2, last);
2589 2611
2590 /* Wake unless it's detached, and called e.g. from sky2_down() */ 2612 /* Wake unless it's detached, and called e.g. from sky2_close() */
2591 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) 2613 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2592 netif_wake_queue(dev); 2614 netif_wake_queue(dev);
2593 } 2615 }
@@ -3258,7 +3280,6 @@ static void sky2_reset(struct sky2_hw *hw)
3258 hw->chip_id == CHIP_ID_YUKON_PRM || 3280 hw->chip_id == CHIP_ID_YUKON_PRM ||
3259 hw->chip_id == CHIP_ID_YUKON_OP_2) { 3281 hw->chip_id == CHIP_ID_YUKON_OP_2) {
3260 u16 reg; 3282 u16 reg;
3261 u32 msk;
3262 3283
3263 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { 3284 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
3264 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ 3285 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
@@ -3281,11 +3302,6 @@ static void sky2_reset(struct sky2_hw *hw)
3281 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3302 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3282 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3303 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3283 3304
3284 /* enable PHY Quick Link */
3285 msk = sky2_read32(hw, B0_IMSK);
3286 msk |= Y2_IS_PHY_QLNK;
3287 sky2_write32(hw, B0_IMSK, msk);
3288
3289 /* check if PSMv2 was running before */ 3305 /* check if PSMv2 was running before */
3290 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); 3306 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3291 if (reg & PCI_EXP_LNKCTL_ASPMC) 3307 if (reg & PCI_EXP_LNKCTL_ASPMC)
@@ -3383,7 +3399,7 @@ static void sky2_detach(struct net_device *dev)
3383 netif_tx_lock(dev); 3399 netif_tx_lock(dev);
3384 netif_device_detach(dev); /* stop txq */ 3400 netif_device_detach(dev); /* stop txq */
3385 netif_tx_unlock(dev); 3401 netif_tx_unlock(dev);
3386 sky2_down(dev); 3402 sky2_close(dev);
3387 } 3403 }
3388} 3404}
3389 3405
@@ -3393,7 +3409,7 @@ static int sky2_reattach(struct net_device *dev)
3393 int err = 0; 3409 int err = 0;
3394 3410
3395 if (netif_running(dev)) { 3411 if (netif_running(dev)) {
3396 err = sky2_up(dev); 3412 err = sky2_open(dev);
3397 if (err) { 3413 if (err) {
3398 netdev_info(dev, "could not restart %d\n", err); 3414 netdev_info(dev, "could not restart %d\n", err);
3399 dev_close(dev); 3415 dev_close(dev);
@@ -3410,10 +3426,13 @@ static void sky2_all_down(struct sky2_hw *hw)
3410{ 3426{
3411 int i; 3427 int i;
3412 3428
3413 sky2_read32(hw, B0_IMSK); 3429 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3414 sky2_write32(hw, B0_IMSK, 0); 3430 sky2_read32(hw, B0_IMSK);
3415 synchronize_irq(hw->pdev->irq); 3431 sky2_write32(hw, B0_IMSK, 0);
3416 napi_disable(&hw->napi); 3432
3433 synchronize_irq(hw->pdev->irq);
3434 napi_disable(&hw->napi);
3435 }
3417 3436
3418 for (i = 0; i < hw->ports; i++) { 3437 for (i = 0; i < hw->ports; i++) {
3419 struct net_device *dev = hw->dev[i]; 3438 struct net_device *dev = hw->dev[i];
@@ -3446,11 +3465,12 @@ static void sky2_all_up(struct sky2_hw *hw)
3446 netif_wake_queue(dev); 3465 netif_wake_queue(dev);
3447 } 3466 }
3448 3467
3449 sky2_write32(hw, B0_IMSK, imask); 3468 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3450 sky2_read32(hw, B0_IMSK); 3469 sky2_write32(hw, B0_IMSK, imask);
3451 3470 sky2_read32(hw, B0_IMSK);
3452 sky2_read32(hw, B0_Y2_SP_LISR); 3471 sky2_read32(hw, B0_Y2_SP_LISR);
3453 napi_enable(&hw->napi); 3472 napi_enable(&hw->napi);
3473 }
3454} 3474}
3455 3475
3456static void sky2_restart(struct work_struct *work) 3476static void sky2_restart(struct work_struct *work)
@@ -4071,6 +4091,16 @@ static int sky2_set_coalesce(struct net_device *dev,
4071 return 0; 4091 return 0;
4072} 4092}
4073 4093
4094/*
4095 * Hardware is limited to min of 128 and max of 2048 for ring size
4096 * and rounded up to next power of two
 4097 * to avoid division in modulus calculation
4098 */
4099static unsigned long roundup_ring_size(unsigned long pending)
4100{
4101 return max(128ul, roundup_pow_of_two(pending+1));
4102}
4103
4074static void sky2_get_ringparam(struct net_device *dev, 4104static void sky2_get_ringparam(struct net_device *dev,
4075 struct ethtool_ringparam *ering) 4105 struct ethtool_ringparam *ering)
4076{ 4106{
@@ -4098,7 +4128,7 @@ static int sky2_set_ringparam(struct net_device *dev,
4098 4128
4099 sky2->rx_pending = ering->rx_pending; 4129 sky2->rx_pending = ering->rx_pending;
4100 sky2->tx_pending = ering->tx_pending; 4130 sky2->tx_pending = ering->tx_pending;
4101 sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1); 4131 sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
4102 4132
4103 return sky2_reattach(dev); 4133 return sky2_reattach(dev);
4104} 4134}
@@ -4556,7 +4586,7 @@ static int sky2_device_event(struct notifier_block *unused,
4556 struct net_device *dev = ptr; 4586 struct net_device *dev = ptr;
4557 struct sky2_port *sky2 = netdev_priv(dev); 4587 struct sky2_port *sky2 = netdev_priv(dev);
4558 4588
4559 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) 4589 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4560 return NOTIFY_DONE; 4590 return NOTIFY_DONE;
4561 4591
4562 switch (event) { 4592 switch (event) {
@@ -4621,8 +4651,8 @@ static __exit void sky2_debug_cleanup(void)
4621 not allowing netpoll on second port */ 4651 not allowing netpoll on second port */
4622static const struct net_device_ops sky2_netdev_ops[2] = { 4652static const struct net_device_ops sky2_netdev_ops[2] = {
4623 { 4653 {
4624 .ndo_open = sky2_up, 4654 .ndo_open = sky2_open,
4625 .ndo_stop = sky2_down, 4655 .ndo_stop = sky2_close,
4626 .ndo_start_xmit = sky2_xmit_frame, 4656 .ndo_start_xmit = sky2_xmit_frame,
4627 .ndo_do_ioctl = sky2_ioctl, 4657 .ndo_do_ioctl = sky2_ioctl,
4628 .ndo_validate_addr = eth_validate_addr, 4658 .ndo_validate_addr = eth_validate_addr,
@@ -4638,8 +4668,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4638#endif 4668#endif
4639 }, 4669 },
4640 { 4670 {
4641 .ndo_open = sky2_up, 4671 .ndo_open = sky2_open,
4642 .ndo_stop = sky2_down, 4672 .ndo_stop = sky2_close,
4643 .ndo_start_xmit = sky2_xmit_frame, 4673 .ndo_start_xmit = sky2_xmit_frame,
4644 .ndo_do_ioctl = sky2_ioctl, 4674 .ndo_do_ioctl = sky2_ioctl,
4645 .ndo_validate_addr = eth_validate_addr, 4675 .ndo_validate_addr = eth_validate_addr,
@@ -4692,7 +4722,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4692 spin_lock_init(&sky2->phy_lock); 4722 spin_lock_init(&sky2->phy_lock);
4693 4723
4694 sky2->tx_pending = TX_DEF_PENDING; 4724 sky2->tx_pending = TX_DEF_PENDING;
4695 sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); 4725 sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
4696 sky2->rx_pending = RX_DEF_PENDING; 4726 sky2->rx_pending = RX_DEF_PENDING;
4697 4727
4698 hw->dev[port] = dev; 4728 hw->dev[port] = dev;
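
The roundup_ring_size() helper added to sky2.c above encodes the hardware constraint that the transmit ring must hold at least 128 elements and be a power of two, so the ring index can be masked instead of computed with a modulus. A worked illustration of the rule, using the same max()/roundup_pow_of_two() helpers from <linux/kernel.h> and <linux/log2.h>; the sample values are illustrative:

#include <linux/kernel.h>
#include <linux/log2.h>

static unsigned long ring_size_example(unsigned long pending)
{
        /*
         * pending = 63   (TX_DEF_PENDING) -> 128
         * pending = 200                   -> 256
         * pending = 1024                  -> 2048
         */
        return max(128ul, roundup_pow_of_two(pending + 1));
}
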
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0af31b8b5f1..ff6f58bf822 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2287,6 +2287,7 @@ struct sky2_hw {
2287#define SKY2_HW_RSS_BROKEN 0x00000100 2287#define SKY2_HW_RSS_BROKEN 0x00000100
2288#define SKY2_HW_VLAN_BROKEN 0x00000200 2288#define SKY2_HW_VLAN_BROKEN 0x00000200
2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ 2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
2290#define SKY2_HW_IRQ_SETUP 0x00000800
2290 2291
2291 u8 chip_id; 2292 u8 chip_id;
2292 u8 chip_rev; 2293 u8 chip_rev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b89c36dbf5b..c2df6c35860 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -581,6 +581,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
581 * Packet is OK - process it. 581 * Packet is OK - process it.
582 */ 582 */
583 length = be32_to_cpu(cqe->byte_cnt); 583 length = be32_to_cpu(cqe->byte_cnt);
584 length -= ring->fcs_del;
584 ring->bytes += length; 585 ring->bytes += length;
585 ring->packets++; 586 ring->packets++;
586 587
@@ -813,8 +814,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
813 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 814 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
814 815
815 /* Cancel FCS removal if FW allows */ 816 /* Cancel FCS removal if FW allows */
816 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 817 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
817 context->param3 |= cpu_to_be32(1 << 29); 818 context->param3 |= cpu_to_be32(1 << 29);
819 ring->fcs_del = ETH_FCS_LEN;
820 } else
821 ring->fcs_del = 0;
818 822
819 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); 823 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
820 if (err) { 824 if (err) {
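
The mlx4 change above records in the ring whether the hardware was told to keep the Ethernet FCS (fcs_del = ETH_FCS_LEN) and subtracts that amount from the completion length, so byte statistics never include the trailing CRC. A cut-down sketch of the accounting, assuming only ETH_FCS_LEN from <linux/if_ether.h>; struct rx_ring_stats is a stand-in, not the mlx4 structure:

#include <linux/if_ether.h>     /* ETH_FCS_LEN == 4 */
#include <linux/types.h>

struct rx_ring_stats {
        u8 fcs_del;                     /* 0, or ETH_FCS_LEN when the HW keeps the CRC */
        unsigned long bytes;
        unsigned long packets;
};

static void configure_fcs(struct rx_ring_stats *ring, bool hw_keeps_fcs)
{
        ring->fcs_del = hw_keeps_fcs ? ETH_FCS_LEN : 0;
}

static void account_rx(struct rx_ring_stats *ring, u32 completion_len)
{
        u32 len = completion_len - ring->fcs_del;       /* do not count the trailing CRC */

        ring->bytes += len;
        ring->packets++;
}
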
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fda331c65d..207b5add3ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -272,6 +272,7 @@ struct mlx4_en_rx_ring {
272 u32 prod; 272 u32 prod;
273 u32 cons; 273 u32 cons;
274 u32 buf_size; 274 u32 buf_size;
275 u8 fcs_del;
275 void *buf; 276 void *buf;
276 void *rx_info; 277 void *rx_info;
277 unsigned long bytes; 278 unsigned long bytes;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1dca57013cb..1c61d36e657 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -609,7 +609,7 @@ struct nv_ethtool_str {
609}; 609};
610 610
611static const struct nv_ethtool_str nv_estats_str[] = { 611static const struct nv_ethtool_str nv_estats_str[] = {
612 { "tx_bytes" }, 612 { "tx_bytes" }, /* includes Ethernet FCS CRC */
613 { "tx_zero_rexmt" }, 613 { "tx_zero_rexmt" },
614 { "tx_one_rexmt" }, 614 { "tx_one_rexmt" },
615 { "tx_many_rexmt" }, 615 { "tx_many_rexmt" },
@@ -637,7 +637,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
637 /* version 2 stats */ 637 /* version 2 stats */
638 { "tx_deferral" }, 638 { "tx_deferral" },
639 { "tx_packets" }, 639 { "tx_packets" },
640 { "rx_bytes" }, 640 { "rx_bytes" }, /* includes Ethernet FCS CRC */
641 { "tx_pause" }, 641 { "tx_pause" },
642 { "rx_pause" }, 642 { "rx_pause" },
643 { "rx_drop_frame" }, 643 { "rx_drop_frame" },
@@ -649,7 +649,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
649}; 649};
650 650
651struct nv_ethtool_stats { 651struct nv_ethtool_stats {
652 u64 tx_bytes; 652 u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
653 u64 tx_zero_rexmt; 653 u64 tx_zero_rexmt;
654 u64 tx_one_rexmt; 654 u64 tx_one_rexmt;
655 u64 tx_many_rexmt; 655 u64 tx_many_rexmt;
@@ -670,14 +670,14 @@ struct nv_ethtool_stats {
670 u64 rx_unicast; 670 u64 rx_unicast;
671 u64 rx_multicast; 671 u64 rx_multicast;
672 u64 rx_broadcast; 672 u64 rx_broadcast;
673 u64 rx_packets; 673 u64 rx_packets; /* should be ifconfig->rx_packets */
674 u64 rx_errors_total; 674 u64 rx_errors_total;
675 u64 tx_errors_total; 675 u64 tx_errors_total;
676 676
677 /* version 2 stats */ 677 /* version 2 stats */
678 u64 tx_deferral; 678 u64 tx_deferral;
679 u64 tx_packets; 679 u64 tx_packets; /* should be ifconfig->tx_packets */
680 u64 rx_bytes; 680 u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
681 u64 tx_pause; 681 u64 tx_pause;
682 u64 rx_pause; 682 u64 rx_pause;
683 u64 rx_drop_frame; 683 u64 rx_drop_frame;
@@ -1706,10 +1706,17 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1707 nv_get_hw_stats(dev); 1707 nv_get_hw_stats(dev);
1708 1708
1709 /*
1710 * Note: because HW stats are not always available and
1711 * for consistency reasons, the following ifconfig
1712 * stats are managed by software: rx_bytes, tx_bytes,
1713 * rx_packets and tx_packets. The related hardware
1714 * stats reported by ethtool should be equivalent to
1715 * these ifconfig stats, with 4 additional bytes per
1716 * packet (Ethernet FCS CRC).
1717 */
1718
1709 /* copy to net_device stats */ 1719 /* copy to net_device stats */
1710 dev->stats.tx_packets = np->estats.tx_packets;
1711 dev->stats.rx_bytes = np->estats.rx_bytes;
1712 dev->stats.tx_bytes = np->estats.tx_bytes;
1713 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1720 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1714 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1721 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1715 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1722 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
@@ -2380,6 +2387,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2380 if (flags & NV_TX_ERROR) { 2387 if (flags & NV_TX_ERROR) {
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2388 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev); 2389 nv_legacybackoff_reseed(dev);
2390 } else {
2391 dev->stats.tx_packets++;
2392 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2383 } 2393 }
2384 dev_kfree_skb_any(np->get_tx_ctx->skb); 2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2385 np->get_tx_ctx->skb = NULL; 2395 np->get_tx_ctx->skb = NULL;
@@ -2390,6 +2400,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2390 if (flags & NV_TX2_ERROR) { 2400 if (flags & NV_TX2_ERROR) {
2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2401 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2392 nv_legacybackoff_reseed(dev); 2402 nv_legacybackoff_reseed(dev);
2403 } else {
2404 dev->stats.tx_packets++;
2405 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2393 } 2406 }
2394 dev_kfree_skb_any(np->get_tx_ctx->skb); 2407 dev_kfree_skb_any(np->get_tx_ctx->skb);
2395 np->get_tx_ctx->skb = NULL; 2408 np->get_tx_ctx->skb = NULL;
@@ -2429,6 +2442,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2429 else 2442 else
2430 nv_legacybackoff_reseed(dev); 2443 nv_legacybackoff_reseed(dev);
2431 } 2444 }
2445 } else {
2446 dev->stats.tx_packets++;
2447 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2432 } 2448 }
2433 2449
2434 dev_kfree_skb_any(np->get_tx_ctx->skb); 2450 dev_kfree_skb_any(np->get_tx_ctx->skb);
@@ -2678,6 +2694,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2678 skb->protocol = eth_type_trans(skb, dev); 2694 skb->protocol = eth_type_trans(skb, dev);
2679 napi_gro_receive(&np->napi, skb); 2695 napi_gro_receive(&np->napi, skb);
2680 dev->stats.rx_packets++; 2696 dev->stats.rx_packets++;
2697 dev->stats.rx_bytes += len;
2681next_pkt: 2698next_pkt:
2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2699 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2683 np->get_rx.orig = np->first_rx.orig; 2700 np->get_rx.orig = np->first_rx.orig;
@@ -2761,6 +2778,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2761 } 2778 }
2762 napi_gro_receive(&np->napi, skb); 2779 napi_gro_receive(&np->napi, skb);
2763 dev->stats.rx_packets++; 2780 dev->stats.rx_packets++;
2781 dev->stats.rx_bytes += len;
2764 } else { 2782 } else {
2765 dev_kfree_skb(skb); 2783 dev_kfree_skb(skb);
2766 } 2784 }
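
The forcedeth hunks above switch rx_bytes, tx_bytes, rx_packets and tx_packets to software accounting in the completion paths, bumping the TX counters only for descriptors that completed without error; the matching hardware counters exposed through ethtool then differ by the 4-byte FCS per packet, as the added comments note. A small sketch of the TX-completion side of that pattern, with illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void tx_complete_one(struct net_device *dev, struct sk_buff *skb,
                            bool tx_error)
{
        if (!tx_error) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;        /* excludes the 4-byte FCS */
        }

        dev_kfree_skb_any(skb);         /* the skb is released either way */
}
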
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 9c075ea2682..9cb5f912e48 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,8 +18,8 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> /* for __MODULE_STRING */
22#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include <linux/module.h> /* for __MODULE_STRING */
23 23
24#define OPTION_UNSET -1 24#define OPTION_UNSET -1
25#define OPTION_DISABLED 0 25#define OPTION_DISABLED 0
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1fc01ca72b4..4bf68cfef39 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -940,7 +940,7 @@ static void r6040_multicast_list(struct net_device *dev)
940 iowrite16(lp->mcr0, ioaddr + MCR0); 940 iowrite16(lp->mcr0, ioaddr + MCR0);
941 941
942 /* Fill the MAC hash tables with their values */ 942 /* Fill the MAC hash tables with their values */
943 if (lp->mcr0 && MCR0_HASH_EN) { 943 if (lp->mcr0 & MCR0_HASH_EN) {
944 iowrite16(hash_table[0], ioaddr + MAR0); 944 iowrite16(hash_table[0], ioaddr + MAR0);
945 iowrite16(hash_table[1], ioaddr + MAR1); 945 iowrite16(hash_table[1], ioaddr + MAR1);
946 iowrite16(hash_table[2], ioaddr + MAR2); 946 iowrite16(hash_table[2], ioaddr + MAR2);
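
The r6040 one-liner above is the classic logical-versus-bitwise AND mistake: with a non-zero mask, "lp->mcr0 && MCR0_HASH_EN" is true whenever mcr0 has any bit set at all, while "lp->mcr0 & MCR0_HASH_EN" is true only when the hash-enable bit itself is set. A self-contained illustration in plain C (the mask value is made up):

#include <stdio.h>

#define HASH_EN 0x0100

int main(void)
{
        unsigned int mcr0 = 0x0002;     /* some unrelated bit set, HASH_EN clear */

        printf("logical: %d\n", mcr0 && HASH_EN);       /* prints 1: wrong, true for any nonzero mcr0 */
        printf("bitwise: %d\n", (mcr0 & HASH_EN) != 0); /* prints 0: correct, the bit is not set */
        return 0;
}
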
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 92b45f08858..6f06aa10f0d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1292,7 +1292,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
1292 netif_carrier_off(dev); 1292 netif_carrier_off(dev);
1293 netif_info(tp, ifdown, dev, "link down\n"); 1293 netif_info(tp, ifdown, dev, "link down\n");
1294 if (pm) 1294 if (pm)
1295 pm_schedule_suspend(&tp->pci_dev->dev, 100); 1295 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1296 } 1296 }
1297 spin_unlock_irqrestore(&tp->lock, flags); 1297 spin_unlock_irqrestore(&tp->lock, flags);
1298} 1298}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index d2be42aafbe..8843071fe98 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1937,6 +1937,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
1937{ 1937{
1938 struct smsc911x_data *pdata = netdev_priv(dev); 1938 struct smsc911x_data *pdata = netdev_priv(dev);
1939 unsigned int byte_test; 1939 unsigned int byte_test;
1940 unsigned int to = 100;
1940 1941
1941 SMSC_TRACE(pdata, probe, "Driver Parameters:"); 1942 SMSC_TRACE(pdata, probe, "Driver Parameters:");
1942 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", 1943 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
@@ -1952,6 +1953,17 @@ static int __devinit smsc911x_init(struct net_device *dev)
1952 return -ENODEV; 1953 return -ENODEV;
1953 } 1954 }
1954 1955
1956 /*
1957 * poll the READY bit in PMT_CTRL. Any other access to the device is
1958 * forbidden while this bit isn't set. Try for 100ms
1959 */
1960 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
1961 udelay(1000);
1962 if (to == 0) {
1963 pr_err("Device not READY in 100ms aborting\n");
1964 return -ENODEV;
1965 }
1966
1955 /* Check byte ordering */ 1967 /* Check byte ordering */
1956 byte_test = smsc911x_reg_read(pdata, BYTE_TEST); 1968 byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
1957 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); 1969 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
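
The smsc911x change above refuses to touch any other register until the READY bit in PMT_CTRL comes up, polling it for roughly 100 ms (100 iterations of udelay(1000)) and bailing out with -ENODEV on timeout. A sketch of that bounded busy-wait, where read_status() and ready_bit stand in for smsc911x_reg_read(pdata, PMT_CTRL) and PMT_CTRL_READY_:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_until_ready(u32 (*read_status)(void), u32 ready_bit)
{
        unsigned int to = 100;                  /* iteration budget, about 100 ms in total */

        while (!(read_status() & ready_bit) && --to)
                udelay(1000);                   /* 1 ms between polls */

        return to ? 0 : -ENODEV;                /* to == 0 means the bit never came up */
}
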
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da66ac511c4..4d5402a1d26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
39 /* DMA SW reset */ 39 /* DMA SW reset */
40 value |= DMA_BUS_MODE_SFT_RESET; 40 value |= DMA_BUS_MODE_SFT_RESET;
41 writel(value, ioaddr + DMA_BUS_MODE); 41 writel(value, ioaddr + DMA_BUS_MODE);
42 limit = 15000; 42 limit = 10;
43 while (limit--) { 43 while (limit--) {
44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
45 break; 45 break;
46 mdelay(10);
46 } 47 }
47 if (limit < 0) 48 if (limit < 0)
48 return -EBUSY; 49 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 627f656b0f3..bc17fd08b55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
41 /* DMA SW reset */ 41 /* DMA SW reset */
42 value |= DMA_BUS_MODE_SFT_RESET; 42 value |= DMA_BUS_MODE_SFT_RESET;
43 writel(value, ioaddr + DMA_BUS_MODE); 43 writel(value, ioaddr + DMA_BUS_MODE);
44 limit = 15000; 44 limit = 10;
45 while (limit--) { 45 while (limit--) {
46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
47 break; 47 break;
48 mdelay(10);
48 } 49 }
49 if (limit < 0) 50 if (limit < 0)
50 return -EBUSY; 51 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9bafa6cf9e8..a140a8fbf05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -72,7 +72,6 @@ struct stmmac_priv {
72 spinlock_t lock; 72 spinlock_t lock;
73 spinlock_t tx_lock; 73 spinlock_t tx_lock;
74 int wolopts; 74 int wolopts;
75 int wolenabled;
76 int wol_irq; 75 int wol_irq;
77#ifdef CONFIG_STMMAC_TIMER 76#ifdef CONFIG_STMMAC_TIMER
78 struct stmmac_timer *tm; 77 struct stmmac_timer *tm;
@@ -80,6 +79,7 @@ struct stmmac_priv {
80 struct plat_stmmacenet_data *plat; 79 struct plat_stmmacenet_data *plat;
81 struct stmmac_counters mmc; 80 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 81 struct dma_features dma_cap;
82 int hw_cap_support;
83}; 83};
84 84
85extern int stmmac_mdio_unregister(struct net_device *ndev); 85extern int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e8eff09bbbd..0395f9eba80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -430,6 +430,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
430 struct stmmac_priv *priv = netdev_priv(dev); 430 struct stmmac_priv *priv = netdev_priv(dev);
431 u32 support = WAKE_MAGIC | WAKE_UCAST; 431 u32 support = WAKE_MAGIC | WAKE_UCAST;
432 432
433 /* By default almost all GMAC devices support the WoL via
434 * magic frame but we can disable it if the HW capability
435 * register shows no support for pmt_magic_frame. */
436 if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
437 wol->wolopts &= ~WAKE_MAGIC;
438
433 if (!device_can_wakeup(priv->device)) 439 if (!device_can_wakeup(priv->device))
434 return -EINVAL; 440 return -EINVAL;
435 441
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20546bbbb8d..8ea770a89f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -321,12 +321,10 @@ static int stmmac_init_phy(struct net_device *dev)
321 } 321 }
322 322
323 /* Stop Advertising 1000BASE Capability if interface is not GMII */ 323 /* Stop Advertising 1000BASE Capability if interface is not GMII */
324 if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) || 324 if ((interface == PHY_INTERFACE_MODE_MII) ||
325 (interface == PHY_INTERFACE_MODE_RMII))) { 325 (interface == PHY_INTERFACE_MODE_RMII))
326 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 326 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
327 SUPPORTED_Asym_Pause); 327 SUPPORTED_1000baseT_Full);
328 phydev->advertising = phydev->supported;
329 }
330 328
331 /* 329 /*
332 * Broken HW is sometimes missing the pull-up resistor on the 330 * Broken HW is sometimes missing the pull-up resistor on the
@@ -807,8 +805,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
807 return 0; 805 return 0;
808} 806}
809 807
810/* New GMAC chips support a new register to indicate the 808/**
811 * presence of the optional feature/functions. 809 * stmmac_selec_desc_mode
810 * @dev : device pointer
811 * Description: select the Enhanced/Alternate or Normal descriptors */
812static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
813{
814 if (priv->plat->enh_desc) {
815 pr_info(" Enhanced/Alternate descriptors\n");
816 priv->hw->desc = &enh_desc_ops;
817 } else {
818 pr_info(" Normal descriptors\n");
819 priv->hw->desc = &ndesc_ops;
820 }
821}
822
823/**
824 * stmmac_get_hw_features
825 * @priv : private device pointer
826 * Description:
827 * new GMAC chip generations have a new register to indicate the
828 * presence of the optional feature/functions.
829 * This can be also used to override the value passed through the
830 * platform and necessary for old MAC10/100 and GMAC chips.
812 */ 831 */
813static int stmmac_get_hw_features(struct stmmac_priv *priv) 832static int stmmac_get_hw_features(struct stmmac_priv *priv)
814{ 833{
@@ -829,7 +848,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
829 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; 848 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
830 priv->dma_cap.pmt_magic_frame = 849 priv->dma_cap.pmt_magic_frame =
831 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; 850 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
832 /*MMC*/ 851 /* MMC */
833 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; 852 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
834 /* IEEE 1588-2002*/ 853 /* IEEE 1588-2002*/
835 priv->dma_cap.time_stamp = 854 priv->dma_cap.time_stamp =
@@ -857,8 +876,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
857 priv->dma_cap.enh_desc = 876 priv->dma_cap.enh_desc =
858 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 877 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
859 878
860 } else 879 }
861 pr_debug("\tNo HW DMA feature register supported");
862 880
863 return hw_cap; 881 return hw_cap;
864} 882}
@@ -913,6 +931,44 @@ static int stmmac_open(struct net_device *dev)
913 goto open_error; 931 goto open_error;
914 } 932 }
915 933
934 stmmac_get_synopsys_id(priv);
935
936 priv->hw_cap_support = stmmac_get_hw_features(priv);
937
938 if (priv->hw_cap_support) {
939 pr_info(" Support DMA HW capability register");
940
941 /* We can override some gmac/dma configuration fields: e.g.
942 * enh_desc, tx_coe (e.g. that are passed through the
943 * platform) with the values from the HW capability
944 * register (if supported).
945 */
946 priv->plat->enh_desc = priv->dma_cap.enh_desc;
947 priv->plat->tx_coe = priv->dma_cap.tx_coe;
948 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
949
950 /* By default disable wol on magic frame if not supported */
951 if (!priv->dma_cap.pmt_magic_frame)
952 priv->wolopts &= ~WAKE_MAGIC;
953
954 } else
955 pr_info(" No HW DMA feature register supported");
956
 957 /* Select the enhanced/normal descriptor structures */
958 stmmac_selec_desc_mode(priv);
959
960 /* PMT module is not integrated in all the MAC devices. */
961 if (priv->plat->pmt) {
962 pr_info(" Remote wake-up capable\n");
963 device_set_wakeup_capable(priv->device, 1);
964 }
965
966 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
967 if (priv->rx_coe)
968 pr_info(" Checksum Offload Engine supported\n");
969 if (priv->plat->tx_coe)
970 pr_info(" Checksum insertion supported\n");
971
916 /* Create and initialize the TX/RX descriptors chains. */ 972 /* Create and initialize the TX/RX descriptors chains. */
917 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 973 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
918 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 974 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -935,15 +991,6 @@ static int stmmac_open(struct net_device *dev)
935 /* Initialize the MAC Core */ 991 /* Initialize the MAC Core */
936 priv->hw->mac->core_init(priv->ioaddr); 992 priv->hw->mac->core_init(priv->ioaddr);
937 993
938 stmmac_get_synopsys_id(priv);
939
940 stmmac_get_hw_features(priv);
941
942 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
943 if (priv->rx_coe)
944 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
945 if (priv->plat->tx_coe)
946 pr_info("\tTX Checksum insertion supported\n");
947 netdev_update_features(dev); 994 netdev_update_features(dev);
948 995
949 /* Request the IRQ lines */ 996 /* Request the IRQ lines */
@@ -1489,9 +1536,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1489 if (!priv->phydev) 1536 if (!priv->phydev)
1490 return -EINVAL; 1537 return -EINVAL;
1491 1538
1492 spin_lock(&priv->lock);
1493 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 1539 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1494 spin_unlock(&priv->lock);
1495 1540
1496 return ret; 1541 return ret;
1497} 1542}
@@ -1558,7 +1603,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
1558 struct net_device *dev = seq->private; 1603 struct net_device *dev = seq->private;
1559 struct stmmac_priv *priv = netdev_priv(dev); 1604 struct stmmac_priv *priv = netdev_priv(dev);
1560 1605
1561 if (!stmmac_get_hw_features(priv)) { 1606 if (!priv->hw_cap_support) {
1562 seq_printf(seq, "DMA HW features not supported\n"); 1607 seq_printf(seq, "DMA HW features not supported\n");
1563 return 0; 1608 return 0;
1564 } 1609 }
@@ -1766,12 +1811,6 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1766 if (!device) 1811 if (!device)
1767 return -ENOMEM; 1812 return -ENOMEM;
1768 1813
1769 if (priv->plat->enh_desc) {
1770 device->desc = &enh_desc_ops;
1771 pr_info("\tEnhanced descriptor structure\n");
1772 } else
1773 device->desc = &ndesc_ops;
1774
1775 priv->hw = device; 1814 priv->hw = device;
1776 priv->hw->ring = &ring_mode_ops; 1815 priv->hw->ring = &ring_mode_ops;
1777 1816
@@ -1845,11 +1884,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1845 1884
1846 priv->ioaddr = addr; 1885 priv->ioaddr = addr;
1847 1886
1848 /* PMT module is not integrated in all the MAC devices. */
1849 if (plat_dat->pmt) {
1850 pr_info("\tPMT module supported\n");
1851 device_set_wakeup_capable(&pdev->dev, 1);
1852 }
1853 /* 1887 /*
1854 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq 1888 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
1855 * The external wake up irq can be passed through the platform code 1889 * The external wake up irq can be passed through the platform code
@@ -1862,7 +1896,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1862 if (priv->wol_irq == -ENXIO) 1896 if (priv->wol_irq == -ENXIO)
1863 priv->wol_irq = ndev->irq; 1897 priv->wol_irq = ndev->irq;
1864 1898
1865
1866 platform_set_drvdata(pdev, ndev); 1899 platform_set_drvdata(pdev, ndev);
1867 1900
1868 /* Set the I/O base addr */ 1901 /* Set the I/O base addr */
@@ -1875,7 +1908,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1875 goto out_free_ndev; 1908 goto out_free_ndev;
1876 } 1909 }
1877 1910
1878 /* MAC HW revice detection */ 1911 /* MAC HW device detection */
1879 ret = stmmac_mac_device_setup(ndev); 1912 ret = stmmac_mac_device_setup(ndev);
1880 if (ret < 0) 1913 if (ret < 0)
1881 goto out_plat_exit; 1914 goto out_plat_exit;
@@ -1978,12 +2011,13 @@ static int stmmac_suspend(struct device *dev)
1978 if (!ndev || !netif_running(ndev)) 2011 if (!ndev || !netif_running(ndev))
1979 return 0; 2012 return 0;
1980 2013
2014 if (priv->phydev)
2015 phy_stop(priv->phydev);
2016
1981 spin_lock(&priv->lock); 2017 spin_lock(&priv->lock);
1982 2018
1983 netif_device_detach(ndev); 2019 netif_device_detach(ndev);
1984 netif_stop_queue(ndev); 2020 netif_stop_queue(ndev);
1985 if (priv->phydev)
1986 phy_stop(priv->phydev);
1987 2021
1988#ifdef CONFIG_STMMAC_TIMER 2022#ifdef CONFIG_STMMAC_TIMER
1989 priv->tm->timer_stop(); 2023 priv->tm->timer_stop();
@@ -2041,12 +2075,13 @@ static int stmmac_resume(struct device *dev)
2041#endif 2075#endif
2042 napi_enable(&priv->napi); 2076 napi_enable(&priv->napi);
2043 2077
2044 if (priv->phydev)
2045 phy_start(priv->phydev);
2046
2047 netif_start_queue(ndev); 2078 netif_start_queue(ndev);
2048 2079
2049 spin_unlock(&priv->lock); 2080 spin_unlock(&priv->lock);
2081
2082 if (priv->phydev)
2083 phy_start(priv->phydev);
2084
2050 return 0; 2085 return 0;
2051} 2086}
2052 2087
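
The stmmac_main.c rework above reads the Synopsys ID and the DMA HW capability register once in stmmac_open(), caches the result in priv->hw_cap_support, and, when the register is implemented, lets its bits override the enh_desc/tx_coe/pmt values handed in through platform data and mask WAKE_MAGIC if magic-frame wake-up is unsupported. A reduced sketch of that override flow; the structures below contain only the fields touched by the hunks and are not the real stmmac definitions:

#include <linux/types.h>
#include <linux/ethtool.h>      /* WAKE_MAGIC */

struct plat_cfg {
        int enh_desc;
        int tx_coe;
        int pmt;
};

struct hw_caps {
        int enh_desc;
        int tx_coe;
        int pmt_remote_wake_up;
        int pmt_magic_frame;
};

static void apply_hw_caps(struct plat_cfg *plat, const struct hw_caps *cap,
                          bool cap_register_present, u32 *wolopts)
{
        if (!cap_register_present)
                return;                         /* keep the platform defaults as-is */

        plat->enh_desc = cap->enh_desc;         /* the HW register wins over platform data */
        plat->tx_coe = cap->tx_coe;
        plat->pmt = cap->pmt_remote_wake_up;

        if (!cap->pmt_magic_frame)
                *wolopts &= ~WAKE_MAGIC;        /* cannot wake on magic frame, drop the option */
}

Caching the capability read also lets the debugfs dump above test priv->hw_cap_support instead of re-reading the hardware on every access.
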
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index c517dac02ae..cf14ab9db57 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2637,7 +2637,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2637 sbus_dp = op->dev.parent->of_node; 2637 sbus_dp = op->dev.parent->of_node;
2638 2638
2639 /* We can match PCI devices too, do not accept those here. */ 2639 /* We can match PCI devices too, do not accept those here. */
2640 if (strcmp(sbus_dp->name, "sbus")) 2640 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2641 return err; 2641 return err;
2642 2642
2643 if (is_qfe) { 2643 if (is_qfe) {
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index caf3659e173..2681b53820e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
114 return; 114 return;
115 temac_iow(lp, XTE_LSW0_OFFSET, value); 115 temac_iow(lp, XTE_LSW0_OFFSET, value);
116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
117 temac_indirect_busywait(lp);
117} 118}
118 119
119/** 120/**
@@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
203 struct temac_local *lp = netdev_priv(ndev); 204 struct temac_local *lp = netdev_priv(ndev);
204 int i; 205 int i;
205 206
207 /* Reset Local Link (DMA) */
208 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
209
206 for (i = 0; i < RX_BD_NUM; i++) { 210 for (i = 0; i < RX_BD_NUM; i++) {
207 if (!lp->rx_skb[i]) 211 if (!lp->rx_skb[i])
208 break; 212 break;
@@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev)
860 phy_start(lp->phy_dev); 864 phy_start(lp->phy_dev);
861 } 865 }
862 866
867 temac_device_reset(ndev);
868
863 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); 869 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
864 if (rc) 870 if (rc)
865 goto err_tx_irq; 871 goto err_tx_irq;
@@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev)
867 if (rc) 873 if (rc)
868 goto err_rx_irq; 874 goto err_rx_irq;
869 875
870 temac_device_reset(ndev);
871 return 0; 876 return 0;
872 877
873 err_rx_irq: 878 err_rx_irq:
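
In the temac_open() hunks above the device reset is moved in front of the two request_irq() calls, so the handlers are only installed once the hardware is back in a known state. A hedged sketch of that ordering with hypothetical names (the unwind labels mirror the ones visible in the hunk; the free_irq() step is the usual unwinding, assumed here):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_lp {			/* illustrative private data only */
	int tx_irq;
	int rx_irq;
};

static irqreturn_t example_tx_irq(int irq, void *dev_id) { return IRQ_HANDLED; }
static irqreturn_t example_rx_irq(int irq, void *dev_id) { return IRQ_HANDLED; }
static void example_device_reset(struct net_device *ndev) { /* reset MAC + rings */ }

static int example_open(struct net_device *ndev)
{
	struct example_lp *lp = netdev_priv(ndev);
	int rc;

	/* Quiesce and reprogram the hardware before any IRQ handler can run. */
	example_device_reset(ndev);

	rc = request_irq(lp->tx_irq, example_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;

	rc = request_irq(lp->rx_irq, example_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	return rc;
}
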
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 7393eb732ee..95eb34fdbba 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS
36 kernel code or by user space programs. Say Y here only if you have 36 kernel code or by user space programs. Say Y here only if you have
37 the memory. 37 the memory.
38 38
39endif /* HIPPI */ 39endif # HIPPI
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e81e22e3d1d..e6fed4d4cb7 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
36#include <linux/usb/usbnet.h> 36#include <linux/usb/usbnet.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39#define DRIVER_VERSION "26-Sep-2011" 39#define DRIVER_VERSION "08-Nov-2011"
40#define DRIVER_NAME "asix" 40#define DRIVER_NAME "asix"
41 41
42/* ASIX AX8817X based USB 2.0 Ethernet Devices */ 42/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -163,7 +163,7 @@
163#define MARVELL_CTRL_TXDELAY 0x0002 163#define MARVELL_CTRL_TXDELAY 0x0002
164#define MARVELL_CTRL_RXDELAY 0x0080 164#define MARVELL_CTRL_RXDELAY 0x0080
165 165
166#define PHY_MODE_RTL8211CL 0x0004 166#define PHY_MODE_RTL8211CL 0x000C
167 167
168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ 168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
169struct asix_data { 169struct asix_data {
@@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev)
652{ 652{
653 int phy_reg; 653 int phy_reg;
654 u32 phy_id; 654 u32 phy_id;
655 int i;
655 656
656 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1); 657 /* Poll for the rare case the FW or phy isn't ready yet. */
657 if (phy_reg < 0) 658 for (i = 0; i < 100; i++) {
659 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
660 if (phy_reg != 0 && phy_reg != 0xFFFF)
661 break;
662 mdelay(1);
663 }
664
665 if (phy_reg <= 0 || phy_reg == 0xFFFF)
658 return 0; 666 return 0;
659 667
660 phy_id = (phy_reg & 0xffff) << 16; 668 phy_id = (phy_reg & 0xffff) << 16;
@@ -1075,7 +1083,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
1075 1083
1076static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) 1084static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1077{ 1085{
1078 int ret; 1086 int ret, embd_phy;
1079 struct asix_data *data = (struct asix_data *)&dev->data; 1087 struct asix_data *data = (struct asix_data *)&dev->data;
1080 u8 buf[ETH_ALEN]; 1088 u8 buf[ETH_ALEN];
1081 u32 phyid; 1089 u32 phyid;
@@ -1100,16 +1108,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1100 dev->mii.reg_num_mask = 0x1f; 1108 dev->mii.reg_num_mask = 0x1f;
1101 dev->mii.phy_id = asix_get_phy_addr(dev); 1109 dev->mii.phy_id = asix_get_phy_addr(dev);
1102 1110
1103 phyid = asix_get_phyid(dev);
1104 dbg("PHYID=0x%08x", phyid);
1105
1106 dev->net->netdev_ops = &ax88772_netdev_ops; 1111 dev->net->netdev_ops = &ax88772_netdev_ops;
1107 dev->net->ethtool_ops = &ax88772_ethtool_ops; 1112 dev->net->ethtool_ops = &ax88772_ethtool_ops;
1108 1113
1109 ret = ax88772_reset(dev); 1114 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
1115
1116 /* Reset the PHY to normal operation mode */
1117 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
1118 if (ret < 0) {
1119 dbg("Select PHY #1 failed: %d", ret);
1120 return ret;
1121 }
1122
1123 ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
1124 if (ret < 0)
1125 return ret;
1126
1127 msleep(150);
1128
1129 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
1110 if (ret < 0) 1130 if (ret < 0)
1111 return ret; 1131 return ret;
1112 1132
1133 msleep(150);
1134
1135 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
1136
1137 /* Read PHYID register *AFTER* the PHY was reset properly */
1138 phyid = asix_get_phyid(dev);
1139 dbg("PHYID=0x%08x", phyid);
1140
1113 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1141 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1114 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1142 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
1115 /* hard_mtu is still the default - the device does not support 1143 /* hard_mtu is still the default - the device does not support
@@ -1220,6 +1248,7 @@ static int ax88178_reset(struct usbnet *dev)
1220 __le16 eeprom; 1248 __le16 eeprom;
1221 u8 status; 1249 u8 status;
1222 int gpio0 = 0; 1250 int gpio0 = 0;
1251 u32 phyid;
1223 1252
1224 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); 1253 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
1225 dbg("GPIO Status: 0x%04x", status); 1254 dbg("GPIO Status: 0x%04x", status);
@@ -1235,12 +1264,13 @@ static int ax88178_reset(struct usbnet *dev)
1235 data->ledmode = 0; 1264 data->ledmode = 0;
1236 gpio0 = 1; 1265 gpio0 = 1;
1237 } else { 1266 } else {
1238 data->phymode = le16_to_cpu(eeprom) & 7; 1267 data->phymode = le16_to_cpu(eeprom) & 0x7F;
1239 data->ledmode = le16_to_cpu(eeprom) >> 8; 1268 data->ledmode = le16_to_cpu(eeprom) >> 8;
1240 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; 1269 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
1241 } 1270 }
1242 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); 1271 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
1243 1272
1273 /* Power up external GigaPHY through AX88178 GPIO pin */
1244 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); 1274 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
1245 if ((le16_to_cpu(eeprom) >> 8) != 1) { 1275 if ((le16_to_cpu(eeprom) >> 8) != 1) {
1246 asix_write_gpio(dev, 0x003c, 30); 1276 asix_write_gpio(dev, 0x003c, 30);
@@ -1252,6 +1282,13 @@ static int ax88178_reset(struct usbnet *dev)
1252 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); 1282 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
1253 } 1283 }
1254 1284
1285 /* Read PHYID register *AFTER* powering up PHY */
1286 phyid = asix_get_phyid(dev);
1287 dbg("PHYID=0x%08x", phyid);
1288
1289 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
1290 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
1291
1255 asix_sw_reset(dev, 0); 1292 asix_sw_reset(dev, 0);
1256 msleep(150); 1293 msleep(150);
1257 1294
@@ -1396,7 +1433,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1396{ 1433{
1397 int ret; 1434 int ret;
1398 u8 buf[ETH_ALEN]; 1435 u8 buf[ETH_ALEN];
1399 u32 phyid;
1400 struct asix_data *data = (struct asix_data *)&dev->data; 1436 struct asix_data *data = (struct asix_data *)&dev->data;
1401 1437
1402 data->eeprom_len = AX88772_EEPROM_LEN; 1438 data->eeprom_len = AX88772_EEPROM_LEN;
@@ -1423,12 +1459,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1423 dev->net->netdev_ops = &ax88178_netdev_ops; 1459 dev->net->netdev_ops = &ax88178_netdev_ops;
1424 dev->net->ethtool_ops = &ax88178_ethtool_ops; 1460 dev->net->ethtool_ops = &ax88178_ethtool_ops;
1425 1461
1426 phyid = asix_get_phyid(dev); 1462 /* Blink LEDS so users know driver saw dongle */
1427 dbg("PHYID=0x%08x", phyid); 1463 asix_sw_reset(dev, 0);
1464 msleep(150);
1428 1465
1429 ret = ax88178_reset(dev); 1466 asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
1430 if (ret < 0) 1467 msleep(150);
1431 return ret;
1432 1468
1433 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1469 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1434 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1470 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c924ea2bce0..99ed6eb4dfa 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
567{ 567{
568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, 568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long)&wwan_info, 570 .driver_info = 0,
571}, 571},
572 572
573/* 573/*
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index d43db32f947..9c26c6390d6 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
144 } 144 }
145 145
146 frame = (struct vl600_frame_hdr *) buf->data; 146 frame = (struct vl600_frame_hdr *) buf->data;
147 /* NOTE: Should check that frame->magic == 0x53544448? 147 /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48),
148 * Otherwise if we receive garbage at the beginning of the frame 148 * otherwise we may run out of memory w/a bad packet */
149 * we may end up allocating a huge buffer and saving all the 149 if (ntohl(frame->magic) != 0x53544448 &&
150 * future incoming data into it. */ 150 ntohl(frame->magic) != 0x44544d48)
151 goto error;
151 152
152 if (buf->len < sizeof(*frame) || 153 if (buf->len < sizeof(*frame) ||
153 buf->len != le32_to_cpup(&frame->len)) { 154 buf->len != le32_to_cpup(&frame->len)) {
@@ -296,6 +297,11 @@ encapsulate:
296 * overwrite the remaining fields. 297 * overwrite the remaining fields.
297 */ 298 */
298 packet = (struct vl600_pkt_hdr *) skb->data; 299 packet = (struct vl600_pkt_hdr *) skb->data;
300 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
301 * Since this modem only supports IPv4 and IPv6, just set all
302 * frames to 0x0800 (ETH_P_IP)
303 */
304 packet->h_proto = htons(ETH_P_IP);
299 memset(&packet->dummy, 0, sizeof(packet->dummy)); 305 memset(&packet->dummy, 0, sizeof(packet->dummy));
300 packet->len = cpu_to_le32(orig_len); 306 packet->len = cpu_to_le32(orig_len);
301 307
@@ -308,21 +314,12 @@ encapsulate:
308 if (skb->len < full_len) /* Pad */ 314 if (skb->len < full_len) /* Pad */
309 skb_put(skb, full_len - skb->len); 315 skb_put(skb, full_len - skb->len);
310 316
311 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
312 * Check if this is an IPv6 packet, and set the ethertype
313 * to 0x800
314 */
315 if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
316 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
317 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
318 }
319
320 return skb; 317 return skb;
321} 318}
322 319
323static const struct driver_info vl600_info = { 320static const struct driver_info vl600_info = {
324 .description = "LG VL600 modem", 321 .description = "LG VL600 modem",
325 .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE, 322 .flags = FLAG_RX_ASSEMBLE | FLAG_WWAN,
326 .bind = vl600_bind, 323 .bind = vl600_bind,
327 .unbind = vl600_unbind, 324 .unbind = vl600_unbind,
328 .status = usbnet_cdc_status, 325 .status = usbnet_cdc_status,
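
The rx_fixup hunk above replaces the old "should we check the magic?" note with an actual check, so a garbled start of stream can no longer make the driver trust frame->len and grow a huge reassembly buffer. A standalone sketch of the same validation; the header layout below is illustrative only, not the driver's real struct:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_frame_hdr {		/* hypothetical layout, for illustration */
	__le32 len;			/* total frame length claimed by the modem */
	__be32 magic;			/* frame tag */
	/* ... remaining header fields ... */
};

static bool example_frame_valid(const struct example_frame_hdr *frame,
				size_t buflen)
{
	if (buflen < sizeof(*frame))
		return false;		/* not even a full header yet */

	/* Only the two magic values accepted by the hunk are trusted. */
	if (ntohl(frame->magic) != 0x53544448 &&
	    ntohl(frame->magic) != 0x44544d48)
		return false;

	/* Only then is the length field believed. */
	return buflen == le32_to_cpup(&frame->len);
}
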
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 22a7cf951e7..a5b9b12ef26 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -51,6 +51,7 @@
51#define USB_VENDOR_ID_SMSC (0x0424) 51#define USB_VENDOR_ID_SMSC (0x0424)
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 52#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 53#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2
54 55
55#define check_warn(ret, fmt, args...) \ 56#define check_warn(ret, fmt, args...) \
56 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 57 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -1088,13 +1089,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1088 1089
1089 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); 1090 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
1090 le32_to_cpus(&rx_cmd_b); 1091 le32_to_cpus(&rx_cmd_b);
1091 skb_pull(skb, 4 + NET_IP_ALIGN); 1092 skb_pull(skb, 4 + RXW_PADDING);
1092 1093
1093 packet = skb->data; 1094 packet = skb->data;
1094 1095
1095 /* get the packet length */ 1096 /* get the packet length */
1096 size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; 1097 size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
1097 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; 1098 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
1098 1099
1099 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { 1100 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
1100 netif_dbg(dev, rx_err, dev->net, 1101 netif_dbg(dev, rx_err, dev->net,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2f91acccb7d..8873c6e6fb9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
1827 } 1827 }
1828 1828
1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
1830 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 1830 if (AR_SREV_9300_20_OR_LATER(ah))
1831 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1831} 1832}
1832 1833
1833/* 1834/*
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 85fa9cc7350..65ecb5bab25 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,6 +254,8 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
254 int r; 254 int r;
255 255
256 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 256 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
257 if (!sband)
258 return;
257 259
258 /* 260 /*
259 * If no country IE has been received always enable active scan 261 * If no country IE has been received always enable active scan
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 58ea0e5fabf..5f77cbe0b6a 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -175,6 +175,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
175 } 175 }
176} 176}
177 177
178/* TODO: verify if needed for SSLPN or LCN */
178static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) 179static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
179{ 180{
180 const struct b43_phy *phy = &dev->phy; 181 const struct b43_phy *phy = &dev->phy;
@@ -256,6 +257,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
256 unsigned int plcp_fragment_len; 257 unsigned int plcp_fragment_len;
257 u32 mac_ctl = 0; 258 u32 mac_ctl = 0;
258 u16 phy_ctl = 0; 259 u16 phy_ctl = 0;
260 bool fill_phy_ctl1 = (phy->type == B43_PHYTYPE_LP ||
261 phy->type == B43_PHYTYPE_N ||
262 phy->type == B43_PHYTYPE_HT);
259 u8 extra_ft = 0; 263 u8 extra_ft = 0;
260 struct ieee80211_rate *txrate; 264 struct ieee80211_rate *txrate;
261 struct ieee80211_tx_rate *rates; 265 struct ieee80211_tx_rate *rates;
@@ -531,7 +535,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
531 extra_ft |= B43_TXH_EFT_RTSFB_CCK; 535 extra_ft |= B43_TXH_EFT_RTSFB_CCK;
532 536
533 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS && 537 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
534 phy->type == B43_PHYTYPE_N) { 538 fill_phy_ctl1) {
535 txhdr->phy_ctl1_rts = cpu_to_le16( 539 txhdr->phy_ctl1_rts = cpu_to_le16(
536 b43_generate_tx_phy_ctl1(dev, rts_rate)); 540 b43_generate_tx_phy_ctl1(dev, rts_rate));
537 txhdr->phy_ctl1_rts_fb = cpu_to_le16( 541 txhdr->phy_ctl1_rts_fb = cpu_to_le16(
@@ -552,7 +556,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
552 break; 556 break;
553 } 557 }
554 558
555 if (phy->type == B43_PHYTYPE_N) { 559 if (fill_phy_ctl1) {
556 txhdr->phy_ctl1 = 560 txhdr->phy_ctl1 =
557 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate)); 561 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
558 txhdr->phy_ctl1_fb = 562 txhdr->phy_ctl1_fb =
@@ -736,7 +740,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
736 740
737 /* Link quality statistics */ 741 /* Link quality statistics */
738 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 742 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
743 case B43_PHYTYPE_HT:
744 /* TODO: is max the right choice? */
745 status.signal = max_t(__s8,
746 max(rxhdr->phy_ht_power0, rxhdr->phy_ht_power1),
747 rxhdr->phy_ht_power2);
748 break;
739 case B43_PHYTYPE_N: 749 case B43_PHYTYPE_N:
750 /* Broadcom has code for min and avg, but always uses max */
740 if (rxhdr->power0 == 16 || rxhdr->power0 == 32) 751 if (rxhdr->power0 == 16 || rxhdr->power0 == 32)
741 status.signal = max(rxhdr->power1, rxhdr->power2); 752 status.signal = max(rxhdr->power1, rxhdr->power2);
742 else 753 else
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 16c514d54af..98d90747836 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -249,6 +249,12 @@ struct b43_rxhdr_fw4 {
249 } __packed; 249 } __packed;
250 } __packed; 250 } __packed;
251 union { 251 union {
252 /* HT-PHY */
253 struct {
254 PAD_BYTES(1);
255 __s8 phy_ht_power0;
256 } __packed;
257
252 /* RSSI for N-PHYs */ 258 /* RSSI for N-PHYs */
253 struct { 259 struct {
254 __s8 power2; 260 __s8 power2;
@@ -257,7 +263,15 @@ struct b43_rxhdr_fw4 {
257 263
258 __le16 phy_status2; /* PHY RX Status 2 */ 264 __le16 phy_status2; /* PHY RX Status 2 */
259 } __packed; 265 } __packed;
260 __le16 phy_status3; /* PHY RX Status 3 */ 266 union {
267 /* HT-PHY */
268 struct {
269 __s8 phy_ht_power1;
270 __s8 phy_ht_power2;
271 } __packed;
272
273 __le16 phy_status3; /* PHY RX Status 3 */
274 } __packed;
261 union { 275 union {
262 /* Tested with 598.314, 644.1001 and 666.2 */ 276 /* Tested with 598.314, 644.1001 and 666.2 */
263 struct { 277 struct {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index b56a30297c2..6ebec8f4284 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -358,13 +358,14 @@ static uint nrxdactive(struct dma_info *di, uint h, uint t)
358 358
359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) 359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
360{ 360{
361 uint dmactrlflags = di->dma.dmactrlflags; 361 uint dmactrlflags;
362 362
363 if (di == NULL) { 363 if (di == NULL) {
364 DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name)); 364 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
365 return 0; 365 return 0;
366 } 366 }
367 367
368 dmactrlflags = di->dma.dmactrlflags;
368 dmactrlflags &= ~mask; 369 dmactrlflags &= ~mask;
369 dmactrlflags |= flags; 370 dmactrlflags |= flags;
370 371
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index da3411057af..ce918980e97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
990 return 0; 990 return 0;
991} 991}
992 992
993static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans) 993static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
994{ 994{
995 unsigned long flags; 995 unsigned long flags;
996 struct iwl_trans_pcie *trans_pcie = 996 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
997 IWL_TRANS_GET_PCIE_TRANS(trans);
998 997
998 /* tell the device to stop sending interrupts */
999 spin_lock_irqsave(&trans->shrd->lock, flags); 999 spin_lock_irqsave(&trans->shrd->lock, flags);
1000 iwl_disable_interrupts(trans); 1000 iwl_disable_interrupts(trans);
1001 spin_unlock_irqrestore(&trans->shrd->lock, flags); 1001 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1002 1002
1003 /* wait to make sure we flush pending tasklet*/
1004 synchronize_irq(bus(trans)->irq);
1005 tasklet_kill(&trans_pcie->irq_tasklet);
1006}
1007
1008static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1009{
1010 /* stop and reset the on-board processor */
1011 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1012
1013 /* tell the device to stop sending interrupts */
1014 iwl_trans_pcie_disable_sync_irq(trans);
1015
1016 /* device going down, Stop using ICT table */ 1003 /* device going down, Stop using ICT table */
1017 iwl_disable_ict(trans); 1004 iwl_disable_ict(trans);
1018 1005
@@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1039 1026
1040 /* Stop the device, and put it in low power state */ 1027 /* Stop the device, and put it in low power state */
1041 iwl_apm_stop(priv(trans)); 1028 iwl_apm_stop(priv(trans));
1029
1030 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1031 * Clean again the interrupt here
1032 */
1033 spin_lock_irqsave(&trans->shrd->lock, flags);
1034 iwl_disable_interrupts(trans);
1035 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1036
1037 /* wait to make sure we flush pending tasklet*/
1038 synchronize_irq(bus(trans)->irq);
1039 tasklet_kill(&trans_pcie->irq_tasklet);
1040
1041 /* stop and reset the on-board processor */
1042 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1042} 1043}
1043 1044
1044static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1045static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
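
Read as a whole, the two hunks above fold the old disable_sync_irq helper into stop_device and reorder the shutdown: interrupts are disabled once up front, disabled again after iwl_apm_stop() (which can raise an RF-kill interrupt), the tasklet is only flushed after that, and the NEVO reset moves to the very end. A compressed restatement of the resulting order, with the unchanged middle of the function elided:

static void iwl_trans_pcie_stop_device_sketch(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* 1. Tell the device to stop sending interrupts. */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* 2. Stop ICT, TX/RX DMA and power down the NIC (elided in the hunk). */
	iwl_disable_ict(trans);
	/* ... */
	iwl_apm_stop(priv(trans));

	/* 3. APM stop may have raised an RF-kill interrupt: clear it again. */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* 4. Now it is safe to flush and kill the IRQ tasklet... */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	/* 5. ...and finally stop and reset the on-board processor. */
	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}
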
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4fcd653bddc..a7f1ab28940 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -634,7 +634,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
634 if (channel && 634 if (channel &&
635 !(channel->flags & IEEE80211_CHAN_DISABLED)) 635 !(channel->flags & IEEE80211_CHAN_DISABLED))
636 cfg80211_inform_bss(wiphy, channel, 636 cfg80211_inform_bss(wiphy, channel,
637 bssid, le64_to_cpu(*(__le64 *)tsfdesc), 637 bssid, get_unaligned_le64(tsfdesc),
638 capa, intvl, ie, ielen, 638 capa, intvl, ie, ielen,
639 LBS_SCAN_RSSI_TO_MBM(rssi), 639 LBS_SCAN_RSSI_TO_MBM(rssi),
640 GFP_KERNEL); 640 GFP_KERNEL);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 11b69b300dc..728baa44525 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv,
995 spin_unlock_irqrestore(&card->buffer_lock, flags); 995 spin_unlock_irqrestore(&card->buffer_lock, flags);
996 break; 996 break;
997 default: 997 default:
998 kfree(packet);
998 netdev_err(priv->dev, "can't transfer buffer of type %d\n", 999 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
999 type); 1000 type);
1000 err = -EINVAL; 1001 err = -EINVAL;
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index dae8dbb24a0..8d3ab378662 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
819 wildcard_ssid_tlv->header.len = cpu_to_le16( 819 wildcard_ssid_tlv->header.len = cpu_to_le16(
820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv-> 820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
821 max_ssid_length))); 821 max_ssid_length)));
822 wildcard_ssid_tlv->max_ssid_length = 822
823 user_scan_in->ssid_list[ssid_idx].max_len; 823 /* max_ssid_length = 0 tells firmware to perform
824 specific scan for the SSID filled */
825 wildcard_ssid_tlv->max_ssid_length = 0;
824 826
825 memcpy(wildcard_ssid_tlv->ssid, 827 memcpy(wildcard_ssid_tlv->ssid,
826 user_scan_in->ssid_list[ssid_idx].ssid, 828 user_scan_in->ssid_list[ssid_idx].ssid,
@@ -1469,7 +1471,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1469 s32 rssi, const u8 *ie_buf, size_t ie_len, 1471 s32 rssi, const u8 *ie_buf, size_t ie_len,
1470 u16 beacon_period, u16 cap_info_bitmap, u8 band) 1472 u16 beacon_period, u16 cap_info_bitmap, u8 band)
1471{ 1473{
1472 struct mwifiex_bssdescriptor *bss_desc = NULL; 1474 struct mwifiex_bssdescriptor *bss_desc;
1473 int ret; 1475 int ret;
1474 unsigned long flags; 1476 unsigned long flags;
1475 u8 *beacon_ie; 1477 u8 *beacon_ie;
@@ -1484,6 +1486,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1484 1486
1485 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); 1487 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
1486 if (!beacon_ie) { 1488 if (!beacon_ie) {
1489 kfree(bss_desc);
1487 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); 1490 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
1488 return -ENOMEM; 1491 return -ENOMEM;
1489 } 1492 }
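
The hunk above plugs a leak on the error path: the new kfree(bss_desc) releases the descriptor allocated a few lines earlier (not shown in the hunk) when the beacon-IE copy fails. A minimal, generic sketch of that pattern; the allocation call and types are assumptions for illustration:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_desc { int dummy; };	/* stands in for the BSS descriptor */

static int example_update_bss(const u8 *ie_buf, size_t ie_len)
{
	struct example_desc *bss_desc;
	u8 *beacon_ie;

	bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
	if (!bss_desc)
		return -ENOMEM;

	beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
	if (!beacon_ie) {
		kfree(bss_desc);	/* the fix: don't leak the descriptor */
		return -ENOMEM;
	}

	/* ... use bss_desc and beacon_ie, free both when done ... */
	kfree(beacon_ie);
	kfree(bss_desc);
	return 0;
}
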
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1565792f27..377876315b8 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
919 { USB_DEVICE(0x050d, 0x935b) }, 919 { USB_DEVICE(0x050d, 0x935b) },
920 /* Buffalo */ 920 /* Buffalo */
921 { USB_DEVICE(0x0411, 0x00e8) }, 921 { USB_DEVICE(0x0411, 0x00e8) },
922 { USB_DEVICE(0x0411, 0x0158) },
922 { USB_DEVICE(0x0411, 0x016f) }, 923 { USB_DEVICE(0x0411, 0x016f) },
923 { USB_DEVICE(0x0411, 0x01a2) }, 924 { USB_DEVICE(0x0411, 0x01a2) },
924 /* Corega */ 925 /* Corega */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2ec5c00235e..99ff12d0c29 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -943,6 +943,7 @@ struct rt2x00_dev {
943 * Powersaving work 943 * Powersaving work
944 */ 944 */
945 struct delayed_work autowakeup_work; 945 struct delayed_work autowakeup_work;
946 struct work_struct sleep_work;
946 947
947 /* 948 /*
948 * Data queue arrays for RX, TX, Beacon and ATIM. 949 * Data queue arrays for RX, TX, Beacon and ATIM.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e1fb2a8569b..edd317fa7c0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
465 return NULL; 465 return NULL;
466} 466}
467 467
468static void rt2x00lib_sleep(struct work_struct *work)
469{
470 struct rt2x00_dev *rt2x00dev =
471 container_of(work, struct rt2x00_dev, sleep_work);
472
473 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
474 return;
475
476 /*
 477 * Check again if powersaving is enabled, to prevent races from delayed
478 * work execution.
479 */
480 if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
481 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
482 IEEE80211_CONF_CHANGE_PS);
483}
484
468static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, 485static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
469 struct sk_buff *skb, 486 struct sk_buff *skb,
470 struct rxdone_entry_desc *rxdesc) 487 struct rxdone_entry_desc *rxdesc)
@@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
512 cam |= (tim_ie->bitmap_ctrl & 0x01); 529 cam |= (tim_ie->bitmap_ctrl & 0x01);
513 530
514 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) 531 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
515 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 532 queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
516 IEEE80211_CONF_CHANGE_PS);
517} 533}
518 534
519static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 535static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
@@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1141 1157
1142 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1158 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1143 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); 1159 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
1160 INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);
1144 1161
1145 /* 1162 /*
1146 * Let the driver probe the device to detect the capabilities. 1163 * Let the driver probe the device to detect the capabilities.
@@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1197 */ 1214 */
1198 cancel_work_sync(&rt2x00dev->intf_work); 1215 cancel_work_sync(&rt2x00dev->intf_work);
1199 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 1216 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1217 cancel_work_sync(&rt2x00dev->sleep_work);
1200 if (rt2x00_is_usb(rt2x00dev)) { 1218 if (rt2x00_is_usb(rt2x00dev)) {
1201 del_timer_sync(&rt2x00dev->txstatus_timer); 1219 del_timer_sync(&rt2x00dev->txstatus_timer);
1202 cancel_work_sync(&rt2x00dev->rxdone_work); 1220 cancel_work_sync(&rt2x00dev->rxdone_work);
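
The rt2x00 hunks above stop calling rt2x00lib_config(..., IEEE80211_CONF_CHANGE_PS) directly from the RX-done path and instead queue a work item, re-checking the powersaving flag inside the handler to catch the race the new comment mentions; this defers the (potentially sleeping) reconfiguration to process context. A generic sketch of that defer-and-recheck pattern, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/bitops.h>

struct example_dev {
	struct work_struct sleep_work;
	unsigned long flags;			/* bit 0: powersaving enabled */
};

static void example_sleep_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, sleep_work);

	/* Re-check the condition: it may have changed before the work ran. */
	if (test_bit(0, &dev->flags))
		return;

	/* ... perform the sleeping reconfiguration here ... */
}

/* In atomic (tasklet) context, only queue the work: */
static void example_rx_path(struct example_dev *dev)
{
	if (!test_bit(0, &dev->flags))
		schedule_work(&dev->sleep_work);
}

/* Setup and teardown, mirroring the INIT_WORK() and cancel_work_sync() above: */
static void example_init(struct example_dev *dev)
{
	INIT_WORK(&dev->sleep_work, example_sleep_work);
}

static void example_remove(struct example_dev *dev)
{
	cancel_work_sync(&dev->sleep_work);
}
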
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 128ccb79318..fc29c671cf3 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -559,7 +559,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
559 break; 559 break;
560 } 560 }
561 /* Fail if SSID isn't present in the filters */ 561 /* Fail if SSID isn't present in the filters */
562 if (j == req->n_ssids) { 562 if (j == cmd->n_ssids) {
563 ret = -EINVAL; 563 ret = -EINVAL;
564 goto out_free; 564 goto out_free;
565 } 565 }
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6d3dd3988d0..19c0115092d 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -60,27 +60,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
60 */ 60 */
61struct device_node *of_irq_find_parent(struct device_node *child) 61struct device_node *of_irq_find_parent(struct device_node *child)
62{ 62{
63 struct device_node *p, *c = child; 63 struct device_node *p;
64 const __be32 *parp; 64 const __be32 *parp;
65 65
66 if (!of_node_get(c)) 66 if (!of_node_get(child))
67 return NULL; 67 return NULL;
68 68
69 do { 69 do {
70 parp = of_get_property(c, "interrupt-parent", NULL); 70 parp = of_get_property(child, "interrupt-parent", NULL);
71 if (parp == NULL) 71 if (parp == NULL)
72 p = of_get_parent(c); 72 p = of_get_parent(child);
73 else { 73 else {
74 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) 74 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
75 p = of_node_get(of_irq_dflt_pic); 75 p = of_node_get(of_irq_dflt_pic);
76 else 76 else
77 p = of_find_node_by_phandle(be32_to_cpup(parp)); 77 p = of_find_node_by_phandle(be32_to_cpup(parp));
78 } 78 }
79 of_node_put(c); 79 of_node_put(child);
80 c = p; 80 child = p;
81 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); 81 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
82 82
83 return (p == child) ? NULL : p; 83 return p;
84} 84}
85 85
86/** 86/**
@@ -424,6 +424,8 @@ void __init of_irq_init(const struct of_device_id *matches)
424 424
425 desc->dev = np; 425 desc->dev = np;
426 desc->interrupt_parent = of_irq_find_parent(np); 426 desc->interrupt_parent = of_irq_find_parent(np);
427 if (desc->interrupt_parent == np)
428 desc->interrupt_parent = NULL;
427 list_add_tail(&desc->list, &intc_desc_list); 429 list_add_tail(&desc->list, &intc_desc_list);
428 } 430 }
429 431
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa..f02b5235056 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
76 76
77config PCI_PRI 77config PCI_PRI
78 bool "PCI PRI support" 78 bool "PCI PRI support"
79 depends on PCI
79 select PCI_ATS 80 select PCI_ATS
80 help 81 help
81 PRI is the PCI Page Request Interface. It allows PCI devices that are 82 PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae9..fce1c54a0c8 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle)
459{ 459{
460 acpi_status status; 460 acpi_status status;
461 unsigned long long tmp; 461 unsigned long long tmp;
462 struct acpi_pci_root *root;
462 acpi_handle dummy_handle; 463 acpi_handle dummy_handle;
463 464
465 /*
466 * We shouldn't use this bridge if PCIe native hotplug control has been
467 * granted by the BIOS for it.
468 */
469 root = acpi_pci_find_root(handle);
470 if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
471 return -ENODEV;
472
464 /* if the bridge doesn't have _STA, we assume it is always there */ 473 /* if the bridge doesn't have _STA, we assume it is always there */
465 status = acpi_get_handle(handle, "_STA", &dummy_handle); 474 status = acpi_get_handle(handle, "_STA", &dummy_handle);
466 if (ACPI_SUCCESS(status)) { 475 if (ACPI_SUCCESS(status)) {
@@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
1376static acpi_status 1385static acpi_status
1377find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) 1386find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
1378{ 1387{
1388 struct acpi_pci_root *root;
1379 int *count = (int *)context; 1389 int *count = (int *)context;
1380 1390
1381 if (acpi_is_root_bridge(handle)) { 1391 if (!acpi_is_root_bridge(handle))
1382 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 1392 return AE_OK;
1383 handle_hotplug_event_bridge, NULL); 1393
1384 (*count)++; 1394 root = acpi_pci_find_root(handle);
1385 } 1395 if (!root)
1396 return AE_OK;
1397
1398 if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
1399 return AE_OK;
1400
1401 (*count)++;
1402 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1403 handle_hotplug_event_bridge, NULL);
1404
1386 return AE_OK ; 1405 return AE_OK ;
1387} 1406}
1388 1407
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a..085dbb5fc16 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
213 goto err_exit; 213 goto err_exit;
214 } 214 }
215 215
216 /* Wait for 1 second after checking link training status */
217 msleep(1000);
218
219 /* Check for a power fault */ 216 /* Check for a power fault */
220 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { 217 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
221 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); 218 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4a..7b1414810ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
280 else 280 else
281 msleep(1000); 281 msleep(1000);
282 282
283 /*
284 * Need to wait for 1000 ms after Data Link Layer Link Active
285 * (DLLLA) bit reads 1b before sending configuration request.
 286 * We need it before checking Link Training (LT) bit because
 287 * LT is still set even after DLLLA bit is set on some platforms.
288 */
289 msleep(1000);
290
283 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 291 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
284 if (retval) { 292 if (retval) {
285 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); 293 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
294 return retval; 302 return retval;
295 } 303 }
296 304
305 /*
306 * If the port supports Link speeds greater than 5.0 GT/s, we
307 * must wait for 100 ms after Link training completes before
308 * sending configuration request.
309 */
310 if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
311 msleep(100);
312
313 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
314
297 return retval; 315 return retval;
298} 316}
299 317
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
484 u16 slot_cmd; 502 u16 slot_cmd;
485 u16 cmd_mask; 503 u16 cmd_mask;
486 u16 slot_status; 504 u16 slot_status;
487 u16 lnk_status;
488 int retval = 0; 505 int retval = 0;
489 506
490 /* Clear sticky power-fault bit from previous power failures */ 507 /* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
516 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 533 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
517 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 534 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
518 535
519 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
520 if (retval) {
521 ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
522 __func__);
523 return retval;
524 }
525 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
526
527 return retval; 536 return retval;
528} 537}
529 538
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4..dd7e0c51a33 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
278 278
279static int is_shpc_capable(struct pci_dev *dev) 279static int is_shpc_capable(struct pci_dev *dev)
280{ 280{
281 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == 281 if (dev->vendor == PCI_VENDOR_ID_AMD &&
282 PCI_DEVICE_ID_AMD_GOLAM_7450)) 282 dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
283 return 1; 283 return 1;
284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) 284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
285 return 0; 285 return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce30..75ba2311b54 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
945 ctrl_dbg(ctrl, "Hotplug Controller:\n"); 945 ctrl_dbg(ctrl, "Hotplug Controller:\n");
946 946
947 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 947 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
948 PCI_DEVICE_ID_AMD_GOLAM_7450)) { 948 pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
949 /* amd shpc driver doesn't use Base Offset; assume 0 */ 949 /* amd shpc driver doesn't use Base Offset; assume 0 */
950 ctrl->mmio_base = pci_resource_start(pdev, 0); 950 ctrl->mmio_base = pci_resource_start(pdev, 0);
951 ctrl->mmio_size = pci_resource_len(pdev, 0); 951 ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a43cfd906c6..d93e962f261 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = {
589 .update_status = dell_send_intensity, 589 .update_status = dell_send_intensity,
590}; 590};
591 591
592static void touchpad_led_on() 592static void touchpad_led_on(void)
593{ 593{
594 int command = 0x97; 594 int command = 0x97;
595 char data = 1; 595 char data = 1;
596 i8042_command(&data, command | 1 << 12); 596 i8042_command(&data, command | 1 << 12);
597} 597}
598 598
599static void touchpad_led_off() 599static void touchpad_led_off(void)
600{ 600{
601 int command = 0x97; 601 int command = 0x97;
602 char data = 2; 602 char data = 2;
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8..298c6c6a279 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
160 break; 160 break;
161 } 161 }
162 162
163 if (!ri) 163 if (i == ARRAY_SIZE(aat2870_regulators))
164 return NULL; 164 return NULL;
165 165
166 ri->enable_addr = AAT2870_LDO_EN; 166 ri->enable_addr = AAT2870_LDO_EN;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d0216022..938398f3e86 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
2799 list_del(&rdev->list); 2799 list_del(&rdev->list);
2800 if (rdev->supply) 2800 if (rdev->supply)
2801 regulator_put(rdev->supply); 2801 regulator_put(rdev->supply);
2802 device_unregister(&rdev->dev);
2803 kfree(rdev->constraints); 2802 kfree(rdev->constraints);
2803 device_unregister(&rdev->dev);
2804 mutex_unlock(&regulator_list_mutex); 2804 mutex_unlock(&regulator_list_mutex);
2805} 2805}
2806EXPORT_SYMBOL_GPL(regulator_unregister); 2806EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 66d2d60b436..b552aae55b4 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
664 664
665 switch (id) { 665 switch (id) {
666 case TPS65910_REG_VDD1: 666 case TPS65910_REG_VDD1:
667 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 667 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
668 if (dcdc_mult == 1) 668 if (dcdc_mult == 1)
669 dcdc_mult--; 669 dcdc_mult--;
670 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 670 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
671 671
672 tps65910_modify_bits(pmic, TPS65910_VDD1, 672 tps65910_modify_bits(pmic, TPS65910_VDD1,
673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), 673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); 675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
676 break; 676 break;
677 case TPS65910_REG_VDD2: 677 case TPS65910_REG_VDD2:
678 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 678 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
679 if (dcdc_mult == 1) 679 if (dcdc_mult == 1)
680 dcdc_mult--; 680 dcdc_mult--;
681 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 681 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
682 682
683 tps65910_modify_bits(pmic, TPS65910_VDD2, 683 tps65910_modify_bits(pmic, TPS65910_VDD2,
684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), 684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
756 switch (id) { 756 switch (id) {
757 case TPS65910_REG_VDD1: 757 case TPS65910_REG_VDD1:
758 case TPS65910_REG_VDD2: 758 case TPS65910_REG_VDD2:
759 mult = (selector / VDD1_2_NUM_VOLTS) + 1; 759 mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
760 volt = VDD1_2_MIN_VOLT + 760 volt = VDD1_2_MIN_VOLT +
761 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; 761 (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
762 break; 762 break;
763 case TPS65911_REG_VDDCTRL: 763 case TPS65911_REG_VDDCTRL:
764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); 764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
947 947
948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { 948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
949 pmic->desc[i].ops = &tps65910_ops_dcdc; 949 pmic->desc[i].ops = &tps65910_ops_dcdc;
950 pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
951 VDD1_2_NUM_VOLT_COARSE;
950 } else if (i == TPS65910_REG_VDD3) { 952 } else if (i == TPS65910_REG_VDD3) {
951 if (tps65910_chip_id(tps65910) == TPS65910) 953 if (tps65910_chip_id(tps65910) == TPS65910)
952 pmic->desc[i].ops = &tps65910_ops_vdd3; 954 pmic->desc[i].ops = &tps65910_ops_vdd3;
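
The tps65910 hunks above switch VDD1/VDD2 from the single VDD1_2_NUM_VOLTS range to a fine/coarse split: each block of VDD1_2_NUM_VOLT_FINE selectors shares one gain setting (dcdc_mult) and the remainder picks the fine step, with n_voltages sized as FINE * COARSE. A small standalone illustration of that mapping; the numeric values of the constants are placeholders, not the header's real ones:

#include <stdio.h>

#define NUM_VOLT_FINE	73	/* placeholder for VDD1_2_NUM_VOLT_FINE */
#define NUM_VOLT_COARSE	 3	/* placeholder for VDD1_2_NUM_VOLT_COARSE */

int main(void)
{
	unsigned int selector = 100;	/* any value below FINE * COARSE */
	unsigned int dcdc_mult, vsel;

	dcdc_mult = (selector / NUM_VOLT_FINE) + 1;
	if (dcdc_mult == 1)		/* mirrors the hunk: a multiplier of 1 is stored as 0 */
		dcdc_mult--;
	vsel = (selector % NUM_VOLT_FINE) + 3;	/* fine step, offset by 3 as in the hunk */

	printf("selector %u -> gain field %u, vsel %u (of %u total selectors)\n",
	       selector, dcdc_mult, vsel, NUM_VOLT_FINE * NUM_VOLT_COARSE);
	return 0;
}
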
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa0..11cc308d66e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
71#define VREG_TYPE 1 71#define VREG_TYPE 1
72#define VREG_REMAP 2 72#define VREG_REMAP 2
73#define VREG_DEDICATED 3 /* LDO control */ 73#define VREG_DEDICATED 3 /* LDO control */
74#define VREG_VOLTAGE_SMPS_4030 9
74/* TWL6030 register offsets */ 75/* TWL6030 register offsets */
75#define VREG_TRANS 1 76#define VREG_TRANS 1
76#define VREG_STATE 2 77#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
514 .get_status = twl4030reg_get_status, 515 .get_status = twl4030reg_get_status,
515}; 516};
516 517
518static int
519twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
520 unsigned *selector)
521{
522 struct twlreg_info *info = rdev_get_drvdata(rdev);
523 int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
524
525 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
526 vsel);
527 return 0;
528}
529
530static int twl4030smps_get_voltage(struct regulator_dev *rdev)
531{
532 struct twlreg_info *info = rdev_get_drvdata(rdev);
533 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
534 VREG_VOLTAGE_SMPS_4030);
535
536 return vsel * 12500 + 600000;
537}
538
539static struct regulator_ops twl4030smps_ops = {
540 .set_voltage = twl4030smps_set_voltage,
541 .get_voltage = twl4030smps_get_voltage,
542};
543
517static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 544static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
518{ 545{
519 struct twlreg_info *info = rdev_get_drvdata(rdev); 546 struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
856 }, \ 883 }, \
857 } 884 }
858 885
886#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
887 { \
888 .base = offset, \
889 .id = num, \
890 .delay = turnon_delay, \
891 .remap = remap_conf, \
892 .desc = { \
893 .name = #label, \
894 .id = TWL4030_REG_##label, \
895 .ops = &twl4030smps_ops, \
896 .type = REGULATOR_VOLTAGE, \
897 .owner = THIS_MODULE, \
898 }, \
899 }
900
859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ 901#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
860 .base = offset, \ 902 .base = offset, \
861 .min_mV = min_mVolts, \ 903 .min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
947 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), 989 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
948 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), 990 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
949 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), 991 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
950 TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08), 992 TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
951 TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08), 993 TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
952 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), 994 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
953 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), 995 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
954 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), 996 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
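
The new twl4030smps_set_voltage()/get_voltage() pair above encodes VDD1/VDD2 as linear 12.5 mV steps starting at 600 mV: vsel = DIV_ROUND_UP(min_uV - 600000, 12500) on the way in, vsel * 12500 + 600000 on the way out. A tiny worked example of that round trip:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int min_uV = 1200000;				/* ask for 1.2 V */
	int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);/* (600000 + 12499) / 12500 = 48 */
	int out_uV = vsel * 12500 + 600000;		/* 48 * 12500 + 600000 = 1200000 */

	printf("vsel = %d, reported voltage = %d uV\n", vsel, out_uV);
	return 0;
}
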
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index b3eba3cddd4..e4b6880aabd 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
220 } 220 }
221} 221}
222 222
223static int puv3_rtc_remove(struct platform_device *dev) 223static int __devexit puv3_rtc_remove(struct platform_device *dev)
224{ 224{
225 struct rtc_device *rtc = platform_get_drvdata(dev); 225 struct rtc_device *rtc = platform_get_drvdata(dev);
226 226
@@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
236 return 0; 236 return 0;
237} 237}
238 238
239static int puv3_rtc_probe(struct platform_device *pdev) 239static int __devinit puv3_rtc_probe(struct platform_device *pdev)
240{ 240{
241 struct rtc_device *rtc; 241 struct rtc_device *rtc;
242 struct resource *res; 242 struct resource *res;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 43068fbd0ba..1b6d9247fdc 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
641 641
642 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 642 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
643 return -ENODATA; 643 return -ENODATA;
644 if (OLDMEM_BASE)
645 return -ENODATA;
644 646
645 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); 647 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
646 debug_register_view(zcore_dbf, &debug_sprintf_view); 648 debug_register_view(zcore_dbf, &debug_sprintf_view);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b77ae519d79..ec94f049e99 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
1271} 1271}
1272 1272
1273/** 1273/**
1274 * ap_schedule_poll_timer(): Schedule poll timer. 1274 * __ap_schedule_poll_timer(): Schedule poll timer.
1275 * 1275 *
1276 * Set up the timer to run the poll tasklet 1276 * Set up the timer to run the poll tasklet
1277 */ 1277 */
1278static inline void ap_schedule_poll_timer(void) 1278static inline void __ap_schedule_poll_timer(void)
1279{ 1279{
1280 ktime_t hr_time; 1280 ktime_t hr_time;
1281 1281
1282 spin_lock_bh(&ap_poll_timer_lock); 1282 spin_lock_bh(&ap_poll_timer_lock);
1283 if (ap_using_interrupts() || ap_suspend_flag) 1283 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1284 goto out;
1285 if (hrtimer_is_queued(&ap_poll_timer))
1286 goto out; 1284 goto out;
1287 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1285 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1288 hr_time = ktime_set(0, poll_timeout); 1286 hr_time = ktime_set(0, poll_timeout);
@@ -1294,6 +1292,18 @@ out:
1294} 1292}
1295 1293
1296/** 1294/**
1295 * ap_schedule_poll_timer(): Schedule poll timer.
1296 *
1297 * Set up the timer to run the poll tasklet
1298 */
1299static inline void ap_schedule_poll_timer(void)
1300{
1301 if (ap_using_interrupts())
1302 return;
1303 __ap_schedule_poll_timer();
1304}
1305
1306/**
1297 * ap_poll_read(): Receive pending reply messages from an AP device. 1307 * ap_poll_read(): Receive pending reply messages from an AP device.
1298 * @ap_dev: pointer to the AP device 1308 * @ap_dev: pointer to the AP device
1299 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1309 * @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1374 *flags |= 1; 1384 *flags |= 1;
1375 *flags |= 2; 1385 *flags |= 2;
1376 break; 1386 break;
1377 case AP_RESPONSE_Q_FULL:
1378 case AP_RESPONSE_RESET_IN_PROGRESS: 1387 case AP_RESPONSE_RESET_IN_PROGRESS:
1388 __ap_schedule_poll_timer();
1389 case AP_RESPONSE_Q_FULL:
1379 *flags |= 2; 1390 *flags |= 2;
1380 break; 1391 break;
1381 case AP_RESPONSE_MESSAGE_TOO_BIG: 1392 case AP_RESPONSE_MESSAGE_TOO_BIG:
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index fa80ba1f034..9b66d2d1809 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,7 +4,7 @@ menu "S/390 network device drivers"
4config LCS 4config LCS
5 def_tristate m 5 def_tristate m
6 prompt "Lan Channel Station Interface" 6 prompt "Lan Channel Station Interface"
7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
8 help 8 help
9 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
10 This device driver supports Token Ring (IEEE 802.5), 10 This device driver supports Token Ring (IEEE 802.5),
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c28713da1ec..863fc219715 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -50,7 +50,7 @@
50#include "lcs.h" 50#include "lcs.h"
51 51
52 52
53#if !defined(CONFIG_NET_ETHERNET) && \ 53#if !defined(CONFIG_ETHERNET) && \
54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI) 54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55#error Cannot compile lcs.c without some net devices switched on. 55#error Cannot compile lcs.c without some net devices switched on.
56#endif 56#endif
@@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card)
1634 int rc; 1634 int rc;
1635 1635
1636 LCS_DBF_TEXT(2, trace, "strtauto"); 1636 LCS_DBF_TEXT(2, trace, "strtauto");
1637#ifdef CONFIG_NET_ETHERNET 1637#ifdef CONFIG_ETHERNET
1638 card->lan_type = LCS_FRAME_TYPE_ENET; 1638 card->lan_type = LCS_FRAME_TYPE_ENET;
1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1640 if (rc == 0) 1640 if (rc == 0)
@@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2166 goto netdev_out; 2166 goto netdev_out;
2167 } 2167 }
2168 switch (card->lan_type) { 2168 switch (card->lan_type) {
2169#ifdef CONFIG_NET_ETHERNET 2169#ifdef CONFIG_ETHERNET
2170 case LCS_FRAME_TYPE_ENET: 2170 case LCS_FRAME_TYPE_ENET:
2171 card->lan_type_trans = eth_type_trans; 2171 card->lan_type_trans = eth_type_trans;
2172 dev = alloc_etherdev(0); 2172 dev = alloc_etherdev(0);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3251333a23d..b6a6356d09b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1994,6 +1994,8 @@ static struct net_device *netiucv_init_netdevice(char *username)
1994 netiucv_setup_netdevice); 1994 netiucv_setup_netdevice);
1995 if (!dev) 1995 if (!dev)
1996 return NULL; 1996 return NULL;
1997 if (dev_alloc_name(dev, dev->name) < 0)
1998 goto out_netdev;
1997 1999
1998 privptr = netdev_priv(dev); 2000 privptr = netdev_priv(dev);
1999 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b77c65ed138..4abc79d3963 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
236#define QETH_IN_BUF_COUNT_MAX 128 236#define QETH_IN_BUF_COUNT_MAX 128
237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) 237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ 238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
239 ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \ 239 ((card)->qdio.in_buf_pool.buf_count / 2)
240 ((card)->qdio.in_buf_pool.buf_count / 2))
241 240
242/* buffers we have to be behind before we get a PCI */ 241/* buffers we have to be behind before we get a PCI */
243#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) 242#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 81534437373..fff57de7894 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -881,7 +881,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
881void qeth_schedule_recovery(struct qeth_card *card) 881void qeth_schedule_recovery(struct qeth_card *card)
882{ 882{
883 QETH_CARD_TEXT(card, 2, "startrec"); 883 QETH_CARD_TEXT(card, 2, "startrec");
884 WARN_ON(1);
885 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) 884 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
886 schedule_work(&card->kernel_thread_starter); 885 schedule_work(&card->kernel_thread_starter);
887} 886}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e4c1176ee25..4d5307ddbe5 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2756,11 +2756,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2756 struct neighbour *n = NULL; 2756 struct neighbour *n = NULL;
2757 struct dst_entry *dst; 2757 struct dst_entry *dst;
2758 2758
2759 rcu_read_lock();
2759 dst = skb_dst(skb); 2760 dst = skb_dst(skb);
2760 if (dst) 2761 if (dst)
2761 n = dst_get_neighbour(dst); 2762 n = dst_get_neighbour(dst);
2762 if (n) { 2763 if (n) {
2763 cast_type = n->type; 2764 cast_type = n->type;
2765 rcu_read_unlock();
2764 if ((cast_type == RTN_BROADCAST) || 2766 if ((cast_type == RTN_BROADCAST) ||
2765 (cast_type == RTN_MULTICAST) || 2767 (cast_type == RTN_MULTICAST) ||
2766 (cast_type == RTN_ANYCAST)) 2768 (cast_type == RTN_ANYCAST))
@@ -2768,6 +2770,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2768 else 2770 else
2769 return RTN_UNSPEC; 2771 return RTN_UNSPEC;
2770 } 2772 }
2773 rcu_read_unlock();
2774
2771 /* try something else */ 2775 /* try something else */
2772 if (skb->protocol == ETH_P_IPV6) 2776 if (skb->protocol == ETH_P_IPV6)
2773 return (skb_network_header(skb)[24] == 0xff) ? 2777 return (skb_network_header(skb)[24] == 0xff) ?
@@ -2847,6 +2851,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2847 } 2851 }
2848 2852
2849 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); 2853 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2854
2855 rcu_read_lock();
2850 dst = skb_dst(skb); 2856 dst = skb_dst(skb);
2851 if (dst) 2857 if (dst)
2852 n = dst_get_neighbour(dst); 2858 n = dst_get_neighbour(dst);
@@ -2893,6 +2899,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2893 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; 2899 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2894 } 2900 }
2895 } 2901 }
2902 rcu_read_unlock();
2896} 2903}
2897 2904
2898static inline void qeth_l3_hdr_csum(struct qeth_card *card, 2905static inline void qeth_l3_hdr_csum(struct qeth_card *card,
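The qeth_l3_main hunks above wrap the skb_dst()/dst_get_neighbour() lookups in an RCU read-side critical section, since the neighbour pointer is only stable while the read lock is held. A reduced sketch of that locking shape (dst_get_neighbour() is the accessor this kernel generation provides; the function name is illustrative):

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int example_cast_type(struct sk_buff *skb)
{
	struct dst_entry *dst;
	struct neighbour *n = NULL;
	int type = RTN_UNSPEC;

	rcu_read_lock();
	dst = skb_dst(skb);
	if (dst)
		n = dst_get_neighbour(dst);
	if (n)
		type = n->type;		/* only valid inside the read section */
	rcu_read_unlock();
	/* n must not be dereferenced past this point */

	return type;
}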
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0ea2fbfe0e9..d979bb26522 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
335 QETH_IN_BUF_COUNT_MAX) 335 QETH_IN_BUF_COUNT_MAX)
336 qeth_realloc_buffer_pool(card, 336 qeth_realloc_buffer_pool(card,
337 QETH_IN_BUF_COUNT_MAX); 337 QETH_IN_BUF_COUNT_MAX);
338 break;
339 } else 338 } else
340 rc = -EPERM; 339 rc = -EPERM;
341 default: /* fall through */ 340 break;
341 default:
342 rc = -EINVAL; 342 rc = -EINVAL;
343 } 343 }
344out: 344out:
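The qeth_l3_sys hunk above is a control-flow fix: the break has to sit after the whole if/else, otherwise the -EPERM branch falls through into default: and is overwritten with -EINVAL. An illustrative reduction, with all names hypothetical:

#include <linux/errno.h>
#include <linux/types.h>

static int example_store_sniffer(int value, bool card_is_up)
{
	int rc = 0;

	switch (value) {
	case 0:
		break;
	case 1:
		if (card_is_up)
			rc = 0;		/* sniffer enabled, pool resized */
		else
			rc = -EPERM;
		break;			/* covers both branches; no fall-through */
	default:
		rc = -EINVAL;
	}
	return rc;
}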
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4aa76d6f11d..705e13e470a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/mutex.h> 43#include <linux/mutex.h>
43#include <linux/spinlock.h> 44#include <linux/spinlock.h>
@@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1109 unique_id++; 1110 unique_id++;
1110 } 1111 }
1111 1112
1113 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1114 PCIE_LINK_STATE_CLKPM);
1115
1112 error = pci_enable_device(pdev); 1116 error = pci_enable_device(pdev);
1113 if (error) 1117 if (error)
1114 goto out; 1118 goto out;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e76107b2ade..865d452542b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/pci-aspm.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
@@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3922 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3923 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3923 return -ENODEV; 3924 return -ENODEV;
3924 } 3925 }
3926
3927 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3928 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3929
3925 err = pci_enable_device(h->pdev); 3930 err = pci_enable_device(h->pdev);
3926 if (err) { 3931 if (err) {
3927 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3932 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
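Both the aacraid and hpsa hunks above disable PCIe link power management before enabling the device, since these RAID controllers misbehave with ASPM or clock PM active. A minimal sketch of that probe-time pattern, with error handling trimmed and the helper name assumed:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

static int example_pci_init(struct pci_dev *pdev)
{
	int err;

	/* turn off L0s, L1 and clock PM before touching the device */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	return 0;
}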
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8889b1babca..4e041f6d808 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
2802 2802
2803 if (ioc->is_driver_loading) 2803 if (ioc->is_driver_loading)
2804 return; 2804 return;
2805
2806 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
2807 if (!fw_event)
2808 return;
2809
2805 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; 2810 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
2806 fw_event->ioc = ioc; 2811 fw_event->ioc = ioc;
2807 _scsih_fw_event_add(ioc, fw_event); 2812 _scsih_fw_event_add(ioc, fw_event);
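The mpt2sas hunk above fixes a use of a never-allocated work item: the fw_event is now allocated with GFP_ATOMIC (the caller may not be able to sleep) and the function simply returns if allocation fails. A reduced sketch with illustrative types:

#include <linux/slab.h>
#include <linux/types.h>

struct example_fw_event {
	int	event;
	void	*ioc;
};

static void example_error_recovery(void *ioc, bool driver_loading)
{
	struct example_fw_event *fw_event;

	if (driver_loading)
		return;

	fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
	if (!fw_event)
		return;		/* nothing sensible to do; drop the event */

	fw_event->event = 1;	/* e.g. "remove unresponding devices" */
	fw_event->ioc = ioc;
	/* ...hand fw_event to the firmware-event worker... */
}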
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06bc26554a6..f85cfa6c47b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1409 1409
1410 blk_start_request(req); 1410 blk_start_request(req);
1411 1411
1412 scmd_printk(KERN_INFO, cmd, "killing request\n");
1413
1412 sdev = cmd->device; 1414 sdev = cmd->device;
1413 starget = scsi_target(sdev); 1415 starget = scsi_target(sdev);
1414 shost = sdev->host; 1416 shost = sdev->host;
@@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q)
1490 struct request *req; 1492 struct request *req;
1491 1493
1492 if (!sdev) { 1494 if (!sdev) {
1493 printk("scsi: killing requests for dead queue\n");
1494 while ((req = blk_peek_request(q)) != NULL) 1495 while ((req = blk_peek_request(q)) != NULL)
1495 scsi_kill_request(req, q); 1496 scsi_kill_request(req, q);
1496 return; 1497 return;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 72273a0e566..b3c6d957fbd 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
319 return sdev; 319 return sdev;
320 320
321out_device_destroy: 321out_device_destroy:
322 scsi_device_set_state(sdev, SDEV_DEL); 322 __scsi_remove_device(sdev);
323 transport_destroy_device(&sdev->sdev_gendev);
324 put_device(&sdev->sdev_dev);
325 scsi_free_queue(sdev->request_queue);
326 put_device(&sdev->sdev_gendev);
327out: 323out:
328 if (display_failure_msg) 324 if (display_failure_msg)
329 printk(ALLOC_FAILURE_MSG, __func__); 325 printk(ALLOC_FAILURE_MSG, __func__);
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c..21c70b2b831 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -426,7 +426,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
426 goto err_clk; 426 goto err_clk;
427 } 427 }
428 428
429 mfp_set_groupg(&pdev->dev); 429 mfp_set_groupg(&pdev->dev, NULL);
430 nuc900_init_spi(hw); 430 nuc900_init_spi(hw);
431 431
432 err = spi_bitbang_start(&hw->bitbang); 432 err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f103e470cb6..5559b229919 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2184 goto err_clk_prep; 2184 goto err_clk_prep;
2185 } 2185 }
2186 2186
2187 status = clk_enable(pl022->clk);
2188 if (status) {
2189 dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
2190 goto err_no_clk_en;
2191 }
2192
2187 /* Disable SSP */ 2193 /* Disable SSP */
2188 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 2194 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
2189 SSP_CR1(pl022->virtbase)); 2195 SSP_CR1(pl022->virtbase));
@@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2237 2243
2238 free_irq(adev->irq[0], pl022); 2244 free_irq(adev->irq[0], pl022);
2239 err_no_irq: 2245 err_no_irq:
2246 clk_disable(pl022->clk);
2247 err_no_clk_en:
2240 clk_unprepare(pl022->clk); 2248 clk_unprepare(pl022->clk);
2241 err_clk_prep: 2249 err_clk_prep:
2242 clk_put(pl022->clk); 2250 clk_put(pl022->clk);
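The pl022 hunks above add a clk_enable() step at probe time and unwind it on the error path. A sketch of the prepare/enable bring-up with teardown in reverse order (labels and the helper name are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_bringup(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int status;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	status = clk_prepare(clk);
	if (status)
		goto err_clk_prep;

	status = clk_enable(clk);
	if (status)
		goto err_no_clk_en;

	*out = clk;
	return 0;

err_no_clk_en:
	clk_unprepare(clk);
err_clk_prep:
	clk_put(clk);
	return status;
}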
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index 9e1864c6dfd..8190f2aaf53 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -1,6 +1,7 @@
1config ET131X 1config ET131X
2 tristate "Agere ET-1310 Gigabit Ethernet support" 2 tristate "Agere ET-1310 Gigabit Ethernet support"
3 depends on PCI 3 depends on PCI && NET && NETDEVICES
4 select PHYLIB
4 default n 5 default n
5 ---help--- 6 ---help---
6 This driver supports Agere ET-1310 ethernet adapters. 7 This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f5f44a02456..0c1c6ca8c37 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
4469 return 0; 4469 return 0;
4470} 4470}
4471 4471
4472static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4473#define ET131X_PM_OPS (&et131x_pm_ops)
4474#else
4475#define ET131X_PM_OPS NULL
4476#endif
4477
4472/* ISR functions */ 4478/* ISR functions */
4473 4479
4474/** 4480/**
@@ -5470,12 +5476,6 @@ err_out:
5470 return result; 5476 return result;
5471} 5477}
5472 5478
5473static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
5474#define ET131X_PM_OPS (&et131x_pm_ops)
5475#else
5476#define ET131X_PM_OPS NULL
5477#endif
5478
5479static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { 5479static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5480 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, 5480 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5481 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, 5481 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
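The et131x hunks above only move code: the dev_pm_ops definition has to live inside the same power-management #ifdef block as the suspend/resume callbacks it references, with a NULL fallback when that support is compiled out. The shape, assuming CONFIG_PM_SLEEP as the guard:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS	(&example_pm_ops)
#else
#define EXAMPLE_PM_OPS	NULL
#endif

/* later, in the driver definition: .driver.pm = EXAMPLE_PM_OPS */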
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 326e967d54e..aec9311b108 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
242 242
243static int iio_event_getfd(struct iio_dev *indio_dev) 243static int iio_event_getfd(struct iio_dev *indio_dev)
244{ 244{
245 if (indio_dev->event_interface == NULL) 245 struct iio_event_interface *ev_int = indio_dev->event_interface;
246 int fd;
247
248 if (ev_int == NULL)
246 return -ENODEV; 249 return -ENODEV;
247 250
248 mutex_lock(&indio_dev->event_interface->event_list_lock); 251 mutex_lock(&ev_int->event_list_lock);
249 if (test_and_set_bit(IIO_BUSY_BIT_POS, 252 if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
250 &indio_dev->event_interface->flags)) { 253 mutex_unlock(&ev_int->event_list_lock);
251 mutex_unlock(&indio_dev->event_interface->event_list_lock);
252 return -EBUSY; 254 return -EBUSY;
253 } 255 }
254 mutex_unlock(&indio_dev->event_interface->event_list_lock); 256 mutex_unlock(&ev_int->event_list_lock);
255 return anon_inode_getfd("iio:event", 257 fd = anon_inode_getfd("iio:event",
256 &iio_event_chrdev_fileops, 258 &iio_event_chrdev_fileops, ev_int, O_RDONLY);
257 indio_dev->event_interface, O_RDONLY); 259 if (fd < 0) {
260 mutex_lock(&ev_int->event_list_lock);
261 clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
262 mutex_unlock(&ev_int->event_list_lock);
263 }
264 return fd;
258} 265}
259 266
260static int __init iio_init(void) 267static int __init iio_init(void)
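The industrialio hunk above introduces a local ev_int pointer and, more importantly, clears the BUSY bit again when anon_inode_getfd() fails, so the event interface can still be opened later. A reduced sketch with illustrative structure and fops names:

#include <linux/anon_inodes.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/mutex.h>

struct example_ev {
	struct mutex	lock;
	unsigned long	flags;		/* bit 0: interface busy */
};

static const struct file_operations example_ev_fops;	/* illustrative */

static int example_event_getfd(struct example_ev *ev)
{
	int fd;

	mutex_lock(&ev->lock);
	if (test_and_set_bit(0, &ev->flags)) {
		mutex_unlock(&ev->lock);
		return -EBUSY;
	}
	mutex_unlock(&ev->lock);

	fd = anon_inode_getfd("example:event", &example_ev_fops, ev, O_RDONLY);
	if (fd < 0) {
		mutex_lock(&ev->lock);
		clear_bit(0, &ev->flags);	/* undo the claim on failure */
		mutex_unlock(&ev->lock);
	}
	return fd;
}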
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index d335c7d6fa0..828526d4c28 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -32,8 +32,8 @@
32#include "as102_fw.h" 32#include "as102_fw.h"
33#include "dvbdev.h" 33#include "dvbdev.h"
34 34
35int debug; 35int as102_debug;
36module_param_named(debug, debug, int, 0644); 36module_param_named(debug, as102_debug, int, 0644);
37MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)"); 37MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
38 38
39int dual_tuner; 39int dual_tuner;
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index bcda635b5a9..fd33f5a12dc 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver;
37#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver" 37#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
38#define DRIVER_NAME "as10x_usb" 38#define DRIVER_NAME "as10x_usb"
39 39
40extern int debug; 40extern int as102_debug;
41#define debug as102_debug
41 42
42#define dprintk(debug, args...) \ 43#define dprintk(debug, args...) \
43 do { if (debug) { \ 44 do { if (debug) { \
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b445cd63f90..2542c374390 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
275 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; 275 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
276 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 276 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
277 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; 277 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
278 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); 278 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
279 hw_buffer.s.size = fs->size; 279 hw_buffer.s.size = fs->size;
280 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; 280 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
281 } 281 }
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
index 5cde96b2e6e..5c2a15b42df 100644
--- a/drivers/staging/slicoss/Kconfig
+++ b/drivers/staging/slicoss/Kconfig
@@ -1,6 +1,6 @@
1config SLICOSS 1config SLICOSS
2 tristate "Alacritech Gigabit IS-NIC support" 2 tristate "Alacritech Gigabit IS-NIC support"
3 depends on PCI && X86 3 depends on PCI && X86 && NET
4 default n 4 default n
5 help 5 help
6 This driver supports Alacritech's IS-NIC gigabit ethernet cards. 6 This driver supports Alacritech's IS-NIC gigabit ethernet cards.
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6facbc2..44fbebab507 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
46 46
47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" 47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
48 : "=r" (__c)); 48 : "=r" (__c));
49 isb();
49 50
50 return __c; 51 return __c;
51} 52}
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
55 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" 56 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
56 : /* no output register */ 57 : /* no output register */
57 : "r" (c)); 58 : "r" (c));
59 isb();
58} 60}
59 61
60static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) 62static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
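The hvc_dcc hunks above add an isb() after each DCC coprocessor access so the mrc/mcr completes before the surrounding polling loop re-reads the status register. The read side, reduced (ARM-specific; a sketch of the pattern, not a drop-in replacement — isb() is assumed to come from the ARM barrier headers the driver already pulls in):

static inline char example_dcc_getchar(void)
{
	char c;

	asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
		     : "=r" (c));
	isb();		/* make sure the transfer has completed */

	return c;
}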
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5f479dada6f..925a1e547a8 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
1560 Support for the IFX6x60 modem devices on Intel MID platforms. 1560 Support for the IFX6x60 modem devices on Intel MID platforms.
1561 1561
1562config SERIAL_PCH_UART 1562config SERIAL_PCH_UART
1563 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART" 1563 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
1564 depends on PCI 1564 depends on PCI
1565 select SERIAL_CORE 1565 select SERIAL_CORE
1566 help 1566 help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
1568 which is an IOH(Input/Output Hub) for x86 embedded processor. 1568 which is an IOH(Input/Output Hub) for x86 embedded processor.
1569 Enabling PCH_DMA, this PCH UART works as DMA mode. 1569 Enabling PCH_DMA, this PCH UART works as DMA mode.
1570 1570
1571 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 1571 This driver also can be used for LAPIS Semiconductor IOH(Input/
1572 Output Hub), ML7213 and ML7223. 1572 Output Hub), ML7213, ML7223 and ML7831.
1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
1574 for MP(Media Phone) use. 1574 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
1575 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 1575 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
1576 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 1576 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
1577 1577
1578config SERIAL_MSM_SMD 1578config SERIAL_MSM_SMD
1579 bool "Enable tty device interface for some SMD ports" 1579 bool "Enable tty device interface for some SMD ports"
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4a0f86fa1e9..4c823f341d9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
228 if (rs485conf->flags & SER_RS485_ENABLED) { 228 if (rs485conf->flags & SER_RS485_ENABLED) {
229 dev_dbg(port->dev, "Setting UART to RS485\n"); 229 dev_dbg(port->dev, "Setting UART to RS485\n");
230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
231 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 231 if ((rs485conf->delay_rts_after_send) > 0)
232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); 232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
233 mode |= ATMEL_US_USMODE_RS485; 233 mode |= ATMEL_US_USMODE_RS485;
234 } else { 234 } else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
304 304
305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
306 dev_dbg(port->dev, "Setting UART to RS485\n"); 306 dev_dbg(port->dev, "Setting UART to RS485\n");
307 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 307 if ((atmel_port->rs485.delay_rts_after_send) > 0)
308 UART_PUT_TTGR(port, 308 UART_PUT_TTGR(port,
309 atmel_port->rs485.delay_rts_after_send); 309 atmel_port->rs485.delay_rts_after_send);
310 mode |= ATMEL_US_USMODE_RS485; 310 mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1228 1228
1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
1230 dev_dbg(port->dev, "Setting UART to RS485\n"); 1230 dev_dbg(port->dev, "Setting UART to RS485\n");
1231 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 1231 if ((atmel_port->rs485.delay_rts_after_send) > 0)
1232 UART_PUT_TTGR(port, 1232 UART_PUT_TTGR(port,
1233 atmel_port->rs485.delay_rts_after_send); 1233 atmel_port->rs485.delay_rts_after_send);
1234 mode |= ATMEL_US_USMODE_RS485; 1234 mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
1447 rs485conf->delay_rts_after_send = rs485_delay[1]; 1447 rs485conf->delay_rts_after_send = rs485_delay[1];
1448 rs485conf->flags = 0; 1448 rs485conf->flags = 0;
1449 1449
1450 if (rs485conf->delay_rts_before_send == 0 &&
1451 rs485conf->delay_rts_after_send == 0) {
1452 rs485conf->flags |= SER_RS485_RTS_ON_SEND;
1453 } else {
1454 if (rs485conf->delay_rts_before_send)
1455 rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
1456 if (rs485conf->delay_rts_after_send)
1457 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1458 }
1459
1460 if (of_get_property(np, "rs485-rx-during-tx", NULL)) 1450 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1461 rs485conf->flags |= SER_RS485_RX_DURING_TX; 1451 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1462 1452
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index b7435043f2f..1dfba7b779c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
3234 e100_disable_rx(info); 3234 e100_disable_rx(info);
3235 e100_enable_rx_irq(info); 3235 e100_enable_rx_irq(info);
3236#endif 3236#endif
3237 if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) && 3237 if (info->rs485.delay_rts_before_send > 0)
3238 (info->rs485.delay_rts_before_send > 0)) 3238 msleep(info->rs485.delay_rts_before_send);
3239 msleep(info->rs485.delay_rts_before_send);
3240 } 3239 }
3241#endif /* CONFIG_ETRAX_RS485 */ 3240#endif /* CONFIG_ETRAX_RS485 */
3242 3241
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
3693 3692
3694 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; 3693 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
3695 rs485data.flags = 0; 3694 rs485data.flags = 0;
3696 if (rs485data.delay_rts_before_send != 0)
3697 rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
3698 else
3699 rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
3700 3695
3701 if (rs485ctrl.enabled) 3696 if (rs485ctrl.enabled)
3702 rs485data.flags |= SER_RS485_ENABLED; 3697 rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
4531 /* Set sane defaults */ 4526 /* Set sane defaults */
4532 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); 4527 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
4533 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; 4528 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
4534 info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
4535 info->rs485.delay_rts_before_send = 0; 4529 info->rs485.delay_rts_before_send = 0;
4536 info->rs485.flags &= ~(SER_RS485_ENABLED); 4530 info->rs485.flags &= ~(SER_RS485_ENABLED);
4537#endif 4531#endif
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 286c386d9c4..e272d3919c6 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
884{ 884{
885 struct uart_hsu_port *up = 885 struct uart_hsu_port *up =
886 container_of(port, struct uart_hsu_port, port); 886 container_of(port, struct uart_hsu_port, port);
887 struct tty_struct *tty = port->state->port.tty;
888 unsigned char cval, fcr = 0; 887 unsigned char cval, fcr = 0;
889 unsigned long flags; 888 unsigned long flags;
890 unsigned int baud, quot; 889 unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
907 } 906 }
908 907
909 /* CMSPAR isn't supported by this driver */ 908 /* CMSPAR isn't supported by this driver */
910 if (tty) 909 termios->c_cflag &= ~CMSPAR;
911 tty->termios->c_cflag &= ~CMSPAR;
912 910
913 if (termios->c_cflag & CSTOPB) 911 if (termios->c_cflag & CSTOPB)
914 cval |= UART_LCR_STOP; 912 cval |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21febef926a..d6aba8c087e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,5 +1,5 @@
1/* 1/*
2 *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 *This program is free software; you can redistribute it and/or modify 4 *This program is free software; you can redistribute it and/or modify
5 *it under the terms of the GNU General Public License as published by 5 *it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
46 46
47/* Set the max number of UART port 47/* Set the max number of UART port
48 * Intel EG20T PCH: 4 port 48 * Intel EG20T PCH: 4 port
49 * OKI SEMICONDUCTOR ML7213 IOH: 3 port 49 * LAPIS Semiconductor ML7213 IOH: 3 port
50 * OKI SEMICONDUCTOR ML7223 IOH: 2 port 50 * LAPIS Semiconductor ML7223 IOH: 2 port
51*/ 51*/
52#define PCH_UART_NR 4 52#define PCH_UART_NR 4
53 53
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
258 pch_ml7213_uart2, 258 pch_ml7213_uart2,
259 pch_ml7223_uart0, 259 pch_ml7223_uart0,
260 pch_ml7223_uart1, 260 pch_ml7223_uart1,
261 pch_ml7831_uart0,
262 pch_ml7831_uart1,
261}; 263};
262 264
263static struct pch_uart_driver_data drv_dat[] = { 265static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
270 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, 272 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
271 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, 273 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
272 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, 274 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
275 [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
276 [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
273}; 277};
274 278
275static unsigned int default_baud = 9600; 279static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
628 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", 632 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
629 __func__); 633 __func__);
630 dma_release_channel(priv->chan_tx); 634 dma_release_channel(priv->chan_tx);
635 priv->chan_tx = NULL;
631 return; 636 return;
632 } 637 }
633 638
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
1215 dev_err(priv->port.dev, 1220 dev_err(priv->port.dev,
1216 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); 1221 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
1217 1222
1218 if (priv->use_dma_flag) 1223 pch_free_dma(port);
1219 pch_free_dma(port);
1220 1224
1221 free_irq(priv->port.irq, priv); 1225 free_irq(priv->port.irq, priv);
1222} 1226}
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
1280 if (rtn) 1284 if (rtn)
1281 goto out; 1285 goto out;
1282 1286
1287 pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
1283 /* Don't rewrite B0 */ 1288 /* Don't rewrite B0 */
1284 if (tty_termios_baud_rate(termios)) 1289 if (tty_termios_baud_rate(termios))
1285 tty_termios_encode_baud_rate(termios, baud, baud); 1290 tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
1552 .driver_data = pch_ml7223_uart0}, 1557 .driver_data = pch_ml7223_uart0},
1553 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), 1558 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
1554 .driver_data = pch_ml7223_uart1}, 1559 .driver_data = pch_ml7223_uart1},
1560 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
1561 .driver_data = pch_ml7831_uart0},
1562 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
1563 .driver_data = pch_ml7831_uart1},
1555 {0,}, 1564 {0,},
1556}; 1565};
1557 1566
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 512c49f98e8..8e0924f5544 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -36,6 +36,7 @@
36 36
37#include <linux/kmod.h> 37#include <linux/kmod.h>
38#include <linux/nsproxy.h> 38#include <linux/nsproxy.h>
39#include <linux/ratelimit.h>
39 40
40/* 41/*
41 * This guards the refcounted line discipline lists. The lock 42 * This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
547/** 548/**
548 * tty_ldisc_wait_idle - wait for the ldisc to become idle 549 * tty_ldisc_wait_idle - wait for the ldisc to become idle
549 * @tty: tty to wait for 550 * @tty: tty to wait for
551 * @timeout: for how long to wait at most
550 * 552 *
551 * Wait for the line discipline to become idle. The discipline must 553 * Wait for the line discipline to become idle. The discipline must
552 * have been halted for this to guarantee it remains idle. 554 * have been halted for this to guarantee it remains idle.
553 */ 555 */
554static int tty_ldisc_wait_idle(struct tty_struct *tty) 556static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
555{ 557{
556 int ret; 558 long ret;
557 ret = wait_event_timeout(tty_ldisc_idle, 559 ret = wait_event_timeout(tty_ldisc_idle,
558 atomic_read(&tty->ldisc->users) == 1, 5 * HZ); 560 atomic_read(&tty->ldisc->users) == 1, timeout);
559 if (ret < 0) 561 if (ret < 0)
560 return ret; 562 return ret;
561 return ret > 0 ? 0 : -EBUSY; 563 return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
665 667
666 tty_ldisc_flush_works(tty); 668 tty_ldisc_flush_works(tty);
667 669
668 retval = tty_ldisc_wait_idle(tty); 670 retval = tty_ldisc_wait_idle(tty, 5 * HZ);
669 671
670 tty_lock(); 672 tty_lock();
671 mutex_lock(&tty->ldisc_mutex); 673 mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
762 if (IS_ERR(ld)) 764 if (IS_ERR(ld))
763 return -1; 765 return -1;
764 766
765 WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
766
767 tty_ldisc_close(tty, tty->ldisc); 767 tty_ldisc_close(tty, tty->ldisc);
768 tty_ldisc_put(tty->ldisc); 768 tty_ldisc_put(tty->ldisc);
769 tty->ldisc = NULL; 769 tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
838 tty_unlock(); 838 tty_unlock();
839 cancel_work_sync(&tty->buf.work); 839 cancel_work_sync(&tty->buf.work);
840 mutex_unlock(&tty->ldisc_mutex); 840 mutex_unlock(&tty->ldisc_mutex);
841 841retry:
842 tty_lock(); 842 tty_lock();
843 mutex_lock(&tty->ldisc_mutex); 843 mutex_lock(&tty->ldisc_mutex);
844 844
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
847 it means auditing a lot of other paths so this is 847 it means auditing a lot of other paths so this is
848 a FIXME */ 848 a FIXME */
849 if (tty->ldisc) { /* Not yet closed */ 849 if (tty->ldisc) { /* Not yet closed */
850 if (atomic_read(&tty->ldisc->users) != 1) {
851 char cur_n[TASK_COMM_LEN], tty_n[64];
852 long timeout = 3 * HZ;
853 tty_unlock();
854
855 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
856 timeout = MAX_SCHEDULE_TIMEOUT;
857 printk_ratelimited(KERN_WARNING
858 "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
859 __func__, get_task_comm(cur_n, current),
860 tty_name(tty, tty_n));
861 }
862 mutex_unlock(&tty->ldisc_mutex);
863 goto retry;
864 }
865
850 if (reset == 0) { 866 if (reset == 0) {
851 867
852 if (!tty_ldisc_reinit(tty, tty->termios->c_line)) 868 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
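The tty_ldisc hunks above turn the fixed five-second idle wait into a parameter and let the hangup path retry indefinitely with a rate-limited warning. The core helper, reduced to its wait_event_timeout() shape (struct and field names are illustrative):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>

struct example_ldisc {
	wait_queue_head_t	idle_wq;
	atomic_t		users;
};

static int example_wait_idle(struct example_ldisc *ld, long timeout)
{
	long ret;

	/* wait_event_timeout() returns 0 on timeout, >0 on success */
	ret = wait_event_timeout(ld->idle_wq,
				 atomic_read(&ld->users) == 1, timeout);
	if (ret < 0)
		return ret;
	return ret > 0 ? 0 : -EBUSY;
}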
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6960715c506..e8c564a5334 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
539{ 539{
540 int i; 540 int i;
541 541
542 mutex_lock(&open_mutex);
543 if (acm->dev) { 542 if (acm->dev) {
544 usb_autopm_get_interface(acm->control); 543 usb_autopm_get_interface(acm->control);
545 acm_set_control(acm, acm->ctrlout = 0); 544 acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
551 acm->control->needs_remote_wakeup = 0; 550 acm->control->needs_remote_wakeup = 0;
552 usb_autopm_put_interface(acm->control); 551 usb_autopm_put_interface(acm->control);
553 } 552 }
554 mutex_unlock(&open_mutex);
555} 553}
556 554
557static void acm_tty_hangup(struct tty_struct *tty) 555static void acm_tty_hangup(struct tty_struct *tty)
558{ 556{
559 struct acm *acm = tty->driver_data; 557 struct acm *acm = tty->driver_data;
560 tty_port_hangup(&acm->port); 558 tty_port_hangup(&acm->port);
559 mutex_lock(&open_mutex);
561 acm_port_down(acm); 560 acm_port_down(acm);
561 mutex_unlock(&open_mutex);
562} 562}
563 563
564static void acm_tty_close(struct tty_struct *tty, struct file *filp) 564static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
569 shutdown */ 569 shutdown */
570 if (!acm) 570 if (!acm)
571 return; 571 return;
572
573 mutex_lock(&open_mutex);
572 if (tty_port_close_start(&acm->port, tty, filp) == 0) { 574 if (tty_port_close_start(&acm->port, tty, filp) == 0) {
573 mutex_lock(&open_mutex);
574 if (!acm->dev) { 575 if (!acm->dev) {
575 tty_port_tty_set(&acm->port, NULL); 576 tty_port_tty_set(&acm->port, NULL);
576 acm_tty_unregister(acm); 577 acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
582 acm_port_down(acm); 583 acm_port_down(acm);
583 tty_port_close_end(&acm->port, tty); 584 tty_port_close_end(&acm->port, tty);
584 tty_port_tty_set(&acm->port, NULL); 585 tty_port_tty_set(&acm->port, NULL);
586 mutex_unlock(&open_mutex);
585} 587}
586 588
587static int acm_tty_write(struct tty_struct *tty, 589static int acm_tty_write(struct tty_struct *tty,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 96f05b29c9a..79781461eec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
813 USB_PORT_FEAT_C_PORT_LINK_STATE); 813 USB_PORT_FEAT_C_PORT_LINK_STATE);
814 } 814 }
815 815
816 if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
817 hub_is_superspeed(hub->hdev)) {
818 need_debounce_delay = true;
819 clear_port_feature(hub->hdev, port1,
820 USB_PORT_FEAT_C_BH_PORT_RESET);
821 }
816 /* We can forget about a "removed" device when there's a 822 /* We can forget about a "removed" device when there's a
817 * physical disconnect or the connect status changes. 823 * physical disconnect or the connect status changes.
818 */ 824 */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d6a8d8269bf..ecf12e15a7e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
50 /* Logitech Webcam B/C500 */ 50 /* Logitech Webcam B/C500 */
51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
52 52
53 /* Logitech Webcam C600 */
54 { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
55
53 /* Logitech Webcam Pro 9000 */ 56 /* Logitech Webcam Pro 9000 */
54 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, 57 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
55 58
59 /* Logitech Webcam C905 */
60 { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
61
62 /* Logitech Webcam C210 */
63 { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
64
65 /* Logitech Webcam C260 */
66 { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
67
56 /* Logitech Webcam C310 */ 68 /* Logitech Webcam C310 */
57 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, 69 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
58 70
71 /* Logitech Webcam C910 */
72 { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
73
74 /* Logitech Webcam C160 */
75 { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
76
59 /* Logitech Webcam C270 */ 77 /* Logitech Webcam C270 */
60 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, 78 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
61 79
80 /* Logitech Quickcam Pro 9000 */
81 { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
82
83 /* Logitech Quickcam E3500 */
84 { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
85
86 /* Logitech Quickcam Vision Pro */
87 { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
88
62 /* Logitech Harmony 700-series */ 89 /* Logitech Harmony 700-series */
63 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, 90 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
64 91
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa824cfdd2e..25dbd8614e7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1284 int ret; 1284 int ret;
1285 1285
1286 dep->endpoint.maxpacket = 1024; 1286 dep->endpoint.maxpacket = 1024;
1287 dep->endpoint.max_streams = 15;
1287 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1288 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1288 list_add_tail(&dep->endpoint.ep_list, 1289 list_add_tail(&dep->endpoint.ep_list,
1289 &dwc->gadget.ep_list); 1290 &dwc->gadget.ep_list);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b21cd376c11..23a447373c5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -469,7 +469,7 @@ config USB_LANGWELL
469 gadget drivers to also be dynamically linked. 469 gadget drivers to also be dynamically linked.
470 470
471config USB_EG20T 471config USB_EG20T
472 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" 472 tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
473 depends on PCI 473 depends on PCI
474 select USB_GADGET_DUALSPEED 474 select USB_GADGET_DUALSPEED
475 help 475 help
@@ -485,10 +485,11 @@ config USB_EG20T
485 This driver dose not support interrupt transfer or isochronous 485 This driver dose not support interrupt transfer or isochronous
486 transfer modes. 486 transfer modes.
487 487
488 This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is 488 This driver also can be used for LAPIS Semiconductor's ML7213 which is
489 for IVI(In-Vehicle Infotainment) use. 489 for IVI(In-Vehicle Infotainment) use.
490 ML7213 is companion chip for Intel Atom E6xx series. 490 ML7831 is for general purpose use.
491 ML7213 is completely compatible for Intel EG20T PCH. 491 ML7213/ML7831 is companion chip for Intel Atom E6xx series.
492 ML7213/ML7831 is completely compatible for Intel EG20T PCH.
492 493
493config USB_CI13XXX_MSM 494config USB_CI13XXX_MSM
494 tristate "MIPS USB CI13xxx for MSM" 495 tristate "MIPS USB CI13xxx for MSM"
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 4eedfe55715..1fc612914c5 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
122 return platform_driver_register(&ci13xxx_msm_driver); 122 return platform_driver_register(&ci13xxx_msm_driver);
123} 123}
124module_init(ci13xxx_msm_init); 124module_init(ci13xxx_msm_init);
125
126MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 83428f56253..9a0c3979ff4 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -71,6 +71,9 @@
71/****************************************************************************** 71/******************************************************************************
72 * DEFINE 72 * DEFINE
73 *****************************************************************************/ 73 *****************************************************************************/
74
75#define DMA_ADDR_INVALID (~(dma_addr_t)0)
76
74/* ctrl register bank access */ 77/* ctrl register bank access */
75static DEFINE_SPINLOCK(udc_lock); 78static DEFINE_SPINLOCK(udc_lock);
76 79
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1434 return -EALREADY; 1437 return -EALREADY;
1435 1438
1436 mReq->req.status = -EALREADY; 1439 mReq->req.status = -EALREADY;
1437 if (length && !mReq->req.dma) { 1440 if (length && mReq->req.dma == DMA_ADDR_INVALID) {
1438 mReq->req.dma = \ 1441 mReq->req.dma = \
1439 dma_map_single(mEp->device, mReq->req.buf, 1442 dma_map_single(mEp->device, mReq->req.buf,
1440 length, mEp->dir ? DMA_TO_DEVICE : 1443 length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1453 dma_unmap_single(mEp->device, mReq->req.dma, 1456 dma_unmap_single(mEp->device, mReq->req.dma,
1454 length, mEp->dir ? DMA_TO_DEVICE : 1457 length, mEp->dir ? DMA_TO_DEVICE :
1455 DMA_FROM_DEVICE); 1458 DMA_FROM_DEVICE);
1456 mReq->req.dma = 0; 1459 mReq->req.dma = DMA_ADDR_INVALID;
1457 mReq->map = 0; 1460 mReq->map = 0;
1458 } 1461 }
1459 return -ENOMEM; 1462 return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1549 if (mReq->map) { 1552 if (mReq->map) {
1550 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 1553 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
1551 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1554 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1552 mReq->req.dma = 0; 1555 mReq->req.dma = DMA_ADDR_INVALID;
1553 mReq->map = 0; 1556 mReq->map = 0;
1554 } 1557 }
1555 1558
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
1610 * @gadget: gadget 1613 * @gadget: gadget
1611 * 1614 *
1612 * This function returns an error code 1615 * This function returns an error code
1613 * Caller must hold lock
1614 */ 1616 */
1615static int _gadget_stop_activity(struct usb_gadget *gadget) 1617static int _gadget_stop_activity(struct usb_gadget *gadget)
1616{ 1618{
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2189 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); 2191 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2190 if (mReq != NULL) { 2192 if (mReq != NULL) {
2191 INIT_LIST_HEAD(&mReq->queue); 2193 INIT_LIST_HEAD(&mReq->queue);
2194 mReq->req.dma = DMA_ADDR_INVALID;
2192 2195
2193 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, 2196 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
2194 &mReq->dma); 2197 &mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2328 if (mReq->map) { 2331 if (mReq->map) {
2329 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 2332 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
2330 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 2333 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2331 mReq->req.dma = 0; 2334 mReq->req.dma = DMA_ADDR_INVALID;
2332 mReq->map = 0; 2335 mReq->map = 0;
2333 } 2336 }
2334 req->status = -ECONNRESET; 2337 req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
2500 spin_lock_irqsave(udc->lock, flags); 2503 spin_lock_irqsave(udc->lock, flags);
2501 if (!udc->remote_wakeup) { 2504 if (!udc->remote_wakeup) {
2502 ret = -EOPNOTSUPP; 2505 ret = -EOPNOTSUPP;
2503 dbg_trace("remote wakeup feature is not enabled\n"); 2506 trace("remote wakeup feature is not enabled\n");
2504 goto out; 2507 goto out;
2505 } 2508 }
2506 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { 2509 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
2507 ret = -EINVAL; 2510 ret = -EINVAL;
2508 dbg_trace("port is not suspended\n"); 2511 trace("port is not suspended\n");
2509 goto out; 2512 goto out;
2510 } 2513 }
2511 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); 2514 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
2703 if (udc->udc_driver->notify_event) 2706 if (udc->udc_driver->notify_event)
2704 udc->udc_driver->notify_event(udc, 2707 udc->udc_driver->notify_event(udc,
2705 CI13XXX_CONTROLLER_STOPPED_EVENT); 2708 CI13XXX_CONTROLLER_STOPPED_EVENT);
2709 spin_unlock_irqrestore(udc->lock, flags);
2706 _gadget_stop_activity(&udc->gadget); 2710 _gadget_stop_activity(&udc->gadget);
2711 spin_lock_irqsave(udc->lock, flags);
2707 pm_runtime_put(&udc->gadget.dev); 2712 pm_runtime_put(&udc->gadget.dev);
2708 } 2713 }
2709 2714
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
2850 struct ci13xxx *udc; 2855 struct ci13xxx *udc;
2851 int retval = 0; 2856 int retval = 0;
2852 2857
2853 trace("%p, %p, %p", dev, regs, name); 2858 trace("%p, %p, %p", dev, regs, driver->name);
2854 2859
2855 if (dev == NULL || regs == NULL || driver == NULL || 2860 if (dev == NULL || regs == NULL || driver == NULL ||
2856 driver->name == NULL) 2861 driver->name == NULL)
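Several of the ci13xxx_udc hunks above replace 0 with an explicit DMA_ADDR_INVALID sentinel: a DMA address of 0 can be a legitimate mapping, so "not mapped yet" needs its own value. A reduced sketch of the map step using that sentinel (names are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_DMA_ADDR_INVALID	(~(dma_addr_t)0)

struct example_req {
	void		*buf;
	unsigned int	length;
	dma_addr_t	dma;
};

static int example_map_request(struct device *dev, struct example_req *req,
			       bool to_device)
{
	if (req->length && req->dma == EXAMPLE_DMA_ADDR_INVALID) {
		req->dma = dma_map_single(dev, req->buf, req->length,
					  to_device ? DMA_TO_DEVICE
						    : DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, req->dma)) {
			req->dma = EXAMPLE_DMA_ADDR_INVALID;
			return -ENOMEM;
		}
	}
	return 0;
}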
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 52583a23533..c39d58860fa 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
624 if (ctrl->bRequestType != 624 if (ctrl->bRequestType !=
625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
626 break; 626 break;
627 if (w_index != fsg->interface_number || w_value != 0) 627 if (w_index != fsg->interface_number || w_value != 0 ||
628 w_length != 0)
628 return -EDOM; 629 return -EDOM;
629 630
630 /* 631 /*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
639 if (ctrl->bRequestType != 640 if (ctrl->bRequestType !=
640 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 641 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
641 break; 642 break;
642 if (w_index != fsg->interface_number || w_value != 0) 643 if (w_index != fsg->interface_number || w_value != 0 ||
644 w_length != 1)
643 return -EDOM; 645 return -EDOM;
644 VDBG(fsg, "get max LUN\n"); 646 VDBG(fsg, "get max LUN\n");
645 *(u8 *)req->buf = fsg->common->nluns - 1; 647 *(u8 *)req->buf = fsg->common->nluns - 1;
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 67b222908cf..3797b3d6c62 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
95 95
96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); 96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); 97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
98DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
99DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); 98DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
100 99
101/* B.3.1 Standard AC Interface Descriptor */ 100/* B.3.1 Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
140 /* .wTotalLength = DYNAMIC */ 139 /* .wTotalLength = DYNAMIC */
141}; 140};
142 141
143/* B.4.3 Embedded MIDI IN Jack Descriptor */
144static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
145 .bLength = USB_DT_MIDI_IN_SIZE,
146 .bDescriptorType = USB_DT_CS_INTERFACE,
147 .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
148 .bJackType = USB_MS_EMBEDDED,
149 /* .bJackID = DYNAMIC */
150};
151
152/* B.4.4 Embedded MIDI OUT Jack Descriptor */
153static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
154 /* .bLength = DYNAMIC */
155 .bDescriptorType = USB_DT_CS_INTERFACE,
156 .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
157 .bJackType = USB_MS_EMBEDDED,
158 /* .bJackID = DYNAMIC */
159 /* .bNrInputPins = DYNAMIC */
160 /* .pins = DYNAMIC */
161};
162
163/* B.5.1 Standard Bulk OUT Endpoint Descriptor */ 142/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
164static struct usb_endpoint_descriptor bulk_out_desc = { 143static struct usb_endpoint_descriptor bulk_out_desc = {
165 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, 144 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
758static int __init 737static int __init
759f_midi_bind(struct usb_configuration *c, struct usb_function *f) 738f_midi_bind(struct usb_configuration *c, struct usb_function *f)
760{ 739{
761 struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12]; 740 struct usb_descriptor_header **midi_function;
762 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; 741 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
742 struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
763 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; 743 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
744 struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
764 struct usb_composite_dev *cdev = c->cdev; 745 struct usb_composite_dev *cdev = c->cdev;
765 struct f_midi *midi = func_to_midi(f); 746 struct f_midi *midi = func_to_midi(f);
766 int status, n, jack = 1, i = 0; 747 int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
798 goto fail; 779 goto fail;
799 midi->out_ep->driver_data = cdev; /* claim */ 780 midi->out_ep->driver_data = cdev; /* claim */
800 781
782 /* allocate temporary function list */
783 midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
784 GFP_KERNEL);
785 if (!midi_function) {
786 status = -ENOMEM;
787 goto fail;
788 }
789
801 /* 790 /*
802 * construct the function's descriptor set. As the number of 791 * construct the function's descriptor set. As the number of
803 * input and output MIDI ports is configurable, we have to do 792 * input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
811 800
812 /* calculate the header's wTotalLength */ 801 /* calculate the header's wTotalLength */
813 n = USB_DT_MS_HEADER_SIZE 802 n = USB_DT_MS_HEADER_SIZE
814 + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE 803 + (midi->in_ports + midi->out_ports) *
815 + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1); 804 (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
816 ms_header_desc.wTotalLength = cpu_to_le16(n); 805 ms_header_desc.wTotalLength = cpu_to_le16(n);
817 806
818 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; 807 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
819 808
820 /* we have one embedded IN jack */ 809 /* configure the external IN jacks, each linked to an embedded OUT jack */
821 jack_in_emb_desc.bJackID = jack++;
822 midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
823
824 /* and a dynamic amount of external IN jacks */
825 for (n = 0; n < midi->in_ports; n++) {
826 struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
827
828 ext->bLength = USB_DT_MIDI_IN_SIZE;
829 ext->bDescriptorType = USB_DT_CS_INTERFACE;
830 ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
831 ext->bJackType = USB_MS_EXTERNAL;
832 ext->bJackID = jack++;
833 ext->iJack = 0;
834
835 midi_function[i++] = (struct usb_descriptor_header *) ext;
836 }
837
838 /* one embedded OUT jack ... */
839 jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
840 jack_out_emb_desc.bJackID = jack++;
841 jack_out_emb_desc.bNrInputPins = midi->in_ports;
842 /* ... which referencess all external IN jacks */
843 for (n = 0; n < midi->in_ports; n++) { 810 for (n = 0; n < midi->in_ports; n++) {
844 jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID; 811 struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
845 jack_out_emb_desc.pins[n].baSourcePin = 1; 812 struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
813
814 in_ext->bLength = USB_DT_MIDI_IN_SIZE;
815 in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
816 in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
817 in_ext->bJackType = USB_MS_EXTERNAL;
818 in_ext->bJackID = jack++;
819 in_ext->iJack = 0;
820 midi_function[i++] = (struct usb_descriptor_header *) in_ext;
821
822 out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
823 out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
824 out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
825 out_emb->bJackType = USB_MS_EMBEDDED;
826 out_emb->bJackID = jack++;
827 out_emb->bNrInputPins = 1;
828 out_emb->pins[0].baSourcePin = 1;
829 out_emb->pins[0].baSourceID = in_ext->bJackID;
830 out_emb->iJack = 0;
831 midi_function[i++] = (struct usb_descriptor_header *) out_emb;
832
833 /* link it to the endpoint */
834 ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
846 } 835 }
847 836
848 midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc; 837 /* configure the external OUT jacks, each linked to an embedded IN jack */
849
850 /* and multiple external OUT jacks ... */
851 for (n = 0; n < midi->out_ports; n++) { 838 for (n = 0; n < midi->out_ports; n++) {
852 struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n]; 839 struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
853 int m; 840 struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
854 841
855 ext->bLength = USB_DT_MIDI_OUT_SIZE(1); 842 in_emb->bLength = USB_DT_MIDI_IN_SIZE;
856 ext->bDescriptorType = USB_DT_CS_INTERFACE; 843 in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
857 ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; 844 in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
858 ext->bJackType = USB_MS_EXTERNAL; 845 in_emb->bJackType = USB_MS_EMBEDDED;
859 ext->bJackID = jack++; 846 in_emb->bJackID = jack++;
860 ext->bNrInputPins = 1; 847 in_emb->iJack = 0;
861 ext->iJack = 0; 848 midi_function[i++] = (struct usb_descriptor_header *) in_emb;
862 /* ... which all reference the same embedded IN jack */ 849
863 for (m = 0; m < midi->out_ports; m++) { 850 out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
864 ext->pins[m].baSourceID = jack_in_emb_desc.bJackID; 851 out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
865 ext->pins[m].baSourcePin = 1; 852 out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
866 } 853 out_ext->bJackType = USB_MS_EXTERNAL;
867 854 out_ext->bJackID = jack++;
868 midi_function[i++] = (struct usb_descriptor_header *) ext; 855 out_ext->bNrInputPins = 1;
856 out_ext->iJack = 0;
857 out_ext->pins[0].baSourceID = in_emb->bJackID;
858 out_ext->pins[0].baSourcePin = 1;
859 midi_function[i++] = (struct usb_descriptor_header *) out_ext;
860
861 /* link it to the endpoint */
862 ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
869 } 863 }
870 864
871 /* configure the endpoint descriptors ... */ 865 /* configure the endpoint descriptors ... */
872 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); 866 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
873 ms_out_desc.bNumEmbMIDIJack = midi->in_ports; 867 ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
874 for (n = 0; n < midi->in_ports; n++)
875 ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
876 868
877 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); 869 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
878 ms_in_desc.bNumEmbMIDIJack = midi->out_ports; 870 ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
879 for (n = 0; n < midi->out_ports; n++)
880 ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
881 871
882 /* ... and add them to the list */ 872 /* ... and add them to the list */
883 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; 873 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
901 f->descriptors = usb_copy_descriptors(midi_function); 891 f->descriptors = usb_copy_descriptors(midi_function);
902 } 892 }
903 893
894 kfree(midi_function);
895
904 return 0; 896 return 0;
905 897
906fail: 898fail:
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 34907703333..16a509ae517 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,7 +346,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 } 346 }
347 347
348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
349 skb->len == 0, req->actual); 349 skb->len <= 1, req->actual);
350 page = NULL; 350 page = NULL;
351 351
352 if (req->actual < req->length) { /* Last fragment */ 352 if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index f7e39b0365c..11b5196284a 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
859 if (ctrl->bRequestType != (USB_DIR_OUT | 859 if (ctrl->bRequestType != (USB_DIR_OUT |
860 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 860 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
861 break; 861 break;
862 if (w_index != 0 || w_value != 0) { 862 if (w_index != 0 || w_value != 0 || w_length != 0) {
863 value = -EDOM; 863 value = -EDOM;
864 break; 864 break;
865 } 865 }
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
875 if (ctrl->bRequestType != (USB_DIR_IN | 875 if (ctrl->bRequestType != (USB_DIR_IN |
876 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 876 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
877 break; 877 break;
878 if (w_index != 0 || w_value != 0) { 878 if (w_index != 0 || w_value != 0 || w_length != 1) {
879 value = -EDOM; 879 value = -EDOM;
880 break; 880 break;
881 } 881 }
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b2c44e1d581..b3b3d83b7c3 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -1717,7 +1717,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1717 1717
1718static inline enum usb_device_speed portscx_device_speed(u32 reg) 1718static inline enum usb_device_speed portscx_device_speed(u32 reg)
1719{ 1719{
1720 switch (speed & PORTSCX_PORT_SPEED_MASK) { 1720 switch (reg & PORTSCX_PORT_SPEED_MASK) {
1721 case PORTSCX_PORT_SPEED_HIGH: 1721 case PORTSCX_PORT_SPEED_HIGH:
1722 return USB_SPEED_HIGH; 1722 return USB_SPEED_HIGH;
1723 case PORTSCX_PORT_SPEED_FULL: 1723 case PORTSCX_PORT_SPEED_FULL:
@@ -2480,8 +2480,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2480 2480
2481#ifndef CONFIG_ARCH_MXC 2481#ifndef CONFIG_ARCH_MXC
2482 if (pdata->have_sysif_regs) 2482 if (pdata->have_sysif_regs)
2483 usb_sys_regs = (struct usb_sys_interface *) 2483 usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
2484 ((u32)dr_regs + USB_DR_SYS_OFFSET);
2485#endif 2484#endif
2486 2485
2487 /* Initialize USB clocks */ 2486 /* Initialize USB clocks */
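
The fsl_udc_core hunk replaces a pointer-to-u32 cast with plain pointer arithmetic: casting an address to u32 silently truncates it on 64-bit builds, while offsetting the pointer keeps the full address (the driver relies on GCC's void-pointer arithmetic extension). A stand-alone illustration of the difference follows; the register block and offset below are made up for the example.

    /*
     * Why "(u32)ptr + offset" is unsafe: on LP64 a pointer is 64 bits
     * wide, so the cast drops the upper half.  Offsetting through
     * char * (or GCC's void * arithmetic) preserves the address.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SYS_OFFSET 0x400            /* made-up register offset */

    int main(void)
    {
            char regs[0x1000];          /* stands in for the mapped register block */
            void *dr_regs = regs;

            uint32_t truncated = (uint32_t)(uintptr_t)dr_regs + SYS_OFFSET;
            void *correct = (char *)dr_regs + SYS_OFFSET;

            printf("truncated: 0x%x\n", (unsigned)truncated);
            printf("correct:   %p (base %p + 0x%x)\n",
                   correct, dr_regs, (unsigned)SYS_OFFSET);
            return 0;
    }
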
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a392ec0d2d5..6ccae2707e5 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1730,8 +1730,9 @@ static void
1730gadgetfs_disconnect (struct usb_gadget *gadget) 1730gadgetfs_disconnect (struct usb_gadget *gadget)
1731{ 1731{
1732 struct dev_data *dev = get_gadget_data (gadget); 1732 struct dev_data *dev = get_gadget_data (gadget);
1733 unsigned long flags;
1733 1734
1734 spin_lock (&dev->lock); 1735 spin_lock_irqsave (&dev->lock, flags);
1735 if (dev->state == STATE_DEV_UNCONNECTED) 1736 if (dev->state == STATE_DEV_UNCONNECTED)
1736 goto exit; 1737 goto exit;
1737 dev->state = STATE_DEV_UNCONNECTED; 1738 dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
1740 next_event (dev, GADGETFS_DISCONNECT); 1741 next_event (dev, GADGETFS_DISCONNECT);
1741 ep0_readable (dev); 1742 ep0_readable (dev);
1742exit: 1743exit:
1743 spin_unlock (&dev->lock); 1744 spin_unlock_irqrestore (&dev->lock, flags);
1744} 1745}
1745 1746
1746static void 1747static void
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 550d6dcdf10..5048a0c0764 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
355#define PCI_VENDOR_ID_ROHM 0x10DB 355#define PCI_VENDOR_ID_ROHM 0x10DB
356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D 356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
357#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
357 358
358static const char ep0_string[] = "ep0in"; 359static const char ep0_string[] = "ep0in";
359static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ 360static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2970 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 2971 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2971 .class_mask = 0xffffffff, 2972 .class_mask = 0xffffffff,
2972 }, 2973 },
2974 {
2975 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
2976 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2977 .class_mask = 0xffffffff,
2978 },
2973 { 0 }, 2979 { 0 },
2974}; 2980};
2975 2981
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
2999module_exit(pch_udc_pci_exit); 3005module_exit(pch_udc_pci_exit);
3000 3006
3001MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); 3007MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3002MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); 3008MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3003MODULE_LICENSE("GPL"); 3009MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68a826a1b86..24f84b210ce 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
1718 if (list_empty(&ep->queue) && !ep->busy) { 1718 if (list_empty(&ep->queue) && !ep->busy) {
1719 pipe_stop(ep->r8a66597, ep->pipenum); 1719 pipe_stop(ep->r8a66597, ep->pipenum);
1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); 1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
1721 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1722 r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1721 } 1723 }
1722 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); 1724 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1723} 1725}
@@ -1742,7 +1744,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
1742 struct usb_gadget_driver *driver) 1744 struct usb_gadget_driver *driver)
1743{ 1745{
1744 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); 1746 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1745 int retval;
1746 1747
1747 if (!driver 1748 if (!driver
1748 || driver->speed != USB_SPEED_HIGH 1749 || driver->speed != USB_SPEED_HIGH
@@ -1752,16 +1753,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
1752 return -ENODEV; 1753 return -ENODEV;
1753 1754
1754 /* hook up the driver */ 1755 /* hook up the driver */
1755 driver->driver.bus = NULL;
1756 r8a66597->driver = driver; 1756 r8a66597->driver = driver;
1757 r8a66597->gadget.dev.driver = &driver->driver;
1758
1759 retval = device_add(&r8a66597->gadget.dev);
1760 if (retval) {
1761 dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
1762 retval);
1763 goto error;
1764 }
1765 1757
1766 init_controller(r8a66597); 1758 init_controller(r8a66597);
1767 r8a66597_bset(r8a66597, VBSE, INTENB0); 1759 r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
1775 } 1767 }
1776 1768
1777 return 0; 1769 return 0;
1778
1779error:
1780 r8a66597->driver = NULL;
1781 r8a66597->gadget.dev.driver = NULL;
1782
1783 return retval;
1784} 1770}
1785 1771
1786static int r8a66597_stop(struct usb_gadget *gadget, 1772static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
1794 disable_controller(r8a66597); 1780 disable_controller(r8a66597);
1795 spin_unlock_irqrestore(&r8a66597->lock, flags); 1781 spin_unlock_irqrestore(&r8a66597->lock, flags);
1796 1782
1797 device_del(&r8a66597->gadget.dev);
1798 r8a66597->driver = NULL; 1783 r8a66597->driver = NULL;
1799 return 0; 1784 return 0;
1800} 1785}
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
1845 clk_put(r8a66597->clk); 1830 clk_put(r8a66597->clk);
1846 } 1831 }
1847#endif 1832#endif
1833 device_unregister(&r8a66597->gadget.dev);
1848 kfree(r8a66597); 1834 kfree(r8a66597);
1849 return 0; 1835 return 0;
1850} 1836}
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1924 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; 1910 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1925 1911
1926 r8a66597->gadget.ops = &r8a66597_gadget_ops; 1912 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1927 device_initialize(&r8a66597->gadget.dev);
1928 dev_set_name(&r8a66597->gadget.dev, "gadget"); 1913 dev_set_name(&r8a66597->gadget.dev, "gadget");
1929 r8a66597->gadget.is_dualspeed = 1; 1914 r8a66597->gadget.is_dualspeed = 1;
1930 r8a66597->gadget.dev.parent = &pdev->dev; 1915 r8a66597->gadget.dev.parent = &pdev->dev;
1931 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; 1916 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
1932 r8a66597->gadget.dev.release = pdev->dev.release; 1917 r8a66597->gadget.dev.release = pdev->dev.release;
1933 r8a66597->gadget.name = udc_name; 1918 r8a66597->gadget.name = udc_name;
1919 ret = device_register(&r8a66597->gadget.dev);
1920 if (ret < 0) {
1921 dev_err(&pdev->dev, "device_register failed\n");
1922 goto clean_up;
1923 }
1934 1924
1935 init_timer(&r8a66597->timer); 1925 init_timer(&r8a66597->timer);
1936 r8a66597->timer.function = r8a66597_timer; 1926 r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1945 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", 1935 dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
1946 clk_name); 1936 clk_name);
1947 ret = PTR_ERR(r8a66597->clk); 1937 ret = PTR_ERR(r8a66597->clk);
1948 goto clean_up; 1938 goto clean_up_dev;
1949 } 1939 }
1950 clk_enable(r8a66597->clk); 1940 clk_enable(r8a66597->clk);
1951 } 1941 }
@@ -2014,7 +2004,9 @@ clean_up2:
2014 clk_disable(r8a66597->clk); 2004 clk_disable(r8a66597->clk);
2015 clk_put(r8a66597->clk); 2005 clk_put(r8a66597->clk);
2016 } 2006 }
2007clean_up_dev:
2017#endif 2008#endif
2009 device_unregister(&r8a66597->gadget.dev);
2018clean_up: 2010clean_up:
2019 if (r8a66597) { 2011 if (r8a66597) {
2020 if (r8a66597->sudmac_reg) 2012 if (r8a66597->sudmac_reg)
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 022baeca7c9..6939e17f458 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); 210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
211 211
212 if (udc_is_newstyle(udc)) { 212 if (udc_is_newstyle(udc)) {
213 usb_gadget_disconnect(udc->gadget); 213 udc->driver->disconnect(udc->gadget);
214 udc->driver->unbind(udc->gadget); 214 udc->driver->unbind(udc->gadget);
215 usb_gadget_udc_stop(udc->gadget, udc->driver); 215 usb_gadget_udc_stop(udc->gadget, udc->driver);
216 216 usb_gadget_disconnect(udc->gadget);
217 } else { 217 } else {
218 usb_gadget_stop(udc->gadget, udc->driver); 218 usb_gadget_stop(udc->gadget, udc->driver);
219 } 219 }
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
344static ssize_t usb_udc_srp_store(struct device *dev, 344static ssize_t usb_udc_srp_store(struct device *dev,
345 struct device_attribute *attr, const char *buf, size_t n) 345 struct device_attribute *attr, const char *buf, size_t n)
346{ 346{
347 struct usb_udc *udc = dev_get_drvdata(dev); 347 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
348 348
349 if (sysfs_streq(buf, "1")) 349 if (sysfs_streq(buf, "1"))
350 usb_gadget_wakeup(udc->gadget); 350 usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
378 return snprintf(buf, PAGE_SIZE, "%s\n", 378 return snprintf(buf, PAGE_SIZE, "%s\n",
379 usb_speed_string(udc->gadget->speed)); 379 usb_speed_string(udc->gadget->speed));
380} 380}
381static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL); 381static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
382 382
383#define USB_UDC_ATTR(name) \ 383#define USB_UDC_ATTR(name) \
384ssize_t usb_udc_##name##_show(struct device *dev, \ 384ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
389 \ 389 \
390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ 390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
391} \ 391} \
392static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL) 392static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
393 393
394static USB_UDC_ATTR(is_dualspeed); 394static USB_UDC_ATTR(is_dualspeed);
395static USB_UDC_ATTR(is_otg); 395static USB_UDC_ATTR(is_otg);
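
Two patterns appear in the udc-core hunks above: the sysfs callbacks recover their struct usb_udc with container_of() instead of dev_get_drvdata(), and the read-only attributes are widened from S_IRUSR to S_IRUGO. The snippet below is a stand-alone userspace sketch of the container_of() idiom only; the structure and field names are invented placeholders, not the kernel types.

    /*
     * Userspace re-implementation of the container_of() idiom used by
     * the sysfs callbacks above.  The types are invented placeholders.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device {
            const char *name;
    };

    struct usb_udc_example {
            int speed;
            struct device dev;          /* embedded member, as in struct usb_udc */
    };

    static void show_speed(struct device *dev)
    {
            /* recover the containing object from the embedded member */
            struct usb_udc_example *udc =
                    container_of(dev, struct usb_udc_example, dev);

            printf("%s: speed=%d\n", dev->name, udc->speed);
    }

    int main(void)
    {
            struct usb_udc_example udc = {
                    .speed = 480,
                    .dev = { .name = "udc0" },
            };

            show_speed(&udc.dev);
            return 0;
    }
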
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2e829fae648..56a32033adb 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1479,10 +1479,15 @@ iso_stream_schedule (
1479 1479
1480 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ 1480 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
1481 1481
1482 /* find a uframe slot with enough bandwidth */ 1482 /* find a uframe slot with enough bandwidth.
1483 next = start + period; 1483 * Early uframes are more precious because full-speed
1484 for (; start < next; start++) { 1484 * iso IN transfers can't use late uframes,
1485 1485 * and therefore they should be allocated last.
1486 */
1487 next = start;
1488 start += period;
1489 do {
1490 start--;
1486 /* check schedule: enough space? */ 1491 /* check schedule: enough space? */
1487 if (stream->highspeed) { 1492 if (stream->highspeed) {
1488 if (itd_slot_ok(ehci, mod, start, 1493 if (itd_slot_ok(ehci, mod, start,
@@ -1495,7 +1500,7 @@ iso_stream_schedule (
1495 start, sched, period)) 1500 start, sched, period))
1496 break; 1501 break;
1497 } 1502 }
1498 } 1503 } while (start > next);
1499 1504
1500 /* no room in the schedule */ 1505 /* no room in the schedule */
1501 if (start == next) { 1506 if (start == next) {
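
The ehci-sched hunk reverses the direction of the uframe search: rather than walking forward from start, it walks backward from start + period so later uframes are tried first and the early ones, the only ones full-speed iso IN transfers can use, are consumed last. A stand-alone sketch of the same loop shape follows; slot_ok() and the schedule contents are placeholders invented for the example.

    /*
     * Stand-alone sketch of the backward slot search used above.
     * slot_ok() and the fake schedule are illustrative only.
     */
    #include <stdio.h>

    static int busy[16] = { 1, 1, 0, 1, 0, 0, 1, 1 };  /* fake schedule */

    static int slot_ok(int uframe)
    {
            return !busy[uframe & 15];
    }

    int main(void)
    {
            int period = 8;
            int start = 0;              /* first candidate uframe */
            int next;

            /* search [start, start + period) from the last slot downward */
            next = start;
            start += period;
            do {
                    start--;
                    if (slot_ok(start))
                            break;
            } while (start > next);

            if (start == next && !slot_ok(start))
                    printf("no room in the schedule\n");
            else
                    printf("picked uframe %d\n", start);
            return 0;
    }
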
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index fe74bd67601..b4fb511d24b 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
19 19
20 ehci->caps = hcd->regs; 20 ehci->caps = hcd->regs;
21 ehci->regs = hcd->regs + 21 ehci->regs = hcd->regs +
22 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); 22 HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
23 dbg_hcs_params(ehci, "reset"); 23 dbg_hcs_params(ehci, "reset");
24 dbg_hcc_params(ehci, "reset"); 24 dbg_hcc_params(ehci, "reset");
25 25
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ba3a46b78b7..95a9fec38e8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
223 if (port < 0 || port >= 2) 223 if (port < 0 || port >= 2)
224 return; 224 return;
225 225
226 if (pdata->vbus_pin[port] <= 0)
227 return;
228
226 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); 229 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
227} 230}
228 231
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
231 if (port < 0 || port >= 2) 234 if (port < 0 || port >= 2)
232 return -EINVAL; 235 return -EINVAL;
233 236
237 if (pdata->vbus_pin[port] <= 0)
238 return -EINVAL;
239
234 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; 240 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
235} 241}
236 242
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 34efd479e06..b2639191549 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
389 struct ohci_hcd *ohci; 389 struct ohci_hcd *ohci;
390 390
391 ohci = hcd_to_ohci (hcd); 391 ohci = hcd_to_ohci (hcd);
392 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 392 ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
393 ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
394 393
395 /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ 394 /* Software reset, after which the controller goes into SUSPEND */
396 ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? 395 ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
397 OHCI_CTRL_RWC | OHCI_CTRL_HCFS : 396 ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
398 OHCI_CTRL_RWC); 397 udelay(10);
399 ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
400 398
401 /* flush the writes */ 399 ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
402 (void) ohci_readl (ohci, &ohci->regs->control);
403} 400}
404 401
405static int check_ed(struct ohci_hcd *ohci, struct ed *ed) 402static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ad8166c681e..bc01b064585 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
175 return 0; 175 return 0;
176} 176}
177 177
178/* nVidia controllers continue to drive Reset signalling on the bus
179 * even after system shutdown, wasting power. This flag tells the
180 * shutdown routine to leave the controller OPERATIONAL instead of RESET.
181 */
182static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
183{
184 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
185 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
186
187 /* Evidently nVidia fixed their later hardware; this is a guess at
188 * the changeover point.
189 */
190#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
191
192 if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
193 ohci->flags |= OHCI_QUIRK_SHUTDOWN;
194 ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
195 }
196
197 return 0;
198}
199
200static void sb800_prefetch(struct ohci_hcd *ohci, int on) 178static void sb800_prefetch(struct ohci_hcd *ohci, int on)
201{ 179{
202 struct pci_dev *pdev; 180 struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
260 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), 238 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
261 .driver_data = (unsigned long)ohci_quirk_amd700, 239 .driver_data = (unsigned long)ohci_quirk_amd700,
262 }, 240 },
263 {
264 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
265 .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
266 },
267 241
268 /* FIXME for some of the early AMD 760 southbridges, OHCI 242 /* FIXME for some of the early AMD 760 southbridges, OHCI
269 * won't work at all. blacklist them. 243 * won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 35e5fd640ce..0795b934d00 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,7 +403,6 @@ struct ohci_hcd {
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ 404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
406#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
407 // there are also chip quirks/bugs in init logic 406 // there are also chip quirks/bugs in init logic
408 407
409 struct work_struct nec_work; /* Worker for NEC quirk */ 408 struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 27a3dec32fa..caf87428ca4 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -37,6 +37,7 @@
37#define OHCI_INTRENABLE 0x10 37#define OHCI_INTRENABLE 0x10
38#define OHCI_INTRDISABLE 0x14 38#define OHCI_INTRDISABLE 0x14
39#define OHCI_FMINTERVAL 0x34 39#define OHCI_FMINTERVAL 0x34
40#define OHCI_HCFS (3 << 6) /* hc functional state */
40#define OHCI_HCR (1 << 0) /* host controller reset */ 41#define OHCI_HCR (1 << 0) /* host controller reset */
41#define OHCI_OCR (1 << 3) /* ownership change request */ 42#define OHCI_OCR (1 << 3) /* ownership change request */
42#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ 43#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
466{ 467{
467 void __iomem *base; 468 void __iomem *base;
468 u32 control; 469 u32 control;
470 u32 fminterval;
471 int cnt;
469 472
470 if (!mmio_resource_enabled(pdev, 0)) 473 if (!mmio_resource_enabled(pdev, 0))
471 return; 474 return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
498 } 501 }
499#endif 502#endif
500 503
501 /* reset controller, preserving RWC (and possibly IR) */ 504 /* disable interrupts */
502 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); 505 writel((u32) ~0, base + OHCI_INTRDISABLE);
503 readl(base + OHCI_CONTROL);
504 506
505 /* Some NVIDIA controllers stop working if kept in RESET for too long */ 507 /* Reset the USB bus, if the controller isn't already in RESET */
506 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { 508 if (control & OHCI_HCFS) {
507 u32 fminterval; 509 /* Go into RESET, preserving RWC (and possibly IR) */
508 int cnt; 510 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
511 readl(base + OHCI_CONTROL);
509 512
510 /* drive reset for at least 50 ms (7.1.7.5) */ 513 /* drive bus reset for at least 50 ms (7.1.7.5) */
511 msleep(50); 514 msleep(50);
515 }
512 516
513 /* software reset of the controller, preserving HcFmInterval */ 517 /* software reset of the controller, preserving HcFmInterval */
514 fminterval = readl(base + OHCI_FMINTERVAL); 518 fminterval = readl(base + OHCI_FMINTERVAL);
515 writel(OHCI_HCR, base + OHCI_CMDSTATUS); 519 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
516 520
517 /* reset requires max 10 us delay */ 521 /* reset requires max 10 us delay */
518 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ 522 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
519 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) 523 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
520 break; 524 break;
521 udelay(1); 525 udelay(1);
522 }
523 writel(fminterval, base + OHCI_FMINTERVAL);
524
525 /* Now we're in the SUSPEND state with all devices reset
526 * and wakeups and interrupts disabled
527 */
528 } 526 }
527 writel(fminterval, base + OHCI_FMINTERVAL);
529 528
530 /* 529 /* Now the controller is safely in SUSPEND and nothing can wake it up */
531 * disable interrupts
532 */
533 writel(~(u32)0, base + OHCI_INTRDISABLE);
534 writel(~(u32)0, base + OHCI_INTRSTATUS);
535
536 iounmap(base); 530 iounmap(base);
537} 531}
538 532
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
627 void __iomem *base, *op_reg_base; 621 void __iomem *base, *op_reg_base;
628 u32 hcc_params, cap, val; 622 u32 hcc_params, cap, val;
629 u8 offset, cap_length; 623 u8 offset, cap_length;
630 int wait_time, delta, count = 256/4; 624 int wait_time, count = 256/4;
631 625
632 if (!mmio_resource_enabled(pdev, 0)) 626 if (!mmio_resource_enabled(pdev, 0))
633 return; 627 return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
673 writel(val, op_reg_base + EHCI_USBCMD); 667 writel(val, op_reg_base + EHCI_USBCMD);
674 668
675 wait_time = 2000; 669 wait_time = 2000;
676 delta = 100;
677 do { 670 do {
678 writel(0x3f, op_reg_base + EHCI_USBSTS); 671 writel(0x3f, op_reg_base + EHCI_USBSTS);
679 udelay(delta); 672 udelay(100);
680 wait_time -= delta; 673 wait_time -= 100;
681 val = readl(op_reg_base + EHCI_USBSTS); 674 val = readl(op_reg_base + EHCI_USBSTS);
682 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { 675 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
683 break; 676 break;
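
quirk_usb_handoff_ohci() above now always issues the HcCommandStatus software reset and waits for the HCR bit to clear with a bounded retry loop (up to 30 polls of about 1 µs each against the specified 10 µs maximum). The general pattern, polling a status bit with an explicit retry budget instead of an unbounded loop, is sketched below in stand-alone form; read_status() and the delay are placeholders.

    /*
     * Stand-alone sketch of a bounded busy-wait on a hardware status
     * bit, mirroring the HCR wait loop above.  read_status() fakes
     * the register contents.
     */
    #include <stdio.h>
    #include <unistd.h>

    #define HCR (1 << 0)                /* host controller reset bit */

    static int fake_countdown = 3;

    static unsigned int read_status(void)
    {
            /* pretend the controller clears HCR after a few polls */
            return fake_countdown-- > 0 ? HCR : 0;
    }

    int main(void)
    {
            int cnt;

            for (cnt = 30; cnt > 0; --cnt) {    /* reset needs at most ~10 us */
                    if ((read_status() & HCR) == 0)
                            break;
                    usleep(1);                  /* stand-in for udelay(1) */
            }

            if (cnt == 0)
                    printf("controller did not leave reset\n");
            else
                    printf("reset complete after %d polls\n", 30 - cnt + 1);
            return 0;
    }
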
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 42a22b8e692..0e4b25fa3bc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
982 struct xhci_virt_device *dev; 982 struct xhci_virt_device *dev;
983 struct xhci_ep_ctx *ep0_ctx; 983 struct xhci_ep_ctx *ep0_ctx;
984 struct xhci_slot_ctx *slot_ctx; 984 struct xhci_slot_ctx *slot_ctx;
985 struct xhci_input_control_ctx *ctrl_ctx;
986 u32 port_num; 985 u32 port_num;
987 struct usb_device *top_dev; 986 struct usb_device *top_dev;
988 987
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
994 return -EINVAL; 993 return -EINVAL;
995 } 994 }
996 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 995 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
997 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
998 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 996 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
999 997
1000 /* 2) New slot context and endpoint 0 context are valid*/
1001 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
1002
1003 /* 3) Only the control endpoint is valid - one endpoint context */ 998 /* 3) Only the control endpoint is valid - one endpoint context */
1004 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 999 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1005 switch (udev->speed) { 1000 switch (udev->speed) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 940321b3ec6..9f1d4b15d81 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
816 struct xhci_ring *ring; 816 struct xhci_ring *ring;
817 struct xhci_td *cur_td; 817 struct xhci_td *cur_td;
818 int ret, i, j; 818 int ret, i, j;
819 unsigned long flags;
819 820
820 ep = (struct xhci_virt_ep *) arg; 821 ep = (struct xhci_virt_ep *) arg;
821 xhci = ep->xhci; 822 xhci = ep->xhci;
822 823
823 spin_lock(&xhci->lock); 824 spin_lock_irqsave(&xhci->lock, flags);
824 825
825 ep->stop_cmds_pending--; 826 ep->stop_cmds_pending--;
826 if (xhci->xhc_state & XHCI_STATE_DYING) { 827 if (xhci->xhc_state & XHCI_STATE_DYING) {
827 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 828 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
828 "xHCI as DYING, exiting.\n"); 829 "xHCI as DYING, exiting.\n");
829 spin_unlock(&xhci->lock); 830 spin_unlock_irqrestore(&xhci->lock, flags);
830 return; 831 return;
831 } 832 }
832 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 833 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
833 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 834 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
834 "exiting.\n"); 835 "exiting.\n");
835 spin_unlock(&xhci->lock); 836 spin_unlock_irqrestore(&xhci->lock, flags);
836 return; 837 return;
837 } 838 }
838 839
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
844 xhci->xhc_state |= XHCI_STATE_DYING; 845 xhci->xhc_state |= XHCI_STATE_DYING;
845 /* Disable interrupts from the host controller and start halting it */ 846 /* Disable interrupts from the host controller and start halting it */
846 xhci_quiesce(xhci); 847 xhci_quiesce(xhci);
847 spin_unlock(&xhci->lock); 848 spin_unlock_irqrestore(&xhci->lock, flags);
848 849
849 ret = xhci_halt(xhci); 850 ret = xhci_halt(xhci);
850 851
851 spin_lock(&xhci->lock); 852 spin_lock_irqsave(&xhci->lock, flags);
852 if (ret < 0) { 853 if (ret < 0) {
853 /* This is bad; the host is not responding to commands and it's 854 /* This is bad; the host is not responding to commands and it's
854 * not allowing itself to be halted. At least interrupts are 855 * not allowing itself to be halted. At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
896 } 897 }
897 } 898 }
898 } 899 }
899 spin_unlock(&xhci->lock); 900 spin_unlock_irqrestore(&xhci->lock, flags);
900 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 901 xhci_dbg(xhci, "Calling usb_hc_died()\n");
901 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 902 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
902 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 903 xhci_dbg(xhci, "xHCI host controller is dead.\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1ff95a0df57..aa94c019579 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -799,7 +799,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
799 u32 command, temp = 0; 799 u32 command, temp = 0;
800 struct usb_hcd *hcd = xhci_to_hcd(xhci); 800 struct usb_hcd *hcd = xhci_to_hcd(xhci);
801 struct usb_hcd *secondary_hcd; 801 struct usb_hcd *secondary_hcd;
802 int retval; 802 int retval = 0;
803 803
804 /* Wait a bit if either of the roothubs need to settle from the 804 /* Wait a bit if either of the roothubs need to settle from the
805 * transition into bus suspend. 805 * transition into bus suspend.
@@ -809,6 +809,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
809 xhci->bus_state[1].next_statechange)) 809 xhci->bus_state[1].next_statechange))
810 msleep(100); 810 msleep(100);
811 811
812 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
813 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
814
812 spin_lock_irq(&xhci->lock); 815 spin_lock_irq(&xhci->lock);
813 if (xhci->quirks & XHCI_RESET_ON_RESUME) 816 if (xhci->quirks & XHCI_RESET_ON_RESUME)
814 hibernated = true; 817 hibernated = true;
@@ -878,20 +881,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
878 return retval; 881 return retval;
879 xhci_dbg(xhci, "Start the primary HCD\n"); 882 xhci_dbg(xhci, "Start the primary HCD\n");
880 retval = xhci_run(hcd->primary_hcd); 883 retval = xhci_run(hcd->primary_hcd);
881 if (retval)
882 goto failed_restart;
883
884 xhci_dbg(xhci, "Start the secondary HCD\n");
885 retval = xhci_run(secondary_hcd);
886 if (!retval) { 884 if (!retval) {
887 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 885 xhci_dbg(xhci, "Start the secondary HCD\n");
888 set_bit(HCD_FLAG_HW_ACCESSIBLE, 886 retval = xhci_run(secondary_hcd);
889 &xhci->shared_hcd->flags);
890 } 887 }
891failed_restart:
892 hcd->state = HC_STATE_SUSPENDED; 888 hcd->state = HC_STATE_SUSPENDED;
893 xhci->shared_hcd->state = HC_STATE_SUSPENDED; 889 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
894 return retval; 890 goto done;
895 } 891 }
896 892
897 /* step 4: set Run/Stop bit */ 893 /* step 4: set Run/Stop bit */
@@ -910,11 +906,14 @@ failed_restart:
910 * Running endpoints by ringing their doorbells 906 * Running endpoints by ringing their doorbells
911 */ 907 */
912 908
913 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
914 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
915
916 spin_unlock_irq(&xhci->lock); 909 spin_unlock_irq(&xhci->lock);
917 return 0; 910
911 done:
912 if (retval == 0) {
913 usb_hcd_resume_root_hub(hcd);
914 usb_hcd_resume_root_hub(xhci->shared_hcd);
915 }
916 return retval;
918} 917}
919#endif /* CONFIG_PM */ 918#endif /* CONFIG_PM */
920 919
@@ -3504,6 +3503,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3504 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3503 /* Otherwise, update the control endpoint ring enqueue pointer. */
3505 else 3504 else
3506 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3505 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3506 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3507 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3508 ctrl_ctx->drop_flags = 0;
3509
3507 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3510 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3508 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3511 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3509 3512
@@ -3585,7 +3588,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3585 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 3588 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3586 + 1; 3589 + 1;
3587 /* Zero the input context control for later use */ 3590 /* Zero the input context control for later use */
3588 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3589 ctrl_ctx->add_flags = 0; 3591 ctrl_ctx->add_flags = 0;
3590 ctrl_ctx->drop_flags = 0; 3592 ctrl_ctx->drop_flags = 0;
3591 3593
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index fc34b8b1191..07a03460a59 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
11 select TWL4030_USB if MACH_OMAP_3430SDP 11 select TWL4030_USB if MACH_OMAP_3430SDP
12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
13 select USB_OTG_UTILS 13 select USB_OTG_UTILS
14 select USB_GADGET_DUALSPEED
14 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 15 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
15 help 16 help
16 Say Y here if your system has a dual role high speed USB 17 Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
60 61
61config USB_MUSB_UX500 62config USB_MUSB_UX500
62 tristate "U8500 and U5500" 63 tristate "U8500 and U5500"
63 depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) 64 depends on (ARCH_U8500 && AB8500_USB)
64 65
65endchoice 66endchoice
66 67
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 08f1d0b662a..e233d2b7d33 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 4da7492ddbd..2613bfdb09b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 20a28731c33..c1fa12ec7a9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1477/*-------------------------------------------------------------------------*/ 1477/*-------------------------------------------------------------------------*/
1478 1478
1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ 1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ 1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
1481 defined(CONFIG_ARCH_U5500)
1482 1481
1483static irqreturn_t generic_interrupt(int irq, void *__hci) 1482static irqreturn_t generic_interrupt(int irq, void *__hci)
1484{ 1483{
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ae4a20acef6..d51043acfe1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1999 nuke(&hw_ep->ep_out, -ESHUTDOWN); 1999 nuke(&hw_ep->ep_out, -ESHUTDOWN);
2000 } 2000 }
2001 } 2001 }
2002
2003 spin_unlock(&musb->lock);
2004 driver->disconnect(&musb->g);
2005 spin_lock(&musb->lock);
2006 } 2002 }
2007} 2003}
2008 2004
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d2e2efaba65..08c679c0dde 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
405/* 405/*
406 * platform functions 406 * platform functions
407 */ 407 */
408static int __devinit usbhs_probe(struct platform_device *pdev) 408static int usbhs_probe(struct platform_device *pdev)
409{ 409{
410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; 410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
411 struct renesas_usbhs_driver_callback *dfunc; 411 struct renesas_usbhs_driver_callback *dfunc;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8da685e796d..ffdf5d15085 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
820 if (len % 4) /* 32bit alignment */ 820 if (len % 4) /* 32bit alignment */
821 goto usbhsf_pio_prepare_push; 821 goto usbhsf_pio_prepare_push;
822 822
823 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 823 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
824 goto usbhsf_pio_prepare_push; 824 goto usbhsf_pio_prepare_push;
825 825
826 /* get enable DMA fifo */ 826 /* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
897 if (!fifo) 897 if (!fifo)
898 goto usbhsf_pio_prepare_pop; 898 goto usbhsf_pio_prepare_pop;
899 899
900 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 900 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
901 goto usbhsf_pio_prepare_pop; 901 goto usbhsf_pio_prepare_pop;
902 902
903 ret = usbhsf_fifo_select(pipe, fifo, 0); 903 ret = usbhsf_fifo_select(pipe, fifo, 0);
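
The two fifo.c hunks fix an alignment test that dereferenced the buffer (*(u32 *)pkt->buf + pkt->actual) instead of testing the address itself; the fix casts the pointer sum to uintptr_t before masking. A stand-alone version of the corrected check follows; the buffer and offsets are arbitrary test data.

    /*
     * Stand-alone version of the 8-byte alignment check fixed above.
     * The buffer and offsets are arbitrary test data.
     */
    #include <stdint.h>
    #include <stdio.h>

    static int aligned_for_dma(const void *buf, size_t actual)
    {
            /* test the address, not the data it points to */
            return ((uintptr_t)((const char *)buf + actual) & 0x7) == 0;
    }

    int main(void)
    {
            char buf[32] __attribute__((aligned(8)));

            printf("buf + 0: %s\n", aligned_for_dma(buf, 0) ? "8-byte aligned" : "unaligned");
            printf("buf + 3: %s\n", aligned_for_dma(buf, 3) ? "8-byte aligned" : "unaligned");
            printf("buf + 8: %s\n", aligned_for_dma(buf, 8) ? "8-byte aligned" : "unaligned");
            return 0;
    }
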
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 8ae3733031c..6c6875533f0 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
143 */ 143 */
144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ 144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) 145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
146extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv); 146extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
147extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv); 147extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
148#else 148#else
149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) 149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
150{ 150{
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
157 157
158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ 158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) 159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
160extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv); 160extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
161extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv); 161extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
162#else 162#else
163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) 163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
164{ 164{
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4cc7ee0babc..d9717e0bc1f 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -830,7 +830,7 @@ static int usbhsg_stop(struct usbhs_priv *priv)
830 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); 830 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
831} 831}
832 832
833int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) 833int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
834{ 834{
835 struct usbhsg_gpriv *gpriv; 835 struct usbhsg_gpriv *gpriv;
836 struct usbhsg_uep *uep; 836 struct usbhsg_uep *uep;
@@ -927,7 +927,7 @@ usbhs_mod_gadget_probe_err_gpriv:
927 return ret; 927 return ret;
928} 928}
929 929
930void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv) 930void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
931{ 931{
932 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); 932 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
933 933
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a7208a50af..bade761a1e5 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
103 103
104 u32 port_stat; /* USB_PORT_STAT_xxx */ 104 u32 port_stat; /* USB_PORT_STAT_xxx */
105 105
106 struct completion *done; 106 struct completion setup_ack_done;
107 107
108 /* see usbhsh_req_alloc/free */ 108 /* see usbhsh_req_alloc/free */
109 struct list_head ureq_link_active; 109 struct list_head ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, 355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
356 struct usbhsh_device *udev, 356 struct usbhsh_device *udev,
357 struct usb_host_endpoint *ep, 357 struct usb_host_endpoint *ep,
358 int dir_in_req,
358 gfp_t mem_flags) 359 gfp_t mem_flags)
359{ 360{
360 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); 361 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
364 struct usbhs_pipe *pipe, *best_pipe; 365 struct usbhs_pipe *pipe, *best_pipe;
365 struct device *dev = usbhsh_hcd_to_dev(hcd); 366 struct device *dev = usbhsh_hcd_to_dev(hcd);
366 struct usb_endpoint_descriptor *desc = &ep->desc; 367 struct usb_endpoint_descriptor *desc = &ep->desc;
367 int type, i; 368 int type, i, dir_in;
368 unsigned int min_usr; 369 unsigned int min_usr;
369 370
371 dir_in_req = !!dir_in_req;
372
370 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); 373 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
371 if (!uep) { 374 if (!uep) {
372 dev_err(dev, "usbhsh_ep alloc fail\n"); 375 dev_err(dev, "usbhsh_ep alloc fail\n");
373 return NULL; 376 return NULL;
374 } 377 }
375 type = usb_endpoint_type(desc); 378
379 if (usb_endpoint_xfer_control(desc)) {
380 best_pipe = usbhsh_hpriv_to_dcp(hpriv);
381 goto usbhsh_endpoint_alloc_find_pipe;
382 }
376 383
377 /* 384 /*
378 * find best pipe for endpoint 385 * find best pipe for endpoint
379 * see 386 * see
380 * HARDWARE LIMITATION 387 * HARDWARE LIMITATION
381 */ 388 */
389 type = usb_endpoint_type(desc);
382 min_usr = ~0; 390 min_usr = ~0;
383 best_pipe = NULL; 391 best_pipe = NULL;
384 usbhs_for_each_pipe_with_dcp(pipe, priv, i) { 392 usbhs_for_each_pipe(pipe, priv, i) {
385 if (!usbhs_pipe_type_is(pipe, type)) 393 if (!usbhs_pipe_type_is(pipe, type))
386 continue; 394 continue;
387 395
396 dir_in = !!usbhs_pipe_is_dir_in(pipe);
397 if (0 != (dir_in - dir_in_req))
398 continue;
399
388 info = usbhsh_pipe_info(pipe); 400 info = usbhsh_pipe_info(pipe);
389 401
390 if (min_usr > info->usr_cnt) { 402 if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
398 kfree(uep); 410 kfree(uep);
399 return NULL; 411 return NULL;
400 } 412 }
401 413usbhsh_endpoint_alloc_find_pipe:
402 /* 414 /*
403 * init uep 415 * init uep
404 */ 416 */
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
423 * see 435 * see
424 * DCPMAXP/PIPEMAXP 436 * DCPMAXP/PIPEMAXP
425 */ 437 */
438 usbhs_pipe_sequence_data0(uep->pipe);
426 usbhs_pipe_config_update(uep->pipe, 439 usbhs_pipe_config_update(uep->pipe,
427 usbhsh_device_number(hpriv, udev), 440 usbhsh_device_number(hpriv, udev),
428 usb_endpoint_num(desc), 441 usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
430 443
431 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, 444 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
432 usbhsh_device_number(hpriv, udev), 445 usbhsh_device_number(hpriv, udev),
433 usbhs_pipe_name(pipe), uep); 446 usbhs_pipe_name(uep->pipe), uep);
434 447
435 return uep; 448 return uep;
436} 449}
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
549 * usbhsh_irq_setup_ack() 562 * usbhsh_irq_setup_ack()
550 * usbhsh_irq_setup_err() 563 * usbhsh_irq_setup_err()
551 */ 564 */
552 DECLARE_COMPLETION(done); 565 init_completion(&hpriv->setup_ack_done);
553 hpriv->done = &done;
554 566
555 /* copy original request */ 567 /* copy original request */
556 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); 568 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
572 /* 584 /*
573 * wait setup packet ACK 585 * wait setup packet ACK
574 */ 586 */
575 wait_for_completion(&done); 587 wait_for_completion(&hpriv->setup_ack_done);
576 hpriv->done = NULL;
577 588
578 dev_dbg(dev, "%s done\n", __func__); 589 dev_dbg(dev, "%s done\n", __func__);
579} 590}
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
724 struct usbhsh_device *udev, *new_udev = NULL; 735 struct usbhsh_device *udev, *new_udev = NULL;
725 struct usbhs_pipe *pipe; 736 struct usbhs_pipe *pipe;
726 struct usbhsh_ep *uep; 737 struct usbhsh_ep *uep;
738 int is_dir_in = usb_pipein(urb->pipe);
727 739
728 int ret; 740 int ret;
729 741
730 dev_dbg(dev, "%s (%s)\n", 742 dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
731 __func__, usb_pipein(urb->pipe) ? "in" : "out");
732 743
733 ret = usb_hcd_link_urb_to_ep(hcd, urb); 744 ret = usb_hcd_link_urb_to_ep(hcd, urb);
734 if (ret) 745 if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
751 */ 762 */
752 uep = usbhsh_ep_to_uep(ep); 763 uep = usbhsh_ep_to_uep(ep);
753 if (!uep) { 764 if (!uep) {
754 uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags); 765 uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
766 is_dir_in, mem_flags);
755 if (!uep) 767 if (!uep)
756 goto usbhsh_urb_enqueue_error_free_device; 768 goto usbhsh_urb_enqueue_error_free_device;
757 } 769 }
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
1095 1107
1096 dev_dbg(dev, "setup packet OK\n"); 1108 dev_dbg(dev, "setup packet OK\n");
1097 1109
1098 if (unlikely(!hpriv->done)) 1110 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1099 dev_err(dev, "setup ack happen without necessary data\n");
1100 else
1101 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1102 1111
1103 return 0; 1112 return 0;
1104} 1113}
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
1111 1120
1112 dev_dbg(dev, "setup packet Err\n"); 1121 dev_dbg(dev, "setup packet Err\n");
1113 1122
1114 if (unlikely(!hpriv->done)) 1123 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1115 dev_err(dev, "setup err happen without necessary data\n");
1116 else
1117 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1118 1124
1119 return 0; 1125 return 0;
1120} 1126}
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1221{ 1227{
1222 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1228 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1223 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1229 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
1230 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1224 struct device *dev = usbhs_priv_to_dev(priv); 1231 struct device *dev = usbhs_priv_to_dev(priv);
1225 1232
1233 /*
1234 * disable irq callback
1235 */
1236 mod->irq_attch = NULL;
1237 mod->irq_dtch = NULL;
1238 mod->irq_sack = NULL;
1239 mod->irq_sign = NULL;
1240 usbhs_irq_callback_update(priv, mod);
1241
1226 usb_remove_hcd(hcd); 1242 usb_remove_hcd(hcd);
1227 1243
1228 /* disable sys */ 1244 /* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1235 return 0; 1251 return 0;
1236} 1252}
1237 1253
1238int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) 1254int usbhs_mod_host_probe(struct usbhs_priv *priv)
1239{ 1255{
1240 struct usbhsh_hpriv *hpriv; 1256 struct usbhsh_hpriv *hpriv;
1241 struct usb_hcd *hcd; 1257 struct usb_hcd *hcd;
@@ -1279,7 +1295,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
1279 hpriv->mod.stop = usbhsh_stop; 1295 hpriv->mod.stop = usbhsh_stop;
1280 hpriv->pipe_info = pipe_info; 1296 hpriv->pipe_info = pipe_info;
1281 hpriv->pipe_size = pipe_size; 1297 hpriv->pipe_size = pipe_size;
1282 hpriv->done = NULL;
1283 usbhsh_req_list_init(hpriv); 1298 usbhsh_req_list_init(hpriv);
1284 usbhsh_port_stat_init(hpriv); 1299 usbhsh_port_stat_init(hpriv);
1285 1300
@@ -1299,7 +1314,7 @@ usbhs_mod_host_probe_err:
1299 return -ENOMEM; 1314 return -ENOMEM;
1300} 1315}
1301 1316
1302int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv) 1317int usbhs_mod_host_remove(struct usbhs_priv *priv)
1303{ 1318{
1304 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1319 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1305 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1320 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5cdb9d91227..18e875b92e0 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
42 * Version information 42 * Version information
43 */ 43 */
44 44
45#define DRIVER_VERSION "v0.6" 45#define DRIVER_VERSION "v0.7"
46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" 46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver" 47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" 48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
380 goto err_out; 380 goto err_out;
381 } 381 }
382 382
383 /* setup termios */
384 if (tty)
385 ark3116_set_termios(tty, port, NULL);
386
387 /* remove any data still left: also clears error state */ 383 /* remove any data still left: also clears error state */
388 ark3116_read_reg(serial, UART_RX, buf); 384 ark3116_read_reg(serial, UART_RX, buf);
389 385
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
406 /* enable DMA */ 402 /* enable DMA */
407 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); 403 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
408 404
405 /* setup termios */
406 if (tty)
407 ark3116_set_termios(tty, port, NULL);
408
409err_out: 409err_out:
410 kfree(buf); 410 kfree(buf);
411 return result; 411 return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8fe034d2d3e..bd4298bb675 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2104,13 +2104,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
2104 2104
2105 cflag = termios->c_cflag; 2105 cflag = termios->c_cflag;
2106 2106
2107 /* FIXME -For this cut I don't care if the line is really changing or 2107 if (old_termios->c_cflag == termios->c_cflag
2108 not - so just do the change regardless - should be able to 2108 && old_termios->c_ispeed == termios->c_ispeed
2109 compare old_termios and tty->termios */ 2109 && old_termios->c_ospeed == termios->c_ospeed)
2110 goto no_c_cflag_changes;
2111
2110 /* NOTE These routines can get interrupted by 2112 /* NOTE These routines can get interrupted by
2111 ftdi_sio_read_bulk_callback - need to examine what this means - 2113 ftdi_sio_read_bulk_callback - need to examine what this means -
2112 don't see any problems yet */ 2114 don't see any problems yet */
2113 2115
2116 if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
2117 (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
2118 goto no_data_parity_stop_changes;
2119
2114 /* Set number of data bits, parity, stop bits */ 2120 /* Set number of data bits, parity, stop bits */
2115 2121
2116 urb_value = 0; 2122 urb_value = 0;
@@ -2151,6 +2157,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2151 } 2157 }
2152 2158
2153 /* Now do the baudrate */ 2159 /* Now do the baudrate */
2160no_data_parity_stop_changes:
2154 if ((cflag & CBAUD) == B0) { 2161 if ((cflag & CBAUD) == B0) {
2155 /* Disable flow control */ 2162 /* Disable flow control */
2156 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 2163 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2185,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2178 2185
2179 /* Set flow control */ 2186 /* Set flow control */
2180 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ 2187 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
2188no_c_cflag_changes:
2181 if (cflag & CRTSCTS) { 2189 if (cflag & CRTSCTS) {
2182 dbg("%s Setting to CRTSCTS flow control", __func__); 2190 dbg("%s Setting to CRTSCTS flow control", __func__);
2183 if (usb_control_msg(dev, 2191 if (usb_control_msg(dev,
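
ftdi_set_termios() now compares the old and new termios settings and jumps past the data/parity/stop and baud-rate programming when nothing relevant changed, instead of reprogramming the chip on every call. The sketch below shows the same short-circuit in stand-alone form, with an invented settings structure standing in for struct ktermios.

    /*
     * Stand-alone sketch of the "skip reprogramming when nothing
     * changed" short-circuit added above.  struct line_settings is an
     * invented stand-in for struct ktermios.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct line_settings {
            unsigned int cflag;
            unsigned int ispeed;
            unsigned int ospeed;
    };

    static bool settings_changed(const struct line_settings *old,
                                 const struct line_settings *new)
    {
            return old->cflag != new->cflag ||
                   old->ispeed != new->ispeed ||
                   old->ospeed != new->ospeed;
    }

    static void set_line(const struct line_settings *old,
                         const struct line_settings *new)
    {
            if (!settings_changed(old, new)) {
                    printf("no change, skipping hardware setup\n");
                    return;
            }
            printf("reprogramming line: cflag=%#x %u/%u baud\n",
                   new->cflag, new->ispeed, new->ospeed);
    }

    int main(void)
    {
            struct line_settings a = { 0x800000bd, 115200, 115200 };
            struct line_settings b = a;

            set_line(&a, &b);           /* identical: skipped */
            b.ospeed = 9600;
            set_line(&a, &b);           /* changed: reprogrammed */
            return 0;
    }
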
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89ae1f65e1b..d865878c9f9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
156#define HUAWEI_PRODUCT_K4511 0x14CC 156#define HUAWEI_PRODUCT_K4511 0x14CC
157#define HUAWEI_PRODUCT_ETS1220 0x1803 157#define HUAWEI_PRODUCT_ETS1220 0x1803
158#define HUAWEI_PRODUCT_E353 0x1506 158#define HUAWEI_PRODUCT_E353 0x1506
159#define HUAWEI_PRODUCT_E173S 0x1C05
159 160
160#define QUANTA_VENDOR_ID 0x0408 161#define QUANTA_VENDOR_ID 0x0408
161#define QUANTA_PRODUCT_Q101 0xEA02 162#define QUANTA_PRODUCT_Q101 0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
316#define ZTE_PRODUCT_AC8710 0xfff1 317#define ZTE_PRODUCT_AC8710 0xfff1
317#define ZTE_PRODUCT_AC2726 0xfff5 318#define ZTE_PRODUCT_AC2726 0xfff5
318#define ZTE_PRODUCT_AC8710T 0xffff 319#define ZTE_PRODUCT_AC8710T 0xffff
320#define ZTE_PRODUCT_MC2718 0xffe8
321#define ZTE_PRODUCT_AD3812 0xffeb
322#define ZTE_PRODUCT_MC2716 0xffed
319 323
320#define BENQ_VENDOR_ID 0x04a5 324#define BENQ_VENDOR_ID 0x04a5
321#define BENQ_PRODUCT_H10 0x4068 325#define BENQ_PRODUCT_H10 0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
468#define YUGA_PRODUCT_CLU528 0x260D 472#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F 473#define YUGA_PRODUCT_CLU526 0x260F
470 474
475/* Viettel products */
476#define VIETTEL_VENDOR_ID 0x2262
477#define VIETTEL_PRODUCT_VT1000 0x0002
478
471/* some devices interfaces need special handling due to a number of reasons */ 479/* some devices interfaces need special handling due to a number of reasons */
472enum option_blacklist_reason { 480enum option_blacklist_reason {
473 OPTION_BLACKLIST_NONE = 0, 481 OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
500 .reserved = BIT(4), 508 .reserved = BIT(4),
501}; 509};
502 510
511static const struct option_blacklist_info zte_ad3812_z_blacklist = {
512 .sendsetup = BIT(0) | BIT(1) | BIT(2),
513};
514
515static const struct option_blacklist_info zte_mc2718_z_blacklist = {
516 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
517};
518
519static const struct option_blacklist_info zte_mc2716_z_blacklist = {
520 .sendsetup = BIT(1) | BIT(2) | BIT(3),
521};
522
503static const struct option_blacklist_info huawei_cdc12_blacklist = { 523static const struct option_blacklist_info huawei_cdc12_blacklist = {
504 .reserved = BIT(1) | BIT(2), 524 .reserved = BIT(1) | BIT(2),
505}; 525};
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
622 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, 642 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
623 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, 643 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
624 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, 644 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
645 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
625 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 646 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
626 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 647 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
627 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), 648 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -1043,6 +1064,12 @@ static const struct usb_device_id option_ids[] = {
1043 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 1064 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
1044 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1065 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1045 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 1066 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1067 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
1068 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
1069 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
1070 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
1071 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
1072 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
1046 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1073 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1047 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1074 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
1048 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 1075 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1168,7 @@ static const struct usb_device_id option_ids[] = {
1141 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1168 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1142 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, 1169 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1143 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1170 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
1171 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
1144 { } /* Terminating entry */ 1172 { } /* Terminating entry */
1145}; 1173};
1146MODULE_DEVICE_TABLE(usb, option_ids); 1174MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9083d1e616b..fc2d66f7f4e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
94 { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
95 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, 94 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
96 { } /* Terminating entry */ 95 { } /* Terminating entry */
97}; 96};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3d10d7f0207..c38b8c00c06 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -145,10 +145,6 @@
145#define ADLINK_VENDOR_ID 0x0b63 145#define ADLINK_VENDOR_ID 0x0b63
146#define ADLINK_ND6530_PRODUCT_ID 0x6530 146#define ADLINK_ND6530_PRODUCT_ID 0x6530
147 147
148/* WinChipHead USB->RS 232 adapter */
149#define WINCHIPHEAD_VENDOR_ID 0x4348
150#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
151
152/* SMART USB Serial Adapter */ 148/* SMART USB Serial Adapter */
153#define SMART_VENDOR_ID 0x0b8c 149#define SMART_VENDOR_ID 0x0b8c
154#define SMART_PRODUCT_ID 0x2303 150#define SMART_PRODUCT_ID 0x2303
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4dca3ef0668..9fbe742343c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); 1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
1763 } else { 1763 } else {
1764 void *buf; 1764 void *buf;
1765 int offset; 1765 int offset = 0;
1766 u16 PhyBlockAddr; 1766 u16 PhyBlockAddr;
1767 u8 PageNum; 1767 u8 PageNum;
1768 u32 result;
1769 u16 len, oldphy, newphy; 1768 u16 len, oldphy, newphy;
1770 1769
1771 buf = kmalloc(blenByte, GFP_KERNEL); 1770 buf = kmalloc(blenByte, GFP_KERNEL);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 93c1a4d86f5..82dd834709c 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -59,7 +59,9 @@
59 59
60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) 60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
61{ 61{
62 /* Pad the SCSI command with zeros out to 12 bytes 62 /*
63 * Pad the SCSI command with zeros out to 12 bytes. If the
64 * command already is 12 bytes or longer, leave it alone.
63 * 65 *
64 * NOTE: This only works because a scsi_cmnd struct field contains 66 * NOTE: This only works because a scsi_cmnd struct field contains
65 * a unsigned char cmnd[16], so we know we have storage available 67 * a unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
67 for (; srb->cmd_len<12; srb->cmd_len++) 69 for (; srb->cmd_len<12; srb->cmd_len++)
68 srb->cmnd[srb->cmd_len] = 0; 70 srb->cmnd[srb->cmd_len] = 0;
69 71
70 /* set command length to 12 bytes */
71 srb->cmd_len = 12;
72
73 /* send the command to the transport layer */ 72 /* send the command to the transport layer */
74 usb_stor_invoke_transport(srb, us); 73 usb_stor_invoke_transport(srb, us);
75} 74}
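The protocol.c change keeps the zero-padding of short SCSI command blocks up to 12 bytes but no longer forces cmd_len back down to 12 for longer commands. The padding rule in isolation, as a plain-C sketch with placeholder buffer and length names:

    #include <stddef.h>

    /* Pad a SCSI CDB with zeros out to 12 bytes; commands that are
     * already 12 bytes or longer are left untouched.  The caller must
     * provide at least 12 bytes of storage, as the cmnd[16] field in
     * struct scsi_cmnd guarantees for the driver above. */
    static size_t pad_cdb_to_12(unsigned char *cdb, size_t cdb_len)
    {
            for (; cdb_len < 12; cdb_len++)
                    cdb[cdb_len] = 0;
            return cdb_len;
    }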
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00..29577bf1f55 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
116/* Clock registers available only on Version 2 */ 116/* Clock registers available only on Version 2 */
117#define LCD_CLK_ENABLE_REG 0x6c 117#define LCD_CLK_ENABLE_REG 0x6c
118#define LCD_CLK_RESET_REG 0x70 118#define LCD_CLK_RESET_REG 0x70
119#define LCD_CLK_MAIN_RESET BIT(3)
119 120
120#define LCD_NUM_BUFFERS 2 121#define LCD_NUM_BUFFERS 2
121 122
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
244{ 245{
245 u32 reg; 246 u32 reg;
246 247
248 /* Bring LCDC out of reset */
249 if (lcd_revision == LCD_VERSION_2)
250 lcdc_write(0, LCD_CLK_RESET_REG);
251
247 reg = lcdc_read(LCD_RASTER_CTRL_REG); 252 reg = lcdc_read(LCD_RASTER_CTRL_REG);
248 if (!(reg & LCD_RASTER_ENABLE)) 253 if (!(reg & LCD_RASTER_ENABLE))
249 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 254 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
257 reg = lcdc_read(LCD_RASTER_CTRL_REG); 262 reg = lcdc_read(LCD_RASTER_CTRL_REG);
258 if (reg & LCD_RASTER_ENABLE) 263 if (reg & LCD_RASTER_ENABLE)
259 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 264 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
265
266 if (lcd_revision == LCD_VERSION_2)
267 /* Write 1 to reset LCDC */
268 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
260} 269}
261 270
262static void lcd_blit(int load_mode, struct da8xx_fb_par *par) 271static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
584 lcdc_write(0, LCD_DMA_CTRL_REG); 593 lcdc_write(0, LCD_DMA_CTRL_REG);
585 lcdc_write(0, LCD_RASTER_CTRL_REG); 594 lcdc_write(0, LCD_RASTER_CTRL_REG);
586 595
587 if (lcd_revision == LCD_VERSION_2) 596 if (lcd_revision == LCD_VERSION_2) {
588 lcdc_write(0, LCD_INT_ENABLE_SET_REG); 597 lcdc_write(0, LCD_INT_ENABLE_SET_REG);
598 /* Write 1 to reset */
599 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
600 lcdc_write(0, LCD_CLK_RESET_REG);
601 }
589} 602}
590 603
591static void lcd_calc_clk_divider(struct da8xx_fb_par *par) 604static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
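Taken together, the da8xx-fb hunks hold the version-2 LCDC in reset while the raster is disabled and release it again before re-enabling it: lcd_disable_raster() and lcd_reset() write LCD_CLK_MAIN_RESET to LCD_CLK_RESET_REG, and lcd_enable_raster() clears that register first. A condensed sketch of the sequence; lcdc_write() and the register names are those used in the hunks, and on earlier LCDC revisions (where the clock registers do not exist) these writes are skipped:

    /* Sketch of the version-2 reset handshake added above. */
    static void lcdc_v2_stop(void)
    {
            lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);  /* assert reset  */
    }

    static void lcdc_v2_start(void)
    {
            lcdc_write(0, LCD_CLK_RESET_REG);                   /* release reset */
    }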
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47b..6f61e781f15 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551c..5c81533eaca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1721 unsigned long fclk = 0; 1721 unsigned long fclk = 0;
1722 1722
1723 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { 1723 if (width == out_width && height == out_height)
1724 if (width != out_width || height != out_height) 1724 return 0;
1725 return -EINVAL; 1725
1726 else 1726 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
1727 return 0; 1727 return -EINVAL;
1728 }
1729 1728
1730 if (out_width < width / maxdownscale || 1729 if (out_width < width / maxdownscale ||
1731 out_width > width * 8) 1730 out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa3..c56378c555b 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
269unsigned long hdmi_get_pixel_clock(void) 269unsigned long hdmi_get_pixel_clock(void)
270{ 270{
271 /* HDMI Pixel Clock in Mhz */ 271 /* HDMI Pixel Clock in Mhz */
272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000; 272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
273} 273}
274 274
275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, 275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe70..c01c1c16272 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
559#define M1200X720_R60_VSP POSITIVE 559#define M1200X720_R60_VSP POSITIVE
560 560
561/* 1200x900@60 Sync Polarity (DCON) */ 561/* 1200x900@60 Sync Polarity (DCON) */
562#define M1200X900_R60_HSP NEGATIVE 562#define M1200X900_R60_HSP POSITIVE
563#define M1200X900_R60_VSP NEGATIVE 563#define M1200X900_R60_VSP POSITIVE
564 564
565/* 1280x600@60 Sync Polarity (GTF Mode) */ 565/* 1280x600@60 Sync Polarity (GTF Mode) */
566#define M1280x600_R60_HSP NEGATIVE 566#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf..1a61939b85f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
37 37
38 config VIRTIO_MMIO 38 config VIRTIO_MMIO
39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" 39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
40 depends on EXPERIMENTAL 40 depends on HAS_IOMEM && EXPERIMENTAL
41 select VIRTIO 41 select VIRTIO
42 select VIRTIO_RING 42 select VIRTIO_RING
43 ---help--- 43 ---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index acc5e43c373..7317dc2ec42 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
118 vring_transport_features(vdev); 118 vring_transport_features(vdev);
119 119
120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { 120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); 121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
122 writel(vdev->features[i], 122 writel(vdev->features[i],
123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
124 } 124 }
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 3d1bf41e889..03d1984bd36 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
169 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 169 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
170} 170}
171 171
172/* wait for pending irq handlers */
173static void vp_synchronize_vectors(struct virtio_device *vdev)
174{
175 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
176 int i;
177
178 if (vp_dev->intx_enabled)
179 synchronize_irq(vp_dev->pci_dev->irq);
180
181 for (i = 0; i < vp_dev->msix_vectors; ++i)
182 synchronize_irq(vp_dev->msix_entries[i].vector);
183}
184
172static void vp_reset(struct virtio_device *vdev) 185static void vp_reset(struct virtio_device *vdev)
173{ 186{
174 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 187 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
175 /* 0 status means a reset. */ 188 /* 0 status means a reset. */
176 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 189 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
190 /* Flush out the status write, and flush in device writes,
 191 * including MSI-X interrupts, if any. */
192 ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
193 /* Flush pending VQ/configuration callbacks. */
194 vp_synchronize_vectors(vdev);
177} 195}
178 196
179/* the notify function used when creating a virt queue */ 197/* the notify function used when creating a virt queue */
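The two new pieces above work together: vp_reset() writes 0 to the status register, reads it back so the posted write (and any in-flight device writes, including MSI-X interrupts) are flushed, and vp_synchronize_vectors() then waits for any handlers that may still be running. Folded into one linear sequence, the pattern looks roughly like this; the field names are the ones visible in the hunk:

    /* Rough, condensed sketch of the reset path added above. */
    static void vp_reset_sketch(struct virtio_pci_device *vp_dev)
    {
            int i;

            iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);   /* 0 means reset   */
            ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);        /* flush the write */

            if (vp_dev->intx_enabled)                   /* legacy INTx handler */
                    synchronize_irq(vp_dev->pci_dev->irq);
            for (i = 0; i < vp_dev->msix_vectors; ++i)  /* every MSI-X vector  */
                    synchronize_irq(vp_dev->msix_entries[i].vector);
    }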
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 6285867a935..79fd606b7cd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
314 To compile this driver as a module, choose M here: the 314 To compile this driver as a module, choose M here: the
315 module will be called nuc900_wdt. 315 module will be called nuc900_wdt.
316 316
317config ADX_WATCHDOG
318 tristate "Avionic Design Xanthos watchdog"
319 depends on ARCH_PXA_ADX
320 help
321 Say Y here if you want support for the watchdog timer on Avionic
322 Design Xanthos boards.
323
324config TS72XX_WATCHDOG 317config TS72XX_WATCHDOG
325 tristate "TS-72XX SBC Watchdog" 318 tristate "TS-72XX SBC Watchdog"
326 depends on MACH_TS72XX 319 depends on MACH_TS72XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e91..fe893e91935 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
51obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 51obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
52obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o 52obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
53obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 53obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
54obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
55obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o 54obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
56obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o 55obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
57 56
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644
index af6e6b16475..00000000000
--- a/drivers/watchdog/adx_wdt.c
+++ /dev/null
@@ -1,355 +0,0 @@
1/*
2 * Copyright (C) 2008-2009 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/fs.h>
10#include <linux/gfp.h>
11#include <linux/io.h>
12#include <linux/miscdevice.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/types.h>
16#include <linux/uaccess.h>
17#include <linux/watchdog.h>
18
19#define WATCHDOG_NAME "adx-wdt"
20
21/* register offsets */
22#define ADX_WDT_CONTROL 0x00
23#define ADX_WDT_CONTROL_ENABLE (1 << 0)
24#define ADX_WDT_CONTROL_nRESET (1 << 1)
25#define ADX_WDT_TIMEOUT 0x08
26
27static struct platform_device *adx_wdt_dev;
28static unsigned long driver_open;
29
30#define WDT_STATE_STOP 0
31#define WDT_STATE_START 1
32
33struct adx_wdt {
34 void __iomem *base;
35 unsigned long timeout;
36 unsigned int state;
37 unsigned int wake;
38 spinlock_t lock;
39};
40
41static const struct watchdog_info adx_wdt_info = {
42 .identity = "Avionic Design Xanthos Watchdog",
43 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
44};
45
46static void adx_wdt_start_locked(struct adx_wdt *wdt)
47{
48 u32 ctrl;
49
50 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
51 ctrl |= ADX_WDT_CONTROL_ENABLE;
52 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
53 wdt->state = WDT_STATE_START;
54}
55
56static void adx_wdt_start(struct adx_wdt *wdt)
57{
58 unsigned long flags;
59
60 spin_lock_irqsave(&wdt->lock, flags);
61 adx_wdt_start_locked(wdt);
62 spin_unlock_irqrestore(&wdt->lock, flags);
63}
64
65static void adx_wdt_stop_locked(struct adx_wdt *wdt)
66{
67 u32 ctrl;
68
69 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
70 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
71 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
72 wdt->state = WDT_STATE_STOP;
73}
74
75static void adx_wdt_stop(struct adx_wdt *wdt)
76{
77 unsigned long flags;
78
79 spin_lock_irqsave(&wdt->lock, flags);
80 adx_wdt_stop_locked(wdt);
81 spin_unlock_irqrestore(&wdt->lock, flags);
82}
83
84static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
85{
86 unsigned long timeout = seconds * 1000;
87 unsigned long flags;
88 unsigned int state;
89
90 spin_lock_irqsave(&wdt->lock, flags);
91 state = wdt->state;
92 adx_wdt_stop_locked(wdt);
93 writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
94
95 if (state == WDT_STATE_START)
96 adx_wdt_start_locked(wdt);
97
98 wdt->timeout = timeout;
99 spin_unlock_irqrestore(&wdt->lock, flags);
100}
101
102static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
103{
104 *seconds = wdt->timeout / 1000;
105}
106
107static void adx_wdt_keepalive(struct adx_wdt *wdt)
108{
109 unsigned long flags;
110
111 spin_lock_irqsave(&wdt->lock, flags);
112 writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
113 spin_unlock_irqrestore(&wdt->lock, flags);
114}
115
116static int adx_wdt_open(struct inode *inode, struct file *file)
117{
118 struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
119
120 if (test_and_set_bit(0, &driver_open))
121 return -EBUSY;
122
123 file->private_data = wdt;
124 adx_wdt_set_timeout(wdt, 30);
125 adx_wdt_start(wdt);
126
127 return nonseekable_open(inode, file);
128}
129
130static int adx_wdt_release(struct inode *inode, struct file *file)
131{
132 struct adx_wdt *wdt = file->private_data;
133
134 adx_wdt_stop(wdt);
135 clear_bit(0, &driver_open);
136
137 return 0;
138}
139
140static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
141{
142 struct adx_wdt *wdt = file->private_data;
143 void __user *argp = (void __user *)arg;
144 unsigned long __user *p = argp;
145 unsigned long seconds = 0;
146 unsigned int options;
147 long ret = -EINVAL;
148
149 switch (cmd) {
150 case WDIOC_GETSUPPORT:
151 if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
152 return -EFAULT;
153 else
154 return 0;
155
156 case WDIOC_GETSTATUS:
157 case WDIOC_GETBOOTSTATUS:
158 return put_user(0, p);
159
160 case WDIOC_KEEPALIVE:
161 adx_wdt_keepalive(wdt);
162 return 0;
163
164 case WDIOC_SETTIMEOUT:
165 if (get_user(seconds, p))
166 return -EFAULT;
167
168 adx_wdt_set_timeout(wdt, seconds);
169
170 /* fallthrough */
171 case WDIOC_GETTIMEOUT:
172 adx_wdt_get_timeout(wdt, &seconds);
173 return put_user(seconds, p);
174
175 case WDIOC_SETOPTIONS:
176 if (copy_from_user(&options, argp, sizeof(options)))
177 return -EFAULT;
178
179 if (options & WDIOS_DISABLECARD) {
180 adx_wdt_stop(wdt);
181 ret = 0;
182 }
183
184 if (options & WDIOS_ENABLECARD) {
185 adx_wdt_start(wdt);
186 ret = 0;
187 }
188
189 return ret;
190
191 default:
192 break;
193 }
194
195 return -ENOTTY;
196}
197
198static ssize_t adx_wdt_write(struct file *file, const char __user *data,
199 size_t len, loff_t *ppos)
200{
201 struct adx_wdt *wdt = file->private_data;
202
203 if (len)
204 adx_wdt_keepalive(wdt);
205
206 return len;
207}
208
209static const struct file_operations adx_wdt_fops = {
210 .owner = THIS_MODULE,
211 .llseek = no_llseek,
212 .open = adx_wdt_open,
213 .release = adx_wdt_release,
214 .unlocked_ioctl = adx_wdt_ioctl,
215 .write = adx_wdt_write,
216};
217
218static struct miscdevice adx_wdt_miscdev = {
219 .minor = WATCHDOG_MINOR,
220 .name = "watchdog",
221 .fops = &adx_wdt_fops,
222};
223
224static int __devinit adx_wdt_probe(struct platform_device *pdev)
225{
226 struct resource *res;
227 struct adx_wdt *wdt;
228 int ret = 0;
229 u32 ctrl;
230
231 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
232 if (!wdt) {
233 dev_err(&pdev->dev, "cannot allocate WDT structure\n");
234 return -ENOMEM;
235 }
236
237 spin_lock_init(&wdt->lock);
238
239 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
240 if (!res) {
241 dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
242 return -ENXIO;
243 }
244
245 res = devm_request_mem_region(&pdev->dev, res->start,
246 resource_size(res), res->name);
247 if (!res) {
248 dev_err(&pdev->dev, "cannot request I/O memory region\n");
249 return -ENXIO;
250 }
251
252 wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
253 resource_size(res));
254 if (!wdt->base) {
255 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
256 return -ENXIO;
257 }
258
259 /* disable watchdog and reboot on timeout */
260 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
261 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
262 ctrl &= ~ADX_WDT_CONTROL_nRESET;
263 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
264
265 platform_set_drvdata(pdev, wdt);
266 adx_wdt_dev = pdev;
267
268 ret = misc_register(&adx_wdt_miscdev);
269 if (ret) {
270 dev_err(&pdev->dev, "cannot register miscdev on minor %d "
271 "(err=%d)\n", WATCHDOG_MINOR, ret);
272 return ret;
273 }
274
275 return 0;
276}
277
278static int __devexit adx_wdt_remove(struct platform_device *pdev)
279{
280 struct adx_wdt *wdt = platform_get_drvdata(pdev);
281
282 misc_deregister(&adx_wdt_miscdev);
283 adx_wdt_stop(wdt);
284 platform_set_drvdata(pdev, NULL);
285
286 return 0;
287}
288
289static void adx_wdt_shutdown(struct platform_device *pdev)
290{
291 struct adx_wdt *wdt = platform_get_drvdata(pdev);
292 adx_wdt_stop(wdt);
293}
294
295#ifdef CONFIG_PM
296static int adx_wdt_suspend(struct device *dev)
297{
298 struct platform_device *pdev = to_platform_device(dev);
299 struct adx_wdt *wdt = platform_get_drvdata(pdev);
300
301 wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
302 adx_wdt_stop(wdt);
303
304 return 0;
305}
306
307static int adx_wdt_resume(struct device *dev)
308{
309 struct platform_device *pdev = to_platform_device(dev);
310 struct adx_wdt *wdt = platform_get_drvdata(pdev);
311
312 if (wdt->wake)
313 adx_wdt_start(wdt);
314
315 return 0;
316}
317
318static const struct dev_pm_ops adx_wdt_pm_ops = {
319 .suspend = adx_wdt_suspend,
320 .resume = adx_wdt_resume,
321};
322
323# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
324#else
325# define ADX_WDT_PM_OPS NULL
326#endif
327
328static struct platform_driver adx_wdt_driver = {
329 .probe = adx_wdt_probe,
330 .remove = __devexit_p(adx_wdt_remove),
331 .shutdown = adx_wdt_shutdown,
332 .driver = {
333 .name = WATCHDOG_NAME,
334 .owner = THIS_MODULE,
335 .pm = ADX_WDT_PM_OPS,
336 },
337};
338
339static int __init adx_wdt_init(void)
340{
341 return platform_driver_register(&adx_wdt_driver);
342}
343
344static void __exit adx_wdt_exit(void)
345{
346 platform_driver_unregister(&adx_wdt_driver);
347}
348
349module_init(adx_wdt_init);
350module_exit(adx_wdt_exit);
351
352MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
353MODULE_LICENSE("GPL v2");
354MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
355MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
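The removed driver exposed the standard /dev/watchdog interface (WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE, writes as keepalives) through the watchdog miscdevice. For reference, a minimal userspace client of that generic interface, independent of any particular driver, looks roughly like:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int timeout = 30;
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }
            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);    /* request a 30 s timeout */
            for (int i = 0; i < 10; i++) {
                    ioctl(fd, WDIOC_KEEPALIVE, 0);    /* ping the watchdog      */
                    sleep(5);
            }
            write(fd, "V", 1);  /* "magic close" -- only honoured by drivers that
                                 * implement it; the deleted driver simply stopped
                                 * the watchdog on close */
            close(fd);
            return 0;
    }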
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5de7e4fa5b8..a79e3840782 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
401 401
402 dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", 402 dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
403 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", 403 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
404 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis", 404 (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
405 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en"); 405 (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
406 406
407 return 0; 407 return 0;
408 408
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 7be38556aed..e789a47db41 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
150 if (wm831x_wdt_cfgs[i].time == timeout) 150 if (wm831x_wdt_cfgs[i].time == timeout)
151 break; 151 break;
152 if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) 152 if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
153 ret = -EINVAL; 153 return -EINVAL;
154 154
155 ret = wm831x_reg_unlock(wm831x); 155 ret = wm831x_reg_unlock(wm831x);
156 if (ret == 0) { 156 if (ret == 0) {
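The wm831x_wdt fix returns -EINVAL as soon as no matching timeout configuration is found; before, the error was only stored in ret and then overwritten by the wm831x_reg_unlock() call that follows. The general pitfall, as a small sketch with placeholder names rather than the driver's code:

    #include <errno.h>

    /* Return the index of the matching configuration, or -EINVAL if none
     * matches.  Returning immediately avoids the original bug, where the
     * stored error code was clobbered by a later assignment to the same
     * variable. */
    static int pick_timeout_cfg(int timeout, const int *cfgs, int ncfgs)
    {
            int i;

            for (i = 0; i < ncfgs; i++)
                    if (cfgs[i] == timeout)
                            return i;
            return -EINVAL;
    }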
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a767884a6c7..31ab82fda38 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
501 * alloc_xenballooned_pages - get pages that have been ballooned out 501 * alloc_xenballooned_pages - get pages that have been ballooned out
502 * @nr_pages: Number of pages to get 502 * @nr_pages: Number of pages to get
503 * @pages: pages returned 503 * @pages: pages returned
504 * @highmem: highmem or lowmem pages 504 * @highmem: allow highmem pages
505 * @return 0 on success, error otherwise 505 * @return 0 on success, error otherwise
506 */ 506 */
507int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) 507int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
511 mutex_lock(&balloon_mutex); 511 mutex_lock(&balloon_mutex);
512 while (pgno < nr_pages) { 512 while (pgno < nr_pages) {
513 page = balloon_retrieve(highmem); 513 page = balloon_retrieve(highmem);
514 if (page && PageHighMem(page) == highmem) { 514 if (page && (highmem || !PageHighMem(page))) {
515 pages[pgno++] = page; 515 pages[pgno++] = page;
516 } else { 516 } else {
517 enum bp_state st; 517 enum bp_state st;
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index f6832f46aea..e1c4c6e5b46 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
135 /* Grant foreign access to the page. */ 135 /* Grant foreign access to the page. */
136 gref->gref_id = gnttab_grant_foreign_access(op->domid, 136 gref->gref_id = gnttab_grant_foreign_access(op->domid,
137 pfn_to_mfn(page_to_pfn(gref->page)), readonly); 137 pfn_to_mfn(page_to_pfn(gref->page)), readonly);
138 if (gref->gref_id < 0) { 138 if ((int)gref->gref_id < 0) {
139 rc = gref->gref_id; 139 rc = gref->gref_id;
140 goto undo; 140 goto undo;
141 } 141 }
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
280 goto out; 280 goto out;
281 } 281 }
282 282
283 gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY); 283 gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
284 if (!gref_ids) { 284 if (!gref_ids) {
285 rc = -ENOMEM; 285 rc = -ENOMEM;
286 goto out; 286 goto out;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 39871326afa..afca14d9042 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
114 if (NULL == add) 114 if (NULL == add)
115 return NULL; 115 return NULL;
116 116
117 add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL); 117 add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
118 add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL); 118 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
119 add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); 119 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
120 add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL); 120 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
121 add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL); 121 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
122 if (NULL == add->grants || 122 if (NULL == add->grants ||
123 NULL == add->map_ops || 123 NULL == add->map_ops ||
124 NULL == add->unmap_ops || 124 NULL == add->unmap_ops ||
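The gntalloc and gntdev hunks replace open-coded kzalloc(sizeof(elem) * count, ...) array allocations with kcalloc(count, sizeof(elem), ...). Both zero the memory, but kcalloc also returns NULL if count * size would overflow instead of silently allocating a short buffer. A kernel-style sketch:

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Allocate a zeroed array of 'count' u32 ids; kcalloc() checks the
     * count * size multiplication for overflow, which the open-coded
     * kzalloc(sizeof(u32) * count, ...) form does not. */
    static u32 *alloc_gref_ids(size_t count)
    {
            return kcalloc(count, sizeof(u32), GFP_KERNEL);
    }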
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 81c3ce6b8bb..1906125eab4 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -35,6 +35,7 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/export.h> 36#include <linux/export.h>
37#include <asm/xen/hypervisor.h> 37#include <asm/xen/hypervisor.h>
38#include <asm/xen/page.h>
38#include <xen/interface/xen.h> 39#include <xen/interface/xen.h>
39#include <xen/interface/event_channel.h> 40#include <xen/interface/event_channel.h>
40#include <xen/events.h> 41#include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
436int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) 437int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
437{ 438{
438 struct gnttab_map_grant_ref op = { 439 struct gnttab_map_grant_ref op = {
439 .flags = GNTMAP_host_map, 440 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
440 .ref = gnt_ref, 441 .ref = gnt_ref,
441 .dom = dev->otherend_id, 442 .dom = dev->otherend_id,
442 }; 443 };
443 struct vm_struct *area; 444 struct vm_struct *area;
445 pte_t *pte;
444 446
445 *vaddr = NULL; 447 *vaddr = NULL;
446 448
447 area = alloc_vm_area(PAGE_SIZE); 449 area = alloc_vm_area(PAGE_SIZE, &pte);
448 if (!area) 450 if (!area)
449 return -ENOMEM; 451 return -ENOMEM;
450 452
451 op.host_addr = (unsigned long)area->addr; 453 op.host_addr = arbitrary_virt_to_machine(pte).maddr;
452 454
453 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) 455 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
454 BUG(); 456 BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
527 struct gnttab_unmap_grant_ref op = { 529 struct gnttab_unmap_grant_ref op = {
528 .host_addr = (unsigned long)vaddr, 530 .host_addr = (unsigned long)vaddr,
529 }; 531 };
532 unsigned int level;
530 533
531 /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) 534 /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
532 * method so that we don't have to muck with vmalloc internals here. 535 * method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
548 } 551 }
549 552
550 op.handle = (grant_handle_t)area->phys_addr; 553 op.handle = (grant_handle_t)area->phys_addr;
554 op.host_addr = arbitrary_virt_to_machine(
555 lookup_address((unsigned long)vaddr, &level)).maddr;
551 556
552 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 557 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
553 BUG(); 558 BUG();
diff --git a/fs/bio.c b/fs/bio.c
index 41c93c72224..b1fe82cf88c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -337,7 +337,7 @@ static void bio_fs_destructor(struct bio *bio)
337 * RETURNS: 337 * RETURNS:
338 * Pointer to new bio on success, NULL on failure. 338 * Pointer to new bio on success, NULL on failure.
339 */ 339 */
340struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 340struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
341{ 341{
342 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 342 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
343 343
@@ -365,7 +365,7 @@ static void bio_kmalloc_destructor(struct bio *bio)
365 * %__GFP_WAIT, the allocation is guaranteed to succeed. 365 * %__GFP_WAIT, the allocation is guaranteed to succeed.
366 * 366 *
367 **/ 367 **/
368struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) 368struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
369{ 369{
370 struct bio *bio; 370 struct bio *bio;
371 371
@@ -696,7 +696,8 @@ static void bio_free_map_data(struct bio_map_data *bmd)
696 kfree(bmd); 696 kfree(bmd);
697} 697}
698 698
699static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, 699static struct bio_map_data *bio_alloc_map_data(int nr_segs,
700 unsigned int iov_count,
700 gfp_t gfp_mask) 701 gfp_t gfp_mask)
701{ 702{
702 struct bio_map_data *bmd; 703 struct bio_map_data *bmd;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929..22c64fff1bd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
683 return PTR_ERR(fspath); 683 return PTR_ERR(fspath);
684 684
685 if (fspath > fspath_min) { 685 if (fspath > fspath_min) {
686 ipath->fspath->val[i] = (u64)fspath; 686 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
687 ++ipath->fspath->elem_cnt; 687 ++ipath->fspath->elem_cnt;
688 ipath->fspath->bytes_left = fspath - fspath_min; 688 ipath->fspath->bytes_left = fspath - fspath_min;
689 } else { 689 } else {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea3..dede441bdee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
514 struct btrfs_root *root, 514 struct btrfs_root *root,
515 struct extent_buffer *buf) 515 struct extent_buffer *buf)
516{ 516{
517 /* ensure we can see the force_cow */
518 smp_rmb();
519
520 /*
521 * We do not need to cow a block if
522 * 1) this block is not created or changed in this transaction;
523 * 2) this block does not belong to TREE_RELOC tree;
524 * 3) the root is not forced COW.
525 *
526 * What is forced COW:
 527 * when we create a snapshot while committing the transaction,
 528 * after we've finished copying the src root, we must COW the shared
529 * block to ensure the metadata consistency.
530 */
517 if (btrfs_header_generation(buf) == trans->transid && 531 if (btrfs_header_generation(buf) == trans->transid &&
518 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 532 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
519 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 533 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
520 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 534 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
535 !root->force_cow)
521 return 0; 536 return 0;
522 return 1; 537 return 1;
523} 538}
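The smp_rmb() added to should_cow_block() pairs with a write barrier on the path that sets root->force_cow during snapshot creation; that writer side is not part of this hunk. One plausible shape of the pairing, shown purely as an assumption:

    /* Hypothetical writer side (assumed, not taken from this patch):
     * publish force_cow before snapshot creation continues, so that the
     * smp_rmb() in should_cow_block() observes it. */
    static void publish_force_cow(struct btrfs_root *root)
    {
            root->force_cow = 1;
            smp_wmb();
    }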
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff929..50634abef9b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
848enum btrfs_caching_type { 848enum btrfs_caching_type {
849 BTRFS_CACHE_NO = 0, 849 BTRFS_CACHE_NO = 0,
850 BTRFS_CACHE_STARTED = 1, 850 BTRFS_CACHE_STARTED = 1,
851 BTRFS_CACHE_FINISHED = 2, 851 BTRFS_CACHE_FAST = 2,
852 BTRFS_CACHE_FINISHED = 3,
852}; 853};
853 854
854enum btrfs_disk_cache_state { 855enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
1271 * for stat. It may be used for more later 1272 * for stat. It may be used for more later
1272 */ 1273 */
1273 dev_t anon_dev; 1274 dev_t anon_dev;
1275
1276 int force_cow;
1274}; 1277};
1275 1278
1276struct btrfs_ioctl_defrag_range_args { 1279struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
2366int btrfs_block_rsv_refill(struct btrfs_root *root, 2369int btrfs_block_rsv_refill(struct btrfs_root *root,
2367 struct btrfs_block_rsv *block_rsv, 2370 struct btrfs_block_rsv *block_rsv,
2368 u64 min_reserved); 2371 u64 min_reserved);
2372int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
2373 struct btrfs_block_rsv *block_rsv,
2374 u64 min_reserved);
2369int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 2375int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2370 struct btrfs_block_rsv *dst_rsv, 2376 struct btrfs_block_rsv *dst_rsv,
2371 u64 num_bytes); 2377 u64 num_bytes);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62afe5c5694..632f8f3cc9d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
620 620
621static int btree_io_failed_hook(struct bio *failed_bio, 621static int btree_io_failed_hook(struct bio *failed_bio,
622 struct page *page, u64 start, u64 end, 622 struct page *page, u64 start, u64 end,
623 u64 mirror_num, struct extent_state *state) 623 int mirror_num, struct extent_state *state)
624{ 624{
625 struct extent_io_tree *tree; 625 struct extent_io_tree *tree;
626 unsigned long len; 626 unsigned long len;
@@ -2573,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device,
2573 int errors = 0; 2573 int errors = 0;
2574 u32 crc; 2574 u32 crc;
2575 u64 bytenr; 2575 u64 bytenr;
2576 int last_barrier = 0;
2577 2576
2578 if (max_mirrors == 0) 2577 if (max_mirrors == 0)
2579 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 2578 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2580 2579
2581 /* make sure only the last submit_bh does a barrier */
2582 if (do_barriers) {
2583 for (i = 0; i < max_mirrors; i++) {
2584 bytenr = btrfs_sb_offset(i);
2585 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2586 device->total_bytes)
2587 break;
2588 last_barrier = i;
2589 }
2590 }
2591
2592 for (i = 0; i < max_mirrors; i++) { 2580 for (i = 0; i < max_mirrors; i++) {
2593 bytenr = btrfs_sb_offset(i); 2581 bytenr = btrfs_sb_offset(i);
2594 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) 2582 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2634,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device,
2634 bh->b_end_io = btrfs_end_buffer_write_sync; 2622 bh->b_end_io = btrfs_end_buffer_write_sync;
2635 } 2623 }
2636 2624
2637 if (i == last_barrier && do_barriers) 2625 /*
2638 ret = submit_bh(WRITE_FLUSH_FUA, bh); 2626 * we fua the first super. The others we allow
2639 else 2627 * to go down lazy.
2640 ret = submit_bh(WRITE_SYNC, bh); 2628 */
2641 2629 ret = submit_bh(WRITE_FUA, bh);
2642 if (ret) 2630 if (ret)
2643 errors++; 2631 errors++;
2644 } 2632 }
2645 return errors < i ? 0 : -1; 2633 return errors < i ? 0 : -1;
2646} 2634}
2647 2635
2636/*
2637 * endio for the write_dev_flush, this will wake anyone waiting
2638 * for the barrier when it is done
2639 */
2640static void btrfs_end_empty_barrier(struct bio *bio, int err)
2641{
2642 if (err) {
2643 if (err == -EOPNOTSUPP)
2644 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2645 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2646 }
2647 if (bio->bi_private)
2648 complete(bio->bi_private);
2649 bio_put(bio);
2650}
2651
2652/*
 2653 * trigger flushes for one of the devices. If you pass wait == 0, the flushes are
2654 * sent down. With wait == 1, it waits for the previous flush.
2655 *
 2656 * any device where the flush fails with eopnotsupp is flagged as not-barrier
2657 * capable
2658 */
2659static int write_dev_flush(struct btrfs_device *device, int wait)
2660{
2661 struct bio *bio;
2662 int ret = 0;
2663
2664 if (device->nobarriers)
2665 return 0;
2666
2667 if (wait) {
2668 bio = device->flush_bio;
2669 if (!bio)
2670 return 0;
2671
2672 wait_for_completion(&device->flush_wait);
2673
2674 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2675 printk("btrfs: disabling barriers on dev %s\n",
2676 device->name);
2677 device->nobarriers = 1;
2678 }
2679 if (!bio_flagged(bio, BIO_UPTODATE)) {
2680 ret = -EIO;
2681 }
2682
2683 /* drop the reference from the wait == 0 run */
2684 bio_put(bio);
2685 device->flush_bio = NULL;
2686
2687 return ret;
2688 }
2689
2690 /*
2691 * one reference for us, and we leave it for the
2692 * caller
2693 */
 2694 device->flush_bio = NULL;
2695 bio = bio_alloc(GFP_NOFS, 0);
2696 if (!bio)
2697 return -ENOMEM;
2698
2699 bio->bi_end_io = btrfs_end_empty_barrier;
2700 bio->bi_bdev = device->bdev;
2701 init_completion(&device->flush_wait);
2702 bio->bi_private = &device->flush_wait;
2703 device->flush_bio = bio;
2704
2705 bio_get(bio);
2706 submit_bio(WRITE_FLUSH, bio);
2707
2708 return 0;
2709}
2710
2711/*
2712 * send an empty flush down to each device in parallel,
2713 * then wait for them
2714 */
2715static int barrier_all_devices(struct btrfs_fs_info *info)
2716{
2717 struct list_head *head;
2718 struct btrfs_device *dev;
2719 int errors = 0;
2720 int ret;
2721
2722 /* send down all the barriers */
2723 head = &info->fs_devices->devices;
2724 list_for_each_entry_rcu(dev, head, dev_list) {
2725 if (!dev->bdev) {
2726 errors++;
2727 continue;
2728 }
2729 if (!dev->in_fs_metadata || !dev->writeable)
2730 continue;
2731
2732 ret = write_dev_flush(dev, 0);
2733 if (ret)
2734 errors++;
2735 }
2736
2737 /* wait for all the barriers */
2738 list_for_each_entry_rcu(dev, head, dev_list) {
2739 if (!dev->bdev) {
2740 errors++;
2741 continue;
2742 }
2743 if (!dev->in_fs_metadata || !dev->writeable)
2744 continue;
2745
2746 ret = write_dev_flush(dev, 1);
2747 if (ret)
2748 errors++;
2749 }
2750 if (errors)
2751 return -EIO;
2752 return 0;
2753}
2754
2648int write_all_supers(struct btrfs_root *root, int max_mirrors) 2755int write_all_supers(struct btrfs_root *root, int max_mirrors)
2649{ 2756{
2650 struct list_head *head; 2757 struct list_head *head;
@@ -2666,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2666 2773
2667 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2774 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2668 head = &root->fs_info->fs_devices->devices; 2775 head = &root->fs_info->fs_devices->devices;
2776
2777 if (do_barriers)
2778 barrier_all_devices(root->fs_info);
2779
2669 list_for_each_entry_rcu(dev, head, dev_list) { 2780 list_for_each_entry_rcu(dev, head, dev_list) {
2670 if (!dev->bdev) { 2781 if (!dev->bdev) {
2671 total_errors++; 2782 total_errors++;
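write_dev_flush() and barrier_all_devices() above implement a submit-then-wait pattern: an empty WRITE_FLUSH bio is sent to every device first, and only afterwards does the code wait for each completion, so the flushes run in parallel rather than one after another. A stripped-down sketch of the same pattern using completions; struct flush_dev and issue_flush() are hypothetical stand-ins, not the btrfs code itself:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical per-device state; issue_flush() stands in for code that
     * submits an empty flush bio and calls complete(&dev->done) from its
     * end_io handler, recording any error in dev->error. */
    struct flush_dev {
            struct completion done;
            bool submitted;
            int error;
    };

    int issue_flush(struct flush_dev *dev);    /* assumed helper, not shown */

    static int flush_all(struct flush_dev *devs, int ndev)
    {
            int i, errors = 0;

            for (i = 0; i < ndev; i++) {               /* phase 1: submit all   */
                    init_completion(&devs[i].done);
                    devs[i].submitted = (issue_flush(&devs[i]) == 0);
                    if (!devs[i].submitted)
                            errors++;
            }
            for (i = 0; i < ndev; i++) {               /* phase 2: wait for all */
                    if (!devs[i].submitted)
                            continue;
                    wait_for_completion(&devs[i].done);
                    if (devs[i].error)
                            errors++;
            }
            return errors ? -EIO : 0;
    }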
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b232150b5b6..f0d5718d258 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
467 struct btrfs_root *root, 467 struct btrfs_root *root,
468 int load_cache_only) 468 int load_cache_only)
469{ 469{
470 DEFINE_WAIT(wait);
470 struct btrfs_fs_info *fs_info = cache->fs_info; 471 struct btrfs_fs_info *fs_info = cache->fs_info;
471 struct btrfs_caching_control *caching_ctl; 472 struct btrfs_caching_control *caching_ctl;
472 int ret = 0; 473 int ret = 0;
473 474
474 smp_mb(); 475 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
475 if (cache->cached != BTRFS_CACHE_NO) 476 BUG_ON(!caching_ctl);
477
478 INIT_LIST_HEAD(&caching_ctl->list);
479 mutex_init(&caching_ctl->mutex);
480 init_waitqueue_head(&caching_ctl->wait);
481 caching_ctl->block_group = cache;
482 caching_ctl->progress = cache->key.objectid;
483 atomic_set(&caching_ctl->count, 1);
484 caching_ctl->work.func = caching_thread;
485
486 spin_lock(&cache->lock);
487 /*
488 * This should be a rare occasion, but this could happen I think in the
489 * case where one thread starts to load the space cache info, and then
490 * some other thread starts a transaction commit which tries to do an
491 * allocation while the other thread is still loading the space cache
492 * info. The previous loop should have kept us from choosing this block
493 * group, but if we've moved to the state where we will wait on caching
494 * block groups we need to first check if we're doing a fast load here,
495 * so we can wait for it to finish, otherwise we could end up allocating
 496 * from a block group whose cache gets evicted for one reason or
497 * another.
498 */
499 while (cache->cached == BTRFS_CACHE_FAST) {
500 struct btrfs_caching_control *ctl;
501
502 ctl = cache->caching_ctl;
503 atomic_inc(&ctl->count);
504 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
505 spin_unlock(&cache->lock);
506
507 schedule();
508
509 finish_wait(&ctl->wait, &wait);
510 put_caching_control(ctl);
511 spin_lock(&cache->lock);
512 }
513
514 if (cache->cached != BTRFS_CACHE_NO) {
515 spin_unlock(&cache->lock);
516 kfree(caching_ctl);
476 return 0; 517 return 0;
518 }
519 WARN_ON(cache->caching_ctl);
520 cache->caching_ctl = caching_ctl;
521 cache->cached = BTRFS_CACHE_FAST;
522 spin_unlock(&cache->lock);
477 523
478 /* 524 /*
479 * We can't do the read from on-disk cache during a commit since we need 525 * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
484 if (trans && (!trans->transaction->in_commit) && 530 if (trans && (!trans->transaction->in_commit) &&
485 (root && root != root->fs_info->tree_root) && 531 (root && root != root->fs_info->tree_root) &&
486 btrfs_test_opt(root, SPACE_CACHE)) { 532 btrfs_test_opt(root, SPACE_CACHE)) {
487 spin_lock(&cache->lock);
488 if (cache->cached != BTRFS_CACHE_NO) {
489 spin_unlock(&cache->lock);
490 return 0;
491 }
492 cache->cached = BTRFS_CACHE_STARTED;
493 spin_unlock(&cache->lock);
494
495 ret = load_free_space_cache(fs_info, cache); 533 ret = load_free_space_cache(fs_info, cache);
496 534
497 spin_lock(&cache->lock); 535 spin_lock(&cache->lock);
498 if (ret == 1) { 536 if (ret == 1) {
537 cache->caching_ctl = NULL;
499 cache->cached = BTRFS_CACHE_FINISHED; 538 cache->cached = BTRFS_CACHE_FINISHED;
500 cache->last_byte_to_unpin = (u64)-1; 539 cache->last_byte_to_unpin = (u64)-1;
501 } else { 540 } else {
502 cache->cached = BTRFS_CACHE_NO; 541 if (load_cache_only) {
542 cache->caching_ctl = NULL;
543 cache->cached = BTRFS_CACHE_NO;
544 } else {
545 cache->cached = BTRFS_CACHE_STARTED;
546 }
503 } 547 }
504 spin_unlock(&cache->lock); 548 spin_unlock(&cache->lock);
549 wake_up(&caching_ctl->wait);
505 if (ret == 1) { 550 if (ret == 1) {
551 put_caching_control(caching_ctl);
506 free_excluded_extents(fs_info->extent_root, cache); 552 free_excluded_extents(fs_info->extent_root, cache);
507 return 0; 553 return 0;
508 } 554 }
555 } else {
556 /*
557 * We are not going to do the fast caching, set cached to the
558 * appropriate value and wakeup any waiters.
559 */
560 spin_lock(&cache->lock);
561 if (load_cache_only) {
562 cache->caching_ctl = NULL;
563 cache->cached = BTRFS_CACHE_NO;
564 } else {
565 cache->cached = BTRFS_CACHE_STARTED;
566 }
567 spin_unlock(&cache->lock);
568 wake_up(&caching_ctl->wait);
509 } 569 }
510 570
511 if (load_cache_only) 571 if (load_cache_only) {
512 return 0; 572 put_caching_control(caching_ctl);
513
514 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
515 BUG_ON(!caching_ctl);
516
517 INIT_LIST_HEAD(&caching_ctl->list);
518 mutex_init(&caching_ctl->mutex);
519 init_waitqueue_head(&caching_ctl->wait);
520 caching_ctl->block_group = cache;
521 caching_ctl->progress = cache->key.objectid;
522 /* one for caching kthread, one for caching block group list */
523 atomic_set(&caching_ctl->count, 2);
524 caching_ctl->work.func = caching_thread;
525
526 spin_lock(&cache->lock);
527 if (cache->cached != BTRFS_CACHE_NO) {
528 spin_unlock(&cache->lock);
529 kfree(caching_ctl);
530 return 0; 573 return 0;
531 } 574 }
532 cache->caching_ctl = caching_ctl;
533 cache->cached = BTRFS_CACHE_STARTED;
534 spin_unlock(&cache->lock);
535 575
536 down_write(&fs_info->extent_commit_sem); 576 down_write(&fs_info->extent_commit_sem);
577 atomic_inc(&caching_ctl->count);
537 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 578 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
538 up_write(&fs_info->extent_commit_sem); 579 up_write(&fs_info->extent_commit_sem);
539 580
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
3847 return ret; 3888 return ret;
3848} 3889}
3849 3890
3850int btrfs_block_rsv_refill(struct btrfs_root *root, 3891static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
3851 struct btrfs_block_rsv *block_rsv, 3892 struct btrfs_block_rsv *block_rsv,
3852 u64 min_reserved) 3893 u64 min_reserved, int flush)
3853{ 3894{
3854 u64 num_bytes = 0; 3895 u64 num_bytes = 0;
3855 int ret = -ENOSPC; 3896 int ret = -ENOSPC;
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
3868 if (!ret) 3909 if (!ret)
3869 return 0; 3910 return 0;
3870 3911
3871 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); 3912 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3872 if (!ret) { 3913 if (!ret) {
3873 block_rsv_add_bytes(block_rsv, num_bytes, 0); 3914 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3874 return 0; 3915 return 0;
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
3877 return ret; 3918 return ret;
3878} 3919}
3879 3920
3921int btrfs_block_rsv_refill(struct btrfs_root *root,
3922 struct btrfs_block_rsv *block_rsv,
3923 u64 min_reserved)
3924{
3925 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
3926}
3927
3928int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
3929 struct btrfs_block_rsv *block_rsv,
3930 u64 min_reserved)
3931{
3932 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
3933}
3934
3880int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 3935int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3881 struct btrfs_block_rsv *dst_rsv, 3936 struct btrfs_block_rsv *dst_rsv,
3882 u64 num_bytes) 3937 u64 num_bytes)
@@ -5178,13 +5233,15 @@ search:
5178 } 5233 }
5179 5234
5180have_block_group: 5235have_block_group:
5181 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 5236 cached = block_group_cache_done(block_group);
5237 if (unlikely(!cached)) {
5182 u64 free_percent; 5238 u64 free_percent;
5183 5239
5240 found_uncached_bg = true;
5184 ret = cache_block_group(block_group, trans, 5241 ret = cache_block_group(block_group, trans,
5185 orig_root, 1); 5242 orig_root, 1);
5186 if (block_group->cached == BTRFS_CACHE_FINISHED) 5243 if (block_group->cached == BTRFS_CACHE_FINISHED)
5187 goto have_block_group; 5244 goto alloc;
5188 5245
5189 free_percent = btrfs_block_group_used(&block_group->item); 5246 free_percent = btrfs_block_group_used(&block_group->item);
5190 free_percent *= 100; 5247 free_percent *= 100;
@@ -5206,7 +5263,6 @@ have_block_group:
5206 orig_root, 0); 5263 orig_root, 0);
5207 BUG_ON(ret); 5264 BUG_ON(ret);
5208 } 5265 }
5209 found_uncached_bg = true;
5210 5266
5211 /* 5267 /*
5212 * If loop is set for cached only, try the next block 5268 * If loop is set for cached only, try the next block
@@ -5216,17 +5272,14 @@ have_block_group:
5216 goto loop; 5272 goto loop;
5217 } 5273 }
5218 5274
5219 cached = block_group_cache_done(block_group); 5275alloc:
5220 if (unlikely(!cached))
5221 found_uncached_bg = true;
5222
5223 if (unlikely(block_group->ro)) 5276 if (unlikely(block_group->ro))
5224 goto loop; 5277 goto loop;
5225 5278
5226 spin_lock(&block_group->free_space_ctl->tree_lock); 5279 spin_lock(&block_group->free_space_ctl->tree_lock);
5227 if (cached && 5280 if (cached &&
5228 block_group->free_space_ctl->free_space < 5281 block_group->free_space_ctl->free_space <
5229 num_bytes + empty_size) { 5282 num_bytes + empty_cluster + empty_size) {
5230 spin_unlock(&block_group->free_space_ctl->tree_lock); 5283 spin_unlock(&block_group->free_space_ctl->tree_lock);
5231 goto loop; 5284 goto loop;
5232 } 5285 }
@@ -5247,12 +5300,10 @@ have_block_group:
5247 * people trying to start a new cluster 5300 * people trying to start a new cluster
5248 */ 5301 */
5249 spin_lock(&last_ptr->refill_lock); 5302 spin_lock(&last_ptr->refill_lock);
5250 if (last_ptr->block_group && 5303 if (!last_ptr->block_group ||
5251 (last_ptr->block_group->ro || 5304 last_ptr->block_group->ro ||
5252 !block_group_bits(last_ptr->block_group, data))) { 5305 !block_group_bits(last_ptr->block_group, data))
5253 offset = 0;
5254 goto refill_cluster; 5306 goto refill_cluster;
5255 }
5256 5307
5257 offset = btrfs_alloc_from_cluster(block_group, last_ptr, 5308 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5258 num_bytes, search_start); 5309 num_bytes, search_start);
@@ -5303,7 +5354,7 @@ refill_cluster:
5303 /* allocate a cluster in this block group */ 5354 /* allocate a cluster in this block group */
5304 ret = btrfs_find_space_cluster(trans, root, 5355 ret = btrfs_find_space_cluster(trans, root,
5305 block_group, last_ptr, 5356 block_group, last_ptr,
5306 offset, num_bytes, 5357 search_start, num_bytes,
5307 empty_cluster + empty_size); 5358 empty_cluster + empty_size);
5308 if (ret == 0) { 5359 if (ret == 0) {
5309 /* 5360 /*
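The new BTRFS_CACHE_FAST wait loop in cache_block_group() uses the classic sleep-on-condition idiom: prepare_to_wait() while still holding the lock, drop the lock, schedule(), then finish_wait() and re-take the lock before re-checking. A generic sketch of that idiom; struct busy_obj and its fields are placeholders, not btrfs symbols:

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    /* Hypothetical object guarded by a spinlock, with a 'busy' condition
     * and a wait queue that the releasing path wakes with wake_up(). */
    struct busy_obj {
            spinlock_t lock;
            wait_queue_head_t waitq;
            bool busy;
    };

    static void wait_until_idle(struct busy_obj *obj)
    {
            DEFINE_WAIT(wait);

            spin_lock(&obj->lock);
            while (obj->busy) {
                    /* queue ourselves before dropping the lock so a wake_up()
                     * issued in between cannot be missed */
                    prepare_to_wait(&obj->waitq, &wait, TASK_UNINTERRUPTIBLE);
                    spin_unlock(&obj->lock);
                    schedule();
                    finish_wait(&obj->waitq, &wait);
                    spin_lock(&obj->lock);
            }
            spin_unlock(&obj->lock);
    }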
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a..be1bf627a14 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2285,16 +2285,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2285 clean_io_failure(start, page); 2285 clean_io_failure(start, page);
2286 } 2286 }
2287 if (!uptodate) { 2287 if (!uptodate) {
2288 u64 failed_mirror; 2288 int failed_mirror;
2289 failed_mirror = (u64)bio->bi_bdev; 2289 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2290 if (tree->ops && tree->ops->readpage_io_failed_hook) 2290 /*
2291 ret = tree->ops->readpage_io_failed_hook( 2291 * The generic bio_readpage_error handles errors the
2292 bio, page, start, end, 2292 * following way: If possible, new read requests are
2293 failed_mirror, state); 2293 * created and submitted and will end up in
2294 else 2294 * end_bio_extent_readpage as well (if we're lucky, not
2295 ret = bio_readpage_error(bio, page, start, end, 2295 * in the !uptodate case). In that case it returns 0 and
2296 failed_mirror, NULL); 2296 * we just go on with the next page in our bio. If it
2297 * can't handle the error it will return -EIO and we
2298 * remain responsible for that page.
2299 */
2300 ret = bio_readpage_error(bio, page, start, end,
2301 failed_mirror, NULL);
2297 if (ret == 0) { 2302 if (ret == 0) {
2303error_handled:
2298 uptodate = 2304 uptodate =
2299 test_bit(BIO_UPTODATE, &bio->bi_flags); 2305 test_bit(BIO_UPTODATE, &bio->bi_flags);
2300 if (err) 2306 if (err)
@@ -2302,6 +2308,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2302 uncache_state(&cached); 2308 uncache_state(&cached);
2303 continue; 2309 continue;
2304 } 2310 }
2311 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2312 ret = tree->ops->readpage_io_failed_hook(
2313 bio, page, start, end,
2314 failed_mirror, state);
2315 if (ret == 0)
2316 goto error_handled;
2317 }
2305 } 2318 }
2306 2319
2307 if (uptodate) { 2320 if (uptodate) {
@@ -3366,6 +3379,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3366 return -ENOMEM; 3379 return -ENOMEM;
3367 path->leave_spinning = 1; 3380 path->leave_spinning = 1;
3368 3381
3382 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3383 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3384
3369 /* 3385 /*
3370 * lookup the last file extent. We're not using i_size here 3386 * lookup the last file extent. We're not using i_size here
3371 * because there might be preallocation past i_size 3387 * because there might be preallocation past i_size
@@ -3413,7 +3429,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3413 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3429 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3414 &cached_state, GFP_NOFS); 3430 &cached_state, GFP_NOFS);
3415 3431
3416 em = get_extent_skip_holes(inode, off, last_for_get_extent, 3432 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3417 get_extent); 3433 get_extent);
3418 if (!em) 3434 if (!em)
3419 goto out; 3435 goto out;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23b..7604c300132 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
70 unsigned long bio_flags); 70 unsigned long bio_flags);
71 int (*readpage_io_hook)(struct page *page, u64 start, u64 end); 71 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
72 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, 72 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
73 u64 start, u64 end, u64 failed_mirror, 73 u64 start, u64 end, int failed_mirror,
74 struct extent_state *state); 74 struct extent_state *state);
75 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, 75 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
76 u64 start, u64 end, 76 u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 181760f9d2a..ec23d43d0c3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
351 } 351 }
352 } 352 }
353 353
354 for (i = 0; i < io_ctl->num_pages; i++) {
355 clear_page_dirty_for_io(io_ctl->pages[i]);
356 set_page_extent_mapped(io_ctl->pages[i]);
357 }
358
354 return 0; 359 return 0;
355} 360}
356 361
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1465{ 1470{
1466 info->offset = offset_to_bitmap(ctl, offset); 1471 info->offset = offset_to_bitmap(ctl, offset);
1467 info->bytes = 0; 1472 info->bytes = 0;
1473 INIT_LIST_HEAD(&info->list);
1468 link_free_space(ctl, info); 1474 link_free_space(ctl, info);
1469 ctl->total_bitmaps++; 1475 ctl->total_bitmaps++;
1470 1476
@@ -1844,7 +1850,13 @@ again:
1844 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1850 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1845 1, 0); 1851 1, 0);
1846 if (!info) { 1852 if (!info) {
1847 WARN_ON(1); 1853 /* the tree logging code might be calling us before we
1854 * have fully loaded the free space rbtree for this
1855 * block group. So it is possible the entry won't
1856 * be in the rbtree yet at all. The caching code
1857 * will make sure not to put it in the rbtree if
1858 * the logging code has pinned it.
1859 */
1848 goto out_lock; 1860 goto out_lock;
1849 } 1861 }
1850 } 1862 }
@@ -2308,6 +2320,7 @@ again:
2308 2320
2309 if (!found) { 2321 if (!found) {
2310 start = i; 2322 start = i;
2323 cluster->max_size = 0;
2311 found = true; 2324 found = true;
2312 } 2325 }
2313 2326
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2451{ 2464{
2452 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2465 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2453 struct btrfs_free_space *entry; 2466 struct btrfs_free_space *entry;
2454 struct rb_node *node;
2455 int ret = -ENOSPC; 2467 int ret = -ENOSPC;
2468 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2456 2469
2457 if (ctl->total_bitmaps == 0) 2470 if (ctl->total_bitmaps == 0)
2458 return -ENOSPC; 2471 return -ENOSPC;
2459 2472
2460 /* 2473 /*
2461 * First check our cached list of bitmaps and see if there is an entry 2474 * The bitmap that covers offset won't be in the list unless offset
2462 * here that will work. 2475 * is just its start offset.
2463 */ 2476 */
2477 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2478 if (entry->offset != bitmap_offset) {
2479 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2480 if (entry && list_empty(&entry->list))
2481 list_add(&entry->list, bitmaps);
2482 }
2483
2464 list_for_each_entry(entry, bitmaps, list) { 2484 list_for_each_entry(entry, bitmaps, list) {
2465 if (entry->bytes < min_bytes) 2485 if (entry->bytes < min_bytes)
2466 continue; 2486 continue;
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2471 } 2491 }
2472 2492
2473 /* 2493 /*
2474 * If we do have entries on our list and we are here then we didn't find 2494 * The bitmaps list has all the bitmaps that record free space
2475 * anything, so go ahead and get the next entry after the last entry in 2495 * starting after offset, so no more search is required.
2476 * this list and start the search from there.
2477 */ 2496 */
2478 if (!list_empty(bitmaps)) { 2497 return -ENOSPC;
2479 entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2480 list);
2481 node = rb_next(&entry->offset_index);
2482 if (!node)
2483 return -ENOSPC;
2484 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2485 goto search;
2486 }
2487
2488 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2489 if (!entry)
2490 return -ENOSPC;
2491
2492search:
2493 node = &entry->offset_index;
2494 do {
2495 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2496 node = rb_next(&entry->offset_index);
2497 if (!entry->bitmap)
2498 continue;
2499 if (entry->bytes < min_bytes)
2500 continue;
2501 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2502 bytes, min_bytes);
2503 } while (ret && node);
2504
2505 return ret;
2506} 2498}
2507 2499
2508/* 2500/*
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2520 u64 offset, u64 bytes, u64 empty_size) 2512 u64 offset, u64 bytes, u64 empty_size)
2521{ 2513{
2522 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2514 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2523 struct list_head bitmaps;
2524 struct btrfs_free_space *entry, *tmp; 2515 struct btrfs_free_space *entry, *tmp;
2516 LIST_HEAD(bitmaps);
2525 u64 min_bytes; 2517 u64 min_bytes;
2526 int ret; 2518 int ret;
2527 2519
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2560 goto out; 2552 goto out;
2561 } 2553 }
2562 2554
2563 INIT_LIST_HEAD(&bitmaps);
2564 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, 2555 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2565 bytes, min_bytes); 2556 bytes, min_bytes);
2566 if (ret) 2557 if (ret)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 116ab67a06d..2c984f7d4c2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
3490 * doing the truncate. 3490 * doing the truncate.
3491 */ 3491 */
3492 while (1) { 3492 while (1) {
3493 ret = btrfs_block_rsv_refill(root, rsv, min_size); 3493 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3494 3494
3495 /* 3495 /*
3496 * Try and steal from the global reserve since we will 3496 * Try and steal from the global reserve since we will
@@ -6794,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
6794 struct dentry *dentry, struct kstat *stat) 6794 struct dentry *dentry, struct kstat *stat)
6795{ 6795{
6796 struct inode *inode = dentry->d_inode; 6796 struct inode *inode = dentry->d_inode;
6797 u32 blocksize = inode->i_sb->s_blocksize;
6798
6797 generic_fillattr(inode, stat); 6799 generic_fillattr(inode, stat);
6798 stat->dev = BTRFS_I(inode)->root->anon_dev; 6800 stat->dev = BTRFS_I(inode)->root->anon_dev;
6799 stat->blksize = PAGE_CACHE_SIZE; 6801 stat->blksize = PAGE_CACHE_SIZE;
6800 stat->blocks = (inode_get_bytes(inode) + 6802 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
6801 BTRFS_I(inode)->delalloc_bytes) >> 9; 6803 ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
6802 return 0; 6804 return 0;
6803} 6805}
6804 6806
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f12..72d461656f6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1216 *devstr = '\0'; 1216 *devstr = '\0';
1217 devstr = vol_args->name; 1217 devstr = vol_args->name;
1218 devid = simple_strtoull(devstr, &end, 10); 1218 devid = simple_strtoull(devstr, &end, 10);
1219 printk(KERN_INFO "resizing devid %llu\n", 1219 printk(KERN_INFO "btrfs: resizing devid %llu\n",
1220 (unsigned long long)devid); 1220 (unsigned long long)devid);
1221 } 1221 }
1222 device = btrfs_find_device(root, devid, NULL, NULL); 1222 device = btrfs_find_device(root, devid, NULL, NULL);
1223 if (!device) { 1223 if (!device) {
1224 printk(KERN_INFO "resizer unable to find device %llu\n", 1224 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
1225 (unsigned long long)devid); 1225 (unsigned long long)devid);
1226 ret = -EINVAL; 1226 ret = -EINVAL;
1227 goto out_unlock; 1227 goto out_unlock;
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1267 do_div(new_size, root->sectorsize); 1267 do_div(new_size, root->sectorsize);
1268 new_size *= root->sectorsize; 1268 new_size *= root->sectorsize;
1269 1269
1270 printk(KERN_INFO "new size for %s is %llu\n", 1270 printk(KERN_INFO "btrfs: new size for %s is %llu\n",
1271 device->name, (unsigned long long)new_size); 1271 device->name, (unsigned long long)new_size);
1272 1272
1273 if (new_size > old_size) { 1273 if (new_size > old_size) {
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1278 } 1278 }
1279 ret = btrfs_grow_device(trans, device, new_size); 1279 ret = btrfs_grow_device(trans, device, new_size);
1280 btrfs_commit_transaction(trans, root); 1280 btrfs_commit_transaction(trans, root);
1281 } else { 1281 } else if (new_size < old_size) {
1282 ret = btrfs_shrink_device(device, new_size); 1282 ret = btrfs_shrink_device(device, new_size);
1283 } 1283 }
1284 1284
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
2930 goto out; 2930 goto out;
2931 2931
2932 for (i = 0; i < ipath->fspath->elem_cnt; ++i) { 2932 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
2933 rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val; 2933 rel_ptr = ipath->fspath->val[i] -
2934 (u64)(unsigned long)ipath->fspath->val;
2934 ipath->fspath->val[i] = rel_ptr; 2935 ipath->fspath->val[i] = rel_ptr;
2935 } 2936 }
2936 2937
2937 ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size); 2938 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
2939 (void *)(unsigned long)ipath->fspath, size);
2938 if (ret) { 2940 if (ret) {
2939 ret = -EFAULT; 2941 ret = -EFAULT;
2940 goto out; 2942 goto out;
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
3017 if (ret < 0) 3019 if (ret < 0)
3018 goto out; 3020 goto out;
3019 3021
3020 ret = copy_to_user((void *)loi->inodes, (void *)inodes, size); 3022 ret = copy_to_user((void *)(unsigned long)loi->inodes,
3023 (void *)(unsigned long)inodes, size);
3021 if (ret) 3024 if (ret)
3022 ret = -EFAULT; 3025 ret = -EFAULT;
3023 3026
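
The copy_to_user() changes above all apply the same idiom: a user pointer stored in a u64 ioctl field is converted back through unsigned long rather than cast to a pointer directly, which would truncate or warn on 32-bit. A hedged userspace sketch of that round trip follows; the struct is hypothetical, not the real btrfs ioctl layout.

/* u64 <-> pointer round trip via unsigned long */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct fake_ioctl_args {
	uint64_t buf;	/* user pointer carried in a fixed-width field */
	uint64_t size;
};

int main(void)
{
	char storage[32];
	struct fake_ioctl_args args;

	args.buf = (uint64_t)(unsigned long)storage;	/* pointer -> u64 */
	args.size = sizeof(storage);

	/* u64 -> pointer: always via unsigned long, never a direct cast */
	char *dst = (char *)(unsigned long)args.buf;
	strncpy(dst, "hello", args.size);

	printf("%s\n", storage);
	return 0;
}
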
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f4190f22edf..c27bcb67f33 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
256 btrfs_release_path(swarn->path); 256 btrfs_release_path(swarn->path);
257 257
258 ipath = init_ipath(4096, local_root, swarn->path); 258 ipath = init_ipath(4096, local_root, swarn->path);
259 if (IS_ERR(ipath)) {
260 ret = PTR_ERR(ipath);
261 ipath = NULL;
262 goto err;
263 }
259 ret = paths_from_inode(inum, ipath); 264 ret = paths_from_inode(inum, ipath);
260 265
261 if (ret < 0) 266 if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
272 swarn->logical, swarn->dev->name, 277 swarn->logical, swarn->dev->name,
273 (unsigned long long)swarn->sector, root, inum, offset, 278 (unsigned long long)swarn->sector, root, inum, offset,
274 min(isize - offset, (u64)PAGE_SIZE), nlink, 279 min(isize - offset, (u64)PAGE_SIZE), nlink,
275 (char *)ipath->fspath->val[i]); 280 (char *)(unsigned long)ipath->fspath->val[i]);
276 281
277 free_ipath(ipath); 282 free_ipath(ipath);
278 return 0; 283 return 0;
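
The init_ipath() hunk above adds the usual error-pointer check before the result is used. Below is a small userspace sketch of that pattern, with ERR_PTR/IS_ERR/PTR_ERR re-implemented along the lines of the kernel's definitions and init_ipath() replaced by a stand-in that can fail; none of it is the real scrub code.

/* error-pointer check sketch */
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stands in for init_ipath(): fails when asked for an oversized buffer */
static void *init_ipath(size_t total_bytes)
{
	if (total_bytes > (1 << 20))
		return ERR_PTR(-ENOMEM);
	return malloc(total_bytes);
}

int main(void)
{
	void *ipath = init_ipath(1 << 30);

	if (IS_ERR(ipath)) {
		printf("init_ipath failed: %ld\n", PTR_ERR(ipath));
		ipath = NULL;	/* as in the fix: never free an error pointer */
	}
	free(ipath);		/* free(NULL) is a no-op */
	return 0;
}
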
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8bd9d6d0e07..e28ad4baf48 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -825,13 +825,9 @@ static char *setup_root_args(char *args)
825static struct dentry *mount_subvol(const char *subvol_name, int flags, 825static struct dentry *mount_subvol(const char *subvol_name, int flags,
826 const char *device_name, char *data) 826 const char *device_name, char *data)
827{ 827{
828 struct super_block *s;
829 struct dentry *root; 828 struct dentry *root;
830 struct vfsmount *mnt; 829 struct vfsmount *mnt;
831 struct mnt_namespace *ns_private;
832 char *newargs; 830 char *newargs;
833 struct path path;
834 int error;
835 831
836 newargs = setup_root_args(data); 832 newargs = setup_root_args(data);
837 if (!newargs) 833 if (!newargs)
@@ -842,39 +838,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
842 if (IS_ERR(mnt)) 838 if (IS_ERR(mnt))
843 return ERR_CAST(mnt); 839 return ERR_CAST(mnt);
844 840
845 ns_private = create_mnt_ns(mnt); 841 root = mount_subtree(mnt, subvol_name);
846 if (IS_ERR(ns_private)) {
847 mntput(mnt);
848 return ERR_CAST(ns_private);
849 }
850 842
851 /* 843 if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
852 * This will trigger the automount of the subvol so we can just 844 struct super_block *s = root->d_sb;
853 * drop the mnt we have here and return the dentry that we 845 dput(root);
854 * found. 846 root = ERR_PTR(-EINVAL);
855 */ 847 deactivate_locked_super(s);
856 error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
857 LOOKUP_FOLLOW, &path);
858 put_mnt_ns(ns_private);
859 if (error)
860 return ERR_PTR(error);
861
862 if (!is_subvolume_inode(path.dentry->d_inode)) {
863 path_put(&path);
864 mntput(mnt);
865 error = -EINVAL;
866 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n", 848 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
867 subvol_name); 849 subvol_name);
868 return ERR_PTR(-EINVAL);
869 } 850 }
870 851
871 /* Get a ref to the sb and the dentry we found and return it */
872 s = path.mnt->mnt_sb;
873 atomic_inc(&s->s_active);
874 root = dget(path.dentry);
875 path_put(&path);
876 down_write(&s->s_umount);
877
878 return root; 852 return root;
879} 853}
880 854
@@ -1083,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1083 int i = 0, nr_devices; 1057 int i = 0, nr_devices;
1084 int ret; 1058 int ret;
1085 1059
1086 nr_devices = fs_info->fs_devices->rw_devices; 1060 nr_devices = fs_info->fs_devices->open_devices;
1087 BUG_ON(!nr_devices); 1061 BUG_ON(!nr_devices);
1088 1062
1089 devices_info = kmalloc(sizeof(*devices_info) * nr_devices, 1063 devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1105,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1105 else 1079 else
1106 min_stripe_size = BTRFS_STRIPE_LEN; 1080 min_stripe_size = BTRFS_STRIPE_LEN;
1107 1081
1108 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 1082 list_for_each_entry(device, &fs_devices->devices, dev_list) {
1109 if (!device->in_fs_metadata) 1083 if (!device->in_fs_metadata || !device->bdev)
1110 continue; 1084 continue;
1111 1085
1112 avail_space = device->total_bytes - device->bytes_used; 1086 avail_space = device->total_bytes - device->bytes_used;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6a0574e923b..81376d94cd3 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
785 785
786 btrfs_save_ino_cache(root, trans); 786 btrfs_save_ino_cache(root, trans);
787 787
788 /* see comments in should_cow_block() */
789 root->force_cow = 0;
790 smp_wmb();
791
788 if (root->commit_root != root->node) { 792 if (root->commit_root != root->node) {
789 mutex_lock(&root->fs_commit_mutex); 793 mutex_lock(&root->fs_commit_mutex);
790 switch_commit_root(root); 794 switch_commit_root(root);
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
947 btrfs_tree_unlock(old); 951 btrfs_tree_unlock(old);
948 free_extent_buffer(old); 952 free_extent_buffer(old);
949 953
954 /* see comments in should_cow_block() */
955 root->force_cow = 1;
956 smp_wmb();
957
950 btrfs_set_root_node(new_root_item, tmp); 958 btrfs_set_root_node(new_root_item, tmp);
951 /* record when the snapshot was created in key.offset */ 959 /* record when the snapshot was created in key.offset */
952 key.offset = trans->transid; 960 key.offset = trans->transid;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f35..78f2d4d4f37 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device {
100 struct reada_zone *reada_curr_zone; 100 struct reada_zone *reada_curr_zone;
101 struct radix_tree_root reada_zones; 101 struct radix_tree_root reada_zones;
102 struct radix_tree_root reada_extents; 102 struct radix_tree_root reada_extents;
103
104 /* for sending down flush barriers */
105 struct bio *flush_bio;
106 struct completion flush_wait;
107 int nobarriers;
108
103}; 109};
104 110
105struct btrfs_fs_devices { 111struct btrfs_fs_devices {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 2abd0dfad7f..bca3948e9db 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1143,7 +1143,7 @@ static void ceph_d_prune(struct dentry *dentry)
1143{ 1143{
1144 struct ceph_dentry_info *di; 1144 struct ceph_dentry_info *di;
1145 1145
1146 dout("d_release %p\n", dentry); 1146 dout("ceph_d_prune %p\n", dentry);
1147 1147
1148 /* do we have a valid parent? */ 1148 /* do we have a valid parent? */
1149 if (!dentry->d_parent || IS_ROOT(dentry)) 1149 if (!dentry->d_parent || IS_ROOT(dentry))
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e392bfce84a..116f36502f1 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1328,12 +1328,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
1328 */ 1328 */
1329void ceph_queue_writeback(struct inode *inode) 1329void ceph_queue_writeback(struct inode *inode)
1330{ 1330{
1331 ihold(inode);
1331 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1332 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1332 &ceph_inode(inode)->i_wb_work)) { 1333 &ceph_inode(inode)->i_wb_work)) {
1333 dout("ceph_queue_writeback %p\n", inode); 1334 dout("ceph_queue_writeback %p\n", inode);
1334 ihold(inode);
1335 } else { 1335 } else {
1336 dout("ceph_queue_writeback %p failed\n", inode); 1336 dout("ceph_queue_writeback %p failed\n", inode);
1337 iput(inode);
1337 } 1338 }
1338} 1339}
1339 1340
@@ -1353,12 +1354,13 @@ static void ceph_writeback_work(struct work_struct *work)
1353 */ 1354 */
1354void ceph_queue_invalidate(struct inode *inode) 1355void ceph_queue_invalidate(struct inode *inode)
1355{ 1356{
1357 ihold(inode);
1356 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1358 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1357 &ceph_inode(inode)->i_pg_inv_work)) { 1359 &ceph_inode(inode)->i_pg_inv_work)) {
1358 dout("ceph_queue_invalidate %p\n", inode); 1360 dout("ceph_queue_invalidate %p\n", inode);
1359 ihold(inode);
1360 } else { 1361 } else {
1361 dout("ceph_queue_invalidate %p failed\n", inode); 1362 dout("ceph_queue_invalidate %p failed\n", inode);
1363 iput(inode);
1362 } 1364 }
1363} 1365}
1364 1366
@@ -1434,13 +1436,14 @@ void ceph_queue_vmtruncate(struct inode *inode)
1434{ 1436{
1435 struct ceph_inode_info *ci = ceph_inode(inode); 1437 struct ceph_inode_info *ci = ceph_inode(inode);
1436 1438
1439 ihold(inode);
1437 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1440 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1438 &ci->i_vmtruncate_work)) { 1441 &ci->i_vmtruncate_work)) {
1439 dout("ceph_queue_vmtruncate %p\n", inode); 1442 dout("ceph_queue_vmtruncate %p\n", inode);
1440 ihold(inode);
1441 } else { 1443 } else {
1442 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1444 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1443 inode, ci->i_truncate_pending); 1445 inode, ci->i_truncate_pending);
1446 iput(inode);
1444 } 1447 }
1445} 1448}
1446 1449
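
All three ceph queue helpers above move ihold() ahead of queue_work() and add an iput() on the failure branch, so the extra reference is held across the handoff and dropped only when the work was already pending. A userspace analogue of that ordering, with an invented refcount and a stand-in for queue_work() (obj_get/obj_put/try_queue are names made up for the sketch):

/* take the reference before the handoff, drop it only on failure */
#include <stdio.h>
#include <stdbool.h>

struct obj {
	int refcount;
};

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { o->refcount--; }

/* queue_work() returns false when the work item is already pending */
static bool try_queue(bool already_queued)
{
	return !already_queued;
}

static void queue_writeback(struct obj *o, bool already_queued)
{
	obj_get(o);			/* pin before the handoff */
	if (try_queue(already_queued)) {
		printf("queued, refcount=%d\n", o->refcount);
	} else {
		obj_put(o);		/* handoff failed: drop our pin */
		printf("not queued, refcount=%d\n", o->refcount);
	}
}

int main(void)
{
	struct obj inode = { .refcount = 1 };

	queue_writeback(&inode, false);	/* worker now owns one reference */
	queue_writeback(&inode, true);	/* already pending: pin dropped again */
	return 0;
}
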
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a90846fac75..8dc73a594a9 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
638 if (err == 0) { 638 if (err == 0) {
639 dout("open_root_inode success\n"); 639 dout("open_root_inode success\n");
640 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && 640 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
641 fsc->sb->s_root == NULL) 641 fsc->sb->s_root == NULL) {
642 root = d_alloc_root(req->r_target_inode); 642 root = d_alloc_root(req->r_target_inode);
643 else 643 ceph_init_dentry(root);
644 } else {
644 root = d_obtain_alias(req->r_target_inode); 645 root = d_obtain_alias(req->r_target_inode);
646 }
645 req->r_target_inode = NULL; 647 req->r_target_inode = NULL;
646 dout("open_root_inode success, root dentry is %p\n", root); 648 dout("open_root_inode success, root dentry is %p\n", root);
647 } else { 649 } else {
diff --git a/fs/dcache.c b/fs/dcache.c
index a901c6901bc..10ba92def3f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -36,6 +36,7 @@
36#include <linux/bit_spinlock.h> 36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h> 37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h> 38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
39#include "internal.h" 40#include "internal.h"
40 41
41/* 42/*
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2383 actual = __d_unalias(inode, dentry, alias); 2384 actual = __d_unalias(inode, dentry, alias);
2384 } 2385 }
2385 write_sequnlock(&rename_lock); 2386 write_sequnlock(&rename_lock);
2386 if (IS_ERR(actual)) 2387 if (IS_ERR(actual)) {
2388 if (PTR_ERR(actual) == -ELOOP)
2389 pr_warn_ratelimited(
2390 "VFS: Lookup of '%s' in %s %s"
2391 " would have caused loop\n",
2392 dentry->d_name.name,
2393 inode->i_sb->s_type->name,
2394 inode->i_sb->s_id);
2387 dput(alias); 2395 dput(alias);
2396 }
2388 goto out_nolock; 2397 goto out_nolock;
2389 } 2398 }
2390 } 2399 }
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 58609bde3b9..2a834255c75 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
967 967
968/** 968/**
969 * ecryptfs_new_file_context 969 * ecryptfs_new_file_context
970 * @ecryptfs_dentry: The eCryptfs dentry 970 * @ecryptfs_inode: The eCryptfs inode
971 * 971 *
972 * If the crypto context for the file has not yet been established, 972 * If the crypto context for the file has not yet been established,
973 * this is where we do that. Establishing a new crypto context 973 * this is where we do that. Establishing a new crypto context
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
984 * 984 *
985 * Returns zero on success; non-zero otherwise 985 * Returns zero on success; non-zero otherwise
986 */ 986 */
987int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry) 987int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
988{ 988{
989 struct ecryptfs_crypt_stat *crypt_stat = 989 struct ecryptfs_crypt_stat *crypt_stat =
990 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 990 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
991 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = 991 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
992 &ecryptfs_superblock_to_private( 992 &ecryptfs_superblock_to_private(
993 ecryptfs_dentry->d_sb)->mount_crypt_stat; 993 ecryptfs_inode->i_sb)->mount_crypt_stat;
994 int cipher_name_len; 994 int cipher_name_len;
995 int rc = 0; 995 int rc = 0;
996 996
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
1299} 1299}
1300 1300
1301static int 1301static int
1302ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry, 1302ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
1303 char *virt, size_t virt_len) 1303 char *virt, size_t virt_len)
1304{ 1304{
1305 int rc; 1305 int rc;
1306 1306
1307 rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, 1307 rc = ecryptfs_write_lower(ecryptfs_inode, virt,
1308 0, virt_len); 1308 0, virt_len);
1309 if (rc < 0) 1309 if (rc < 0)
1310 printk(KERN_ERR "%s: Error attempting to write header " 1310 printk(KERN_ERR "%s: Error attempting to write header "
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
1338 1338
1339/** 1339/**
1340 * ecryptfs_write_metadata 1340 * ecryptfs_write_metadata
1341 * @ecryptfs_dentry: The eCryptfs dentry 1341 * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
1342 * @ecryptfs_inode: The newly created eCryptfs inode
1342 * 1343 *
1343 * Write the file headers out. This will likely involve a userspace 1344 * Write the file headers out. This will likely involve a userspace
1344 * callout, in which the session key is encrypted with one or more 1345 * callout, in which the session key is encrypted with one or more
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
1348 * 1349 *
1349 * Returns zero on success; non-zero on error 1350 * Returns zero on success; non-zero on error
1350 */ 1351 */
1351int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) 1352int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
1353 struct inode *ecryptfs_inode)
1352{ 1354{
1353 struct ecryptfs_crypt_stat *crypt_stat = 1355 struct ecryptfs_crypt_stat *crypt_stat =
1354 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 1356 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
1355 unsigned int order; 1357 unsigned int order;
1356 char *virt; 1358 char *virt;
1357 size_t virt_len; 1359 size_t virt_len;
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1391 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, 1393 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
1392 size); 1394 size);
1393 else 1395 else
1394 rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt, 1396 rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
1395 virt_len); 1397 virt_len);
1396 if (rc) { 1398 if (rc) {
1397 printk(KERN_ERR "%s: Error writing metadata out to lower file; " 1399 printk(KERN_ERR "%s: Error writing metadata out to lower file; "
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
1943 1945
1944/* We could either offset on every reverse map or just pad some 0x00's 1946/* We could either offset on every reverse map or just pad some 0x00's
1945 * at the front here */ 1947 * at the front here */
1946static const unsigned char filename_rev_map[] = { 1948static const unsigned char filename_rev_map[256] = {
1947 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ 1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
1948 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ 1950 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ 1951 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
1959 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ 1961 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
1960 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ 1962 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
1961 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ 1963 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
1962 0x3D, 0x3E, 0x3F 1964 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
1963}; 1965};
1964 1966
1965/** 1967/**
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 54481a3b2c7..a9f29b12fbf 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
584int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); 584int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
585int ecryptfs_encrypt_page(struct page *page); 585int ecryptfs_encrypt_page(struct page *page);
586int ecryptfs_decrypt_page(struct page *page); 586int ecryptfs_decrypt_page(struct page *page);
587int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); 587int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
588 struct inode *ecryptfs_inode);
588int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); 589int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
589int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); 590int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
590void ecryptfs_write_crypt_stat_flags(char *page_virt, 591void ecryptfs_write_crypt_stat_flags(char *page_virt,
591 struct ecryptfs_crypt_stat *crypt_stat, 592 struct ecryptfs_crypt_stat *crypt_stat,
592 size_t *written); 593 size_t *written);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c6ac98cf9ba..d3f95f941c4 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -139,6 +139,27 @@ out:
139 return rc; 139 return rc;
140} 140}
141 141
142static void ecryptfs_vma_close(struct vm_area_struct *vma)
143{
144 filemap_write_and_wait(vma->vm_file->f_mapping);
145}
146
147static const struct vm_operations_struct ecryptfs_file_vm_ops = {
148 .close = ecryptfs_vma_close,
149 .fault = filemap_fault,
150};
151
152static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
153{
154 int rc;
155
156 rc = generic_file_mmap(file, vma);
157 if (!rc)
158 vma->vm_ops = &ecryptfs_file_vm_ops;
159
160 return rc;
161}
162
142struct kmem_cache *ecryptfs_file_info_cache; 163struct kmem_cache *ecryptfs_file_info_cache;
143 164
144/** 165/**
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
349#ifdef CONFIG_COMPAT 370#ifdef CONFIG_COMPAT
350 .compat_ioctl = ecryptfs_compat_ioctl, 371 .compat_ioctl = ecryptfs_compat_ioctl,
351#endif 372#endif
352 .mmap = generic_file_mmap, 373 .mmap = ecryptfs_file_mmap,
353 .open = ecryptfs_open, 374 .open = ecryptfs_open,
354 .flush = ecryptfs_flush, 375 .flush = ecryptfs_flush,
355 .release = ecryptfs_release, 376 .release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a36d327f152..32f90a3ae63 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
172 * it. It will also update the eCryptfs directory inode to mimic the 172 * it. It will also update the eCryptfs directory inode to mimic the
173 * stat of the lower directory inode. 173 * stat of the lower directory inode.
174 * 174 *
175 * Returns zero on success; non-zero on error condition 175 * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
176 */ 176 */
177static int 177static struct inode *
178ecryptfs_do_create(struct inode *directory_inode, 178ecryptfs_do_create(struct inode *directory_inode,
179 struct dentry *ecryptfs_dentry, int mode) 179 struct dentry *ecryptfs_dentry, int mode)
180{ 180{
181 int rc; 181 int rc;
182 struct dentry *lower_dentry; 182 struct dentry *lower_dentry;
183 struct dentry *lower_dir_dentry; 183 struct dentry *lower_dir_dentry;
184 struct inode *inode;
184 185
185 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 186 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
186 lower_dir_dentry = lock_parent(lower_dentry); 187 lower_dir_dentry = lock_parent(lower_dentry);
187 if (IS_ERR(lower_dir_dentry)) { 188 if (IS_ERR(lower_dir_dentry)) {
188 ecryptfs_printk(KERN_ERR, "Error locking directory of " 189 ecryptfs_printk(KERN_ERR, "Error locking directory of "
189 "dentry\n"); 190 "dentry\n");
190 rc = PTR_ERR(lower_dir_dentry); 191 inode = ERR_CAST(lower_dir_dentry);
191 goto out; 192 goto out;
192 } 193 }
193 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, 194 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
195 if (rc) { 196 if (rc) {
196 printk(KERN_ERR "%s: Failure to create dentry in lower fs; " 197 printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
197 "rc = [%d]\n", __func__, rc); 198 "rc = [%d]\n", __func__, rc);
199 inode = ERR_PTR(rc);
198 goto out_lock; 200 goto out_lock;
199 } 201 }
200 rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, 202 inode = __ecryptfs_get_inode(lower_dentry->d_inode,
201 directory_inode->i_sb); 203 directory_inode->i_sb);
202 if (rc) { 204 if (IS_ERR(inode))
203 ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
204 goto out_lock; 205 goto out_lock;
205 }
206 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); 206 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
207 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); 207 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
208out_lock: 208out_lock:
209 unlock_dir(lower_dir_dentry); 209 unlock_dir(lower_dir_dentry);
210out: 210out:
211 return rc; 211 return inode;
212} 212}
213 213
214/** 214/**
@@ -219,26 +219,26 @@ out:
219 * 219 *
220 * Returns zero on success 220 * Returns zero on success
221 */ 221 */
222static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) 222static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
223 struct inode *ecryptfs_inode)
223{ 224{
224 struct ecryptfs_crypt_stat *crypt_stat = 225 struct ecryptfs_crypt_stat *crypt_stat =
225 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 226 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
226 int rc = 0; 227 int rc = 0;
227 228
228 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { 229 if (S_ISDIR(ecryptfs_inode->i_mode)) {
229 ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); 230 ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
230 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 231 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
231 goto out; 232 goto out;
232 } 233 }
233 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); 234 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
234 rc = ecryptfs_new_file_context(ecryptfs_dentry); 235 rc = ecryptfs_new_file_context(ecryptfs_inode);
235 if (rc) { 236 if (rc) {
236 ecryptfs_printk(KERN_ERR, "Error creating new file " 237 ecryptfs_printk(KERN_ERR, "Error creating new file "
237 "context; rc = [%d]\n", rc); 238 "context; rc = [%d]\n", rc);
238 goto out; 239 goto out;
239 } 240 }
240 rc = ecryptfs_get_lower_file(ecryptfs_dentry, 241 rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
241 ecryptfs_dentry->d_inode);
242 if (rc) { 242 if (rc) {
243 printk(KERN_ERR "%s: Error attempting to initialize " 243 printk(KERN_ERR "%s: Error attempting to initialize "
244 "the lower file for the dentry with name " 244 "the lower file for the dentry with name "
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
246 ecryptfs_dentry->d_name.name, rc); 246 ecryptfs_dentry->d_name.name, rc);
247 goto out; 247 goto out;
248 } 248 }
249 rc = ecryptfs_write_metadata(ecryptfs_dentry); 249 rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
250 if (rc) 250 if (rc)
251 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 251 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
252 ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); 252 ecryptfs_put_lower_file(ecryptfs_inode);
253out: 253out:
254 return rc; 254 return rc;
255} 255}
@@ -269,18 +269,28 @@ static int
269ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, 269ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
270 int mode, struct nameidata *nd) 270 int mode, struct nameidata *nd)
271{ 271{
272 struct inode *ecryptfs_inode;
272 int rc; 273 int rc;
273 274
274 /* ecryptfs_do_create() calls ecryptfs_interpose() */ 275 ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
275 rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); 276 mode);
276 if (unlikely(rc)) { 277 if (unlikely(IS_ERR(ecryptfs_inode))) {
277 ecryptfs_printk(KERN_WARNING, "Failed to create file in" 278 ecryptfs_printk(KERN_WARNING, "Failed to create file in"
278 "lower filesystem\n"); 279 "lower filesystem\n");
280 rc = PTR_ERR(ecryptfs_inode);
279 goto out; 281 goto out;
280 } 282 }
281 /* At this point, a file exists on "disk"; we need to make sure 283 /* At this point, a file exists on "disk"; we need to make sure
282 * that this on disk file is prepared to be an ecryptfs file */ 284 * that this on disk file is prepared to be an ecryptfs file */
283 rc = ecryptfs_initialize_file(ecryptfs_dentry); 285 rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
286 if (rc) {
287 drop_nlink(ecryptfs_inode);
288 unlock_new_inode(ecryptfs_inode);
289 iput(ecryptfs_inode);
290 goto out;
291 }
292 d_instantiate(ecryptfs_dentry, ecryptfs_inode);
293 unlock_new_inode(ecryptfs_inode);
284out: 294out:
285 return rc; 295 return rc;
286} 296}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f6dba4505f1..12ccacda44e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
565 brelse(bitmap_bh); 565 brelse(bitmap_bh);
566 printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" 566 printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
567 ", computed = %llu, %llu\n", 567 ", computed = %llu, %llu\n",
568 EXT4_B2C(sbi, ext4_free_blocks_count(es)), 568 EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
569 desc_count, bitmap_count); 569 desc_count, bitmap_count);
570 return bitmap_count; 570 return bitmap_count;
571#else 571#else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 240f6e2dc7e..848f436df29 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2270,6 +2270,7 @@ retry:
2270 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2270 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2271 "%ld pages, ino %lu; err %d", __func__, 2271 "%ld pages, ino %lu; err %d", __func__,
2272 wbc->nr_to_write, inode->i_ino, ret); 2272 wbc->nr_to_write, inode->i_ino, ret);
2273 blk_finish_plug(&plug);
2273 goto out_writepages; 2274 goto out_writepages;
2274 } 2275 }
2275 2276
@@ -2806,8 +2807,8 @@ out:
2806 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 2807 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2807 2808
2808 /* queue the work to convert unwritten extents to written */ 2809 /* queue the work to convert unwritten extents to written */
2809 queue_work(wq, &io_end->work);
2810 iocb->private = NULL; 2810 iocb->private = NULL;
2811 queue_work(wq, &io_end->work);
2811 2812
2812 /* XXX: probably should move into the real I/O completion handler */ 2813 /* XXX: probably should move into the real I/O completion handler */
2813 inode_dio_done(inode); 2814 inode_dio_done(inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9953d80145a..3858767ec67 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1683,7 +1683,9 @@ static int parse_options(char *options, struct super_block *sb,
1683 data_opt = EXT4_MOUNT_WRITEBACK_DATA; 1683 data_opt = EXT4_MOUNT_WRITEBACK_DATA;
1684 datacheck: 1684 datacheck:
1685 if (is_remount) { 1685 if (is_remount) {
1686 if (test_opt(sb, DATA_FLAGS) != data_opt) { 1686 if (!sbi->s_journal)
1687 ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
1688 else if (test_opt(sb, DATA_FLAGS) != data_opt) {
1687 ext4_msg(sb, KERN_ERR, 1689 ext4_msg(sb, KERN_ERR,
1688 "Cannot change data mode on remount"); 1690 "Cannot change data mode on remount");
1689 return 0; 1691 return 0;
@@ -3099,8 +3101,6 @@ static void ext4_destroy_lazyinit_thread(void)
3099} 3101}
3100 3102
3101static int ext4_fill_super(struct super_block *sb, void *data, int silent) 3103static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3102 __releases(kernel_lock)
3103 __acquires(kernel_lock)
3104{ 3104{
3105 char *orig_data = kstrdup(data, GFP_KERNEL); 3105 char *orig_data = kstrdup(data, GFP_KERNEL);
3106 struct buffer_head *bh; 3106 struct buffer_head *bh;
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 3f32bcb0d9b..ef175cb8cfd 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -16,38 +16,26 @@
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18 18
19static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
20
21static DEFINE_SPINLOCK(bitmap_lock); 19static DEFINE_SPINLOCK(bitmap_lock);
22 20
23static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) 21/*
22 * bitmap consists of blocks filled with 16bit words
23 * bit set == busy, bit clear == free
24 * endianness is a mess, but for counting zero bits it really doesn't matter...
25 */
26static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
24{ 27{
25 unsigned i, j, sum = 0; 28 __u32 sum = 0;
26 struct buffer_head *bh; 29 unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);
27
28 for (i=0; i<numblocks-1; i++) {
29 if (!(bh=map[i]))
30 return(0);
31 for (j=0; j<bh->b_size; j++)
32 sum += nibblemap[bh->b_data[j] & 0xf]
33 + nibblemap[(bh->b_data[j]>>4) & 0xf];
34 }
35 30
36 if (numblocks==0 || !(bh=map[numblocks-1])) 31 while (blocks--) {
37 return(0); 32 unsigned words = blocksize / 2;
38 i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2; 33 __u16 *p = (__u16 *)(*map++)->b_data;
39 for (j=0; j<i; j++) { 34 while (words--)
40 sum += nibblemap[bh->b_data[j] & 0xf] 35 sum += 16 - hweight16(*p++);
41 + nibblemap[(bh->b_data[j]>>4) & 0xf];
42 } 36 }
43 37
44 i = numbits%16; 38 return sum;
45 if (i!=0) {
46 i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1);
47 sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf];
48 sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf];
49 }
50 return(sum);
51} 39}
52 40
53void minix_free_block(struct inode *inode, unsigned long block) 41void minix_free_block(struct inode *inode, unsigned long block)
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode)
105 return 0; 93 return 0;
106} 94}
107 95
108unsigned long minix_count_free_blocks(struct minix_sb_info *sbi) 96unsigned long minix_count_free_blocks(struct super_block *sb)
109{ 97{
110 return (count_free(sbi->s_zmap, sbi->s_zmap_blocks, 98 struct minix_sb_info *sbi = minix_sb(sb);
111 sbi->s_nzones - sbi->s_firstdatazone + 1) 99 u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
100
101 return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
112 << sbi->s_log_zone_size); 102 << sbi->s_log_zone_size);
113} 103}
114 104
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
273 return inode; 263 return inode;
274} 264}
275 265
276unsigned long minix_count_free_inodes(struct minix_sb_info *sbi) 266unsigned long minix_count_free_inodes(struct super_block *sb)
277{ 267{
278 return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1); 268 struct minix_sb_info *sbi = minix_sb(sb);
269 u32 bits = sbi->s_ninodes + 1;
270
271 return count_free(sbi->s_imap, sb->s_blocksize, bits);
279} 272}
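
The rewritten count_free() above counts free bits as 16 minus the population count of each 16-bit bitmap word. A standalone sketch of that calculation, with hweight16() emulated via __builtin_popcount() and a made-up bitmap block:

/* free-bit counting over 16-bit words */
#include <stdio.h>
#include <stdint.h>

static unsigned int hweight16(uint16_t w)
{
	return (unsigned int)__builtin_popcount(w);
}

int main(void)
{
	/* one "block" of bitmap data: bit set == busy, bit clear == free */
	uint16_t bitmap[] = { 0xffff, 0x00ff, 0x0001, 0x0000 };
	unsigned int words = sizeof(bitmap) / sizeof(bitmap[0]);
	uint32_t sum = 0;

	for (unsigned int i = 0; i < words; i++)
		sum += 16 - hweight16(bitmap[i]);

	/* 0 + 8 + 15 + 16 = 39 free bits */
	printf("free bits: %u\n", sum);
	return 0;
}
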
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 64cdcd662ff..1d9e33966db 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
279 else if (sbi->s_mount_state & MINIX_ERROR_FS) 279 else if (sbi->s_mount_state & MINIX_ERROR_FS)
280 printk("MINIX-fs: mounting file system with errors, " 280 printk("MINIX-fs: mounting file system with errors, "
281 "running fsck is recommended\n"); 281 "running fsck is recommended\n");
282
283 /* Apparently minix can create filesystems that allocate more blocks for
284 * the bitmaps than needed. We simply ignore that, but verify it didn't
285 * create one with not enough blocks and bail out if so.
286 */
287 block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
288 if (sbi->s_imap_blocks < block) {
289 printk("MINIX-fs: file system does not have enough "
290 "imap blocks allocated. Refusing to mount\n");
291 goto out_iput;
292 }
293
294 block = minix_blocks_needed(
295 (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
296 s->s_blocksize);
297 if (sbi->s_zmap_blocks < block) {
298 printk("MINIX-fs: file system does not have enough "
299 "zmap blocks allocated. Refusing to mount.\n");
300 goto out_iput;
301 }
302
282 return 0; 303 return 0;
283 304
284out_iput: 305out_iput:
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
339 buf->f_type = sb->s_magic; 360 buf->f_type = sb->s_magic;
340 buf->f_bsize = sb->s_blocksize; 361 buf->f_bsize = sb->s_blocksize;
341 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; 362 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
342 buf->f_bfree = minix_count_free_blocks(sbi); 363 buf->f_bfree = minix_count_free_blocks(sb);
343 buf->f_bavail = buf->f_bfree; 364 buf->f_bavail = buf->f_bfree;
344 buf->f_files = sbi->s_ninodes; 365 buf->f_files = sbi->s_ninodes;
345 buf->f_ffree = minix_count_free_inodes(sbi); 366 buf->f_ffree = minix_count_free_inodes(sb);
346 buf->f_namelen = sbi->s_namelen; 367 buf->f_namelen = sbi->s_namelen;
347 buf->f_fsid.val[0] = (u32)id; 368 buf->f_fsid.val[0] = (u32)id;
348 buf->f_fsid.val[1] = (u32)(id >> 32); 369 buf->f_fsid.val[1] = (u32)(id >> 32);
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 341e2122879..26bbd55e82e 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
48extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); 48extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
49extern struct inode * minix_new_inode(const struct inode *, int, int *); 49extern struct inode * minix_new_inode(const struct inode *, int, int *);
50extern void minix_free_inode(struct inode * inode); 50extern void minix_free_inode(struct inode * inode);
51extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); 51extern unsigned long minix_count_free_inodes(struct super_block *sb);
52extern int minix_new_block(struct inode * inode); 52extern int minix_new_block(struct inode * inode);
53extern void minix_free_block(struct inode *inode, unsigned long block); 53extern void minix_free_block(struct inode *inode, unsigned long block);
54extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); 54extern unsigned long minix_count_free_blocks(struct super_block *sb);
55extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); 55extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
56extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); 56extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
57 57
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
88 return list_entry(inode, struct minix_inode_info, vfs_inode); 88 return list_entry(inode, struct minix_inode_info, vfs_inode);
89} 89}
90 90
91static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
92{
93 return DIV_ROUND_UP(bits, blocksize * 8);
94}
95
91#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ 96#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
92 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) 97 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
93 98
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
125 if (!size) 130 if (!size)
126 return 0; 131 return 0;
127 132
128 size = (size >> 4) + ((size & 15) > 0); 133 size >>= 4;
129 while (*p++ == 0xffff) { 134 while (*p++ == 0xffff) {
130 if (--size == 0) 135 if (--size == 0)
131 return (p - addr) << 4; 136 return (p - addr) << 4;
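
The minix_blocks_needed() helper added above is plain ceiling division: one block of blocksize bytes covers blocksize * 8 bitmap bits. A standalone copy, with DIV_ROUND_UP defined locally so it compiles outside the kernel tree:

/* bitmap blocks needed for a given bit count */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
{
	return DIV_ROUND_UP(bits, blocksize * 8);
}

int main(void)
{
	/* 1024-byte blocks hold 8192 bits each */
	printf("%u\n", minix_blocks_needed(8192, 1024));   /* 1 */
	printf("%u\n", minix_blocks_needed(8193, 1024));   /* 2 */
	printf("%u\n", minix_blocks_needed(30000, 1024));  /* 4 */
	return 0;
}
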
diff --git a/fs/namespace.c b/fs/namespace.c
index e5e1c7d1839..6d3a1963879 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2483,11 +2483,43 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
2483 __mnt_make_longterm(mnt); 2483 __mnt_make_longterm(mnt);
2484 new_ns->root = mnt; 2484 new_ns->root = mnt;
2485 list_add(&new_ns->list, &new_ns->root->mnt_list); 2485 list_add(&new_ns->list, &new_ns->root->mnt_list);
2486 } else {
2487 mntput(mnt);
2486 } 2488 }
2487 return new_ns; 2489 return new_ns;
2488} 2490}
2489EXPORT_SYMBOL(create_mnt_ns); 2491EXPORT_SYMBOL(create_mnt_ns);
2490 2492
2493struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2494{
2495 struct mnt_namespace *ns;
2496 struct super_block *s;
2497 struct path path;
2498 int err;
2499
2500 ns = create_mnt_ns(mnt);
2501 if (IS_ERR(ns))
2502 return ERR_CAST(ns);
2503
2504 err = vfs_path_lookup(mnt->mnt_root, mnt,
2505 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2506
2507 put_mnt_ns(ns);
2508
2509 if (err)
2510 return ERR_PTR(err);
2511
2512 /* trade a vfsmount reference for active sb one */
2513 s = path.mnt->mnt_sb;
2514 atomic_inc(&s->s_active);
2515 mntput(path.mnt);
2516 /* lock the sucker */
2517 down_write(&s->s_umount);
2518 /* ... and return the root of (sub)tree on it */
2519 return path.dentry;
2520}
2521EXPORT_SYMBOL(mount_subtree);
2522
2491SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 2523SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2492 char __user *, type, unsigned long, flags, void __user *, data) 2524 char __user *, type, unsigned long, flags, void __user *, data)
2493{ 2525{
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b238d95ac48..ac289909814 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1468 res = NULL; 1468 res = NULL;
1469 goto out; 1469 goto out;
1470 /* This turned out not to be a regular file */ 1470 /* This turned out not to be a regular file */
1471 case -EISDIR:
1471 case -ENOTDIR: 1472 case -ENOTDIR:
1472 goto no_open; 1473 goto no_open;
1473 case -ELOOP: 1474 case -ELOOP:
1474 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1475 if (!(nd->intent.open.flags & O_NOFOLLOW))
1475 goto no_open; 1476 goto no_open;
1476 /* case -EISDIR: */
1477 /* case -EINVAL: */ 1477 /* case -EINVAL: */
1478 default: 1478 default:
1479 res = ERR_CAST(inode); 1479 res = ERR_CAST(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0a1f8312b4d..eca56d4b39c 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -40,48 +40,8 @@
40 40
41#define NFSDBG_FACILITY NFSDBG_FILE 41#define NFSDBG_FACILITY NFSDBG_FILE
42 42
43static int nfs_file_open(struct inode *, struct file *);
44static int nfs_file_release(struct inode *, struct file *);
45static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
46static int nfs_file_mmap(struct file *, struct vm_area_struct *);
47static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
48 struct pipe_inode_info *pipe,
49 size_t count, unsigned int flags);
50static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
51 unsigned long nr_segs, loff_t pos);
52static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
53 struct file *filp, loff_t *ppos,
54 size_t count, unsigned int flags);
55static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
56 unsigned long nr_segs, loff_t pos);
57static int nfs_file_flush(struct file *, fl_owner_t id);
58static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync);
59static int nfs_check_flags(int flags);
60static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
61static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
62static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
63
64static const struct vm_operations_struct nfs_file_vm_ops; 43static const struct vm_operations_struct nfs_file_vm_ops;
65 44
66const struct file_operations nfs_file_operations = {
67 .llseek = nfs_file_llseek,
68 .read = do_sync_read,
69 .write = do_sync_write,
70 .aio_read = nfs_file_read,
71 .aio_write = nfs_file_write,
72 .mmap = nfs_file_mmap,
73 .open = nfs_file_open,
74 .flush = nfs_file_flush,
75 .release = nfs_file_release,
76 .fsync = nfs_file_fsync,
77 .lock = nfs_lock,
78 .flock = nfs_flock,
79 .splice_read = nfs_file_splice_read,
80 .splice_write = nfs_file_splice_write,
81 .check_flags = nfs_check_flags,
82 .setlease = nfs_setlease,
83};
84
85const struct inode_operations nfs_file_inode_operations = { 45const struct inode_operations nfs_file_inode_operations = {
86 .permission = nfs_permission, 46 .permission = nfs_permission,
87 .getattr = nfs_getattr, 47 .getattr = nfs_getattr,
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
886 file->f_path.dentry->d_name.name, arg); 846 file->f_path.dentry->d_name.name, arg);
887 return -EINVAL; 847 return -EINVAL;
888} 848}
849
850const struct file_operations nfs_file_operations = {
851 .llseek = nfs_file_llseek,
852 .read = do_sync_read,
853 .write = do_sync_write,
854 .aio_read = nfs_file_read,
855 .aio_write = nfs_file_write,
856 .mmap = nfs_file_mmap,
857 .open = nfs_file_open,
858 .flush = nfs_file_flush,
859 .release = nfs_file_release,
860 .fsync = nfs_file_fsync,
861 .lock = nfs_lock,
862 .flock = nfs_flock,
863 .splice_read = nfs_file_splice_read,
864 .splice_write = nfs_file_splice_write,
865 .check_flags = nfs_check_flags,
866 .setlease = nfs_setlease,
867};
868
869#ifdef CONFIG_NFS_V4
870static int
871nfs4_file_open(struct inode *inode, struct file *filp)
872{
873 /*
874 * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
875 * this point, then something is very wrong
876 */
877 dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
878 return -ENOTDIR;
879}
880
881const struct file_operations nfs4_file_operations = {
882 .llseek = nfs_file_llseek,
883 .read = do_sync_read,
884 .write = do_sync_write,
885 .aio_read = nfs_file_read,
886 .aio_write = nfs_file_write,
887 .mmap = nfs_file_mmap,
888 .open = nfs4_file_open,
889 .flush = nfs_file_flush,
890 .release = nfs_file_release,
891 .fsync = nfs_file_fsync,
892 .lock = nfs_lock,
893 .flock = nfs_flock,
894 .splice_read = nfs_file_splice_read,
895 .splice_write = nfs_file_splice_write,
896 .check_flags = nfs_check_flags,
897 .setlease = nfs_setlease,
898};
899#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c07a55aec83..50a15fa8cf9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
291 */ 291 */
292 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; 292 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
293 if (S_ISREG(inode->i_mode)) { 293 if (S_ISREG(inode->i_mode)) {
294 inode->i_fop = &nfs_file_operations; 294 inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
295 inode->i_data.a_ops = &nfs_file_aops; 295 inode->i_data.a_ops = &nfs_file_aops;
296 inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; 296 inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
297 } else if (S_ISDIR(inode->i_mode)) { 297 } else if (S_ISDIR(inode->i_mode)) {
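[Editorial sketch] The hunk above replaces the hard-coded &nfs_file_operations with a per-version table reached through rpc_ops, so v2/v3 and v4 inodes can install different file_operations. A hedged userspace model of that indirection (structures reduced to stand-ins; only the selection logic is the point):

    #include <stdio.h>

    /* Simplified stand-ins for file_operations and nfs_rpc_ops. */
    struct file_ops { const char *name; };
    struct rpc_ops  { const struct file_ops *file_ops; };

    static const struct file_ops v3_file_ops = { "nfs_file_operations"  };
    static const struct file_ops v4_file_ops = { "nfs4_file_operations" };

    static const struct rpc_ops v3_clientops = { .file_ops = &v3_file_ops };
    static const struct rpc_ops v4_clientops = { .file_ops = &v4_file_ops };

    /* Modelled after nfs_fhget(): pick i_fop through the per-version ops table. */
    static const struct file_ops *pick_file_ops(const struct rpc_ops *ops)
    {
        return ops->file_ops;            /* was: always &nfs_file_operations */
    }

    int main(void)
    {
        printf("v3 regular files get %s\n", pick_file_ops(&v3_clientops)->name);
        printf("v4 regular files get %s\n", pick_file_ops(&v4_clientops)->name);
        return 0;
    }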
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index c1a1bd8ddf1..3f4d95751d5 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
299extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, 299extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
300 struct list_head *head); 300 struct list_head *head);
301 301
302extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
303 struct inode *inode);
302extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 304extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
303extern void nfs_readdata_release(struct nfs_read_data *rdata); 305extern void nfs_readdata_release(struct nfs_read_data *rdata);
304 306
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 85f1690ca08..d4bc9ed9174 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
853 .dentry_ops = &nfs_dentry_operations, 853 .dentry_ops = &nfs_dentry_operations,
854 .dir_inode_ops = &nfs3_dir_inode_operations, 854 .dir_inode_ops = &nfs3_dir_inode_operations,
855 .file_inode_ops = &nfs3_file_inode_operations, 855 .file_inode_ops = &nfs3_file_inode_operations,
856 .file_ops = &nfs_file_operations,
856 .getroot = nfs3_proc_get_root, 857 .getroot = nfs3_proc_get_root,
857 .getattr = nfs3_proc_getattr, 858 .getattr = nfs3_proc_getattr,
858 .setattr = nfs3_proc_setattr, 859 .setattr = nfs3_proc_setattr,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b60fddf606f..be2bbac1381 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2464,8 +2464,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
2464 case -NFS4ERR_BADNAME: 2464 case -NFS4ERR_BADNAME:
2465 return -ENOENT; 2465 return -ENOENT;
2466 case -NFS4ERR_MOVED: 2466 case -NFS4ERR_MOVED:
2467 err = nfs4_get_referral(dir, name, fattr, fhandle);
2468 break;
2467 return nfs4_get_referral(dir, name, fattr, fhandle);
2469 case -NFS4ERR_WRONGSEC: 2468 case -NFS4ERR_WRONGSEC:
2470 nfs_fixup_secinfo_attributes(fattr, fhandle); 2469 nfs_fixup_secinfo_attributes(fattr, fhandle);
2471 } 2470 }
@@ -6253,6 +6252,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
6253 .dentry_ops = &nfs4_dentry_operations, 6252 .dentry_ops = &nfs4_dentry_operations,
6254 .dir_inode_ops = &nfs4_dir_inode_operations, 6253 .dir_inode_ops = &nfs4_dir_inode_operations,
6255 .file_inode_ops = &nfs4_file_inode_operations, 6254 .file_inode_ops = &nfs4_file_inode_operations,
6255 .file_ops = &nfs4_file_operations,
6256 .getroot = nfs4_proc_get_root, 6256 .getroot = nfs4_proc_get_root,
6257 .getattr = nfs4_proc_getattr, 6257 .getattr = nfs4_proc_getattr,
6258 .setattr = nfs4_proc_setattr, 6258 .setattr = nfs4_proc_setattr,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index baf73536bc0..8e672a2b2d6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1260} 1260}
1261EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); 1261EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1262 1262
1263static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1264{
1265 struct nfs_pageio_descriptor pgio;
1266
1267 put_lseg(data->lseg);
1268 data->lseg = NULL;
1269 dprintk("pnfs write error = %d\n", data->pnfs_error);
1270
1271 nfs_pageio_init_read_mds(&pgio, data->inode);
1272
1273 while (!list_empty(&data->pages)) {
1274 struct nfs_page *req = nfs_list_entry(data->pages.next);
1275
1276 nfs_list_remove_request(req);
1277 nfs_pageio_add_request(&pgio, req);
1278 }
1279 nfs_pageio_complete(&pgio);
1280}
1281
1263/* 1282/*
1264 * Called by non rpc-based layout drivers 1283 * Called by non rpc-based layout drivers
1265 */ 1284 */
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data)
1268 if (likely(!data->pnfs_error)) { 1287 if (likely(!data->pnfs_error)) {
1269 __nfs4_read_done_cb(data); 1288 __nfs4_read_done_cb(data);
1270 data->mds_ops->rpc_call_done(&data->task, data); 1289 data->mds_ops->rpc_call_done(&data->task, data);
1271 } else {
1272 put_lseg(data->lseg);
1273 data->lseg = NULL;
1274 dprintk("pnfs write error = %d\n", data->pnfs_error);
1275 }
1290 } else
1291 pnfs_ld_handle_read_error(data);
1276 data->mds_ops->rpc_release(data); 1292 data->mds_ops->rpc_release(data);
1277} 1293}
1278EXPORT_SYMBOL_GPL(pnfs_ld_read_done); 1294EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
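[Editorial sketch] On a pNFS read error the new pnfs_ld_handle_read_error() drops the layout segment and requeues every page of the failed request through the MDS read path. A rough userspace model of "drain the failed list into a fresh descriptor and resubmit" (the pgio struct and page list are toy stand-ins):

    #include <stdio.h>

    #define NPAGES 4

    /* Toy "pageio descriptor": just counts what gets requeued. */
    struct pgio { int queued; };

    static void pgio_add(struct pgio *p, int page)
    {
        p->queued++;
        printf("requeue page %d via MDS\n", page);
    }

    static void pgio_complete(struct pgio *p)
    {
        printf("resubmitted %d pages\n", p->queued);
    }

    /* Modelled after pnfs_ld_handle_read_error(): forget the layout, resend through MDS. */
    static void handle_read_error(int pages[], int n)
    {
        struct pgio pgio = { 0 };
        for (int i = 0; i < n; i++)
            pgio_add(&pgio, pages[i]);
        pgio_complete(&pgio);
    }

    int main(void)
    {
        int pages[NPAGES] = { 10, 11, 12, 13 };
        handle_read_error(pages, NPAGES);
        return 0;
    }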
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index ac40b8535d7..f48125da198 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
710 .dentry_ops = &nfs_dentry_operations, 710 .dentry_ops = &nfs_dentry_operations,
711 .dir_inode_ops = &nfs_dir_inode_operations, 711 .dir_inode_ops = &nfs_dir_inode_operations,
712 .file_inode_ops = &nfs_file_inode_operations, 712 .file_inode_ops = &nfs_file_inode_operations,
713 .file_ops = &nfs_file_operations,
713 .getroot = nfs_proc_get_root, 714 .getroot = nfs_proc_get_root,
714 .getattr = nfs_proc_getattr, 715 .getattr = nfs_proc_getattr,
715 .setattr = nfs_proc_setattr, 716 .setattr = nfs_proc_setattr,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8b48ec63f72..cfa175c223d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
109 } 109 }
110} 110}
111 111
112static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
112void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
113 struct inode *inode) 113 struct inode *inode)
114{ 114{
115 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, 115 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
534static void nfs_readpage_release_full(void *calldata) 534static void nfs_readpage_release_full(void *calldata)
535{ 535{
536 struct nfs_read_data *data = calldata; 536 struct nfs_read_data *data = calldata;
537 struct nfs_pageio_descriptor pgio;
538 537
539 if (data->pnfs_error) {
540 nfs_pageio_init_read_mds(&pgio, data->inode);
541 pgio.pg_recoalesce = 1;
542 }
543 while (!list_empty(&data->pages)) { 538 while (!list_empty(&data->pages)) {
544 struct nfs_page *req = nfs_list_entry(data->pages.next); 539 struct nfs_page *req = nfs_list_entry(data->pages.next);
545 540
546 nfs_list_remove_request(req); 541 nfs_list_remove_request(req);
547 if (!data->pnfs_error)
548 nfs_readpage_release(req);
549 else
550 nfs_pageio_add_request(&pgio, req);
542 nfs_readpage_release(req);
551 } 543 }
552 if (data->pnfs_error)
553 nfs_pageio_complete(&pgio);
554 nfs_readdata_release(calldata); 544 nfs_readdata_release(calldata);
555} 545}
556 546
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 480b3b6bf71..134777406ee 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2787,43 +2787,18 @@ static void nfs_referral_loop_unprotect(void)
2787static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, 2787static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
2788 const char *export_path) 2788 const char *export_path)
2789{ 2789{
2790 struct mnt_namespace *ns_private;
2791 struct super_block *s;
2792 struct dentry *dentry; 2790 struct dentry *dentry;
2793 struct path path;
2791 int ret = nfs_referral_loop_protect();
2794 int ret;
2795
2796 ns_private = create_mnt_ns(root_mnt);
2797 ret = PTR_ERR(ns_private);
2798 if (IS_ERR(ns_private))
2799 goto out_mntput;
2800
2801 ret = nfs_referral_loop_protect();
2802 if (ret != 0)
2803 goto out_put_mnt_ns;
2804 2792
2805 ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
2806 export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2793 if (ret) {
2794 mntput(root_mnt);
2795 return ERR_PTR(ret);
2796 }
2807 2797
2798 dentry = mount_subtree(root_mnt, export_path);
2808 nfs_referral_loop_unprotect(); 2799 nfs_referral_loop_unprotect();
2809 put_mnt_ns(ns_private);
2810
2811 if (ret != 0)
2812 goto out_err;
2813
2814 s = path.mnt->mnt_sb;
2815 atomic_inc(&s->s_active);
2816 dentry = dget(path.dentry);
2817 2800
2818 path_put(&path);
2819 down_write(&s->s_umount);
2820 return dentry; 2801 return dentry;
2821out_put_mnt_ns:
2822 put_mnt_ns(ns_private);
2823out_mntput:
2824 mntput(root_mnt);
2825out_err:
2826 return ERR_PTR(ret);
2827} 2802}
2828 2803
2829static struct dentry *nfs4_try_mount(int flags, const char *dev_name, 2804static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
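[Editorial sketch] The rewritten nfs_follow_remote_path() leans on mount_subtree(), which consumes the vfsmount and walks export_path internally, so the private namespace, vfs_path_lookup() and manual s_active/s_umount juggling disappear; only the referral-loop protection and the mntput() on early failure remain. A small userspace model of the "callee takes ownership" shape (mount_subtree_model and struct mnt are invented names for illustration only):

    #include <stdio.h>
    #include <stdlib.h>

    struct mnt { const char *dev; };

    /* Modelled after mount_subtree(): consumes the mount reference whether it
     * succeeds or fails, so the caller no longer owns it afterwards. */
    static const char *mount_subtree_model(struct mnt *m, const char *path)
    {
        const char *result = path;   /* pretend the lookup always succeeds */
        free(m);                     /* ownership transferred: callee drops it */
        return result;
    }

    int main(void)
    {
        struct mnt *root = malloc(sizeof(*root));
        if (!root)
            return 1;
        root->dev = "server:/";
        /* Caller only handles its own early-exit cleanup; no namespace juggling. */
        printf("mounted subtree at %s\n", mount_subtree_model(root, "/export/home"));
        return 0;
    }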
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ed553c60de8..3165aebb43c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
5699 OCFS2_JOURNAL_ACCESS_WRITE); 5699 OCFS2_JOURNAL_ACCESS_WRITE);
5700 if (ret) { 5700 if (ret) {
5701 mlog_errno(ret); 5701 mlog_errno(ret);
5702 goto out;
5702 goto out_commit;
5703 } 5703 }
5704 5704
5705 dquot_free_space_nodirty(inode, 5705 dquot_free_space_nodirty(inode,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c1efe939c77..78b68af3b0e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
290 } 290 }
291 291
292 if (down_read_trylock(&oi->ip_alloc_sem) == 0) { 292 if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
293 /*
294 * Unlock the page and cycle ip_alloc_sem so that we don't
295 * busyloop waiting for ip_alloc_sem to unlock
296 */
293 ret = AOP_TRUNCATED_PAGE; 297 ret = AOP_TRUNCATED_PAGE;
298 unlock_page(page);
299 unlock = 0;
300 down_read(&oi->ip_alloc_sem);
301 up_read(&oi->ip_alloc_sem);
294 goto out_inode_unlock; 302 goto out_inode_unlock;
295 } 303 }
296 304
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
563{ 571{
564 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 572 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
565 int level; 573 int level;
574 wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
566 575
567 /* this io's submitter should not have unlocked this before we could */ 576 /* this io's submitter should not have unlocked this before we could */
568 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 577 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
570 if (ocfs2_iocb_is_sem_locked(iocb)) 579 if (ocfs2_iocb_is_sem_locked(iocb))
571 ocfs2_iocb_clear_sem_locked(iocb); 580 ocfs2_iocb_clear_sem_locked(iocb);
572 581
582 if (ocfs2_iocb_is_unaligned_aio(iocb)) {
583 ocfs2_iocb_clear_unaligned_aio(iocb);
584
585 if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
586 waitqueue_active(wq)) {
587 wake_up_all(wq);
588 }
589 }
590
573 ocfs2_iocb_clear_rw_locked(iocb); 591 ocfs2_iocb_clear_rw_locked(iocb);
574 592
575 level = ocfs2_iocb_rw_locked_level(iocb); 593 level = ocfs2_iocb_rw_locked_level(iocb);
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt {
863 struct page *w_target_page; 881 struct page *w_target_page;
864 882
865 /* 883 /*
884 * w_target_locked is used by the page_mkwrite path to indicate that
885 * w_target_page must not be unlocked in ocfs2_write_end_nolock.
886 */
887 unsigned int w_target_locked:1;
888
889 /*
866 * ocfs2_write_end() uses this to know what the real range to 890 * ocfs2_write_end() uses this to know what the real range to
867 * write in the target should be. 891 * write in the target should be.
868 */ 892 */
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
895 919
896static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) 920static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
897{ 921{
922 int i;
923
924 /*
925 * w_target_locked is only set to true in the page_mkwrite() case.
926 * The intent is to allow us to lock the target page from write_begin()
927 * to write_end(). The caller must hold a ref on w_target_page.
928 */
929 if (wc->w_target_locked) {
930 BUG_ON(!wc->w_target_page);
931 for (i = 0; i < wc->w_num_pages; i++) {
932 if (wc->w_target_page == wc->w_pages[i]) {
933 wc->w_pages[i] = NULL;
934 break;
935 }
936 }
937 mark_page_accessed(wc->w_target_page);
938 page_cache_release(wc->w_target_page);
939 }
898 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); 940 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
899 941
900 brelse(wc->w_di_bh); 942 brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1132 */ 1174 */
1133 lock_page(mmap_page); 1175 lock_page(mmap_page);
1134 1176
1177 /* Exit and let the caller retry */
1135 if (mmap_page->mapping != mapping) { 1178 if (mmap_page->mapping != mapping) {
1179 WARN_ON(mmap_page->mapping);
1136 unlock_page(mmap_page); 1180 unlock_page(mmap_page);
1137 /*
1138 * Sanity check - the locking in
1139 * ocfs2_pagemkwrite() should ensure
1140 * that this code doesn't trigger.
1141 */
1142 ret = -EINVAL;
1143 mlog_errno(ret);
1181 ret = -EAGAIN;
1144 goto out; 1182 goto out;
1145 } 1183 }
1146 1184
1147 page_cache_get(mmap_page); 1185 page_cache_get(mmap_page);
1148 wc->w_pages[i] = mmap_page; 1186 wc->w_pages[i] = mmap_page;
1187 wc->w_target_locked = true;
1149 } else { 1188 } else {
1150 wc->w_pages[i] = find_or_create_page(mapping, index, 1189 wc->w_pages[i] = find_or_create_page(mapping, index,
1151 GFP_NOFS); 1190 GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1160 wc->w_target_page = wc->w_pages[i]; 1199 wc->w_target_page = wc->w_pages[i];
1161 } 1200 }
1162out: 1201out:
1202 if (ret)
1203 wc->w_target_locked = false;
1163 return ret; 1204 return ret;
1164} 1205}
1165 1206
@@ -1817,11 +1858,23 @@ try_again:
1817 */ 1858 */
1818 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, 1859 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
1819 cluster_of_pages, mmap_page); 1860 cluster_of_pages, mmap_page);
1820 if (ret) {
1861 if (ret && ret != -EAGAIN) {
1821 mlog_errno(ret); 1862 mlog_errno(ret);
1822 goto out_quota; 1863 goto out_quota;
1823 } 1864 }
1824 1865
1866 /*
1867 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
1868 * the target page. In this case, we exit with no error and no target
1869 * page. This will trigger the caller, page_mkwrite(), to re-try
1870 * the operation.
1871 */
1872 if (ret == -EAGAIN) {
1873 BUG_ON(wc->w_target_page);
1874 ret = 0;
1875 goto out_quota;
1876 }
1877
1825 ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, 1878 ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
1826 len); 1879 len);
1827 if (ret) { 1880 if (ret) {
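[Editorial sketch] The hunks above establish a retry contract for the mmap write path: if the target page cannot be locked safely, ocfs2_grab_pages_for_write() returns -EAGAIN, write_begin converts that into "success with no target page", and page_mkwrite() retries the fault. A compressed userspace model of that contract (function names and the page_still_mapped flag are illustrative, not the kernel signatures):

    #include <errno.h>
    #include <stdio.h>

    /* Returns 0 on success, -EAGAIN when the caller should retry the fault. */
    static int grab_pages(int page_still_mapped, int *have_target)
    {
        if (!page_still_mapped) {
            *have_target = 0;
            return -EAGAIN;        /* page was truncated/remapped under us */
        }
        *have_target = 1;
        return 0;
    }

    static int write_begin(int page_still_mapped, int *have_target)
    {
        int ret = grab_pages(page_still_mapped, have_target);
        if (ret && ret != -EAGAIN)
            return ret;            /* real error */
        if (ret == -EAGAIN)
            return 0;              /* "success", but no target page: fault will retry */
        return 0;
    }

    int main(void)
    {
        int have_target = 0;
        write_begin(0, &have_target);
        printf(have_target ? "write proceeds\n" : "no target page, page_mkwrite retries\n");
        return 0;
    }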
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 75cf3ad987a..ffb2da370a9 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
78 OCFS2_IOCB_RW_LOCK = 0, 78 OCFS2_IOCB_RW_LOCK = 0,
79 OCFS2_IOCB_RW_LOCK_LEVEL, 79 OCFS2_IOCB_RW_LOCK_LEVEL,
80 OCFS2_IOCB_SEM, 80 OCFS2_IOCB_SEM,
81 OCFS2_IOCB_UNALIGNED_IO,
81 OCFS2_IOCB_NUM_LOCKS 82 OCFS2_IOCB_NUM_LOCKS
82}; 83};
83 84
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
91 clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 92 clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
92#define ocfs2_iocb_is_sem_locked(iocb) \ 93#define ocfs2_iocb_is_sem_locked(iocb) \
93 test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 94 test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
95
96#define ocfs2_iocb_set_unaligned_aio(iocb) \
97 set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
98#define ocfs2_iocb_clear_unaligned_aio(iocb) \
99 clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
100#define ocfs2_iocb_is_unaligned_aio(iocb) \
101 test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
102
103#define OCFS2_IOEND_WQ_HASH_SZ 37
104#define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\
105 OCFS2_IOEND_WQ_HASH_SZ])
106extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
107
94#endif /* OCFS2_FILE_H */ 108#endif /* OCFS2_FILE_H */
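[Editorial sketch] The new OCFS2_IOCB_UNALIGNED_IO bit and the ocfs2_ioend_wq() macro above let unaligned AIO writers wait on a small fixed pool of wait queues keyed by the inode pointer, rather than embedding a queue in every inode. A standalone sketch of the same hashing idea (struct waitq and the harness are invented for illustration; only the modulo bucket choice mirrors the header):

    #include <stdio.h>

    #define IOEND_WQ_HASH_SZ 37      /* same bucket count as the header above */

    struct waitq { int waiters; };
    static struct waitq ioend_wq[IOEND_WQ_HASH_SZ];

    /* Hash an inode pointer onto one of the shared wait queues. */
    static struct waitq *ioend_wq_for(const void *inode)
    {
        return &ioend_wq[(unsigned long)inode % IOEND_WQ_HASH_SZ];
    }

    int main(void)
    {
        int inode_a, inode_b;        /* stand-ins for two struct inode objects */
        printf("inode A -> bucket %ld\n", (long)(ioend_wq_for(&inode_a) - ioend_wq));
        printf("inode B -> bucket %ld\n", (long)(ioend_wq_for(&inode_b) - ioend_wq));
        return 0;
    }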
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bbff27..a4e855e3690 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,6 +216,7 @@ struct o2hb_region {
216 216
217 struct list_head hr_all_item; 217 struct list_head hr_all_item;
218 unsigned hr_unclean_stop:1, 218 unsigned hr_unclean_stop:1,
219 hr_aborted_start:1,
219 hr_item_pinned:1, 220 hr_item_pinned:1,
220 hr_item_dropped:1; 221 hr_item_dropped:1;
221 222
@@ -254,6 +255,10 @@ struct o2hb_region {
254 * a more complete api that doesn't lead to this sort of fragility. */ 255 * a more complete api that doesn't lead to this sort of fragility. */
255 atomic_t hr_steady_iterations; 256 atomic_t hr_steady_iterations;
256 257
258 /* terminate o2hb thread if it does not reach steady state
259 * (hr_steady_iterations == 0) within hr_unsteady_iterations */
260 atomic_t hr_unsteady_iterations;
261
257 char hr_dev_name[BDEVNAME_SIZE]; 262 char hr_dev_name[BDEVNAME_SIZE];
258 263
259 unsigned int hr_timeout_ms; 264 unsigned int hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
324 329
325static void o2hb_arm_write_timeout(struct o2hb_region *reg) 330static void o2hb_arm_write_timeout(struct o2hb_region *reg)
326{ 331{
332 /* Arm writeout only after thread reaches steady state */
333 if (atomic_read(&reg->hr_steady_iterations) != 0)
334 return;
335
327 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", 336 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
328 O2HB_MAX_WRITE_TIMEOUT_MS); 337 O2HB_MAX_WRITE_TIMEOUT_MS);
329 338
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
537 return read == computed; 546 return read == computed;
538} 547}
539 548
540/* We want to make sure that nobody is heartbeating on top of us --
541 * this will help detect an invalid configuration. */
542static void o2hb_check_last_timestamp(struct o2hb_region *reg)
549/*
550 * Compare the slot data with what we wrote in the last iteration.
551 * If the match fails, print an appropriate error message. This is to
552 * detect errors like... another node heartbeating on the same slot,
553 * flaky device that is losing writes, etc.
554 * Returns 1 if check succeeds, 0 otherwise.
555 */
556static int o2hb_check_own_slot(struct o2hb_region *reg)
543{ 557{
544 struct o2hb_disk_slot *slot; 558 struct o2hb_disk_slot *slot;
545 struct o2hb_disk_heartbeat_block *hb_block; 559 struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
548 slot = &reg->hr_slots[o2nm_this_node()]; 562 slot = &reg->hr_slots[o2nm_this_node()];
549 /* Don't check on our 1st timestamp */ 563 /* Don't check on our 1st timestamp */
550 if (!slot->ds_last_time) 564 if (!slot->ds_last_time)
551 return;
565 return 0;
552 566
553 hb_block = slot->ds_raw_block; 567 hb_block = slot->ds_raw_block;
554 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && 568 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
555 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && 569 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
556 hb_block->hb_node == slot->ds_node_num) 570 hb_block->hb_node == slot->ds_node_num)
557 return;
571 return 1;
558 572
559#define ERRSTR1 "Another node is heartbeating on device" 573#define ERRSTR1 "Another node is heartbeating on device"
560#define ERRSTR2 "Heartbeat generation mismatch on device" 574#define ERRSTR2 "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
574 (unsigned long long)slot->ds_last_time, hb_block->hb_node, 588 (unsigned long long)slot->ds_last_time, hb_block->hb_node,
575 (unsigned long long)le64_to_cpu(hb_block->hb_generation), 589 (unsigned long long)le64_to_cpu(hb_block->hb_generation),
576 (unsigned long long)le64_to_cpu(hb_block->hb_seq)); 590 (unsigned long long)le64_to_cpu(hb_block->hb_seq));
591
592 return 0;
577} 593}
578 594
579static inline void o2hb_prepare_block(struct o2hb_region *reg, 595static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
719 o2nm_node_put(node); 735 o2nm_node_put(node);
720} 736}
721 737
722static void o2hb_set_quorum_device(struct o2hb_region *reg,
723 struct o2hb_disk_slot *slot)
738static void o2hb_set_quorum_device(struct o2hb_region *reg)
724{ 739{
725 assert_spin_locked(&o2hb_live_lock);
726
727 if (!o2hb_global_heartbeat_active()) 740 if (!o2hb_global_heartbeat_active())
728 return; 741 return;
729 742
730 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
743 /* Prevent race with o2hb_heartbeat_group_drop_item() */
744 if (kthread_should_stop())
745 return;
746
747 /* Tag region as quorum only after thread reaches steady state */
748 if (atomic_read(&reg->hr_steady_iterations) != 0)
731 return; 749 return;
732 750
751 spin_lock(&o2hb_live_lock);
752
753 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
754 goto unlock;
755
733 /* 756 /*
734 * A region can be added to the quorum only when it sees all 757 * A region can be added to the quorum only when it sees all
735 * live nodes heartbeat on it. In other words, the region has been 758 * live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
737 */ 760 */
738 if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, 761 if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
739 sizeof(o2hb_live_node_bitmap))) 762 sizeof(o2hb_live_node_bitmap)))
740 return;
763 goto unlock;
741
742 if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
743 return;
744 764
745 printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
746 config_item_name(&reg->hr_item));
765 printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
766 config_item_name(&reg->hr_item), reg->hr_dev_name);
747 767
748 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); 768 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
749 769
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
754 if (o2hb_pop_count(&o2hb_quorum_region_bitmap, 774 if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
755 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) 775 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
756 o2hb_region_unpin(NULL); 776 o2hb_region_unpin(NULL);
777unlock:
778 spin_unlock(&o2hb_live_lock);
757} 779}
758 780
759static int o2hb_check_slot(struct o2hb_region *reg, 781static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
925 slot->ds_equal_samples = 0; 947 slot->ds_equal_samples = 0;
926 } 948 }
927out: 949out:
928 o2hb_set_quorum_device(reg, slot);
929
930 spin_unlock(&o2hb_live_lock); 950 spin_unlock(&o2hb_live_lock);
931 951
932 o2hb_run_event_list(&event); 952 o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
957 977
958static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) 978static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
959{ 979{
960 int i, ret, highest_node, change = 0;
980 int i, ret, highest_node;
981 int membership_change = 0, own_slot_ok = 0;
961 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; 982 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
962 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 983 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
963 struct o2hb_bio_wait_ctxt write_wc; 984 struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
966 sizeof(configured_nodes)); 987 sizeof(configured_nodes));
967 if (ret) { 988 if (ret) {
968 mlog_errno(ret); 989 mlog_errno(ret);
969 return ret;
990 goto bail;
970 } 991 }
971 992
972 /* 993 /*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
982 1003
983 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); 1004 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
984 if (highest_node >= O2NM_MAX_NODES) { 1005 if (highest_node >= O2NM_MAX_NODES) {
985 mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
986 return -EINVAL;
1006 mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
1007 ret = -EINVAL;
1008 goto bail;
987 } 1009 }
988 1010
989 /* No sense in reading the slots of nodes that don't exist 1011 /* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
993 ret = o2hb_read_slots(reg, highest_node + 1); 1015 ret = o2hb_read_slots(reg, highest_node + 1);
994 if (ret < 0) { 1016 if (ret < 0) {
995 mlog_errno(ret); 1017 mlog_errno(ret);
996 return ret;
1018 goto bail;
997 } 1019 }
998 1020
999 /* With an up to date view of the slots, we can check that no 1021 /* With an up to date view of the slots, we can check that no
1000 * other node has been improperly configured to heartbeat in 1022 * other node has been improperly configured to heartbeat in
1001 * our slot. */ 1023 * our slot. */
1002 o2hb_check_last_timestamp(reg);
1024 own_slot_ok = o2hb_check_own_slot(reg);
1003 1025
1004 /* fill in the proper info for our next heartbeat */ 1026 /* fill in the proper info for our next heartbeat */
1005 o2hb_prepare_block(reg, reg->hr_generation); 1027 o2hb_prepare_block(reg, reg->hr_generation);
1006 1028
1007 /* And fire off the write. Note that we don't wait on this I/O
1008 * until later. */
1009 ret = o2hb_issue_node_write(reg, &write_wc); 1029 ret = o2hb_issue_node_write(reg, &write_wc);
1010 if (ret < 0) { 1030 if (ret < 0) {
1011 mlog_errno(ret); 1031 mlog_errno(ret);
1012 return ret;
1032 goto bail;
1013 } 1033 }
1014 1034
1015 i = -1; 1035 i = -1;
1016 while((i = find_next_bit(configured_nodes, 1036 while((i = find_next_bit(configured_nodes,
1017 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1037 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
1018 change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
1038 membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
1019 } 1039 }
1020 1040
1021 /* 1041 /*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
1030 * disk */ 1050 * disk */
1031 mlog(ML_ERROR, "Write error %d on device \"%s\"\n", 1051 mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
1032 write_wc.wc_error, reg->hr_dev_name); 1052 write_wc.wc_error, reg->hr_dev_name);
1033 return write_wc.wc_error;
1053 ret = write_wc.wc_error;
1054 goto bail;
1034 } 1055 }
1035 1056
1036 o2hb_arm_write_timeout(reg);
1057 /* Skip disarming the timeout if own slot has stale/bad data */
1058 if (own_slot_ok) {
1059 o2hb_set_quorum_device(reg);
1060 o2hb_arm_write_timeout(reg);
1061 }
1037 1062
1063bail:
1038 /* let the person who launched us know when things are steady */ 1064 /* let the person who launched us know when things are steady */
1039 if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) { 1065 if (atomic_read(&reg->hr_steady_iterations) != 0) {
1040 if (atomic_dec_and_test(&reg->hr_steady_iterations)) 1066 if (!ret && own_slot_ok && !membership_change) {
1067 if (atomic_dec_and_test(&reg->hr_steady_iterations))
1068 wake_up(&o2hb_steady_queue);
1069 }
1070 }
1071
1072 if (atomic_read(&reg->hr_steady_iterations) != 0) {
1073 if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
1074 printk(KERN_NOTICE "o2hb: Unable to stabilize "
1075 "heartbeat on region %s (%s)\n",
1076 config_item_name(&reg->hr_item),
1077 reg->hr_dev_name);
1078 atomic_set(&reg->hr_steady_iterations, 0);
1079 reg->hr_aborted_start = 1;
1041 wake_up(&o2hb_steady_queue); 1080 wake_up(&o2hb_steady_queue);
1081 ret = -EIO;
1082 }
1042 } 1083 }
1043 1084
1044 return 0;
1085 return ret;
1045} 1086}
1046 1087
1047/* Subtract b from a, storing the result in a. a *must* have a larger 1088/* Subtract b from a, storing the result in a. a *must* have a larger
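[Editorial sketch] The o2hb_do_disk_heartbeat() changes make every failure path fall through to bail:, where hr_steady_iterations is decremented only on a clean iteration and hr_unsteady_iterations otherwise counts down toward aborting the start. A userspace model of the two counters (no locking or atomics; the initial values and return codes are illustrative):

    #include <stdio.h>

    static int steady = 3, unsteady = 6;   /* unsteady starts at double steady, as above */

    /* Returns 0 while starting up, 1 once steady, -1 if the start must be aborted. */
    static int account_iteration(int iteration_ok)
    {
        if (steady == 0)
            return 1;
        if (iteration_ok && --steady == 0)
            return 1;                      /* reached steady state */
        if (--unsteady == 0)
            return -1;                     /* could not stabilize: abort start */
        return 0;
    }

    int main(void)
    {
        int pattern[] = { 0, 1, 0, 1, 1 }; /* mix of bad and good iterations */
        for (unsigned i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++)
            printf("iter %u -> %d\n", i, account_iteration(pattern[i]));
        return 0;
    }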
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
1095 /* Pin node */ 1136 /* Pin node */
1096 o2nm_depend_this_node(); 1137 o2nm_depend_this_node();
1097 1138
1098 while (!kthread_should_stop() && !reg->hr_unclean_stop) {
1139 while (!kthread_should_stop() &&
1140 !reg->hr_unclean_stop && !reg->hr_aborted_start) {
1099 /* We track the time spent inside 1141 /* We track the time spent inside
1100 * o2hb_do_disk_heartbeat so that we avoid more than 1142 * o2hb_do_disk_heartbeat so that we avoid more than
1101 * hr_timeout_ms between disk writes. On busy systems 1143 * hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
1103 * likely to time itself out. */ 1145 * likely to time itself out. */
1104 do_gettimeofday(&before_hb); 1146 do_gettimeofday(&before_hb);
1105 1147
1106 i = 0;
1148 ret = o2hb_do_disk_heartbeat(reg);
1107 do {
1108 ret = o2hb_do_disk_heartbeat(reg);
1109 } while (ret && ++i < 2);
1110 1149
1111 do_gettimeofday(&after_hb); 1150 do_gettimeofday(&after_hb);
1112 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); 1151 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
1117 after_hb.tv_sec, (unsigned long) after_hb.tv_usec, 1156 after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
1118 elapsed_msec); 1157 elapsed_msec);
1119 1158
1120 if (elapsed_msec < reg->hr_timeout_ms) {
1159 if (!kthread_should_stop() &&
1160 elapsed_msec < reg->hr_timeout_ms) {
1121 /* the kthread api has blocked signals for us so no 1161 /* the kthread api has blocked signals for us so no
1122 * need to record the return value. */ 1162 * need to record the return value. */
1123 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); 1163 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
1134 * to timeout on this region when we could just as easily 1174 * to timeout on this region when we could just as easily
1135 * write a clear generation - thus indicating to them that 1175 * write a clear generation - thus indicating to them that
1136 * this node has left this region. 1176 * this node has left this region.
1137 *
1138 * XXX: Should we skip this on unclean_stop? */
1177 */
1178 if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
1139 o2hb_prepare_block(reg, 0); 1179 o2hb_prepare_block(reg, 0);
1140 ret = o2hb_issue_node_write(reg, &write_wc); 1180 ret = o2hb_issue_node_write(reg, &write_wc);
1141 if (ret == 0) {
1181 if (ret == 0)
1142 o2hb_wait_on_io(reg, &write_wc); 1182 o2hb_wait_on_io(reg, &write_wc);
1143 } else {
1183 else
1144 mlog_errno(ret); 1184 mlog_errno(ret);
1145 } 1185 }
1146 1186
1147 /* Unpin node */ 1187 /* Unpin node */
1148 o2nm_undepend_this_node(); 1188 o2nm_undepend_this_node();
1149 1189
1150 mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
1190 mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
1151 1191
1152 return 0; 1192 return 0;
1153} 1193}
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
1158 struct o2hb_debug_buf *db = inode->i_private; 1198 struct o2hb_debug_buf *db = inode->i_private;
1159 struct o2hb_region *reg; 1199 struct o2hb_region *reg;
1160 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1200 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1201 unsigned long lts;
1161 char *buf = NULL; 1202 char *buf = NULL;
1162 int i = -1; 1203 int i = -1;
1163 int out = 0; 1204 int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
1194 1235
1195 case O2HB_DB_TYPE_REGION_ELAPSED_TIME: 1236 case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
1196 reg = (struct o2hb_region *)db->db_data; 1237 reg = (struct o2hb_region *)db->db_data;
1197 out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
1198 jiffies_to_msecs(jiffies -
1199 reg->hr_last_timeout_start));
1238 lts = reg->hr_last_timeout_start;
1239 /* If 0, it has never been set before */
1240 if (lts)
1241 lts = jiffies_to_msecs(jiffies - lts);
1242 out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
1200 goto done; 1243 goto done;
1201 1244
1202 case O2HB_DB_TYPE_REGION_PINNED: 1245 case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
1426 struct page *page; 1469 struct page *page;
1427 struct o2hb_region *reg = to_o2hb_region(item); 1470 struct o2hb_region *reg = to_o2hb_region(item);
1428 1471
1472 mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
1473
1429 if (reg->hr_tmp_block) 1474 if (reg->hr_tmp_block)
1430 kfree(reg->hr_tmp_block); 1475 kfree(reg->hr_tmp_block);
1431 1476
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1792 live_threshold <<= 1; 1837 live_threshold <<= 1;
1793 spin_unlock(&o2hb_live_lock); 1838 spin_unlock(&o2hb_live_lock);
1794 } 1839 }
1795 atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
1840 ++live_threshold;
1841 atomic_set(&reg->hr_steady_iterations, live_threshold);
1842 /* unsteady_iterations is double the steady_iterations */
1843 atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
1796 1844
1797 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1845 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
1798 reg->hr_item.ci_name); 1846 reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1809 ret = wait_event_interruptible(o2hb_steady_queue, 1857 ret = wait_event_interruptible(o2hb_steady_queue,
1810 atomic_read(&reg->hr_steady_iterations) == 0); 1858 atomic_read(&reg->hr_steady_iterations) == 0);
1811 if (ret) { 1859 if (ret) {
1812 /* We got interrupted (hello ptrace!). Clean up */
1813 spin_lock(&o2hb_live_lock);
1814 hb_task = reg->hr_task;
1815 reg->hr_task = NULL;
1816 spin_unlock(&o2hb_live_lock);
1817
1818 if (hb_task)
1819 kthread_stop(hb_task);
1860 atomic_set(&reg->hr_steady_iterations, 0);
1861 reg->hr_aborted_start = 1;
1862 }
1863
1864 if (reg->hr_aborted_start) {
1865 ret = -EIO;
1820 goto out; 1866 goto out;
1821 } 1867 }
1822 1868
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1833 ret = -EIO; 1879 ret = -EIO;
1834 1880
1835 if (hb_task && o2hb_global_heartbeat_active()) 1881 if (hb_task && o2hb_global_heartbeat_active())
1836 printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
1837 config_item_name(&reg->hr_item));
1882 printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
1883 config_item_name(&reg->hr_item), reg->hr_dev_name);
1838 1884
1839out: 1885out:
1840 if (filp) 1886 if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2092 2138
2093 /* stop the thread when the user removes the region dir */ 2139 /* stop the thread when the user removes the region dir */
2094 spin_lock(&o2hb_live_lock); 2140 spin_lock(&o2hb_live_lock);
2095 if (o2hb_global_heartbeat_active()) {
2096 clear_bit(reg->hr_region_num, o2hb_region_bitmap);
2097 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
2098 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
2099 quorum_region = 1;
2100 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
2101 }
2102 hb_task = reg->hr_task; 2141 hb_task = reg->hr_task;
2103 reg->hr_task = NULL; 2142 reg->hr_task = NULL;
2104 reg->hr_item_dropped = 1; 2143 reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2107 if (hb_task) 2146 if (hb_task)
2108 kthread_stop(hb_task); 2147 kthread_stop(hb_task);
2109 2148
2149 if (o2hb_global_heartbeat_active()) {
2150 spin_lock(&o2hb_live_lock);
2151 clear_bit(reg->hr_region_num, o2hb_region_bitmap);
2152 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
2153 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
2154 quorum_region = 1;
2155 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
2156 spin_unlock(&o2hb_live_lock);
2157 printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
2158 ((atomic_read(&reg->hr_steady_iterations) == 0) ?
2159 "stopped" : "start aborted"), config_item_name(item),
2160 reg->hr_dev_name);
2161 }
2162
2110 /* 2163 /*
2111 * If we're racing a dev_write(), we need to wake them. They will 2164 * If we're racing a dev_write(), we need to wake them. They will
2112 * check reg->hr_task 2165 * check reg->hr_task
2113 */ 2166 */
2114 if (atomic_read(&reg->hr_steady_iterations) != 0) { 2167 if (atomic_read(&reg->hr_steady_iterations) != 0) {
2168 reg->hr_aborted_start = 1;
2115 atomic_set(&reg->hr_steady_iterations, 0); 2169 atomic_set(&reg->hr_steady_iterations, 0);
2116 wake_up(&o2hb_steady_queue); 2170 wake_up(&o2hb_steady_queue);
2117 } 2171 }
2118 2172
2119 if (o2hb_global_heartbeat_active())
2120 printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
2121 config_item_name(&reg->hr_item));
2122
2123 config_item_put(item); 2173 config_item_put(item);
2124 2174
2125 if (!o2hb_global_heartbeat_active() || !quorum_region) 2175 if (!o2hb_global_heartbeat_active() || !quorum_region)
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 3a5835904b3..dc45deb19e6 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -47,6 +47,7 @@
47#define SC_DEBUG_NAME "sock_containers" 47#define SC_DEBUG_NAME "sock_containers"
48#define NST_DEBUG_NAME "send_tracking" 48#define NST_DEBUG_NAME "send_tracking"
49#define STATS_DEBUG_NAME "stats" 49#define STATS_DEBUG_NAME "stats"
50#define NODES_DEBUG_NAME "connected_nodes"
50 51
51#define SHOW_SOCK_CONTAINERS 0 52#define SHOW_SOCK_CONTAINERS 0
52#define SHOW_SOCK_STATS 1 53#define SHOW_SOCK_STATS 1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
55static struct dentry *sc_dentry; 56static struct dentry *sc_dentry;
56static struct dentry *nst_dentry; 57static struct dentry *nst_dentry;
57static struct dentry *stats_dentry; 58static struct dentry *stats_dentry;
59static struct dentry *nodes_dentry;
58 60
59static DEFINE_SPINLOCK(o2net_debug_lock); 61static DEFINE_SPINLOCK(o2net_debug_lock);
60 62
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
491 .release = sc_fop_release, 493 .release = sc_fop_release,
492}; 494};
493 495
494int o2net_debugfs_init(void)
495{
496 o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
497 if (!o2net_dentry) {
496static int o2net_fill_bitmap(char *buf, int len)
497{
498 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
499 int i = -1, out = 0;
498 mlog_errno(-ENOMEM);
499 goto bail;
500 }
501 500
502 nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
501 o2net_fill_node_map(map, sizeof(map));
503 o2net_dentry, NULL,
504 &nst_seq_fops);
505 if (!nst_dentry) {
506 mlog_errno(-ENOMEM);
507 goto bail;
508 }
509 502
510 sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
511 o2net_dentry, NULL,
512 &sc_seq_fops);
503 while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
504 out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
505 out += snprintf(buf + out, PAGE_SIZE - out, "\n");
513 if (!sc_dentry) {
514 mlog_errno(-ENOMEM);
515 goto bail;
516 }
517 506
518 stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
519 o2net_dentry, NULL,
520 &stats_seq_fops);
521 if (!stats_dentry) {
522 mlog_errno(-ENOMEM);
523 goto bail;
524 }
507 return out;
508}
509
510static int nodes_fop_open(struct inode *inode, struct file *file)
511{
512 char *buf;
513
514 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
515 if (!buf)
516 return -ENOMEM;
517
518 i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
519
520 file->private_data = buf;
525 521
526 return 0; 522 return 0;
527bail:
528 debugfs_remove(stats_dentry);
529 debugfs_remove(sc_dentry);
530 debugfs_remove(nst_dentry);
531 debugfs_remove(o2net_dentry);
532 return -ENOMEM;
533} 523}
534 524
525static int o2net_debug_release(struct inode *inode, struct file *file)
526{
527 kfree(file->private_data);
528 return 0;
529}
530
531static ssize_t o2net_debug_read(struct file *file, char __user *buf,
532 size_t nbytes, loff_t *ppos)
533{
534 return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
535 i_size_read(file->f_mapping->host));
536}
537
538static const struct file_operations nodes_fops = {
539 .open = nodes_fop_open,
540 .release = o2net_debug_release,
541 .read = o2net_debug_read,
542 .llseek = generic_file_llseek,
543};
544
535void o2net_debugfs_exit(void) 545void o2net_debugfs_exit(void)
536{ 546{
547 debugfs_remove(nodes_dentry);
537 debugfs_remove(stats_dentry); 548 debugfs_remove(stats_dentry);
538 debugfs_remove(sc_dentry); 549 debugfs_remove(sc_dentry);
539 debugfs_remove(nst_dentry); 550 debugfs_remove(nst_dentry);
540 debugfs_remove(o2net_dentry); 551 debugfs_remove(o2net_dentry);
541} 552}
542 553
554int o2net_debugfs_init(void)
555{
556 mode_t mode = S_IFREG|S_IRUSR;
557
558 o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
559 if (o2net_dentry)
560 nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
561 o2net_dentry, NULL, &nst_seq_fops);
562 if (nst_dentry)
563 sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
564 o2net_dentry, NULL, &sc_seq_fops);
565 if (sc_dentry)
566 stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
567 o2net_dentry, NULL, &stats_seq_fops);
568 if (stats_dentry)
569 nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
570 o2net_dentry, NULL, &nodes_fops);
571 if (nodes_dentry)
572 return 0;
573
574 o2net_debugfs_exit();
575 mlog_errno(-ENOMEM);
576 return -ENOMEM;
577}
578
543#endif /* CONFIG_DEBUG_FS */ 579#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ad7d0c155de..044e7b58d31 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
546 } 546 }
547 547
548 if (was_valid && !valid) { 548 if (was_valid && !valid) {
549 printk(KERN_NOTICE "o2net: no longer connected to "
549 printk(KERN_NOTICE "o2net: No longer connected to "
550 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); 550 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
551 o2net_complete_nodes_nsw(nn); 551 o2net_complete_nodes_nsw(nn);
552 } 552 }
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
556 cancel_delayed_work(&nn->nn_connect_expired); 556 cancel_delayed_work(&nn->nn_connect_expired);
557 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", 557 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
558 o2nm_this_node() > sc->sc_node->nd_num ? 558 o2nm_this_node() > sc->sc_node->nd_num ?
559 "connected to" : "accepted connection from",
559 "Connected to" : "Accepted connection from",
560 SC_NODEF_ARGS(sc)); 560 SC_NODEF_ARGS(sc));
561 } 561 }
562 562
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
644 o2net_sc_queue_work(sc, &sc->sc_connect_work); 644 o2net_sc_queue_work(sc, &sc->sc_connect_work);
645 break; 645 break;
646 default: 646 default:
647 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
647 printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
648 " shutdown, state %d\n", 648 " shutdown, state %d\n",
649 SC_NODEF_ARGS(sc), sk->sk_state); 649 SC_NODEF_ARGS(sc), sk->sk_state);
650 o2net_sc_queue_work(sc, &sc->sc_shutdown_work); 650 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
1035 return ret; 1035 return ret;
1036} 1036}
1037 1037
1038/* Get a map of all nodes to which this node is currently connected */
1039void o2net_fill_node_map(unsigned long *map, unsigned bytes)
1040{
1041 struct o2net_sock_container *sc;
1042 int node, ret;
1043
1044 BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
1045
1046 memset(map, 0, bytes);
1047 for (node = 0; node < O2NM_MAX_NODES; ++node) {
1048 o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
1049 if (!ret) {
1050 set_bit(node, map);
1051 sc_put(sc);
1052 }
1053 }
1054}
1055EXPORT_SYMBOL_GPL(o2net_fill_node_map);
1056
1038int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, 1057int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
1039 size_t caller_veclen, u8 target_node, int *status) 1058 size_t caller_veclen, u8 target_node, int *status)
1040{ 1059{
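[Editorial sketch] o2net_fill_node_map() above builds a bitmap of nodes with a usable connection by probing o2net_tx_can_proceed() per node; the connected_nodes debugfs file added earlier in this patch is its first consumer. A small userspace sketch of the bitmap-filling pattern (fixed-size bit array, invented connection_usable() helper, no kernel bit helpers):

    #include <stdio.h>

    #define MAX_NODES 32

    /* Pretend nodes 1, 4 and 7 currently have a usable connection. */
    static int connection_usable(int node)
    {
        return node == 1 || node == 4 || node == 7;
    }

    /* Modelled after o2net_fill_node_map(): set one bit per connected node. */
    static void fill_node_map(unsigned long *map)
    {
        *map = 0;
        for (int node = 0; node < MAX_NODES; node++)
            if (connection_usable(node))
                *map |= 1UL << node;
    }

    int main(void)
    {
        unsigned long map;
        fill_node_map(&map);
        for (int node = 0; node < MAX_NODES; node++)
            if (map & (1UL << node))
                printf("%d ", node);     /* same "%d " formatting as the debugfs file */
        printf("\n");
        return 0;
    }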
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
1285 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1304 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1286 1305
1287 if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { 1306 if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
1288 mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
1289 "version %llu but %llu is required, disconnecting\n",
1290 SC_NODEF_ARGS(sc),
1291 (unsigned long long)be64_to_cpu(hand->protocol_version),
1292 O2NET_PROTOCOL_VERSION);
1307 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
1308 "protocol version %llu but %llu is required. "
1309 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1310 (unsigned long long)be64_to_cpu(hand->protocol_version),
1311 O2NET_PROTOCOL_VERSION);
1293 1312
1294 /* don't bother reconnecting if its the wrong version. */ 1313 /* don't bother reconnecting if its the wrong version. */
1295 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1314 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
1303 */ 1322 */
1304 if (be32_to_cpu(hand->o2net_idle_timeout_ms) != 1323 if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
1305 o2net_idle_timeout()) { 1324 o2net_idle_timeout()) {
1306 mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
1307 "%u ms, but we use %u ms locally. disconnecting\n",
1308 SC_NODEF_ARGS(sc),
1309 be32_to_cpu(hand->o2net_idle_timeout_ms),
1310 o2net_idle_timeout());
1325 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
1326 "idle timeout of %u ms, but we use %u ms locally. "
1327 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1328 be32_to_cpu(hand->o2net_idle_timeout_ms),
1329 o2net_idle_timeout());
1311 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1330 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1312 return -1; 1331 return -1;
1313 } 1332 }
1314 1333
1315 if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != 1334 if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
1316 o2net_keepalive_delay()) { 1335 o2net_keepalive_delay()) {
1317 mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
1318 "%u ms, but we use %u ms locally. disconnecting\n",
1319 SC_NODEF_ARGS(sc),
1320 be32_to_cpu(hand->o2net_keepalive_delay_ms),
1321 o2net_keepalive_delay());
1336 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
1337 "delay of %u ms, but we use %u ms locally. "
1338 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1339 be32_to_cpu(hand->o2net_keepalive_delay_ms),
1340 o2net_keepalive_delay());
1322 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1341 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1323 return -1; 1342 return -1;
1324 } 1343 }
1325 1344
1326 if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != 1345 if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
1327 O2HB_MAX_WRITE_TIMEOUT_MS) { 1346 O2HB_MAX_WRITE_TIMEOUT_MS) {
1328 mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
1329 "%u ms, but we use %u ms locally. disconnecting\n",
1330 SC_NODEF_ARGS(sc),
1331 be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
1332 O2HB_MAX_WRITE_TIMEOUT_MS);
1347 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
1348 "timeout of %u ms, but we use %u ms locally. "
1349 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1350 be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
1351 O2HB_MAX_WRITE_TIMEOUT_MS);
1333 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1352 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1334 return -1; 1353 return -1;
1335 } 1354 }
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
1540{ 1559{
1541 struct o2net_sock_container *sc = (struct o2net_sock_container *)data; 1560 struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
1542 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1561 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1543
1544#ifdef CONFIG_DEBUG_FS 1562#ifdef CONFIG_DEBUG_FS
1545 ktime_t now = ktime_get();
1563 unsigned long msecs = ktime_to_ms(ktime_get()) -
1564 ktime_to_ms(sc->sc_tv_timer);
1565#else
1566 unsigned long msecs = o2net_idle_timeout();
1546#endif 1567#endif
1547 1568
1548 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1549 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1550 o2net_idle_timeout() / 1000,
1551 o2net_idle_timeout() % 1000);
1569 printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
1570 "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
1571 msecs / 1000, msecs % 1000);
1552
1553#ifdef CONFIG_DEBUG_FS
1554 mlog(ML_NOTICE, "Here are some times that might help debug the "
1555 "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
1556 "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
1557 (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
1558 (long long)ktime_to_us(sc->sc_tv_data_ready),
1559 (long long)ktime_to_us(sc->sc_tv_advance_start),
1560 (long long)ktime_to_us(sc->sc_tv_advance_stop),
1561 sc->sc_msg_key, sc->sc_msg_type,
1562 (long long)ktime_to_us(sc->sc_tv_func_start),
1563 (long long)ktime_to_us(sc->sc_tv_func_stop));
1564#endif
1565 1572
1566 /* 1573 /*
1567 * Initialize the nn_timeout so that the next connection attempt 1574 * Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
1694 1701
1695out: 1702out:
1696 if (ret) { 1703 if (ret) {
1697 mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
1698 "with errno %d\n", SC_NODEF_ARGS(sc), ret);
1704 printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
1705 " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
1699 /* 0 err so that another will be queued and attempted 1706 /* 0 err so that another will be queued and attempted
1700 * from set_nn_state */ 1707 * from set_nn_state */
1701 if (sc) 1708 if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
1718 1725
1719 spin_lock(&nn->nn_lock); 1726 spin_lock(&nn->nn_lock);
1720 if (!nn->nn_sc_valid) { 1727 if (!nn->nn_sc_valid) {
1721 mlog(ML_ERROR, "no connection established with node %u after "
1722 "%u.%u seconds, giving up and returning errors.\n",
1728 printk(KERN_NOTICE "o2net: No connection established with "
1729 "node %u after %u.%u seconds, giving up.\n",
1723 o2net_num_from_nn(nn), 1730 o2net_num_from_nn(nn),
1724 o2net_idle_timeout() / 1000, 1731 o2net_idle_timeout() / 1000,
1725 o2net_idle_timeout() % 1000); 1732 o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
1862 1869
1863 node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); 1870 node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
1864 if (node == NULL) { 1871 if (node == NULL) {
1865 mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
1866 &sin.sin_addr.s_addr, ntohs(sin.sin_port));
1872 printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
1873 "node at %pI4:%d\n", &sin.sin_addr.s_addr,
1874 ntohs(sin.sin_port));
1867 ret = -EINVAL; 1875 ret = -EINVAL;
1868 goto out; 1876 goto out;
1869 } 1877 }
1870 1878
1871 if (o2nm_this_node() >= node->nd_num) { 1879 if (o2nm_this_node() >= node->nd_num) {
1872 local_node = o2nm_get_node_by_num(o2nm_this_node()); 1880 local_node = o2nm_get_node_by_num(o2nm_this_node());
1873 mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
1874 "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
1875 local_node->nd_name, local_node->nd_num,
1876 &(local_node->nd_ipv4_address),
1877 ntohs(local_node->nd_ipv4_port),
1878 node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
1879 ntohs(sin.sin_port));
1881 printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
1882 "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
1883 "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
1884 &(local_node->nd_ipv4_address),
1885 ntohs(local_node->nd_ipv4_port), node->nd_name,
1886 node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
1880 ret = -EINVAL; 1887 ret = -EINVAL;
1881 goto out; 1888 goto out;
1882 } 1889 }
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
1901 ret = 0; 1908 ret = 0;
1902 spin_unlock(&nn->nn_lock); 1909 spin_unlock(&nn->nn_lock);
1903 if (ret) { 1910 if (ret) {
1904 mlog(ML_NOTICE, "attempt to connect from node '%s' at "
1905 "%pI4:%d but it already has an open connection\n",
1906 node->nd_name, &sin.sin_addr.s_addr,
1907 ntohs(sin.sin_port));
1911 printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
1912 "at %pI4:%d but it already has an open connection\n",
1913 node->nd_name, &sin.sin_addr.s_addr,
1914 ntohs(sin.sin_port));
1908 goto out; 1915 goto out;
1909 } 1916 }
1910 1917
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
1984 1991
1985 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); 1992 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1986 if (ret < 0) { 1993 if (ret < 0) {
1987 mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
1994 printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
1988 goto out; 1995 goto out;
1989 } 1996 }
1990 1997
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
2001 sock->sk->sk_reuse = 1; 2008 sock->sk->sk_reuse = 1;
2002 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 2009 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2003 if (ret < 0) { 2010 if (ret < 0) {
2004 mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
2005 "ret=%d\n", &addr, ntohs(port), ret);
2011 printk(KERN_ERR "o2net: Error %d while binding socket at "
2012 "%pI4:%u\n", ret, &addr, ntohs(port));
2006 goto out; 2013 goto out;
2007 } 2014 }
2008 2015
2009 ret = sock->ops->listen(sock, 64); 2016 ret = sock->ops->listen(sock, 64);
2010 if (ret < 0) {
2011 mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
2012 &addr, ntohs(port), ret);
2017 if (ret < 0)
2018 printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
2019 ret, &addr, ntohs(port));
2013 }
2014 2020
2015out: 2021out:
2016 if (ret) { 2022 if (ret) {
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index fd6179eb26d..5bada2a69b5 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
106 struct list_head *unreg_list); 106 struct list_head *unreg_list);
107void o2net_unregister_handler_list(struct list_head *list); 107void o2net_unregister_handler_list(struct list_head *list);
108 108
109void o2net_fill_node_map(unsigned long *map, unsigned bytes);
110
109struct o2nm_node; 111struct o2nm_node;
110int o2net_register_hb_callbacks(void); 112int o2net_register_hb_callbacks(void);
111void o2net_unregister_hb_callbacks(void); 113void o2net_unregister_hb_callbacks(void);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e2878b5895f..8fe4e2892ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1184 if (pde) 1184 if (pde)
1185 le16_add_cpu(&pde->rec_len, 1185 le16_add_cpu(&pde->rec_len,
1186 le16_to_cpu(de->rec_len)); 1186 le16_to_cpu(de->rec_len));
1187 else
1188 de->inode = 0;
1187 de->inode = 0;
1189 dir->i_version++; 1188 dir->i_version++;
1190 ocfs2_journal_dirty(handle, bh); 1189 ocfs2_journal_dirty(handle, bh);
1191 goto bail; 1190 goto bail;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index d602abb51b6..a5952ceecba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
859void dlm_wait_for_recovery(struct dlm_ctxt *dlm); 859void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
860void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); 860void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
861int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); 861int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
862int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); 862void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
863int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); 863void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
864 864
865void dlm_put(struct dlm_ctxt *dlm); 865void dlm_put(struct dlm_ctxt *dlm);
866struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 866struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
877 kref_get(&res->refs); 877 kref_get(&res->refs);
878} 878}
879void dlm_lockres_put(struct dlm_lock_resource *res); 879void dlm_lockres_put(struct dlm_lock_resource *res);
880void __dlm_unhash_lockres(struct dlm_lock_resource *res); 880void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
881void __dlm_insert_lockres(struct dlm_ctxt *dlm, 881void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
882 struct dlm_lock_resource *res);
883struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, 882struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
884 const char *name, 883 const char *name,
885 unsigned int len, 884 unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
902 const char *name, 901 const char *name,
903 unsigned int namelen); 902 unsigned int namelen);
904 903
905#define dlm_lockres_set_refmap_bit(bit,res) \ 904void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
906 __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) 905 struct dlm_lock_resource *res, int bit);
907#define dlm_lockres_clear_refmap_bit(bit,res) \ 906void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
908 __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) 907 struct dlm_lock_resource *res, int bit);
909 908
910static inline void __dlm_lockres_set_refmap_bit(int bit, 909void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
911 struct dlm_lock_resource *res, 910 struct dlm_lock_resource *res);
912 const char *file, 911void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
913 int line) 912 struct dlm_lock_resource *res);
914{
915 //printk("%s:%d:%.*s: setting bit %d\n", file, line,
916 // res->lockname.len, res->lockname.name, bit);
917 set_bit(bit, res->refmap);
918}
919
920static inline void __dlm_lockres_clear_refmap_bit(int bit,
921 struct dlm_lock_resource *res,
922 const char *file,
923 int line)
924{
925 //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
926 // res->lockname.len, res->lockname.name, bit);
927 clear_bit(bit, res->refmap);
928}
929
930void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
931 struct dlm_lock_resource *res,
932 const char *file,
933 int line);
934void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
935 struct dlm_lock_resource *res,
936 int new_lockres,
937 const char *file,
938 int line);
939#define dlm_lockres_drop_inflight_ref(d,r) \
940 __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
941#define dlm_lockres_grab_inflight_ref(d,r) \
942 __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
943#define dlm_lockres_grab_inflight_ref_new(d,r) \
944 __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
945 913
946void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 914void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
947void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 915void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
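The dlmcommon.h hunk above drops the macro wrappers that smuggled __FILE__/__LINE__ into the refmap and inflight helpers and replaces them with ordinary functions whose argument order is (dlm, res, bit). As the dlmmaster.c hunks further down show, the new functions assert that res->spinlock is held, so a typical call site now looks like this sketch (node_idx stands in for whichever node number the caller is tracking):

        spin_lock(&res->spinlock);
        dlm_lockres_set_refmap_bit(dlm, res, node_idx); /* was: dlm_lockres_set_refmap_bit(node_idx, res) */
        spin_unlock(&res->spinlock);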
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf9..92f2ead0fab 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
157 157
158static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); 158static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
159 159
160void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) 160void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
161{ 161{
162 if (!hlist_unhashed(&lockres->hash_node)) { 162 if (hlist_unhashed(&res->hash_node))
163 hlist_del_init(&lockres->hash_node); 163 return;
164 dlm_lockres_put(lockres); 164
165 } 165 mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
166 res->lockname.name);
167 hlist_del_init(&res->hash_node);
168 dlm_lockres_put(res);
166} 169}
167 170
168void __dlm_insert_lockres(struct dlm_ctxt *dlm, 171void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
169 struct dlm_lock_resource *res)
170{ 172{
171 struct hlist_head *bucket; 173 struct hlist_head *bucket;
172 struct qstr *q; 174 struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
180 dlm_lockres_get(res); 182 dlm_lockres_get(res);
181 183
182 hlist_add_head(&res->hash_node, bucket); 184 hlist_add_head(&res->hash_node, bucket);
185
186 mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
187 res->lockname.name);
183} 188}
184 189
185struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, 190struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
539 544
540static void __dlm_print_nodes(struct dlm_ctxt *dlm) 545static void __dlm_print_nodes(struct dlm_ctxt *dlm)
541{ 546{
542 int node = -1; 547 int node = -1, num = 0;
543 548
544 assert_spin_locked(&dlm->spinlock); 549 assert_spin_locked(&dlm->spinlock);
545 550
546 printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name); 551 printk("( ");
547
548 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 552 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
549 node + 1)) < O2NM_MAX_NODES) { 553 node + 1)) < O2NM_MAX_NODES) {
550 printk("%d ", node); 554 printk("%d ", node);
555 ++num;
551 } 556 }
552 printk("\n"); 557 printk(") %u nodes\n", num);
553} 558}
554 559
555static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, 560static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
566 571
567 node = exit_msg->node_idx; 572 node = exit_msg->node_idx;
568 573
569 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
570
571 spin_lock(&dlm->spinlock); 574 spin_lock(&dlm->spinlock);
572 clear_bit(node, dlm->domain_map); 575 clear_bit(node, dlm->domain_map);
573 clear_bit(node, dlm->exit_domain_map); 576 clear_bit(node, dlm->exit_domain_map);
577 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
574 __dlm_print_nodes(dlm); 578 __dlm_print_nodes(dlm);
575 579
576 /* notify anything attached to the heartbeat events */ 580 /* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
755 759
756 dlm_mark_domain_leaving(dlm); 760 dlm_mark_domain_leaving(dlm);
757 dlm_leave_domain(dlm); 761 dlm_leave_domain(dlm);
762 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
758 dlm_force_free_mles(dlm); 763 dlm_force_free_mles(dlm);
759 dlm_complete_dlm_shutdown(dlm); 764 dlm_complete_dlm_shutdown(dlm);
760 } 765 }
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
970 clear_bit(assert->node_idx, dlm->exit_domain_map); 975 clear_bit(assert->node_idx, dlm->exit_domain_map);
971 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 976 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
972 977
973 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", 978 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
974 assert->node_idx, dlm->name); 979 assert->node_idx, dlm->name);
975 __dlm_print_nodes(dlm); 980 __dlm_print_nodes(dlm);
976 981
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1701bail: 1706bail:
1702 spin_lock(&dlm->spinlock); 1707 spin_lock(&dlm->spinlock);
1703 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 1708 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1704 if (!status) 1709 if (!status) {
1710 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
1705 __dlm_print_nodes(dlm); 1711 __dlm_print_nodes(dlm);
1712 }
1706 spin_unlock(&dlm->spinlock); 1713 spin_unlock(&dlm->spinlock);
1707 1714
1708 if (ctxt) { 1715 if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
2131 goto leave; 2138 goto leave;
2132 } 2139 }
2133 2140
2134 if (!o2hb_check_local_node_heartbeating()) {
2135 mlog(ML_ERROR, "the local node has not been configured, or is "
2136 "not heartbeating\n");
2137 ret = -EPROTO;
2138 goto leave;
2139 }
2140
2141 mlog(0, "register called for domain \"%s\"\n", domain); 2141 mlog(0, "register called for domain \"%s\"\n", domain);
2142 2142
2143retry: 2143retry:
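With the dlmdomain.c changes above, domain membership events are reported on one console line: the caller prints the event prefix without a trailing newline and __dlm_print_nodes() completes it with the member list and count. An illustrative resulting line, with made-up node numbers and domain name:

        o2dlm: Node 3 joins domain 0123456789ABCDEF ( 0 1 3 ) 3 nodes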
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..975810b9849 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
183 kick_thread = 1; 183 kick_thread = 1;
184 } 184 }
185 } 185 }
186 /* reduce the inflight count, this may result in the lockres
187 * being purged below during calc_usage */
188 if (lock->ml.node == dlm->node_num)
189 dlm_lockres_drop_inflight_ref(dlm, res);
190 186
191 spin_unlock(&res->spinlock); 187 spin_unlock(&res->spinlock);
192 wake_up(&res->wq); 188 wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
231 lock->ml.type, res->lockname.len, 227 lock->ml.type, res->lockname.len,
232 res->lockname.name, flags); 228 res->lockname.name, flags);
233 229
230 /*
231 * Wait if resource is getting recovered, remastered, etc.
232 * If the resource was remastered and new owner is self, then exit.
233 */
234 spin_lock(&res->spinlock); 234 spin_lock(&res->spinlock);
235
236 /* will exit this call with spinlock held */
237 __dlm_wait_on_lockres(res); 235 __dlm_wait_on_lockres(res);
236 if (res->owner == dlm->node_num) {
237 spin_unlock(&res->spinlock);
238 return DLM_RECOVERING;
239 }
238 res->state |= DLM_LOCK_RES_IN_PROGRESS; 240 res->state |= DLM_LOCK_RES_IN_PROGRESS;
239 241
240 /* add lock to local (secondary) queue */ 242 /* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
319 tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, 321 tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
320 sizeof(create), res->owner, &status); 322 sizeof(create), res->owner, &status);
321 if (tmpret >= 0) { 323 if (tmpret >= 0) {
322 // successfully sent and received 324 ret = status;
323 ret = status; // this is already a dlm_status
324 if (ret == DLM_REJECTED) { 325 if (ret == DLM_REJECTED) {
325 mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " 326 mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
326 "no longer owned by %u. that node is coming back " 327 "owned by node %u. That node is coming back up "
327 "up currently.\n", dlm->name, create.namelen, 328 "currently.\n", dlm->name, create.namelen,
328 create.name, res->owner); 329 create.name, res->owner);
329 dlm_print_one_lock_resource(res); 330 dlm_print_one_lock_resource(res);
330 BUG(); 331 BUG();
331 } 332 }
332 } else { 333 } else {
333 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 334 mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
334 "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, 335 "node %u\n", dlm->name, create.namelen, create.name,
335 res->owner); 336 tmpret, res->owner);
336 if (dlm_is_host_down(tmpret)) { 337 if (dlm_is_host_down(tmpret))
337 ret = DLM_RECOVERING; 338 ret = DLM_RECOVERING;
338 mlog(0, "node %u died so returning DLM_RECOVERING " 339 else
339 "from lock message!\n", res->owner);
340 } else {
341 ret = dlm_err_to_dlm_status(tmpret); 340 ret = dlm_err_to_dlm_status(tmpret);
342 }
343 } 341 }
344 342
345 return ret; 343 return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
440 /* zero memory only if kernel-allocated */ 438 /* zero memory only if kernel-allocated */
441 lksb = kzalloc(sizeof(*lksb), GFP_NOFS); 439 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
442 if (!lksb) { 440 if (!lksb) {
443 kfree(lock); 441 kmem_cache_free(dlm_lock_cache, lock);
444 return NULL; 442 return NULL;
445 } 443 }
446 kernel_allocated = 1; 444 kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
718 716
719 if (status == DLM_RECOVERING || status == DLM_MIGRATING || 717 if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
720 status == DLM_FORWARD) { 718 status == DLM_FORWARD) {
721 mlog(0, "retrying lock with migration/"
722 "recovery/in progress\n");
723 msleep(100); 719 msleep(100);
724 /* no waiting for dlm_reco_thread */
725 if (recovery) { 720 if (recovery) {
726 if (status != DLM_RECOVERING) 721 if (status != DLM_RECOVERING)
727 goto retry_lock; 722 goto retry_lock;
728
729 mlog(0, "%s: got RECOVERING "
730 "for $RECOVERY lock, master "
731 "was %u\n", dlm->name,
732 res->owner);
733 /* wait to see the node go down, then 723 /* wait to see the node go down, then
734 * drop down and allow the lockres to 724 * drop down and allow the lockres to
735 * get cleaned up. need to remaster. */ 725 * get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
741 } 731 }
742 } 732 }
743 733
734 /* Inflight taken in dlm_get_lock_resource() is dropped here */
735 spin_lock(&res->spinlock);
736 dlm_lockres_drop_inflight_ref(dlm, res);
737 spin_unlock(&res->spinlock);
738
739 dlm_lockres_calc_usage(dlm, res);
740 dlm_kick_thread(dlm, res);
741
744 if (status != DLM_NORMAL) { 742 if (status != DLM_NORMAL) {
745 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; 743 lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
746 if (status != DLM_NOTQUEUED) 744 if (status != DLM_NOTQUEUED)
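Two details in the dlmlock.c hunks above are worth noting: the dlm_new_lock() error path now returns the lock to dlm_lock_cache with kmem_cache_free() rather than kfree(), and the inflight reference taken in dlm_get_lock_resource() is dropped explicitly, under res->spinlock, once the retry loop in dlmlock() is finished. A generic, stand-alone sketch of the allocator pairing rule behind the first fix (struct foo and foo_cache are placeholders, not ocfs2 names):

        struct foo { int a; };
        struct kmem_cache *foo_cache;
        struct foo *p;

        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0, 0, NULL);
        p = kmem_cache_alloc(foo_cache, GFP_NOFS);
        if (p) {
                /* ... on an error path ... */
                kmem_cache_free(foo_cache, p);  /* correct: object came from a slab cache */
                /* kfree(p); would be wrong, p was not kmalloc()ed */
        }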
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e..005261c333b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
631 return NULL; 631 return NULL;
632} 632}
633 633
634void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 634void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
635 struct dlm_lock_resource *res, 635 struct dlm_lock_resource *res, int bit)
636 int new_lockres,
637 const char *file,
638 int line)
639{ 636{
640 if (!new_lockres) 637 assert_spin_locked(&res->spinlock);
641 assert_spin_locked(&res->spinlock); 638
639 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
640 res->lockname.name, bit, __builtin_return_address(0));
641
642 set_bit(bit, res->refmap);
643}
644
645void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
646 struct dlm_lock_resource *res, int bit)
647{
648 assert_spin_locked(&res->spinlock);
649
650 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
651 res->lockname.name, bit, __builtin_return_address(0));
652
653 clear_bit(bit, res->refmap);
654}
655
656
657void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
658 struct dlm_lock_resource *res)
659{
660 assert_spin_locked(&res->spinlock);
642 661
643 if (!test_bit(dlm->node_num, res->refmap)) {
644 BUG_ON(res->inflight_locks != 0);
645 dlm_lockres_set_refmap_bit(dlm->node_num, res);
646 }
647 res->inflight_locks++; 662 res->inflight_locks++;
648 mlog(0, "%s:%.*s: inflight++: now %u\n", 663
649 dlm->name, res->lockname.len, res->lockname.name, 664 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
650 res->inflight_locks); 665 res->lockname.len, res->lockname.name, res->inflight_locks,
666 __builtin_return_address(0));
651} 667}
652 668
653void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 669void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
654 struct dlm_lock_resource *res, 670 struct dlm_lock_resource *res)
655 const char *file,
656 int line)
657{ 671{
658 assert_spin_locked(&res->spinlock); 672 assert_spin_locked(&res->spinlock);
659 673
660 BUG_ON(res->inflight_locks == 0); 674 BUG_ON(res->inflight_locks == 0);
675
661 res->inflight_locks--; 676 res->inflight_locks--;
662 mlog(0, "%s:%.*s: inflight--: now %u\n", 677
663 dlm->name, res->lockname.len, res->lockname.name, 678 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
664 res->inflight_locks); 679 res->lockname.len, res->lockname.name, res->inflight_locks,
665 if (res->inflight_locks == 0) 680 __builtin_return_address(0));
666 dlm_lockres_clear_refmap_bit(dlm->node_num, res); 681
667 wake_up(&res->wq); 682 wake_up(&res->wq);
668} 683}
669 684
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
697 unsigned int hash; 712 unsigned int hash;
698 int tries = 0; 713 int tries = 0;
699 int bit, wait_on_recovery = 0; 714 int bit, wait_on_recovery = 0;
700 int drop_inflight_if_nonlocal = 0;
701 715
702 BUG_ON(!lockid); 716 BUG_ON(!lockid);
703 717
@@ -709,36 +723,33 @@ lookup:
709 spin_lock(&dlm->spinlock); 723 spin_lock(&dlm->spinlock);
710 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 724 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
711 if (tmpres) { 725 if (tmpres) {
712 int dropping_ref = 0;
713
714 spin_unlock(&dlm->spinlock); 726 spin_unlock(&dlm->spinlock);
715
716 spin_lock(&tmpres->spinlock); 727 spin_lock(&tmpres->spinlock);
717 /* We wait for the other thread that is mastering the resource */ 728 /* Wait on the thread that is mastering the resource */
718 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 729 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
719 __dlm_wait_on_lockres(tmpres); 730 __dlm_wait_on_lockres(tmpres);
720 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 731 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
732 spin_unlock(&tmpres->spinlock);
733 dlm_lockres_put(tmpres);
734 tmpres = NULL;
735 goto lookup;
721 } 736 }
722 737
723 if (tmpres->owner == dlm->node_num) { 738 /* Wait on the resource purge to complete before continuing */
724 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); 739 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
725 dlm_lockres_grab_inflight_ref(dlm, tmpres); 740 BUG_ON(tmpres->owner == dlm->node_num);
726 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) 741 __dlm_wait_on_lockres_flags(tmpres,
727 dropping_ref = 1; 742 DLM_LOCK_RES_DROPPING_REF);
728 spin_unlock(&tmpres->spinlock);
729
730 /* wait until done messaging the master, drop our ref to allow
731 * the lockres to be purged, start over. */
732 if (dropping_ref) {
733 spin_lock(&tmpres->spinlock);
734 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
735 spin_unlock(&tmpres->spinlock); 743 spin_unlock(&tmpres->spinlock);
736 dlm_lockres_put(tmpres); 744 dlm_lockres_put(tmpres);
737 tmpres = NULL; 745 tmpres = NULL;
738 goto lookup; 746 goto lookup;
739 } 747 }
740 748
741 mlog(0, "found in hash!\n"); 749 /* Grab inflight ref to pin the resource */
750 dlm_lockres_grab_inflight_ref(dlm, tmpres);
751
752 spin_unlock(&tmpres->spinlock);
742 if (res) 753 if (res)
743 dlm_lockres_put(res); 754 dlm_lockres_put(res);
744 res = tmpres; 755 res = tmpres;
@@ -829,8 +840,8 @@ lookup:
829 * but they might own this lockres. wait on them. */ 840 * but they might own this lockres. wait on them. */
830 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 841 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
831 if (bit < O2NM_MAX_NODES) { 842 if (bit < O2NM_MAX_NODES) {
832 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 843 mlog(0, "%s: res %.*s, At least one node (%d) "
833 "recover before lock mastery can begin\n", 844 "to recover before lock mastery can begin\n",
834 dlm->name, namelen, (char *)lockid, bit); 845 dlm->name, namelen, (char *)lockid, bit);
835 wait_on_recovery = 1; 846 wait_on_recovery = 1;
836 } 847 }
@@ -843,12 +854,11 @@ lookup:
843 854
844 /* finally add the lockres to its hash bucket */ 855 /* finally add the lockres to its hash bucket */
845 __dlm_insert_lockres(dlm, res); 856 __dlm_insert_lockres(dlm, res);
846 /* since this lockres is new it doesn't not require the spinlock */
847 dlm_lockres_grab_inflight_ref_new(dlm, res);
848 857
849 /* if this node does not become the master make sure to drop 858 /* Grab inflight ref to pin the resource */
850 * this inflight reference below */ 859 spin_lock(&res->spinlock);
851 drop_inflight_if_nonlocal = 1; 860 dlm_lockres_grab_inflight_ref(dlm, res);
861 spin_unlock(&res->spinlock);
852 862
853 /* get an extra ref on the mle in case this is a BLOCK 863 /* get an extra ref on the mle in case this is a BLOCK
854 * if so, the creator of the BLOCK may try to put the last 864 * if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
864 * dlm spinlock would be detectable be a change on the mle, 874 * dlm spinlock would be detectable be a change on the mle,
865 * so we only need to clear out the recovery map once. */ 875 * so we only need to clear out the recovery map once. */
866 if (dlm_is_recovery_lock(lockid, namelen)) { 876 if (dlm_is_recovery_lock(lockid, namelen)) {
867 mlog(ML_NOTICE, "%s: recovery map is not empty, but " 877 mlog(0, "%s: Recovery map is not empty, but must "
868 "must master $RECOVERY lock now\n", dlm->name); 878 "master $RECOVERY lock now\n", dlm->name);
869 if (!dlm_pre_master_reco_lockres(dlm, res)) 879 if (!dlm_pre_master_reco_lockres(dlm, res))
870 wait_on_recovery = 0; 880 wait_on_recovery = 0;
871 else { 881 else {
@@ -883,8 +893,8 @@ redo_request:
883 spin_lock(&dlm->spinlock); 893 spin_lock(&dlm->spinlock);
884 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 894 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
885 if (bit < O2NM_MAX_NODES) { 895 if (bit < O2NM_MAX_NODES) {
886 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 896 mlog(0, "%s: res %.*s, At least one node (%d) "
887 "recover before lock mastery can begin\n", 897 "to recover before lock mastery can begin\n",
888 dlm->name, namelen, (char *)lockid, bit); 898 dlm->name, namelen, (char *)lockid, bit);
889 wait_on_recovery = 1; 899 wait_on_recovery = 1;
890 } else 900 } else
@@ -913,8 +923,8 @@ redo_request:
913 * yet, keep going until it does. this is how the 923 * yet, keep going until it does. this is how the
914 * master will know that asserts are needed back to 924 * master will know that asserts are needed back to
915 * the lower nodes. */ 925 * the lower nodes. */
916 mlog(0, "%s:%.*s: requests only up to %u but master " 926 mlog(0, "%s: res %.*s, Requests only up to %u but "
917 "is %u, keep going\n", dlm->name, namelen, 927 "master is %u, keep going\n", dlm->name, namelen,
918 lockid, nodenum, mle->master); 928 lockid, nodenum, mle->master);
919 } 929 }
920 } 930 }
@@ -924,13 +934,12 @@ wait:
924 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 934 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
925 if (ret < 0) { 935 if (ret < 0) {
926 wait_on_recovery = 1; 936 wait_on_recovery = 1;
927 mlog(0, "%s:%.*s: node map changed, redo the " 937 mlog(0, "%s: res %.*s, Node map changed, redo the master "
928 "master request now, blocked=%d\n", 938 "request now, blocked=%d\n", dlm->name, res->lockname.len,
929 dlm->name, res->lockname.len,
930 res->lockname.name, blocked); 939 res->lockname.name, blocked);
931 if (++tries > 20) { 940 if (++tries > 20) {
932 mlog(ML_ERROR, "%s:%.*s: spinning on " 941 mlog(ML_ERROR, "%s: res %.*s, Spinning on "
933 "dlm_wait_for_lock_mastery, blocked=%d\n", 942 "dlm_wait_for_lock_mastery, blocked = %d\n",
934 dlm->name, res->lockname.len, 943 dlm->name, res->lockname.len,
935 res->lockname.name, blocked); 944 res->lockname.name, blocked);
936 dlm_print_one_lock_resource(res); 945 dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
940 goto redo_request; 949 goto redo_request;
941 } 950 }
942 951
943 mlog(0, "lockres mastered by %u\n", res->owner); 952 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
953 res->lockname.name, res->owner);
944 /* make sure we never continue without this */ 954 /* make sure we never continue without this */
945 BUG_ON(res->owner == O2NM_MAX_NODES); 955 BUG_ON(res->owner == O2NM_MAX_NODES);
946 956
@@ -952,8 +962,6 @@ wait:
952 962
953wake_waiters: 963wake_waiters:
954 spin_lock(&res->spinlock); 964 spin_lock(&res->spinlock);
955 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
956 dlm_lockres_drop_inflight_ref(dlm, res);
957 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 965 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
958 spin_unlock(&res->spinlock); 966 spin_unlock(&res->spinlock);
959 wake_up(&res->wq); 967 wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
1426 } 1434 }
1427 1435
1428 if (res->owner == dlm->node_num) { 1436 if (res->owner == dlm->node_num) {
1429 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1437 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1430 dlm->name, namelen, name, request->node_idx);
1431 dlm_lockres_set_refmap_bit(request->node_idx, res);
1432 spin_unlock(&res->spinlock); 1438 spin_unlock(&res->spinlock);
1433 response = DLM_MASTER_RESP_YES; 1439 response = DLM_MASTER_RESP_YES;
1434 if (mle) 1440 if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
1493 * go back and clean the mles on any 1499 * go back and clean the mles on any
1494 * other nodes */ 1500 * other nodes */
1495 dispatch_assert = 1; 1501 dispatch_assert = 1;
1496 dlm_lockres_set_refmap_bit(request->node_idx, res); 1502 dlm_lockres_set_refmap_bit(dlm, res,
1497 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1503 request->node_idx);
1498 dlm->name, namelen, name,
1499 request->node_idx);
1500 } else 1504 } else
1501 response = DLM_MASTER_RESP_NO; 1505 response = DLM_MASTER_RESP_NO;
1502 } else { 1506 } else {
@@ -1702,7 +1706,7 @@ again:
1702 "lockres, set the bit in the refmap\n", 1706 "lockres, set the bit in the refmap\n",
1703 namelen, lockname, to); 1707 namelen, lockname, to);
1704 spin_lock(&res->spinlock); 1708 spin_lock(&res->spinlock);
1705 dlm_lockres_set_refmap_bit(to, res); 1709 dlm_lockres_set_refmap_bit(dlm, res, to);
1706 spin_unlock(&res->spinlock); 1710 spin_unlock(&res->spinlock);
1707 } 1711 }
1708 } 1712 }
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2187 namelen = res->lockname.len; 2191 namelen = res->lockname.len;
2188 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2192 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2189 2193
2190 mlog(0, "%s:%.*s: sending deref to %d\n",
2191 dlm->name, namelen, lockname, res->owner);
2192 memset(&deref, 0, sizeof(deref)); 2194 memset(&deref, 0, sizeof(deref));
2193 deref.node_idx = dlm->node_num; 2195 deref.node_idx = dlm->node_num;
2194 deref.namelen = namelen; 2196 deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2197 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2199 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2198 &deref, sizeof(deref), res->owner, &r); 2200 &deref, sizeof(deref), res->owner, &r);
2199 if (ret < 0) 2201 if (ret < 0)
2200 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 2202 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2201 "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key, 2203 dlm->name, namelen, lockname, ret, res->owner);
2202 res->owner);
2203 else if (r < 0) { 2204 else if (r < 0) {
2204 /* BAD. other node says I did not have a ref. */ 2205 /* BAD. other node says I did not have a ref. */
2205 mlog(ML_ERROR,"while dropping ref on %s:%.*s " 2206 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2206 "(master=%u) got %d.\n", dlm->name, namelen, 2207 dlm->name, namelen, lockname, res->owner, r);
2207 lockname, res->owner, r);
2208 dlm_print_one_lock_resource(res); 2208 dlm_print_one_lock_resource(res);
2209 BUG(); 2209 BUG();
2210 } 2210 }
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2260 else { 2260 else {
2261 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2261 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2262 if (test_bit(node, res->refmap)) { 2262 if (test_bit(node, res->refmap)) {
2263 dlm_lockres_clear_refmap_bit(node, res); 2263 dlm_lockres_clear_refmap_bit(dlm, res, node);
2264 cleared = 1; 2264 cleared = 1;
2265 } 2265 }
2266 } 2266 }
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2320 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2320 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2321 if (test_bit(node, res->refmap)) { 2321 if (test_bit(node, res->refmap)) {
2322 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2322 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2323 dlm_lockres_clear_refmap_bit(node, res); 2323 dlm_lockres_clear_refmap_bit(dlm, res, node);
2324 cleared = 1; 2324 cleared = 1;
2325 } 2325 }
2326 spin_unlock(&res->spinlock); 2326 spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2802 BUG_ON(!list_empty(&lock->bast_list)); 2802 BUG_ON(!list_empty(&lock->bast_list));
2803 BUG_ON(lock->ast_pending); 2803 BUG_ON(lock->ast_pending);
2804 BUG_ON(lock->bast_pending); 2804 BUG_ON(lock->bast_pending);
2805 dlm_lockres_clear_refmap_bit(lock->ml.node, res); 2805 dlm_lockres_clear_refmap_bit(dlm, res,
2806 lock->ml.node);
2806 list_del_init(&lock->list); 2807 list_del_init(&lock->list);
2807 dlm_lock_put(lock); 2808 dlm_lock_put(lock);
2808 /* In a normal unlock, we would have added a 2809 /* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2823 mlog(0, "%s:%.*s: node %u had a ref to this " 2824 mlog(0, "%s:%.*s: node %u had a ref to this "
2824 "migrating lockres, clearing\n", dlm->name, 2825 "migrating lockres, clearing\n", dlm->name,
2825 res->lockname.len, res->lockname.name, bit); 2826 res->lockname.len, res->lockname.name, bit);
2826 dlm_lockres_clear_refmap_bit(bit, res); 2827 dlm_lockres_clear_refmap_bit(dlm, res, bit);
2827 } 2828 }
2828 bit++; 2829 bit++;
2829 } 2830 }
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2916 &migrate, sizeof(migrate), nodenum, 2917 &migrate, sizeof(migrate), nodenum,
2917 &status); 2918 &status);
2918 if (ret < 0) { 2919 if (ret < 0) {
2919 mlog(ML_ERROR, "Error %d when sending message %u (key " 2920 mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2920 "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG, 2921 "MIGRATE_REQUEST to node %u\n", dlm->name,
2921 dlm->key, nodenum); 2922 migrate.namelen, migrate.name, ret, nodenum);
2922 if (!dlm_is_host_down(ret)) { 2923 if (!dlm_is_host_down(ret)) {
2923 mlog(ML_ERROR, "unhandled error=%d!\n", ret); 2924 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2924 BUG(); 2925 BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2937 dlm->name, res->lockname.len, res->lockname.name, 2938 dlm->name, res->lockname.len, res->lockname.name,
2938 nodenum); 2939 nodenum);
2939 spin_lock(&res->spinlock); 2940 spin_lock(&res->spinlock);
2940 dlm_lockres_set_refmap_bit(nodenum, res); 2941 dlm_lockres_set_refmap_bit(dlm, res, nodenum);
2941 spin_unlock(&res->spinlock); 2942 spin_unlock(&res->spinlock);
2942 } 2943 }
2943 } 2944 }
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3271 * mastery reference here since old_master will briefly have 3272 * mastery reference here since old_master will briefly have
3272 * a reference after the migration completes */ 3273 * a reference after the migration completes */
3273 spin_lock(&res->spinlock); 3274 spin_lock(&res->spinlock);
3274 dlm_lockres_set_refmap_bit(old_master, res); 3275 dlm_lockres_set_refmap_bit(dlm, res, old_master);
3275 spin_unlock(&res->spinlock); 3276 spin_unlock(&res->spinlock);
3276 3277
3277 mlog(0, "now time to do a migrate request to other nodes\n"); 3278 mlog(0, "now time to do a migrate request to other nodes\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a2..01ebfd0bdad 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
362} 362}
363 363
364 364
365int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 365void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
366{ 366{
367 if (timeout) { 367 if (dlm_is_node_dead(dlm, node))
368 mlog(ML_NOTICE, "%s: waiting %dms for notification of " 368 return;
369 "death of node %u\n", dlm->name, timeout, node); 369
370 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
371 "domain %s\n", node, dlm->name);
372
373 if (timeout)
370 wait_event_timeout(dlm->dlm_reco_thread_wq, 374 wait_event_timeout(dlm->dlm_reco_thread_wq,
371 dlm_is_node_dead(dlm, node), 375 dlm_is_node_dead(dlm, node),
372 msecs_to_jiffies(timeout)); 376 msecs_to_jiffies(timeout));
373 } else { 377 else
374 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
375 "of death of node %u\n", dlm->name, node);
376 wait_event(dlm->dlm_reco_thread_wq, 378 wait_event(dlm->dlm_reco_thread_wq,
377 dlm_is_node_dead(dlm, node)); 379 dlm_is_node_dead(dlm, node));
378 }
379 /* for now, return 0 */
380 return 0;
381} 380}
382 381
383int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 382void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
384{ 383{
385 if (timeout) { 384 if (dlm_is_node_recovered(dlm, node))
386 mlog(0, "%s: waiting %dms for notification of " 385 return;
387 "recovery of node %u\n", dlm->name, timeout, node); 386
387 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
388 "domain %s\n", node, dlm->name);
389
390 if (timeout)
388 wait_event_timeout(dlm->dlm_reco_thread_wq, 391 wait_event_timeout(dlm->dlm_reco_thread_wq,
389 dlm_is_node_recovered(dlm, node), 392 dlm_is_node_recovered(dlm, node),
390 msecs_to_jiffies(timeout)); 393 msecs_to_jiffies(timeout));
391 } else { 394 else
392 mlog(0, "%s: waiting indefinitely for notification "
393 "of recovery of node %u\n", dlm->name, node);
394 wait_event(dlm->dlm_reco_thread_wq, 395 wait_event(dlm->dlm_reco_thread_wq,
395 dlm_is_node_recovered(dlm, node)); 396 dlm_is_node_recovered(dlm, node));
396 }
397 /* for now, return 0 */
398 return 0;
399} 397}
400 398
401/* callers of the top-level api calls (dlmlock/dlmunlock) should 399/* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
430{ 428{
431 spin_lock(&dlm->spinlock); 429 spin_lock(&dlm->spinlock);
432 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 430 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
431 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
432 dlm->name, dlm->reco.dead_node);
433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
434 spin_unlock(&dlm->spinlock); 434 spin_unlock(&dlm->spinlock);
435} 435}
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
442 spin_unlock(&dlm->spinlock); 442 spin_unlock(&dlm->spinlock);
443 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
443 wake_up(&dlm->reco.event); 444 wake_up(&dlm->reco.event);
444} 445}
445 446
447static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
448{
449 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
450 "dead node %u in domain %s\n", dlm->reco.new_master,
451 (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
452 dlm->reco.dead_node, dlm->name);
453}
454
446static int dlm_do_recovery(struct dlm_ctxt *dlm) 455static int dlm_do_recovery(struct dlm_ctxt *dlm)
447{ 456{
448 int status = 0; 457 int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
505 } 514 }
506 mlog(0, "another node will master this recovery session.\n"); 515 mlog(0, "another node will master this recovery session.\n");
507 } 516 }
508 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", 517
509 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, 518 dlm_print_recovery_master(dlm);
510 dlm->node_num, dlm->reco.dead_node);
511 519
512 /* it is safe to start everything back up here 520 /* it is safe to start everything back up here
513 * because all of the dead node's lock resources 521 * because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
518 return 0; 526 return 0;
519 527
520master_here: 528master_here:
521 mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " 529 dlm_print_recovery_master(dlm);
522 "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
523 dlm->node_num, dlm->reco.dead_node, dlm->name);
524 530
525 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 531 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
526 if (status < 0) { 532 if (status < 0) {
527 /* we should never hit this anymore */ 533 /* we should never hit this anymore */
528 mlog(ML_ERROR, "error %d remastering locks for node %u, " 534 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
529 "retrying.\n", status, dlm->reco.dead_node); 535 "retrying.\n", dlm->name, status, dlm->reco.dead_node);
530 /* yield a bit to allow any final network messages 536 /* yield a bit to allow any final network messages
531 * to get handled on remaining nodes */ 537 * to get handled on remaining nodes */
532 msleep(100); 538 msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
567 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 573 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
568 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 574 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
569 575
570 mlog(0, "requesting lock info from node %u\n", 576 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
571 ndata->node_num); 577 ndata->node_num);
572 578
573 if (ndata->node_num == dlm->node_num) { 579 if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
640 spin_unlock(&dlm_reco_state_lock); 646 spin_unlock(&dlm_reco_state_lock);
641 } 647 }
642 648
643 mlog(0, "done requesting all lock info\n"); 649 mlog(0, "%s: Done requesting all lock info\n", dlm->name);
644 650
645 /* nodes should be sending reco data now 651 /* nodes should be sending reco data now
646 * just need to wait */ 652 * just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
802 808
803 /* negative status is handled by caller */ 809 /* negative status is handled by caller */
804 if (ret < 0) 810 if (ret < 0)
805 mlog(ML_ERROR, "Error %d when sending message %u (key " 811 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
806 "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, 812 "to recover dead node %u\n", dlm->name, ret,
807 dlm->key, request_from); 813 request_from, dead_node);
808
809 // return from here, then 814 // return from here, then
810 // sleep until all received or error 815 // sleep until all received or error
811 return ret; 816 return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
956 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 961 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
957 sizeof(done_msg), send_to, &tmpret); 962 sizeof(done_msg), send_to, &tmpret);
958 if (ret < 0) { 963 if (ret < 0) {
959 mlog(ML_ERROR, "Error %d when sending message %u (key " 964 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
960 "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, 965 "to recover dead node %u\n", dlm->name, ret, send_to,
961 dlm->key, send_to); 966 dead_node);
962 if (!dlm_is_host_down(ret)) { 967 if (!dlm_is_host_down(ret)) {
963 BUG(); 968 BUG();
964 } 969 }
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1127 if (ret < 0) { 1132 if (ret < 0) {
1128 /* XXX: negative status is not handled. 1133 /* XXX: negative status is not handled.
1129 * this will end up killing this node. */ 1134 * this will end up killing this node. */
1130 mlog(ML_ERROR, "Error %d when sending message %u (key " 1135 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1131 "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, 1136 "node %u (%s)\n", dlm->name, mres->lockname_len,
1132 dlm->key, send_to); 1137 mres->lockname, ret, send_to,
1138 (orig_flags & DLM_MRES_MIGRATION ?
1139 "migration" : "recovery"));
1133 } else { 1140 } else {
1134 /* might get an -ENOMEM back here */ 1141 /* might get an -ENOMEM back here */
1135 ret = status; 1142 ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1767 dlm->name, mres->lockname_len, mres->lockname, 1774 dlm->name, mres->lockname_len, mres->lockname,
1768 from); 1775 from);
1769 spin_lock(&res->spinlock); 1776 spin_lock(&res->spinlock);
1770 dlm_lockres_set_refmap_bit(from, res); 1777 dlm_lockres_set_refmap_bit(dlm, res, from);
1771 spin_unlock(&res->spinlock); 1778 spin_unlock(&res->spinlock);
1772 added++; 1779 added++;
1773 break; 1780 break;
@@ -1965,7 +1972,7 @@ skip_lvb:
1965 mlog(0, "%s:%.*s: added lock for node %u, " 1972 mlog(0, "%s:%.*s: added lock for node %u, "
1966 "setting refmap bit\n", dlm->name, 1973 "setting refmap bit\n", dlm->name,
1967 res->lockname.len, res->lockname.name, ml->node); 1974 res->lockname.len, res->lockname.name, ml->node);
1968 dlm_lockres_set_refmap_bit(ml->node, res); 1975 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
1969 added++; 1976 added++;
1970 } 1977 }
1971 spin_unlock(&res->spinlock); 1978 spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2084 2091
2085 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2092 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2086 if (res->owner == dead_node) { 2093 if (res->owner == dead_node) {
2094 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2095 dlm->name, res->lockname.len, res->lockname.name,
2096 res->owner, new_master);
2087 list_del_init(&res->recovering); 2097 list_del_init(&res->recovering);
2088 spin_lock(&res->spinlock); 2098 spin_lock(&res->spinlock);
2089 /* new_master has our reference from 2099 /* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2105 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2115 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2106 bucket = dlm_lockres_hash(dlm, i); 2116 bucket = dlm_lockres_hash(dlm, i);
2107 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 2117 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2108 if (res->state & DLM_LOCK_RES_RECOVERING) { 2118 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2109 if (res->owner == dead_node) { 2119 continue;
2110 mlog(0, "(this=%u) res %.*s owner=%u "
2111 "was not on recovering list, but "
2112 "clearing state anyway\n",
2113 dlm->node_num, res->lockname.len,
2114 res->lockname.name, new_master);
2115 } else if (res->owner == dlm->node_num) {
2116 mlog(0, "(this=%u) res %.*s owner=%u "
2117 "was not on recovering list, "
2118 "owner is THIS node, clearing\n",
2119 dlm->node_num, res->lockname.len,
2120 res->lockname.name, new_master);
2121 } else
2122 continue;
2123 2120
2124 if (!list_empty(&res->recovering)) { 2121 if (res->owner != dead_node &&
2125 mlog(0, "%s:%.*s: lockres was " 2122 res->owner != dlm->node_num)
2126 "marked RECOVERING, owner=%u\n", 2123 continue;
2127 dlm->name, res->lockname.len, 2124
2128 res->lockname.name, res->owner); 2125 if (!list_empty(&res->recovering)) {
2129 list_del_init(&res->recovering); 2126 list_del_init(&res->recovering);
2130 dlm_lockres_put(res); 2127 dlm_lockres_put(res);
2131 }
2132 spin_lock(&res->spinlock);
2133 /* new_master has our reference from
2134 * the lock state sent during recovery */
2135 dlm_change_lockres_owner(dlm, res, new_master);
2136 res->state &= ~DLM_LOCK_RES_RECOVERING;
2137 if (__dlm_lockres_has_locks(res))
2138 __dlm_dirty_lockres(dlm, res);
2139 spin_unlock(&res->spinlock);
2140 wake_up(&res->wq);
2141 } 2128 }
2129
2130 /* new_master has our reference from
2131 * the lock state sent during recovery */
2132 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2133 dlm->name, res->lockname.len, res->lockname.name,
2134 res->owner, new_master);
2135 spin_lock(&res->spinlock);
2136 dlm_change_lockres_owner(dlm, res, new_master);
2137 res->state &= ~DLM_LOCK_RES_RECOVERING;
2138 if (__dlm_lockres_has_locks(res))
2139 __dlm_dirty_lockres(dlm, res);
2140 spin_unlock(&res->spinlock);
2141 wake_up(&res->wq);
2142 } 2142 }
2143 } 2143 }
2144} 2144}
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2252 res->lockname.len, res->lockname.name, freed, dead_node); 2252 res->lockname.len, res->lockname.name, freed, dead_node);
2253 __dlm_print_one_lock_resource(res); 2253 __dlm_print_one_lock_resource(res);
2254 } 2254 }
2255 dlm_lockres_clear_refmap_bit(dead_node, res); 2255 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2256 } else if (test_bit(dead_node, res->refmap)) { 2256 } else if (test_bit(dead_node, res->refmap)) {
2257 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2257 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2258 "no locks and had not purged before dying\n", dlm->name, 2258 "no locks and had not purged before dying\n", dlm->name,
2259 res->lockname.len, res->lockname.name, dead_node); 2259 res->lockname.len, res->lockname.name, dead_node);
2260 dlm_lockres_clear_refmap_bit(dead_node, res); 2260 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2261 } 2261 }
2262 2262
2263 /* do not kick thread yet */ 2263 /* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2324 dlm_revalidate_lvb(dlm, res, dead_node); 2324 dlm_revalidate_lvb(dlm, res, dead_node);
2325 if (res->owner == dead_node) { 2325 if (res->owner == dead_node) {
2326 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 2326 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2327 mlog(ML_NOTICE, "Ignore %.*s for " 2327 mlog(ML_NOTICE, "%s: res %.*s, Skip "
2328 "recovery as it is being freed\n", 2328 "recovery as it is being freed\n",
2329 res->lockname.len, 2329 dlm->name, res->lockname.len,
2330 res->lockname.name); 2330 res->lockname.name);
2331 } else 2331 } else
2332 dlm_move_lockres_to_recovery_list(dlm, 2332 dlm_move_lockres_to_recovery_list(dlm,
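dlm_wait_for_node_death() and dlm_wait_for_node_recovery() now return void, bail out immediately if the node is already dead or recovered, and print a single o2dlm notice instead of per-call mlog chatter. Callers just invoke them and carry on, as in this sketch (the 5000 ms value is illustrative; a timeout of 0 waits indefinitely):

        dlm_wait_for_node_death(dlm, dead_node, 5000);
        dlm_wait_for_node_recovery(dlm, dead_node, 0);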
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c47..e73c833fc2a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
94{ 94{
95 int bit; 95 int bit;
96 96
97 assert_spin_locked(&res->spinlock);
98
97 if (__dlm_lockres_has_locks(res)) 99 if (__dlm_lockres_has_locks(res))
98 return 0; 100 return 0;
99 101
102 /* Locks are in the process of being created */
103 if (res->inflight_locks)
104 return 0;
105
100 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) 106 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
101 return 0; 107 return 0;
102 108
103 if (res->state & DLM_LOCK_RES_RECOVERING) 109 if (res->state & DLM_LOCK_RES_RECOVERING)
104 return 0; 110 return 0;
105 111
112 /* Another node has this resource with this node as the master */
106 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 113 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
107 if (bit < O2NM_MAX_NODES) 114 if (bit < O2NM_MAX_NODES)
108 return 0; 115 return 0;
109 116
110 /*
111 * since the bit for dlm->node_num is not set, inflight_locks better
112 * be zero
113 */
114 BUG_ON(res->inflight_locks != 0);
115 return 1; 117 return 1;
116} 118}
117 119
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
185 /* clear our bit from the master's refmap, ignore errors */ 187 /* clear our bit from the master's refmap, ignore errors */
186 ret = dlm_drop_lockres_ref(dlm, res); 188 ret = dlm_drop_lockres_ref(dlm, res);
187 if (ret < 0) { 189 if (ret < 0) {
188 mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
189 res->lockname.len, res->lockname.name, ret);
190 if (!dlm_is_host_down(ret)) 190 if (!dlm_is_host_down(ret))
191 BUG(); 191 BUG();
192 } 192 }
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
209 BUG(); 209 BUG();
210 } 210 }
211 211
212 __dlm_unhash_lockres(res); 212 __dlm_unhash_lockres(dlm, res);
213 213
214 /* lockres is not in the hash now. drop the flag and wake up 214 /* lockres is not in the hash now. drop the flag and wake up
215 * any processes waiting in dlm_get_lock_resource. */ 215 * any processes waiting in dlm_get_lock_resource. */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e1ed5e502ff..81a4cd22f80 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
1692 mlog(0, "inode %llu take PRMODE open lock\n", 1692 mlog(0, "inode %llu take PRMODE open lock\n",
1693 (unsigned long long)OCFS2_I(inode)->ip_blkno); 1693 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1694 1694
1695 if (ocfs2_mount_local(osb)) 1695 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1696 goto out; 1696 goto out;
1697 1697
1698 lockres = &OCFS2_I(inode)->ip_open_lockres; 1698 lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
1718 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1718 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1719 write ? "EXMODE" : "PRMODE"); 1719 write ? "EXMODE" : "PRMODE");
1720 1720
1721 if (ocfs2_is_hard_readonly(osb)) {
1722 if (write)
1723 status = -EROFS;
1724 goto out;
1725 }
1726
1721 if (ocfs2_mount_local(osb)) 1727 if (ocfs2_mount_local(osb))
1722 goto out; 1728 goto out;
1723 1729
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
2298 if (ocfs2_is_hard_readonly(osb)) { 2304 if (ocfs2_is_hard_readonly(osb)) {
2299 if (ex) 2305 if (ex)
2300 status = -EROFS; 2306 status = -EROFS;
2301 goto bail; 2307 goto getbh;
2302 } 2308 }
2303 2309
2304 if (ocfs2_mount_local(osb)) 2310 if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
2356 mlog_errno(status); 2362 mlog_errno(status);
2357 goto bail; 2363 goto bail;
2358 } 2364 }
2359 2365getbh:
2360 if (ret_bh) { 2366 if (ret_bh) {
2361 status = ocfs2_assign_bh(inode, ret_bh, local_bh); 2367 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2362 if (status < 0) { 2368 if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2628 2634
2629 BUG_ON(!dl); 2635 BUG_ON(!dl);
2630 2636
2631 if (ocfs2_is_hard_readonly(osb)) 2637 if (ocfs2_is_hard_readonly(osb)) {
2632 return -EROFS; 2638 if (ex)
2639 return -EROFS;
2640 return 0;
2641 }
2633 2642
2634 if (ocfs2_mount_local(osb)) 2643 if (ocfs2_mount_local(osb))
2635 return 0; 2644 return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2647 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 2656 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2648 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 2657 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2649 2658
2650 if (!ocfs2_mount_local(osb)) 2659 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2651 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); 2660 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2652} 2661}
2653 2662
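The dlmglue.c hunks above make the cluster-lock helpers tolerate hard-readonly mounts: requests for shared (read) access succeed without taking a cluster lock, while requests for exclusive (write) access fail with -EROFS. Roughly the same guard is repeated in each helper; a sketch of the pattern, assuming osb and the ex/write flag are already in scope:

        if (ocfs2_is_hard_readonly(osb)) {
                if (ex)
                        return -EROFS;  /* cannot grant exclusive access */
                return 0;               /* read access needs no cluster lock */
        }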
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 23457b491e8..2f5b92ef0e5 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,6 +832,102 @@ out:
832 return ret; 832 return ret;
833} 833}
834 834
835int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
836{
837 struct inode *inode = file->f_mapping->host;
838 int ret;
839 unsigned int is_last = 0, is_data = 0;
840 u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
841 u32 cpos, cend, clen, hole_size;
842 u64 extoff, extlen;
843 struct buffer_head *di_bh = NULL;
844 struct ocfs2_extent_rec rec;
845
846 BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
847
848 ret = ocfs2_inode_lock(inode, &di_bh, 0);
849 if (ret) {
850 mlog_errno(ret);
851 goto out;
852 }
853
854 down_read(&OCFS2_I(inode)->ip_alloc_sem);
855
856 if (*offset >= inode->i_size) {
857 ret = -ENXIO;
858 goto out_unlock;
859 }
860
861 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
862 if (origin == SEEK_HOLE)
863 *offset = inode->i_size;
864 goto out_unlock;
865 }
866
867 clen = 0;
868 cpos = *offset >> cs_bits;
869 cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
870
871 while (cpos < cend && !is_last) {
872 ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
873 &rec, &is_last);
874 if (ret) {
875 mlog_errno(ret);
876 goto out_unlock;
877 }
878
879 extoff = cpos;
880 extoff <<= cs_bits;
881
882 if (rec.e_blkno == 0ULL) {
883 clen = hole_size;
884 is_data = 0;
885 } else {
886 clen = le16_to_cpu(rec.e_leaf_clusters) -
887 (cpos - le32_to_cpu(rec.e_cpos));
888 is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
889 }
890
891 if ((!is_data && origin == SEEK_HOLE) ||
892 (is_data && origin == SEEK_DATA)) {
893 if (extoff > *offset)
894 *offset = extoff;
895 goto out_unlock;
896 }
897
898 if (!is_last)
899 cpos += clen;
900 }
901
902 if (origin == SEEK_HOLE) {
903 extoff = cpos;
904 extoff <<= cs_bits;
905 extlen = clen;
906 extlen <<= cs_bits;
907
908 if ((extoff + extlen) > inode->i_size)
909 extlen = inode->i_size - extoff;
910 extoff += extlen;
911 if (extoff > *offset)
912 *offset = extoff;
913 goto out_unlock;
914 }
915
916 ret = -ENXIO;
917
918out_unlock:
919
920 brelse(di_bh);
921
922 up_read(&OCFS2_I(inode)->ip_alloc_sem);
923
924 ocfs2_inode_unlock(inode, 0);
925out:
926 if (ret && ret != -ENXIO)
927 ret = -ENXIO;
928 return ret;
929}
930
835int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, 931int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
836 struct buffer_head *bhs[], int flags, 932 struct buffer_head *bhs[], int flags,
837 int (*validate)(struct super_block *sb, 933 int (*validate)(struct super_block *sb,
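The new ocfs2_seek_data_hole_offset() above walks the extent map looking for the next allocated (data) or unallocated/unwritten (hole) region and is wired up to SEEK_DATA/SEEK_HOLE through ocfs2_file_llseek() in the file.c hunks further down. From user space the result is the standard lseek() contract; a small illustrative test program, not part of the patch:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(int argc, char **argv)
        {
                int fd;
                off_t data, hole;

                if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                        return 1;
                data = lseek(fd, 0, SEEK_DATA);    /* first data at or after offset 0 */
                hole = lseek(fd, data, SEEK_HOLE); /* next hole at or after 'data' */
                printf("data at %lld, next hole at %lld\n",
                       (long long)data, (long long)hole);
                close(fd);
                return 0;
        }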
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index e79d41c2c90..67ea57d2fd5 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
53int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 53int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
54 u64 map_start, u64 map_len); 54 u64 map_start, u64 map_len);
55 55
56int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
57
56int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, 58int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
57 u32 *p_cluster, u32 *num_clusters, 59 u32 *p_cluster, u32 *num_clusters,
58 struct ocfs2_extent_list *el, 60 struct ocfs2_extent_list *el,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de4ea1af041..6e396683c3d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1950 if (ret < 0) 1950 if (ret < 0)
1951 mlog_errno(ret); 1951 mlog_errno(ret);
1952 1952
1953 if (file->f_flags & O_SYNC)
1954 handle->h_sync = 1;
1955
1953 ocfs2_commit_trans(osb, handle); 1956 ocfs2_commit_trans(osb, handle);
1954 1957
1955out_inode_unlock: 1958out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
2052 return ret; 2055 return ret;
2053} 2056}
2054 2057
2058static void ocfs2_aiodio_wait(struct inode *inode)
2059{
2060 wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
2061
2062 wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
2063}
2064
2065static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2066{
2067 int blockmask = inode->i_sb->s_blocksize - 1;
2068 loff_t final_size = pos + count;
2069
2070 if ((pos & blockmask) || (final_size & blockmask))
2071 return 1;
2072 return 0;
2073}
2074
2055static int ocfs2_prepare_inode_for_refcount(struct inode *inode, 2075static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2056 struct file *file, 2076 struct file *file,
2057 loff_t pos, size_t count, 2077 loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2230 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2250 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2231 int full_coherency = !(osb->s_mount_opt & 2251 int full_coherency = !(osb->s_mount_opt &
2232 OCFS2_MOUNT_COHERENCY_BUFFERED); 2252 OCFS2_MOUNT_COHERENCY_BUFFERED);
2253 int unaligned_dio = 0;
2233 2254
2234 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, 2255 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2235 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2256 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
2297 goto out; 2318 goto out;
2298 } 2319 }
2299 2320
2321 if (direct_io && !is_sync_kiocb(iocb))
2322 unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
2323 *ppos);
2324
2300 /* 2325 /*
2301 * We can't complete the direct I/O as requested, fall back to 2326 * We can't complete the direct I/O as requested, fall back to
2302 * buffered I/O. 2327 * buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
2311 goto relock; 2336 goto relock;
2312 } 2337 }
2313 2338
2339 if (unaligned_dio) {
2340 /*
2341 * Wait on previous unaligned aio to complete before
2342 * proceeding.
2343 */
2344 ocfs2_aiodio_wait(inode);
2345
2346 /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
2347 atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
2348 ocfs2_iocb_set_unaligned_aio(iocb);
2349 }
2350
2314 /* 2351 /*
2315 * To later detect whether a journal commit for sync writes is 2352 * To later detect whether a journal commit for sync writes is
2316 * necessary, we sample i_size, and cluster count here. 2353 * necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
2382 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { 2419 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2383 rw_level = -1; 2420 rw_level = -1;
2384 have_alloc_sem = 0; 2421 have_alloc_sem = 0;
2422 unaligned_dio = 0;
2385 } 2423 }
2386 2424
2425 if (unaligned_dio)
2426 atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
2427
2387out: 2428out:
2388 if (rw_level != -1) 2429 if (rw_level != -1)
2389 ocfs2_rw_unlock(inode, rw_level); 2430 ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
2591 return ret; 2632 return ret;
2592} 2633}
2593 2634
2635/* See generic_file_llseek_unlocked() */
2636static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
2637{
2638 struct inode *inode = file->f_mapping->host;
2639 int ret = 0;
2640
2641 mutex_lock(&inode->i_mutex);
2642
2643 switch (origin) {
2644 case SEEK_SET:
2645 break;
2646 case SEEK_END:
2647 offset += inode->i_size;
2648 break;
2649 case SEEK_CUR:
2650 if (offset == 0) {
2651 offset = file->f_pos;
2652 goto out;
2653 }
2654 offset += file->f_pos;
2655 break;
2656 case SEEK_DATA:
2657 case SEEK_HOLE:
2658 ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
2659 if (ret)
2660 goto out;
2661 break;
2662 default:
2663 ret = -EINVAL;
2664 goto out;
2665 }
2666
2667 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
2668 ret = -EINVAL;
2669 if (!ret && offset > inode->i_sb->s_maxbytes)
2670 ret = -EINVAL;
2671 if (ret)
2672 goto out;
2673
2674 if (offset != file->f_pos) {
2675 file->f_pos = offset;
2676 file->f_version = 0;
2677 }
2678
2679out:
2680 mutex_unlock(&inode->i_mutex);
2681 if (ret)
2682 return ret;
2683 return offset;
2684}
2685
2594const struct inode_operations ocfs2_file_iops = { 2686const struct inode_operations ocfs2_file_iops = {
2595 .setattr = ocfs2_setattr, 2687 .setattr = ocfs2_setattr,
2596 .getattr = ocfs2_getattr, 2688 .getattr = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
2615 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! 2707 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2616 */ 2708 */
2617const struct file_operations ocfs2_fops = { 2709const struct file_operations ocfs2_fops = {
2618 .llseek = generic_file_llseek, 2710 .llseek = ocfs2_file_llseek,
2619 .read = do_sync_read, 2711 .read = do_sync_read,
2620 .write = do_sync_write, 2712 .write = do_sync_write,
2621 .mmap = ocfs2_mmap, 2713 .mmap = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
2663 * the cluster. 2755 * the cluster.
2664 */ 2756 */
2665const struct file_operations ocfs2_fops_no_plocks = { 2757const struct file_operations ocfs2_fops_no_plocks = {
2666 .llseek = generic_file_llseek, 2758 .llseek = ocfs2_file_llseek,
2667 .read = do_sync_read, 2759 .read = do_sync_read,
2668 .write = do_sync_write, 2760 .write = do_sync_write,
2669 .mmap = ocfs2_mmap, 2761 .mmap = ocfs2_mmap,
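With .llseek switched to ocfs2_file_llseek in both file_operations tables above, sparse-file aware tools can walk extents from user space. A minimal sketch of the calling convention, assuming an ocfs2 mount at a placeholder path (plain POSIX lseek, nothing ocfs2-specific):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/ocfs2/sparse.img", O_RDONLY);   /* placeholder path */
        off_t data, hole;

        if (fd < 0)
                return 1;

        /* Offset of the first data byte at or after 0; fails with ENXIO
         * when the whole file is a hole (matches the -ENXIO path above). */
        data = lseek(fd, 0, SEEK_DATA);
        if (data >= 0) {
                /* First hole at or after that point; i_size terminates the scan. */
                hole = lseek(fd, data, SEEK_HOLE);
                if (hole >= 0)
                        printf("first extent: %lld..%lld\n",
                               (long long)data, (long long)hole);
        }

        close(fd);
        return 0;
}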
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a22d2c09889..17454a904d7 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
951 trace_ocfs2_cleanup_delete_inode( 951 trace_ocfs2_cleanup_delete_inode(
952 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); 952 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
953 if (sync_data) 953 if (sync_data)
954 write_inode_now(inode, 1); 954 filemap_write_and_wait(inode->i_mapping);
955 truncate_inode_pages(&inode->i_data, 0); 955 truncate_inode_pages(&inode->i_data, 0);
956} 956}
957 957
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1c508b149b3..88924a3133f 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
43 /* protects extended attribute changes on this inode */ 43 /* protects extended attribute changes on this inode */
44 struct rw_semaphore ip_xattr_sem; 44 struct rw_semaphore ip_xattr_sem;
45 45
46 /* Number of outstanding AIO's which are not page aligned */
47 atomic_t ip_unaligned_aio;
48
46 /* These fields are protected by ip_lock */ 49 /* These fields are protected by ip_lock */
47 spinlock_t ip_lock; 50 spinlock_t ip_lock;
48 u32 ip_open_count; 51 u32 ip_open_count;
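The new ip_unaligned_aio counter is the accounting half of a small drain gate: the write path above waits for it to reach zero, bumps it before issuing an unaligned direct I/O, and the AIO completion path (ocfs2_dio_end_io, per the comment in the write hunk) drops it and wakes waiters. A generic sketch of that pattern with made-up names, not the ocfs2 code itself:

#include <linux/atomic.h>
#include <linux/wait.h>

/* Sketch only: serialize new work against outstanding async operations
 * using an atomic counter plus a wait queue. */
static atomic_t inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(inflight_wq);

static void submit_side(void)
{
        /* Wait until earlier unaligned AIO has drained... */
        wait_event(inflight_wq, atomic_read(&inflight) == 0);
        /* ...then account for the request we are about to issue. */
        atomic_inc(&inflight);
}

static void completion_side(void)
{
        /* Completion path: drop the reference and wake any waiter. */
        if (atomic_dec_and_test(&inflight))
                wake_up(&inflight_wq);
}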
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bc91072b721..726ff265b29 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
122 if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & 122 if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
123 (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { 123 (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
124 if (!capable(CAP_LINUX_IMMUTABLE)) 124 if (!capable(CAP_LINUX_IMMUTABLE))
125 goto bail_unlock; 125 goto bail_commit;
126 } 126 }
127 127
128 ocfs2_inode->ip_attr = flags; 128 ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
132 if (status < 0) 132 if (status < 0)
133 mlog_errno(status); 133 mlog_errno(status);
134 134
135bail_commit:
135 ocfs2_commit_trans(osb, handle); 136 ocfs2_commit_trans(osb, handle);
136bail_unlock: 137bail_unlock:
137 ocfs2_inode_unlock(inode, 1); 138 ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
381 if (!oifi) { 382 if (!oifi) {
382 status = -ENOMEM; 383 status = -ENOMEM;
383 mlog_errno(status); 384 mlog_errno(status);
384 goto bail; 385 goto out_err;
385 } 386 }
386 387
387 if (o2info_from_user(*oifi, req)) 388 if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
431 o2info_set_request_error(&oifi->ifi_req, req); 432 o2info_set_request_error(&oifi->ifi_req, req);
432 433
433 kfree(oifi); 434 kfree(oifi);
434 435out_err:
435 return status; 436 return status;
436} 437}
437 438
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
666 if (!oiff) { 667 if (!oiff) {
667 status = -ENOMEM; 668 status = -ENOMEM;
668 mlog_errno(status); 669 mlog_errno(status);
669 goto bail; 670 goto out_err;
670 } 671 }
671 672
672 if (o2info_from_user(*oiff, req)) 673 if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
716 o2info_set_request_error(&oiff->iff_req, req); 717 o2info_set_request_error(&oiff->iff_req, req);
717 718
718 kfree(oiff); 719 kfree(oiff);
719 720out_err:
720 return status; 721 return status;
721} 722}
722 723
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 295d56454e8..0a42ae96dca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1544 /* we need to run complete recovery for offline orphan slots */ 1544 /* we need to run complete recovery for offline orphan slots */
1545 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 1545 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
1546 1546
1547 mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", 1547 printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
1548 node_num, slot_num, 1548 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1549 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1549 MINOR(osb->sb->s_dev));
1550 1550
1551 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); 1551 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
1552 1552
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1601 1601
1602 jbd2_journal_destroy(journal); 1602 jbd2_journal_destroy(journal);
1603 1603
1604 printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
1605 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1606 MINOR(osb->sb->s_dev));
1604done: 1607done:
1605 /* drop the lock on this nodes journal */ 1608 /* drop the lock on this nodes journal */
1606 if (got_lock) 1609 if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
1808 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This 1811 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
1809 * is done to catch any orphans that are left over in orphan directories. 1812 * is done to catch any orphans that are left over in orphan directories.
1810 * 1813 *
1814 * It scans all slots, even ones that are in use. It does so to handle the
1815 * case described below:
1816 *
1817 * Node 1 has an inode it was using. The dentry went away due to memory
1818 * pressure. Node 1 closes the inode, but it's on the free list. The node
1819 * has the open lock.
1820 * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
1821 * but node 1 has no dentry and doesn't get the message. It trylocks the
1822 * open lock, sees that another node has a PR, and does nothing.
1823 * Later node 2 runs its orphan dir. It igets the inode, trylocks the
1824 * open lock, sees the PR still, and does nothing.
1825 * Basically, we have to trigger an orphan iput on node 1. The only way
1826 * for this to happen is if node 1 runs node 2's orphan dir.
1827 *
1811 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT 1828 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
1812 * seconds. It gets an EX lock on os_lockres and checks sequence number 1829 * seconds. It gets an EX lock on os_lockres and checks sequence number
1813 * stored in LVB. If the sequence number has changed, it means some other 1830 * stored in LVB. If the sequence number has changed, it means some other
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 68cf2f6d3c6..a3385b63ff5 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
441#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) 441#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
442 442
443/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota 443/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
444 * update on dir + index leaf + dx root update for free list */ 444 * update on dir + index leaf + dx root update for free list +
445 * previous dirblock update in the free list */
445static inline int ocfs2_link_credits(struct super_block *sb) 446static inline int ocfs2_link_credits(struct super_block *sb)
446{ 447{
447 return 2*OCFS2_INODE_UPDATE_CREDITS + 3 + 448 return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
448 ocfs2_quota_trans_credits(sb); 449 ocfs2_quota_trans_credits(sb);
449} 450}
450 451
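For reference, the updated credit count can be read as the breakdown below; it simply restates the new comment, and the quota term still depends on the mount's quota configuration:

/*
 * Illustrative expansion of the updated ocfs2_link_credits():
 *
 *   2 * OCFS2_INODE_UPDATE_CREDITS    file nlink/ctime + directory mtime/ctime
 *   + 1                               dir entry block
 *   + 1                               index leaf
 *   + 1                               dx root update for the free list
 *   + 1                               previous dirblock update in the free list (new)
 *   + ocfs2_quota_trans_credits(sb)   quota update on the directory
 *
 * Hence the fixed term grows from 3 to 4: unlinking from the free list now
 * also dirties the directory block that precedes the freed entry.
 */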
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3e9393ca39e..9cd41083e99 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
61static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, 61static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
62 struct page *page) 62 struct page *page)
63{ 63{
64 int ret; 64 int ret = VM_FAULT_NOPAGE;
65 struct inode *inode = file->f_path.dentry->d_inode; 65 struct inode *inode = file->f_path.dentry->d_inode;
66 struct address_space *mapping = inode->i_mapping; 66 struct address_space *mapping = inode->i_mapping;
67 loff_t pos = page_offset(page); 67 loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
71 void *fsdata; 71 void *fsdata;
72 loff_t size = i_size_read(inode); 72 loff_t size = i_size_read(inode);
73 73
74 /*
75 * Another node might have truncated while we were waiting on
76 * cluster locks.
77 * We don't check size == 0 before the shift. This is borrowed
78 * from do_generic_file_read.
79 */
80 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 74 last_index = (size - 1) >> PAGE_CACHE_SHIFT;
81 if (unlikely(!size || page->index > last_index)) {
82 ret = -EINVAL;
83 goto out;
84 }
85 75
86 /* 76 /*
87 * The i_size check above doesn't catch the case where nodes 77 * There are cases that lead to the page no longer belonging to the
88 * truncated and then re-extended the file. We'll re-check the 78 * mapping.
89 * page mapping after taking the page lock inside of 79 * 1) pagecache truncates locally due to memory pressure.
90 * ocfs2_write_begin_nolock(). 80 * 2) pagecache truncates when another is taking EX lock against
81 * inode lock. see ocfs2_data_convert_worker.
82 *
83 * The i_size check doesn't catch the case where nodes truncated and
84 * then re-extended the file. We'll re-check the page mapping after
85 * taking the page lock inside of ocfs2_write_begin_nolock().
86 *
87 * Let VM retry with these cases.
91 */ 88 */
92 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { 89 if ((page->mapping != inode->i_mapping) ||
93 /* 90 (!PageUptodate(page)) ||
94 * the page has been umapped in ocfs2_data_downconvert_worker. 91 (page_offset(page) >= size))
95 * So return 0 here and let VFS retry.
96 */
97 ret = 0;
98 goto out; 92 goto out;
99 }
100 93
101 /* 94 /*
102 * Call ocfs2_write_begin() and ocfs2_write_end() to take 95 * Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
116 if (ret) { 109 if (ret) {
117 if (ret != -ENOSPC) 110 if (ret != -ENOSPC)
118 mlog_errno(ret); 111 mlog_errno(ret);
112 if (ret == -ENOMEM)
113 ret = VM_FAULT_OOM;
114 else
115 ret = VM_FAULT_SIGBUS;
119 goto out; 116 goto out;
120 } 117 }
121 118
122 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, 119 if (!locked_page) {
123 fsdata); 120 ret = VM_FAULT_NOPAGE;
124 if (ret < 0) {
125 mlog_errno(ret);
126 goto out; 121 goto out;
127 } 122 }
123 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
124 fsdata);
128 BUG_ON(ret != len); 125 BUG_ON(ret != len);
129 ret = 0; 126 ret = VM_FAULT_LOCKED;
130out: 127out:
131 return ret; 128 return ret;
132} 129}
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
168 165
169out: 166out:
170 ocfs2_unblock_signals(&oldset); 167 ocfs2_unblock_signals(&oldset);
171 if (ret)
172 ret = VM_FAULT_SIGBUS;
173 return ret; 168 return ret;
174} 169}
175 170
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index d53cb706f14..184c76b8c29 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
745 */ 745 */
746 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, 746 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
747 new_phys_cpos); 747 new_phys_cpos);
748 if (!new_phys_cpos) { 748 if (!*new_phys_cpos) {
749 ret = -ENOSPC; 749 ret = -ENOSPC;
750 goto out_commit; 750 goto out_commit;
751 } 751 }
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 409285854f6..d355e6e36b3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
836 836
837static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) 837static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
838{ 838{
839 __test_and_set_bit_le(bit, bitmap); 839 __set_bit_le(bit, bitmap);
840} 840}
841#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) 841#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
842 842
843static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) 843static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
844{ 844{
845 __test_and_clear_bit_le(bit, bitmap); 845 __clear_bit_le(bit, bitmap);
846} 846}
847#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) 847#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
848 848
849#define ocfs2_test_bit test_bit_le 849#define ocfs2_test_bit test_bit_le
850#define ocfs2_find_next_zero_bit find_next_zero_bit_le 850#define ocfs2_find_next_zero_bit find_next_zero_bit_le
851#define ocfs2_find_next_bit find_next_bit_le 851#define ocfs2_find_next_bit find_next_bit_le
852
853static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
854{
855#if BITS_PER_LONG == 64
856 *bit += ((unsigned long) addr & 7UL) << 3;
857 addr = (void *) ((unsigned long) addr & ~7UL);
858#elif BITS_PER_LONG == 32
859 *bit += ((unsigned long) addr & 3UL) << 3;
860 addr = (void *) ((unsigned long) addr & ~3UL);
861#else
862#error "Unsupported BITS_PER_LONG"
863#endif
864 return addr;
865}
866
867static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
868{
869 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
870 ocfs2_set_bit(bit, bitmap);
871}
872
873static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
874{
875 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
876 ocfs2_clear_bit(bit, bitmap);
877}
878
879static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
880{
881 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
882 return ocfs2_test_bit(bit, bitmap);
883}
884
885static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
886 int start)
887{
888 int fix = 0, ret, tmpmax;
889 bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
890 tmpmax = max + fix;
891 start += fix;
892
893 ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
894 if (ret > max)
895 return max;
896 return ret;
897}
898
852#endif /* OCFS2_H */ 899#endif /* OCFS2_H */
853 900
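The *_unaligned helpers above cope with dqc_bitmap fields that do not start on a long boundary: the address is rounded down and the skipped bytes are folded back into the bit index. A standalone sketch of that arithmetic on a 64-bit build, using an illustrative address rather than a real buffer:

#include <stdio.h>

int main(void)
{
        /* Pretend the quota chunk bitmap starts 5 bytes past an 8-byte
         * boundary and we want bit 10 of that buffer. */
        unsigned long addr = 0x1005UL;  /* illustrative address only */
        int bit = 10;

        /* 64-bit branch of correct_addr_and_bit_unaligned(): fold the
         * misaligned bytes into the bit index (8 bits per byte), then
         * round the address down to a long boundary. */
        bit += (addr & 7UL) << 3;       /* 10 + 5*8 = 50 */
        addr &= ~7UL;                   /* 0x1000 */

        printf("aligned base %#lx, adjusted bit %d\n", addr, bit);
        return 0;
}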
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc8007fc924..f100bf70a90 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
404 int status = 0; 404 int status = 0;
405 struct ocfs2_quota_recovery *rec; 405 struct ocfs2_quota_recovery *rec;
406 406
407 mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); 407 printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
408 "slot %u\n", osb->dev_str, slot_num);
409
408 rec = ocfs2_alloc_quota_recovery(); 410 rec = ocfs2_alloc_quota_recovery();
409 if (!rec) 411 if (!rec)
410 return ERR_PTR(-ENOMEM); 412 return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
549 goto out_commit; 551 goto out_commit;
550 } 552 }
551 lock_buffer(qbh); 553 lock_buffer(qbh);
552 WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap)); 554 WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
553 ocfs2_clear_bit(bit, dchunk->dqc_bitmap); 555 ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
554 le32_add_cpu(&dchunk->dqc_free, 1); 556 le32_add_cpu(&dchunk->dqc_free, 1);
555 unlock_buffer(qbh); 557 unlock_buffer(qbh);
556 ocfs2_journal_dirty(handle, qbh); 558 ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
596 struct inode *lqinode; 598 struct inode *lqinode;
597 unsigned int flags; 599 unsigned int flags;
598 600
599 mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); 601 printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
602 "slot %u\n", osb->dev_str, slot_num);
603
600 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 604 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
601 for (type = 0; type < MAXQUOTAS; type++) { 605 for (type = 0; type < MAXQUOTAS; type++) {
602 if (list_empty(&(rec->r_list[type]))) 606 if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
612 /* Someone else is holding the lock? Then he must be 616 /* Someone else is holding the lock? Then he must be
613 * doing the recovery. Just skip the file... */ 617 * doing the recovery. Just skip the file... */
614 if (status == -EAGAIN) { 618 if (status == -EAGAIN) {
615 mlog(ML_NOTICE, "skipping quota recovery for slot %d " 619 printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
616 "because quota file is locked.\n", slot_num); 620 "device (%s) for slot %d because quota file is "
621 "locked.\n", osb->dev_str, slot_num);
617 status = 0; 622 status = 0;
618 goto out_put; 623 goto out_put;
619 } else if (status < 0) { 624 } else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
944 * ol_quota_entries_per_block(sb); 949 * ol_quota_entries_per_block(sb);
945 } 950 }
946 951
947 found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0); 952 found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
948 /* We failed? */ 953 /* We failed? */
949 if (found == len) { 954 if (found == len) {
950 mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" 955 mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
1208 struct ocfs2_local_disk_chunk *dchunk; 1213 struct ocfs2_local_disk_chunk *dchunk;
1209 1214
1210 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; 1215 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
1211 ocfs2_set_bit(*offset, dchunk->dqc_bitmap); 1216 ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
1212 le32_add_cpu(&dchunk->dqc_free, -1); 1217 le32_add_cpu(&dchunk->dqc_free, -1);
1213} 1218}
1214 1219
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
1289 (od->dq_chunk->qc_headerbh->b_data); 1294 (od->dq_chunk->qc_headerbh->b_data);
1290 /* Mark structure as freed */ 1295 /* Mark structure as freed */
1291 lock_buffer(od->dq_chunk->qc_headerbh); 1296 lock_buffer(od->dq_chunk->qc_headerbh);
1292 ocfs2_clear_bit(offset, dchunk->dqc_bitmap); 1297 ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
1293 le32_add_cpu(&dchunk->dqc_free, 1); 1298 le32_add_cpu(&dchunk->dqc_free, 1);
1294 unlock_buffer(od->dq_chunk->qc_headerbh); 1299 unlock_buffer(od->dq_chunk->qc_headerbh);
1295 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); 1300 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 26fc0014d50..1424c151ccc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
493 goto bail; 493 goto bail;
494 } 494 }
495 } else 495 } else
496 mlog(ML_NOTICE, "slot %d is already allocated to this node!\n", 496 printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
497 slot); 497 "allocated to this node!\n", slot, osb->dev_str);
498 498
499 ocfs2_set_slot(si, slot, osb->node_num); 499 ocfs2_set_slot(si, slot, osb->node_num);
500 osb->slot_num = slot; 500 osb->slot_num = slot;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 19965b00c43..94368017edb 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -28,6 +28,7 @@
28#include "cluster/masklog.h" 28#include "cluster/masklog.h"
29#include "cluster/nodemanager.h" 29#include "cluster/nodemanager.h"
30#include "cluster/heartbeat.h" 30#include "cluster/heartbeat.h"
31#include "cluster/tcp.h"
31 32
32#include "stackglue.h" 33#include "stackglue.h"
33 34
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
256} 257}
257 258
258/* 259/*
260 * Check if this node is heartbeating and is connected to all other
261 * heartbeating nodes.
262 */
263static int o2cb_cluster_check(void)
264{
265 u8 node_num;
266 int i;
267 unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
268 unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
269
270 node_num = o2nm_this_node();
271 if (node_num == O2NM_MAX_NODES) {
272 printk(KERN_ERR "o2cb: This node has not been configured.\n");
273 return -EINVAL;
274 }
275
276 /*
277 * o2dlm expects o2net sockets to be created. If not, then
278 * dlm_join_domain() fails with a stack of errors which are both cryptic
279 * and incomplete. The idea here is to detect upfront whether we have
280 * managed to connect to all nodes or not. If not, then list the nodes
281 * to allow the user to check the configuration (incorrect IP, firewall,
282 * etc.) Yes, this is racy. But its not the end of the world.
283 */
284#define O2CB_MAP_STABILIZE_COUNT 60
285 for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
286 o2hb_fill_node_map(hbmap, sizeof(hbmap));
287 if (!test_bit(node_num, hbmap)) {
288 printk(KERN_ERR "o2cb: %s heartbeat has not been "
289 "started.\n", (o2hb_global_heartbeat_active() ?
290 "Global" : "Local"));
291 return -EINVAL;
292 }
293 o2net_fill_node_map(netmap, sizeof(netmap));
294 /* Force set the current node to allow easy compare */
295 set_bit(node_num, netmap);
296 if (!memcmp(hbmap, netmap, sizeof(hbmap)))
297 return 0;
298 if (i < O2CB_MAP_STABILIZE_COUNT)
299 msleep(1000);
300 }
301
302 printk(KERN_ERR "o2cb: This node could not connect to nodes:");
303 i = -1;
304 while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
305 i + 1)) < O2NM_MAX_NODES) {
306 if (!test_bit(i, netmap))
307 printk(" %u", i);
308 }
309 printk(".\n");
310
311 return -ENOTCONN;
312}
313
314/*
259 * Called from the dlm when it's about to evict a node. This is how the 315 * Called from the dlm when it's about to evict a node. This is how the
260 * classic stack signals node death. 316 * classic stack signals node death.
261 */ 317 */
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
263{ 319{
264 struct ocfs2_cluster_connection *conn = data; 320 struct ocfs2_cluster_connection *conn = data;
265 321
266 mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", 322 printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
267 node_num, conn->cc_namelen, conn->cc_name); 323 node_num, conn->cc_namelen, conn->cc_name);
268 324
269 conn->cc_recovery_handler(node_num, conn->cc_recovery_data); 325 conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
270} 326}
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
280 BUG_ON(conn == NULL); 336 BUG_ON(conn == NULL);
281 BUG_ON(conn->cc_proto == NULL); 337 BUG_ON(conn->cc_proto == NULL);
282 338
283 /* for now we only have one cluster/node, make sure we see it 339 /* Ensure cluster stack is up and all nodes are connected */
284 * in the heartbeat universe */ 340 rc = o2cb_cluster_check();
285 if (!o2hb_check_local_node_heartbeating()) { 341 if (rc) {
286 if (o2hb_global_heartbeat_active()) 342 printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
287 mlog(ML_ERROR, "Global heartbeat not started\n"); 343 "before retrying.\n");
288 rc = -EINVAL;
289 goto out; 344 goto out;
290 } 345 }
291 346
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 56f61027236..4994f8b0e60 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -54,6 +54,7 @@
54#include "ocfs1_fs_compat.h" 54#include "ocfs1_fs_compat.h"
55 55
56#include "alloc.h" 56#include "alloc.h"
57#include "aops.h"
57#include "blockcheck.h" 58#include "blockcheck.h"
58#include "dlmglue.h" 59#include "dlmglue.h"
59#include "export.h" 60#include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1107 1108
1108 ocfs2_set_ro_flag(osb, 1); 1109 ocfs2_set_ro_flag(osb, 1);
1109 1110
1110 printk(KERN_NOTICE "Readonly device detected. No cluster " 1111 printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
1111 "services will be utilized for this mount. Recovery " 1112 "Cluster services will not be used for this mount. "
1112 "will be skipped.\n"); 1113 "Recovery will be skipped.\n", osb->dev_str);
1113 } 1114 }
1114 1115
1115 if (!ocfs2_is_hard_readonly(osb)) { 1116 if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1616 return 0; 1617 return 0;
1617} 1618}
1618 1619
1620wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
1621
1619static int __init ocfs2_init(void) 1622static int __init ocfs2_init(void)
1620{ 1623{
1621 int status; 1624 int status, i;
1622 1625
1623 ocfs2_print_version(); 1626 ocfs2_print_version();
1624 1627
1628 for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
1629 init_waitqueue_head(&ocfs2__ioend_wq[i]);
1630
1625 status = init_ocfs2_uptodate_cache(); 1631 status = init_ocfs2_uptodate_cache();
1626 if (status < 0) { 1632 if (status < 0) {
1627 mlog_errno(status); 1633 mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
1760 ocfs2_extent_map_init(&oi->vfs_inode); 1766 ocfs2_extent_map_init(&oi->vfs_inode);
1761 INIT_LIST_HEAD(&oi->ip_io_markers); 1767 INIT_LIST_HEAD(&oi->ip_io_markers);
1762 oi->ip_dir_start_lookup = 0; 1768 oi->ip_dir_start_lookup = 0;
1763 1769 atomic_set(&oi->ip_unaligned_aio, 0);
1764 init_rwsem(&oi->ip_alloc_sem); 1770 init_rwsem(&oi->ip_alloc_sem);
1765 init_rwsem(&oi->ip_xattr_sem); 1771 init_rwsem(&oi->ip_xattr_sem);
1766 mutex_init(&oi->ip_io_mutex); 1772 mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1974 * If we failed before we got a uuid_str yet, we can't stop 1980 * If we failed before we got a uuid_str yet, we can't stop
1975 * heartbeat. Otherwise, do it. 1981 * heartbeat. Otherwise, do it.
1976 */ 1982 */
1977 if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str) 1983 if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
1984 !ocfs2_is_hard_readonly(osb))
1978 hangup_needed = 1; 1985 hangup_needed = 1;
1979 1986
1980 if (osb->cconn) 1987 if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
2353 mlog_errno(status); 2360 mlog_errno(status);
2354 goto bail; 2361 goto bail;
2355 } 2362 }
2356 cleancache_init_shared_fs((char *)&uuid_net_key, sb); 2363 cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
2357 2364
2358bail: 2365bail:
2359 return status; 2366 return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
2462 goto finally; 2469 goto finally;
2463 } 2470 }
2464 } else { 2471 } else {
2465 mlog(ML_NOTICE, "File system was not unmounted cleanly, " 2472 printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
2466 "recovering volume.\n"); 2473 "unmounted cleanly, recovering it.\n", osb->dev_str);
2467 } 2474 }
2468 2475
2469 local = ocfs2_mount_local(osb); 2476 local = ocfs2_mount_local(osb);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 194fb22ef79..aa9e8777b09 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
2376 } 2376 }
2377 2377
2378 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); 2378 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
2379 if (ret < 0) {
2380 mlog_errno(ret);
2381 break;
2382 }
2383 2379
2384 ocfs2_commit_trans(osb, ctxt.handle); 2380 ocfs2_commit_trans(osb, ctxt.handle);
2385 if (ctxt.meta_ac) { 2381 if (ctxt.meta_ac) {
2386 ocfs2_free_alloc_context(ctxt.meta_ac); 2382 ocfs2_free_alloc_context(ctxt.meta_ac);
2387 ctxt.meta_ac = NULL; 2383 ctxt.meta_ac = NULL;
2388 } 2384 }
2385
2386 if (ret < 0) {
2387 mlog_errno(ret);
2388 break;
2389 }
2390
2389 } 2391 }
2390 2392
2391 if (ctxt.meta_ac) 2393 if (ctxt.meta_ac)
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 2bd620f0d79..57bbf9078ac 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
167 } 167 }
168 168
169 psinfo = psi; 169 psinfo = psi;
170 mutex_init(&psinfo->read_mutex);
170 spin_unlock(&pstore_lock); 171 spin_unlock(&pstore_lock);
171 172
172 if (owner && !try_module_get(owner)) { 173 if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
195void pstore_get_records(int quiet) 196void pstore_get_records(int quiet)
196{ 197{
197 struct pstore_info *psi = psinfo; 198 struct pstore_info *psi = psinfo;
199 char *buf = NULL;
198 ssize_t size; 200 ssize_t size;
199 u64 id; 201 u64 id;
200 enum pstore_type_id type; 202 enum pstore_type_id type;
201 struct timespec time; 203 struct timespec time;
202 int failed = 0, rc; 204 int failed = 0, rc;
203 unsigned long flags;
204 205
205 if (!psi) 206 if (!psi)
206 return; 207 return;
207 208
208 spin_lock_irqsave(&psinfo->buf_lock, flags); 209 mutex_lock(&psi->read_mutex);
209 rc = psi->open(psi); 210 rc = psi->open(psi);
210 if (rc) 211 if (rc)
211 goto out; 212 goto out;
212 213
213 while ((size = psi->read(&id, &type, &time, psi)) > 0) { 214 while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
214 rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size, 215 rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
215 time, psi); 216 time, psi);
217 kfree(buf);
218 buf = NULL;
216 if (rc && (rc != -EEXIST || !quiet)) 219 if (rc && (rc != -EEXIST || !quiet))
217 failed++; 220 failed++;
218 } 221 }
219 psi->close(psi); 222 psi->close(psi);
220out: 223out:
221 spin_unlock_irqrestore(&psinfo->buf_lock, flags); 224 mutex_unlock(&psi->read_mutex);
222 225
223 if (failed) 226 if (failed)
224 printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", 227 printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
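The read side of the pstore interface now returns a heap buffer through the extra char **buf argument, and pstore_get_records() kfree()s it after pstore_mkfile(). A hedged sketch of what a backend's read callback could look like under that contract; the backend name and record contents are made up:

#include <linux/pstore.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical backend: hands back one fabricated record, then reports EOF.
 * Under the new contract the core kfree()s *buf after pstore_mkfile(),
 * so the backend allocates a fresh buffer per record. */
static ssize_t demo_pstore_read(u64 *id, enum pstore_type_id *type,
                                struct timespec *time, char **buf,
                                struct pstore_info *psi)
{
        static bool done;
        static const char msg[] = "demo record";

        if (done)
                return 0;               /* no more records */
        done = true;

        *buf = kmalloc(sizeof(msg), GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;
        memcpy(*buf, msg, sizeof(msg));

        *id = 1;
        *type = PSTORE_TYPE_DMESG;
        time->tv_sec = 0;
        time->tv_nsec = 0;
        return sizeof(msg);
}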
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5cff443f6cd..0bbb1a41998 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -674,7 +674,8 @@ xfs_qm_dqattach_one(
674 * disk and we didn't ask it to allocate; 674 * disk and we didn't ask it to allocate;
675 * ESRCH if quotas got turned off suddenly. 675 * ESRCH if quotas got turned off suddenly.
676 */ 676 */
677 error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp); 677 error = xfs_qm_dqget(ip->i_mount, ip, id, type,
678 doalloc | XFS_QMOPT_DOWARN, &dqp);
678 if (error) 679 if (error)
679 return error; 680 return error;
680 681
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index d30bedfeb7e..ddd46db65b5 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -235,6 +235,8 @@ struct drm_mode_fb_cmd {
235#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 235#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
236#define DRM_MODE_FB_DIRTY_FLAGS 0x03 236#define DRM_MODE_FB_DIRTY_FLAGS 0x03
237 237
238#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
239
238/* 240/*
239 * Mark a region of a framebuffer as dirty. 241 * Mark a region of a framebuffer as dirty.
240 * 242 *
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3d53efd25ab..f81676f1b31 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -4,6 +4,7 @@
4*/ 4*/
5#define radeon_PCI_IDS \ 5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 8 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 9 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 10 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -55,6 +56,7 @@
55 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 56 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
56 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 57 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
57 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 58 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
59 {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
58 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 60 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
59 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 61 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
60 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 62 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1d161cb3aca..12050434d57 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,17 +32,16 @@
32/** 32/**
33 * User-desired buffer creation information structure. 33 * User-desired buffer creation information structure.
34 * 34 *
35 * @size: requested size for the object. 35 * @size: user-desired memory allocation size.
36 * - this size value would be page-aligned internally. 36 * - this size value would be page-aligned internally.
37 * @flags: user request for setting memory type or cache attributes. 37 * @flags: user request for setting memory type or cache attributes.
38 * @handle: returned handle for the object. 38 * @handle: returned handle to the created gem object.
 39 * @pad: just padding to be 64-bit aligned. 39 * - this handle will be set by the gem module on the kernel side.
40 */ 40 */
41struct drm_exynos_gem_create { 41struct drm_exynos_gem_create {
42 unsigned int size; 42 uint64_t size;
43 unsigned int flags; 43 unsigned int flags;
44 unsigned int handle; 44 unsigned int handle;
45 unsigned int pad;
46}; 45};
47 46
48/** 47/**
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index b65be6054a1..be94be6d6f1 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite {
874 874
875#define RADEON_CHUNK_ID_RELOCS 0x01 875#define RADEON_CHUNK_ID_RELOCS 0x01
876#define RADEON_CHUNK_ID_IB 0x02 876#define RADEON_CHUNK_ID_IB 0x02
877#define RADEON_CHUNK_ID_FLAGS 0x03
878
879/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
880#define RADEON_CS_KEEP_TILING_FLAGS 0x01
877 881
878struct drm_radeon_cs_chunk { 882struct drm_radeon_cs_chunk {
879 uint32_t chunk_id; 883 uint32_t chunk_id;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a3c071c9e18..847994aef0e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -211,8 +211,8 @@ extern void bio_pair_release(struct bio_pair *dbio);
211extern struct bio_set *bioset_create(unsigned int, unsigned int); 211extern struct bio_set *bioset_create(unsigned int, unsigned int);
212extern void bioset_free(struct bio_set *); 212extern void bioset_free(struct bio_set *);
213 213
214extern struct bio *bio_alloc(gfp_t, int); 214extern struct bio *bio_alloc(gfp_t, unsigned int);
215extern struct bio *bio_kmalloc(gfp_t, int); 215extern struct bio *bio_kmalloc(gfp_t, unsigned int);
216extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 216extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
217extern void bio_put(struct bio *); 217extern void bio_put(struct bio *);
218extern void bio_free(struct bio *, struct bio_set *); 218extern void bio_free(struct bio *, struct bio_set *);
@@ -519,7 +519,11 @@ extern void bio_integrity_init(void);
519#define bioset_integrity_create(a, b) (0) 519#define bioset_integrity_create(a, b) (0)
520#define bio_integrity_prep(a) (0) 520#define bio_integrity_prep(a) (0)
521#define bio_integrity_enabled(a) (0) 521#define bio_integrity_enabled(a) (0)
522#define bio_integrity_clone(a, b, c, d) (0) 522static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
523 gfp_t gfp_mask, struct bio_set *bs)
524{
525 return 0;
526}
523#define bioset_integrity_free(a) do { } while (0) 527#define bioset_integrity_free(a) do { } while (0)
524#define bio_integrity_free(a, b) do { } while (0) 528#define bio_integrity_free(a, b) do { } while (0)
525#define bio_integrity_endio(a, b) do { } while (0) 529#define bio_integrity_endio(a, b) do { } while (0)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f88eacb111d..7c05ac202d9 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -10,6 +10,12 @@
10#include "osdmap.h" 10#include "osdmap.h"
11#include "messenger.h" 11#include "messenger.h"
12 12
13/*
14 * Maximum object name size
15 * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100)
16 */
17#define MAX_OBJ_NAME_SIZE 100
18
13struct ceph_msg; 19struct ceph_msg;
14struct ceph_snap_context; 20struct ceph_snap_context;
15struct ceph_osd_request; 21struct ceph_osd_request;
@@ -75,7 +81,7 @@ struct ceph_osd_request {
75 struct inode *r_inode; /* for use by callbacks */ 81 struct inode *r_inode; /* for use by callbacks */
76 void *r_priv; /* ditto */ 82 void *r_priv; /* ditto */
77 83
78 char r_oid[40]; /* object name */ 84 char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */
79 int r_oid_len; 85 int r_oid_len;
80 unsigned long r_stamp; /* send OR check time */ 86 unsigned long r_stamp; /* send OR check time */
81 87
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f1..c86c940d1de 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
156 * @mult: cycle to nanosecond multiplier 156 * @mult: cycle to nanosecond multiplier
157 * @shift: cycle to nanosecond divisor (power of two) 157 * @shift: cycle to nanosecond divisor (power of two)
158 * @max_idle_ns: max idle time permitted by the clocksource (nsecs) 158 * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
159 * @maxadj maximum adjustment value to mult (~11%)
159 * @flags: flags describing special properties 160 * @flags: flags describing special properties
160 * @archdata: arch-specific data 161 * @archdata: arch-specific data
161 * @suspend: suspend function for the clocksource, if necessary 162 * @suspend: suspend function for the clocksource, if necessary
@@ -172,7 +173,7 @@ struct clocksource {
172 u32 mult; 173 u32 mult;
173 u32 shift; 174 u32 shift;
174 u64 max_idle_ns; 175 u64 max_idle_ns;
175 176 u32 maxadj;
176#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA 177#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
177 struct arch_clocksource_data archdata; 178 struct arch_clocksource_data archdata;
178#endif 179#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index ffbcf95cd97..3136ede5a1e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -69,7 +69,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
69 * @resume: Called to bring a device on this bus out of sleep mode. 69 * @resume: Called to bring a device on this bus out of sleep mode.
70 * @pm: Power management operations of this bus, callback the specific 70 * @pm: Power management operations of this bus, callback the specific
71 * device driver's pm-ops. 71 * device driver's pm-ops.
72 * @iommu_ops IOMMU specific operations for this bus, used to attach IOMMU 72 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
73 * driver implementations to a bus and allow the driver to do 73 * driver implementations to a bus and allow the driver to do
74 * bus-specific setup 74 * bus-specific setup
75 * @p: The private data of the driver core, only the driver core can 75 * @p: The private data of the driver core, only the driver core can
@@ -682,6 +682,11 @@ static inline bool device_async_suspend_enabled(struct device *dev)
682 return !!dev->power.async_suspend; 682 return !!dev->power.async_suspend;
683} 683}
684 684
685static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
686{
687 dev->power.ignore_children = enable;
688}
689
685static inline void device_lock(struct device *dev) 690static inline void device_lock(struct device *dev)
686{ 691{
687 mutex_lock(&dev->mutex); 692 mutex_lock(&dev->mutex);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0c4df261af7..e3130220ce3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1886,6 +1886,7 @@ extern struct dentry *mount_single(struct file_system_type *fs_type,
1886extern struct dentry *mount_nodev(struct file_system_type *fs_type, 1886extern struct dentry *mount_nodev(struct file_system_type *fs_type,
1887 int flags, void *data, 1887 int flags, void *data,
1888 int (*fill_super)(struct super_block *, void *, int)); 1888 int (*fill_super)(struct super_block *, void *, int));
1889extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
1889void generic_shutdown_super(struct super_block *sb); 1890void generic_shutdown_super(struct super_block *sb);
1890void kill_block_super(struct super_block *sb); 1891void kill_block_super(struct super_block *sb);
1891void kill_anon_super(struct super_block *sb); 1892void kill_anon_super(struct super_block *sb);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9de31bc98c8..6d18f3531f1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -21,8 +21,6 @@
21#define dev_to_part(device) container_of((device), struct hd_struct, __dev) 21#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
22#define disk_to_dev(disk) (&(disk)->part0.__dev) 22#define disk_to_dev(disk) (&(disk)->part0.__dev)
23#define part_to_dev(part) (&((part)->__dev)) 23#define part_to_dev(part) (&((part)->__dev))
24#define alias_name(disk) ((disk)->alias ? (disk)->alias : \
25 (disk)->disk_name)
26 24
27extern struct device_type part_type; 25extern struct device_type part_type;
28extern struct kobject *block_depr; 26extern struct kobject *block_depr;
@@ -60,7 +58,6 @@ enum {
60 58
61#define DISK_MAX_PARTS 256 59#define DISK_MAX_PARTS 256
62#define DISK_NAME_LEN 32 60#define DISK_NAME_LEN 32
63#define ALIAS_LEN 256
64 61
65#include <linux/major.h> 62#include <linux/major.h>
66#include <linux/device.h> 63#include <linux/device.h>
@@ -166,7 +163,6 @@ struct gendisk {
166 * disks that can't be partitioned. */ 163 * disks that can't be partitioned. */
167 164
168 char disk_name[DISK_NAME_LEN]; /* name of major driver */ 165 char disk_name[DISK_NAME_LEN]; /* name of major driver */
169 char *alias; /* alias name of disk */
170 char *(*devnode)(struct gendisk *gd, mode_t *mode); 166 char *(*devnode)(struct gendisk *gd, mode_t *mode);
171 167
172 unsigned int events; /* supported events */ 168 unsigned int events; /* supported events */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 19644e0016b..d9d6c868b86 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -110,11 +110,6 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
110 110
111#define hugetlb_change_protection(vma, address, end, newprot) 111#define hugetlb_change_protection(vma, address, end, newprot)
112 112
113#ifndef HPAGE_MASK
114#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
115#define HPAGE_SIZE PAGE_SIZE
116#endif
117
118#endif /* !CONFIG_HUGETLB_PAGE */ 113#endif /* !CONFIG_HUGETLB_PAGE */
119 114
120#define HUGETLB_ANON_FILE "anon_hugepage" 115#define HUGETLB_ANON_FILE "anon_hugepage"
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a81bf6d23b3..07d103a06d6 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *);
432/* Internal numbers to terminate lists */ 432/* Internal numbers to terminate lists */
433#define I2C_CLIENT_END 0xfffeU 433#define I2C_CLIENT_END 0xfffeU
434 434
435/* The numbers to use to set I2C bus address */
436#define ANY_I2C_BUS 0xffff
437
438/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ 435/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
439#define I2C_ADDRS(addr, addrs...) \ 436#define I2C_ADDRS(addr, addrs...) \
440 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) 437 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 80b480c9753..abf5028db98 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -98,9 +98,10 @@ enum {
98 INET_DIAG_VEGASINFO, 98 INET_DIAG_VEGASINFO,
99 INET_DIAG_CONG, 99 INET_DIAG_CONG,
100 INET_DIAG_TOS, 100 INET_DIAG_TOS,
101 INET_DIAG_TCLASS,
101}; 102};
102 103
103#define INET_DIAG_MAX INET_DIAG_TOS 104#define INET_DIAG_MAX INET_DIAG_TCLASS
104 105
105 106
106/* INET_DIAG_MEM */ 107/* INET_DIAG_MEM */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 08ffab01e76..94b1e356c02 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -184,7 +184,6 @@ extern struct cred init_cred;
184 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ 184 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
185 }, \ 185 }, \
186 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ 186 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
187 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
188 INIT_IDS \ 187 INIT_IDS \
189 INIT_PERF_EVENTS(tsk) \ 188 INIT_PERF_EVENTS(tsk) \
190 INIT_TRACE_IRQFLAGS \ 189 INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f47fcd30273..c3892fc1d53 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -555,7 +555,6 @@ struct kvm_ppc_pvinfo {
555#define KVM_CAP_PPC_SMT 64 555#define KVM_CAP_PPC_SMT 64
556#define KVM_CAP_PPC_RMA 65 556#define KVM_CAP_PPC_RMA 65
557#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ 557#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
558#define KVM_CAP_PPC_HIOR 67
559#define KVM_CAP_PPC_PAPR 68 558#define KVM_CAP_PPC_PAPR 68
560#define KVM_CAP_S390_GMAP 71 559#define KVM_CAP_S390_GMAP 71
561 560
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 82b4c8801a4..8bf2cb9502d 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -243,7 +243,8 @@
243 243
244 244
245/*Registers VDD1, VDD2 voltage values definitions */ 245/*Registers VDD1, VDD2 voltage values definitions */
246#define VDD1_2_NUM_VOLTS 73 246#define VDD1_2_NUM_VOLT_FINE 73
247#define VDD1_2_NUM_VOLT_COARSE 3
247#define VDD1_2_MIN_VOLT 6000 248#define VDD1_2_MIN_VOLT 6000
248#define VDD1_2_OFFSET 125 249#define VDD1_2_OFFSET 125
249 250
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ab2c6343361..92ecf5585fa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations;
410extern const struct inode_operations nfs3_file_inode_operations; 410extern const struct inode_operations nfs3_file_inode_operations;
411#endif /* CONFIG_NFS_V3 */ 411#endif /* CONFIG_NFS_V3 */
412extern const struct file_operations nfs_file_operations; 412extern const struct file_operations nfs_file_operations;
413#ifdef CONFIG_NFS_V4
414extern const struct file_operations nfs4_file_operations;
415#endif /* CONFIG_NFS_V4 */
413extern const struct address_space_operations nfs_file_aops; 416extern const struct address_space_operations nfs_file_aops;
414extern const struct address_space_operations nfs_dir_aops; 417extern const struct address_space_operations nfs_dir_aops;
415 418
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index c74595ba709..2a7c533be5d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1192,6 +1192,7 @@ struct nfs_rpc_ops {
1192 const struct dentry_operations *dentry_ops; 1192 const struct dentry_operations *dentry_ops;
1193 const struct inode_operations *dir_inode_ops; 1193 const struct inode_operations *dir_inode_ops;
1194 const struct inode_operations *file_inode_ops; 1194 const struct inode_operations *file_inode_ops;
1195 const struct file_operations *file_ops;
1195 1196
1196 int (*getroot) (struct nfs_server *, struct nfs_fh *, 1197 int (*getroot) (struct nfs_server *, struct nfs_fh *,
1197 struct nfs_fsinfo *); 1198 struct nfs_fsinfo *);
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index e3d0b389024..7ef68724f0f 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -12,7 +12,7 @@ struct pci_ats {
12 unsigned int is_enabled:1; /* Enable bit is set */ 12 unsigned int is_enabled:1; /* Enable bit is set */
13}; 13};
14 14
15#ifdef CONFIG_PCI_IOV 15#ifdef CONFIG_PCI_ATS
16 16
17extern int pci_enable_ats(struct pci_dev *dev, int ps); 17extern int pci_enable_ats(struct pci_dev *dev, int ps);
18extern void pci_disable_ats(struct pci_dev *dev); 18extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
29 return dev->ats && dev->ats->is_enabled; 29 return dev->ats && dev->ats->is_enabled;
30} 30}
31 31
32#else /* CONFIG_PCI_IOV */ 32#else /* CONFIG_PCI_ATS */
33 33
34static inline int pci_enable_ats(struct pci_dev *dev, int ps) 34static inline int pci_enable_ats(struct pci_dev *dev, int ps)
35{ 35{
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
50 return 0; 50 return 0;
51} 51}
52 52
53#endif /* CONFIG_PCI_IOV */ 53#endif /* CONFIG_PCI_ATS */
54 54
55#ifdef CONFIG_PCI_PRI 55#ifdef CONFIG_PCI_PRI
56 56
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 337df0d5d5f..7cda65b5f79 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -338,7 +338,7 @@ struct pci_dev {
338 struct list_head msi_list; 338 struct list_head msi_list;
339#endif 339#endif
340 struct pci_vpd *vpd; 340 struct pci_vpd *vpd;
341#ifdef CONFIG_PCI_IOV 341#ifdef CONFIG_PCI_ATS
342 union { 342 union {
343 struct pci_sriov *sriov; /* SR-IOV capability related */ 343 struct pci_sriov *sriov; /* SR-IOV capability related */
344 struct pci_dev *physfn; /* the PF this VF is associated with */ 344 struct pci_dev *physfn; /* the PF this VF is associated with */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f15acb64681..3f3ed83a9aa 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
54/** 54/**
55 * struct dev_pm_ops - device PM callbacks 55 * struct dev_pm_ops - device PM callbacks
56 * 56 *
57 * Several driver power state transitions are externally visible, affecting 57 * Several device power state transitions are externally visible, affecting
58 * the state of pending I/O queues and (for drivers that touch hardware) 58 * the state of pending I/O queues and (for drivers that touch hardware)
59 * interrupts, wakeups, DMA, and other hardware state. There may also be 59 * interrupts, wakeups, DMA, and other hardware state. There may also be
60 * internal transitions to various low power modes, which are transparent 60 * internal transitions to various low-power modes which are transparent
61 * to the rest of the driver stack (such as a driver that's ON gating off 61 * to the rest of the driver stack (such as a driver that's ON gating off
62 * clocks which are not in active use). 62 * clocks which are not in active use).
63 * 63 *
64 * The externally visible transitions are handled with the help of the following 64 * The externally visible transitions are handled with the help of callbacks
65 * callbacks included in this structure: 65 * included in this structure in such a way that two levels of callbacks are
66 * 66 * involved. First, the PM core executes callbacks provided by PM domains,
67 * @prepare: Prepare the device for the upcoming transition, but do NOT change 67 * device types, classes and bus types. They are the subsystem-level callbacks
68 * its hardware state. Prevent new children of the device from being 68 * supposed to execute callbacks provided by device drivers, although they may
69 * registered after @prepare() returns (the driver's subsystem and 69 * choose not to do that. If the driver callbacks are executed, they have to
70 * generally the rest of the kernel is supposed to prevent new calls to the 70 * collaborate with the subsystem-level callbacks to achieve the goals
71 * probe method from being made too once @prepare() has succeeded). If 71 * appropriate for the given system transition, given transition phase and the
72 * @prepare() detects a situation it cannot handle (e.g. registration of a 72 * subsystem the device belongs to.
73 * child already in progress), it may return -EAGAIN, so that the PM core 73 *
74 * can execute it once again (e.g. after the new child has been registered) 74 * @prepare: The principal role of this callback is to prevent new children of
75 * to recover from the race condition. This method is executed for all 75 * the device from being registered after it has returned (the driver's
76 * kinds of suspend transitions and is followed by one of the suspend 76 * subsystem and generally the rest of the kernel is supposed to prevent
77 * callbacks: @suspend(), @freeze(), or @poweroff(). 77 * new calls to the probe method from being made too once @prepare() has
78 * The PM core executes @prepare() for all devices before starting to 78 * succeeded). If @prepare() detects a situation it cannot handle (e.g.
79 * execute suspend callbacks for any of them, so drivers may assume all of 79 * registration of a child already in progress), it may return -EAGAIN, so
80 * the other devices to be present and functional while @prepare() is being 80 * that the PM core can execute it once again (e.g. after a new child has
81 * executed. In particular, it is safe to make GFP_KERNEL memory 81 * been registered) to recover from the race condition.
82 * allocations from within @prepare(). However, drivers may NOT assume 82 * This method is executed for all kinds of suspend transitions and is
83 * anything about the availability of the user space at that time and it 83 * followed by one of the suspend callbacks: @suspend(), @freeze(), or
84 * is not correct to request firmware from within @prepare() (it's too 84 * @poweroff(). The PM core executes subsystem-level @prepare() for all
85 * late to do that). [To work around this limitation, drivers may 85 * devices before starting to invoke suspend callbacks for any of them, so
86 * register suspend and hibernation notifiers that are executed before the 86 * generally devices may be assumed to be functional or to respond to
87 * freezing of tasks.] 87 * runtime resume requests while @prepare() is being executed. However,
88 * device drivers may NOT assume anything about the availability of user
89 * space at that time and it is NOT valid to request firmware from within
90 * @prepare() (it's too late to do that). It also is NOT valid to allocate
91 * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
92 * [To work around these limitations, drivers may register suspend and
93 * hibernation notifiers to be executed before the freezing of tasks.]
88 * 94 *
89 * @complete: Undo the changes made by @prepare(). This method is executed for 95 * @complete: Undo the changes made by @prepare(). This method is executed for
90 * all kinds of resume transitions, following one of the resume callbacks: 96 * all kinds of resume transitions, following one of the resume callbacks:
91 * @resume(), @thaw(), @restore(). Also called if the state transition 97 * @resume(), @thaw(), @restore(). Also called if the state transition
92 * fails before the driver's suspend callback (@suspend(), @freeze(), 98 * fails before the driver's suspend callback: @suspend(), @freeze() or
93 * @poweroff()) can be executed (e.g. if the suspend callback fails for one 99 * @poweroff(), can be executed (e.g. if the suspend callback fails for one
94 * of the other devices that the PM core has unsuccessfully attempted to 100 * of the other devices that the PM core has unsuccessfully attempted to
95 * suspend earlier). 101 * suspend earlier).
96 * The PM core executes @complete() after it has executed the appropriate 102 * The PM core executes subsystem-level @complete() after it has executed
97 * resume callback for all devices. 103 * the appropriate resume callbacks for all devices.
98 * 104 *
99 * @suspend: Executed before putting the system into a sleep state in which the 105 * @suspend: Executed before putting the system into a sleep state in which the
100 * contents of main memory are preserved. Quiesce the device, put it into 106 * contents of main memory are preserved. The exact action to perform
101 * a low power state appropriate for the upcoming system state (such as 107 * depends on the device's subsystem (PM domain, device type, class or bus
102 * PCI_D3hot), and enable wakeup events as appropriate. 108 * type), but generally the device must be quiescent after subsystem-level
109 * @suspend() has returned, so that it doesn't do any I/O or DMA.
110 * Subsystem-level @suspend() is executed for all devices after invoking
111 * subsystem-level @prepare() for all of them.
103 * 112 *
104 * @resume: Executed after waking the system up from a sleep state in which the 113 * @resume: Executed after waking the system up from a sleep state in which the
105 * contents of main memory were preserved. Put the device into the 114 * contents of main memory were preserved. The exact action to perform
106 * appropriate state, according to the information saved in memory by the 115 * depends on the device's subsystem, but generally the driver is expected
107 * preceding @suspend(). The driver starts working again, responding to 116 * to start working again, responding to hardware events and software
108 * hardware events and software requests. The hardware may have gone 117 * requests (the device itself may be left in a low-power state, waiting
109 * through a power-off reset, or it may have maintained state from the 118 * for a runtime resume to occur). The state of the device at the time its
110 * previous suspend() which the driver may rely on while resuming. On most 119 * driver's @resume() callback is run depends on the platform and subsystem
111 * platforms, there are no restrictions on availability of resources like 120 * the device belongs to. On most platforms, there are no restrictions on
112 * clocks during @resume(). 121 * availability of resources like clocks during @resume().
122 * Subsystem-level @resume() is executed for all devices after invoking
123 * subsystem-level @resume_noirq() for all of them.
113 * 124 *
114 * @freeze: Hibernation-specific, executed before creating a hibernation image. 125 * @freeze: Hibernation-specific, executed before creating a hibernation image.
115 * Quiesce operations so that a consistent image can be created, but do NOT 126 * Analogous to @suspend(), but it should not enable the device to signal
116 * otherwise put the device into a low power device state and do NOT emit 127 * wakeup events or change its power state. The majority of subsystems
117 * system wakeup events. Save in main memory the device settings to be 128 * (with the notable exception of the PCI bus type) expect the driver-level
118 * used by @restore() during the subsequent resume from hibernation or by 129 * @freeze() to save the device settings in memory to be used by @restore()
119 * the subsequent @thaw(), if the creation of the image or the restoration 130 * during the subsequent resume from hibernation.
120 * of main memory contents from it fails. 131 * Subsystem-level @freeze() is executed for all devices after invoking
132 * subsystem-level @prepare() for all of them.
121 * 133 *
122 * @thaw: Hibernation-specific, executed after creating a hibernation image OR 134 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
123 * if the creation of the image fails. Also executed after a failing 135 * if the creation of an image has failed. Also executed after a failing
124 * attempt to restore the contents of main memory from such an image. 136 * attempt to restore the contents of main memory from such an image.
125 * Undo the changes made by the preceding @freeze(), so the device can be 137 * Undo the changes made by the preceding @freeze(), so the device can be
126 * operated in the same way as immediately before the call to @freeze(). 138 * operated in the same way as immediately before the call to @freeze().
139 * Subsystem-level @thaw() is executed for all devices after invoking
140 * subsystem-level @thaw_noirq() for all of them. It also may be executed
141 * directly after @freeze() in case of a transition error.
127 * 142 *
128 * @poweroff: Hibernation-specific, executed after saving a hibernation image. 143 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
129 * Quiesce the device, put it into a low power state appropriate for the 144 * Analogous to @suspend(), but it need not save the device's settings in
130 * upcoming system state (such as PCI_D3hot), and enable wakeup events as 145 * memory.
131 * appropriate. 146 * Subsystem-level @poweroff() is executed for all devices after invoking
147 * subsystem-level @prepare() for all of them.
132 * 148 *
133 * @restore: Hibernation-specific, executed after restoring the contents of main 149 * @restore: Hibernation-specific, executed after restoring the contents of main
134 * memory from a hibernation image. Driver starts working again, 150 * memory from a hibernation image, analogous to @resume().
135 * responding to hardware events and software requests. Drivers may NOT 151 *
136 * make ANY assumptions about the hardware state right prior to @restore(). 152 * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
137 * On most platforms, there are no restrictions on availability of 153 * additional operations required for suspending the device that might be
138 * resources like clocks during @restore(). 154 * racing with its driver's interrupt handler, which is guaranteed not to
139 * 155 * run while @suspend_noirq() is being executed.
140 * @suspend_noirq: Complete the operations of ->suspend() by carrying out any 156 * It generally is expected that the device will be in a low-power state
141 * actions required for suspending the device that need interrupts to be 157 * (appropriate for the target system sleep state) after subsystem-level
142 * disabled 158 * @suspend_noirq() has returned successfully. If the device can generate
143 * 159 * system wakeup signals and is enabled to wake up the system, it should be
144 * @resume_noirq: Prepare for the execution of ->resume() by carrying out any 160 * configured to do so at that time. However, depending on the platform
145 * actions required for resuming the device that need interrupts to be 161 * and device's subsystem, @suspend() may be allowed to put the device into
146 * disabled 162 * the low-power state and configure it to generate wakeup signals, in
147 * 163 * which case it generally is not necessary to define @suspend_noirq().
148 * @freeze_noirq: Complete the operations of ->freeze() by carrying out any 164 *
149 * actions required for freezing the device that need interrupts to be 165 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
150 * disabled 166 * operations required for resuming the device that might be racing with
151 * 167 * its driver's interrupt handler, which is guaranteed not to run while
152 * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any 168 * @resume_noirq() is being executed.
153 * actions required for thawing the device that need interrupts to be 169 *
154 * disabled 170 * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
155 * 171 * additional operations required for freezing the device that might be
156 * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any 172 * racing with its driver's interrupt handler, which is guaranteed not to
157 * actions required for handling the device that need interrupts to be 173 * run while @freeze_noirq() is being executed.
158 * disabled 174 * The power state of the device should not be changed by either @freeze()
159 * 175 * or @freeze_noirq() and it should not be configured to signal system
160 * @restore_noirq: Prepare for the execution of ->restore() by carrying out any 176 * wakeup by any of these callbacks.
161 * actions required for restoring the operations of the device that need 177 *
162 * interrupts to be disabled 178 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
179 * operations required for thawing the device that might be racing with its
180 * driver's interrupt handler, which is guaranteed not to run while
181 * @thaw_noirq() is being executed.
182 *
183 * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
184 * @suspend_noirq(), but it need not save the device's settings in memory.
185 *
186 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 187 * operations required for restoring the device that might be racing with its
188 * driver's interrupt handler, which is guaranteed not to run while
189 * @restore_noirq() is being executed. Analogous to @resume_noirq().
163 * 190 *
164 * All of the above callbacks, except for @complete(), return error codes. 191 * All of the above callbacks, except for @complete(), return error codes.
165 * However, the error codes returned by the resume operations, @resume(), 192 * However, the error codes returned by the resume operations, @resume(),
166 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do 193 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
167 * not cause the PM core to abort the resume transition during which they are 194 * not cause the PM core to abort the resume transition during which they are
168 * returned. The error codes returned in that cases are only printed by the PM 195 * returned. The error codes returned in those cases are only printed by the PM
169 * core to the system logs for debugging purposes. Still, it is recommended 196 * core to the system logs for debugging purposes. Still, it is recommended
170 * that drivers only return error codes from their resume methods in case of an 197 * that drivers only return error codes from their resume methods in case of an
171 * unrecoverable failure (i.e. when the device being handled refuses to resume 198 * unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
174 * their children. 201 * their children.
175 * 202 *
176 * It is allowed to unregister devices while the above callbacks are being 203 * It is allowed to unregister devices while the above callbacks are being
177 * executed. However, it is not allowed to unregister a device from within any 204 * executed. However, a callback routine must NOT try to unregister the device
178 * of its own callbacks. 205 * it was called for, although it may unregister children of that device (for
206 * example, if it detects that a child was unplugged while the system was
207 * asleep).
208 *
209 * Refer to Documentation/power/devices.txt for more information about the role
210 * of the above callbacks in the system suspend process.
179 * 211 *
180 * There also are the following callbacks related to run-time power management 212 * There also are callbacks related to runtime power management of devices.
181 * of devices: 213 * Again, these callbacks are executed by the PM core only for subsystems
214 * (PM domains, device types, classes and bus types) and the subsystem-level
215 * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
216 * actions to be performed by a device driver's callbacks generally depend on
217 * the platform and subsystem the device belongs to.
182 * 218 *
183 * @runtime_suspend: Prepare the device for a condition in which it won't be 219 * @runtime_suspend: Prepare the device for a condition in which it won't be
184 * able to communicate with the CPU(s) and RAM due to power management. 220 * able to communicate with the CPU(s) and RAM due to power management.
185 * This need not mean that the device should be put into a low power state. 221 * This need not mean that the device should be put into a low-power state.
186 * For example, if the device is behind a link which is about to be turned 222 * For example, if the device is behind a link which is about to be turned
187 * off, the device may remain at full power. If the device does go to low 223 * off, the device may remain at full power. If the device does go to low
188 * power and is capable of generating run-time wake-up events, remote 224 * power and is capable of generating runtime wakeup events, remote wakeup
189 * wake-up (i.e., a hardware mechanism allowing the device to request a 225 * (i.e., a hardware mechanism allowing the device to request a change of
190 * change of its power state via a wake-up event, such as PCI PME) should 226 * its power state via an interrupt) should be enabled for it.
191 * be enabled for it.
192 * 227 *
193 * @runtime_resume: Put the device into the fully active state in response to a 228 * @runtime_resume: Put the device into the fully active state in response to a
194 * wake-up event generated by hardware or at the request of software. If 229 * wakeup event generated by hardware or at the request of software. If
195 * necessary, put the device into the full power state and restore its 230 * necessary, put the device into the full-power state and restore its
196 * registers, so that it is fully operational. 231 * registers, so that it is fully operational.
197 * 232 *
198 * @runtime_idle: Device appears to be inactive and it might be put into a low 233 * @runtime_idle: Device appears to be inactive and it might be put into a
199 * power state if all of the necessary conditions are satisfied. Check 234 * low-power state if all of the necessary conditions are satisfied. Check
200 * these conditions and handle the device as appropriate, possibly queueing 235 * these conditions and handle the device as appropriate, possibly queueing
201 * a suspend request for it. The return value is ignored by the PM core. 236 * a suspend request for it. The return value is ignored by the PM core.
237 *
238 * Refer to Documentation/power/runtime_pm.txt for more information about the
239 * role of the above callbacks in device runtime power management.
240 *
202 */ 241 */
203 242
204struct dev_pm_ops { 243struct dev_pm_ops {
@@ -447,6 +486,7 @@ struct dev_pm_info {
447 unsigned int async_suspend:1; 486 unsigned int async_suspend:1;
448 bool is_prepared:1; /* Owned by the PM core */ 487 bool is_prepared:1; /* Owned by the PM core */
449 bool is_suspended:1; /* Ditto */ 488 bool is_suspended:1; /* Ditto */
489 bool ignore_children:1;
450 spinlock_t lock; 490 spinlock_t lock;
451#ifdef CONFIG_PM_SLEEP 491#ifdef CONFIG_PM_SLEEP
452 struct list_head entry; 492 struct list_head entry;
@@ -464,7 +504,6 @@ struct dev_pm_info {
464 atomic_t usage_count; 504 atomic_t usage_count;
465 atomic_t child_count; 505 atomic_t child_count;
466 unsigned int disable_depth:3; 506 unsigned int disable_depth:3;
467 unsigned int ignore_children:1;
468 unsigned int idle_notification:1; 507 unsigned int idle_notification:1;
469 unsigned int request_pending:1; 508 unsigned int request_pending:1;
470 unsigned int deferred_resume:1; 509 unsigned int deferred_resume:1;
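The reworked kerneldoc above describes a two-level scheme: the PM core runs subsystem-level callbacks (PM domain, device type, class or bus type), which in turn decide whether and how to run the driver's callbacks. A minimal driver-side sketch, assuming a hypothetical foo driver whose subsystem forwards both system sleep and runtime PM callbacks; all foo_* names are illustrative:

    #include <linux/pm.h>

    /* Hedged sketch of a driver-level dev_pm_ops matching the comment above. */
    static int foo_suspend(struct device *dev)
    {
            /* quiesce the device; no I/O or DMA after this returns */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* start responding to hardware events and software requests again */
            return 0;
    }

    static int foo_runtime_suspend(struct device *dev)
    {
            /* device may become unreachable; enable remote wakeup if it is used */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            /* restore registers and return the device to the fully active state */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

The SET_* macros expand to nothing when the corresponding PM options are disabled, so the structure stays valid in all configurations.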
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d8d90361964..d3085e72a0e 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -52,11 +52,6 @@ static inline bool pm_children_suspended(struct device *dev)
52 || !atomic_read(&dev->power.child_count); 52 || !atomic_read(&dev->power.child_count);
53} 53}
54 54
55static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
56{
57 dev->power.ignore_children = enable;
58}
59
60static inline void pm_runtime_get_noresume(struct device *dev) 55static inline void pm_runtime_get_noresume(struct device *dev)
61{ 56{
62 atomic_inc(&dev->power.usage_count); 57 atomic_inc(&dev->power.usage_count);
@@ -130,7 +125,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
130static inline void pm_runtime_forbid(struct device *dev) {} 125static inline void pm_runtime_forbid(struct device *dev) {}
131 126
132static inline bool pm_children_suspended(struct device *dev) { return false; } 127static inline bool pm_children_suspended(struct device *dev) { return false; }
133static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
134static inline void pm_runtime_get_noresume(struct device *dev) {} 128static inline void pm_runtime_get_noresume(struct device *dev) {}
135static inline void pm_runtime_put_noidle(struct device *dev) {} 129static inline void pm_runtime_put_noidle(struct device *dev) {}
136static inline bool device_run_wake(struct device *dev) { return false; } 130static inline bool device_run_wake(struct device *dev) { return false; }
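With the ignore_children flag moved out of the runtime-PM-only part of struct dev_pm_info (see the pm.h hunk above), the two inline copies of pm_suspend_ignore_children() removed here are presumably replaced by a single unconditional definition elsewhere in this header. A hedged usage sketch, assuming a controller driver that manages its children's power itself; the probe function name is illustrative:

    #include <linux/pm_runtime.h>

    /* Hedged sketch: ask the PM core not to count this device's children
     * when deciding whether the device itself may be suspended. */
    static int foo_controller_probe(struct device *dev)
    {
            pm_suspend_ignore_children(dev, true);
            pm_runtime_enable(dev);
            return 0;
    }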
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3..2ca8cde5459 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,10 +35,12 @@ struct pstore_info {
35 spinlock_t buf_lock; /* serialize access to 'buf' */ 35 spinlock_t buf_lock; /* serialize access to 'buf' */
36 char *buf; 36 char *buf;
37 size_t bufsize; 37 size_t bufsize;
38 struct mutex read_mutex; /* serialize open/read/close */
38 int (*open)(struct pstore_info *psi); 39 int (*open)(struct pstore_info *psi);
39 int (*close)(struct pstore_info *psi); 40 int (*close)(struct pstore_info *psi);
40 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 41 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
41 struct timespec *time, struct pstore_info *psi); 42 struct timespec *time, char **buf,
43 struct pstore_info *psi);
42 int (*write)(enum pstore_type_id type, u64 *id, 44 int (*write)(enum pstore_type_id type, u64 *id,
43 unsigned int part, size_t size, struct pstore_info *psi); 45 unsigned int part, size_t size, struct pstore_info *psi);
44 int (*erase)(enum pstore_type_id type, u64 id, 46 int (*erase)(enum pstore_type_id type, u64 id,
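The pstore hunk adds a read_mutex to serialize open/read/close and changes ->read() to hand each record's buffer back through a new char **buf argument instead of the shared psi->buf. A hedged backend-side sketch of the new prototype; the foo_record type and next_record() helper are hypothetical, and the end-of-records convention (returning 0) is an assumption:

    #include <linux/pstore.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct foo_record {                     /* hypothetical backend record */
            u64 id;
            struct timespec timestamp;
            size_t size;
            const char *data;
    };

    static struct foo_record *next_record(struct pstore_info *psi); /* hypothetical */

    static ssize_t foo_pstore_read(u64 *id, enum pstore_type_id *type,
                                   struct timespec *time, char **buf,
                                   struct pstore_info *psi)
    {
            struct foo_record *rec = next_record(psi);

            if (!rec)
                    return 0;               /* assume 0 means "no more records" */

            *id = rec->id;
            *type = PSTORE_TYPE_DMESG;
            *time = rec->timestamp;

            /* the backend now owns buffer allocation for each record */
            *buf = kmalloc(rec->size, GFP_KERNEL);
            if (!*buf)
                    return -ENOMEM;
            memcpy(*buf, rec->data, rec->size);

            return rec->size;
    }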
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68daf4f27e2..1c4f3e9b9bc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1521,7 +1521,6 @@ struct task_struct {
1521#ifdef CONFIG_FAULT_INJECTION 1521#ifdef CONFIG_FAULT_INJECTION
1522 int make_it_fail; 1522 int make_it_fail;
1523#endif 1523#endif
1524 struct prop_local_single dirties;
1525 /* 1524 /*
1526 * when (nr_dirtied >= nr_dirtied_pause), it's time to call 1525 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1527 * balance_dirty_pages() for some dirty throttling pause 1526 * balance_dirty_pages() for some dirty throttling pause
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 97ff8e27a6c..3d86517fe7d 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -207,13 +207,15 @@ struct serial_icounter_struct {
207 207
208struct serial_rs485 { 208struct serial_rs485 {
209 __u32 flags; /* RS485 feature flags */ 209 __u32 flags; /* RS485 feature flags */
210#define SER_RS485_ENABLED (1 << 0) 210#define SER_RS485_ENABLED (1 << 0) /* If enabled */
211#define SER_RS485_RTS_ON_SEND (1 << 1) 211#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for
212#define SER_RS485_RTS_AFTER_SEND (1 << 2) 212 RTS pin when
213#define SER_RS485_RTS_BEFORE_SEND (1 << 3) 213 sending */
214#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for
 215 RTS pin after send */
214#define SER_RS485_RX_DURING_TX (1 << 4) 216#define SER_RS485_RX_DURING_TX (1 << 4)
215 __u32 delay_rts_before_send; /* Milliseconds */ 217 __u32 delay_rts_before_send; /* Delay before send (milliseconds) */
216 __u32 delay_rts_after_send; /* Milliseconds */ 218 __u32 delay_rts_after_send; /* Delay after send (milliseconds) */
217 __u32 padding[5]; /* Memory is cheap, new structs 219 __u32 padding[5]; /* Memory is cheap, new structs
218 are a royal PITA .. */ 220 are a royal PITA .. */
219}; 221};
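The RS485 flag bits now carry short comments and the delay fields are explicitly documented as milliseconds. A hedged user-space sketch of the corresponding ioctl, along the lines of Documentation/serial/serial-rs485.txt; the delay values are illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    /* Hedged sketch: enable RS485 on an already-open serial fd, with RTS
     * asserted while sending and a 1 ms turnaround after each transmission. */
    static int enable_rs485(int fd)
    {
            struct serial_rs485 rs485;

            memset(&rs485, 0, sizeof(rs485));
            rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
            rs485.delay_rts_before_send = 0;        /* milliseconds */
            rs485.delay_rts_after_send = 1;         /* milliseconds */

            return ioctl(fd, TIOCSRS485, &rs485);
    }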
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index add4790b21f..e9e72bda1b7 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -85,6 +85,8 @@
85 * @reset: reset the device 85 * @reset: reset the device
86 * vdev: the virtio device 86 * vdev: the virtio device
87 * After this, status and feature negotiation must be done again 87 * After this, status and feature negotiation must be done again
88 * Device must not be reset from its vq/config callbacks, or in
89 * parallel with being added/removed.
88 * @find_vqs: find virtqueues and instantiate them. 90 * @find_vqs: find virtqueues and instantiate them.
89 * vdev: the virtio_device 91 * vdev: the virtio_device
90 * nvqs: the number of virtqueues to find 92 * nvqs: the number of virtqueues to find
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 27c7edefbc8..5c7b6f0daef 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -63,7 +63,7 @@
63#define VIRTIO_MMIO_GUEST_FEATURES 0x020 63#define VIRTIO_MMIO_GUEST_FEATURES 0x020
64 64
65/* Activated features set selector - Write Only */ 65/* Activated features set selector - Write Only */
66#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024 66#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
67 67
68/* Guest's memory page size in bytes - Write Only */ 68/* Guest's memory page size in bytes - Write Only */
69#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 69#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 687fb11e201..4bde182fcf9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -119,7 +119,7 @@ unmap_kernel_range(unsigned long addr, unsigned long size)
119#endif 119#endif
120 120
121/* Allocate/destroy a 'vmalloc' VM area. */ 121/* Allocate/destroy a 'vmalloc' VM area. */
122extern struct vm_struct *alloc_vm_area(size_t size); 122extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
123extern void free_vm_area(struct vm_struct *area); 123extern void free_vm_area(struct vm_struct *area);
124 124
125/* for /dev/kmem */ 125/* for /dev/kmem */
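alloc_vm_area() now takes a second argument through which the caller receives pointers to the page-table entries backing the new area, so pages can be installed (or mapped by a hypervisor) without faulting the range in. A hedged sketch of the new calling convention, assuming the caller supplies an array with one pte_t * slot per page; the function name is illustrative:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Hedged sketch: reserve nr_pages of kernel virtual address space and
     * hand back both the area and the PTE pointers for its pages. */
    static struct vm_struct *foo_reserve_area(unsigned int nr_pages, pte_t **ptes)
    {
            struct vm_struct *area;

            area = alloc_vm_area(nr_pages * PAGE_SIZE, ptes);
            if (!area)
                    return NULL;

            /* area->addr is the start of the range; ptes[0..nr_pages-1] now
             * point at its (still unpopulated) page-table entries. */
            return area;
    }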
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index ab90ae0970a..6cc18f37167 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -39,8 +39,11 @@
39#define L2CAP_DEFAULT_ACK_TO 200 39#define L2CAP_DEFAULT_ACK_TO 200
40#define L2CAP_LE_DEFAULT_MTU 23 40#define L2CAP_LE_DEFAULT_MTU 23
41 41
42#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ 42#define L2CAP_DISC_TIMEOUT (100)
43#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ 43#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */
44#define L2CAP_ENC_TIMEOUT (5000) /* 5 seconds */
45#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
46#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
44 47
45/* L2CAP socket address */ 48/* L2CAP socket address */
46struct sockaddr_l2 { 49struct sockaddr_l2 {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 92cf1c2c30c..95852e36713 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -456,6 +456,9 @@ enum station_parameters_apply_mask {
456 * as the AC bitmap in the QoS info field 456 * as the AC bitmap in the QoS info field
457 * @max_sp: max Service Period. same format as the MAX_SP in the 457 * @max_sp: max Service Period. same format as the MAX_SP in the
458 * QoS info field (but already shifted down) 458 * QoS info field (but already shifted down)
459 * @sta_modify_mask: bitmap indicating which parameters changed
460 * (for those that don't have a natural "no change" value),
461 * see &enum station_parameters_apply_mask
459 */ 462 */
460struct station_parameters { 463struct station_parameters {
461 u8 *supported_rates; 464 u8 *supported_rates;
@@ -615,6 +618,7 @@ struct sta_bss_parameters {
615 * user space MLME/SME implementation. The information is provided for 618 * user space MLME/SME implementation. The information is provided for
616 * the cfg80211_new_sta() calls to notify user space of the IEs. 619 * the cfg80211_new_sta() calls to notify user space of the IEs.
617 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. 620 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
621 * @sta_flags: station flags mask & values
618 */ 622 */
619struct station_info { 623struct station_info {
620 u32 filled; 624 u32 filled;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c..378c7ed6760 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
307 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); 307 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
308}; 308};
309 309
310#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
311/* Init with the board info */ 310/* Init with the board info */
312extern int omap_display_init(struct omap_dss_board_info *board_data); 311extern int omap_display_init(struct omap_dss_board_info *board_data);
313#else
314static inline int omap_display_init(struct omap_dss_board_info *board_data)
315{
316 return 0;
317}
318#endif
319 312
320struct omap_display_platform_data { 313struct omap_display_platform_data {
321 struct omap_dss_board_info *board_data; 314 struct omap_dss_board_info *board_data;
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
index a785a3b0c8c..438c256c274 100644
--- a/include/xen/platform_pci.h
+++ b/include/xen/platform_pci.h
@@ -29,8 +29,7 @@
29static inline int xen_must_unplug_nics(void) { 29static inline int xen_must_unplug_nics(void) {
30#if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ 30#if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
31 defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ 31 defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \
32 (defined(CONFIG_XEN_PLATFORM_PCI) || \ 32 defined(CONFIG_XEN_PVHVM)
33 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
34 return 1; 33 return 1;
35#else 34#else
36 return 0; 35 return 0;
@@ -40,8 +39,7 @@ static inline int xen_must_unplug_nics(void) {
40static inline int xen_must_unplug_disks(void) { 39static inline int xen_must_unplug_disks(void) {
41#if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ 40#if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \
42 defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ 41 defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \
43 (defined(CONFIG_XEN_PLATFORM_PCI) || \ 42 defined(CONFIG_XEN_PVHVM)
44 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
45 return 1; 43 return 1;
46#else 44#else
47 return 0; 45 return 0;
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e..213c0351dad 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
153 kfree(cgroup_freezer(cgroup)); 153 kfree(cgroup_freezer(cgroup));
154} 154}
155 155
156/* task is frozen or will freeze immediately when next it gets woken */
157static bool is_task_frozen_enough(struct task_struct *task)
158{
159 return frozen(task) ||
160 (task_is_stopped_or_traced(task) && freezing(task));
161}
162
156/* 163/*
157 * The call to cgroup_lock() in the freezer.state write method prevents 164 * The call to cgroup_lock() in the freezer.state write method prevents
158 * a write to that file racing against an attach, and hence the 165 * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
231 cgroup_iter_start(cgroup, &it); 238 cgroup_iter_start(cgroup, &it);
232 while ((task = cgroup_iter_next(cgroup, &it))) { 239 while ((task = cgroup_iter_next(cgroup, &it))) {
233 ntotal++; 240 ntotal++;
234 if (frozen(task)) 241 if (is_task_frozen_enough(task))
235 nfrozen++; 242 nfrozen++;
236 } 243 }
237 244
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
284 while ((task = cgroup_iter_next(cgroup, &it))) { 291 while ((task = cgroup_iter_next(cgroup, &it))) {
285 if (!freeze_task(task, true)) 292 if (!freeze_task(task, true))
286 continue; 293 continue;
287 if (frozen(task)) 294 if (is_task_frozen_enough(task))
288 continue; 295 continue;
289 if (!freezing(task) && !freezer_should_skip(task)) 296 if (!freezing(task) && !freezer_should_skip(task))
290 num_cant_freeze_now++; 297 num_cant_freeze_now++;
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d1726132..da4a6a10d08 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
162 162
163void free_task(struct task_struct *tsk) 163void free_task(struct task_struct *tsk)
164{ 164{
165 prop_local_destroy_single(&tsk->dirties);
166 account_kernel_stack(tsk->stack, -1); 165 account_kernel_stack(tsk->stack, -1);
167 free_thread_info(tsk->stack); 166 free_thread_info(tsk->stack);
168 rt_mutex_debug_task_free(tsk); 167 rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
274 273
275 tsk->stack = ti; 274 tsk->stack = ti;
276 275
277 err = prop_local_init_single(&tsk->dirties);
278 if (err)
279 goto out;
280
281 setup_thread_stack(tsk, orig); 276 setup_thread_stack(tsk, orig);
282 clear_user_return_notifier(tsk); 277 clear_user_return_notifier(tsk);
283 clear_tsk_need_resched(tsk); 278 clear_tsk_need_resched(tsk);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf..ae34bf51682 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
885 struct hrtimer_clock_base *base, 885 struct hrtimer_clock_base *base,
886 unsigned long newstate, int reprogram) 886 unsigned long newstate, int reprogram)
887{ 887{
888 struct timerqueue_node *next_timer;
888 if (!(timer->state & HRTIMER_STATE_ENQUEUED)) 889 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
889 goto out; 890 goto out;
890 891
891 if (&timer->node == timerqueue_getnext(&base->active)) { 892 next_timer = timerqueue_getnext(&base->active);
893 timerqueue_del(&base->active, &timer->node);
894 if (&timer->node == next_timer) {
892#ifdef CONFIG_HIGH_RES_TIMERS 895#ifdef CONFIG_HIGH_RES_TIMERS
893 /* Reprogram the clock event device. if enabled */ 896 /* Reprogram the clock event device. if enabled */
894 if (reprogram && hrtimer_hres_active()) { 897 if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
901 } 904 }
902#endif 905#endif
903 } 906 }
904 timerqueue_del(&base->active, &timer->node);
905 if (!timerqueue_getnext(&base->active)) 907 if (!timerqueue_getnext(&base->active))
906 base->cpu_base->active_bases &= ~(1 << base->index); 908 base->cpu_base->active_bases &= ~(1 << base->index);
907out: 909out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52..0e2b179bc7b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1596,7 +1596,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1596 return -ENOMEM; 1596 return -ENOMEM;
1597 1597
1598 action->handler = handler; 1598 action->handler = handler;
1599 action->flags = IRQF_PERCPU; 1599 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1600 action->name = devname; 1600 action->name = devname;
1601 action->percpu_dev_id = dev_id; 1601 action->percpu_dev_id = dev_id;
1602 1602
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index aa57d5da18c..dc813a948be 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
84 */ 84 */
85 action = desc->action; 85 action = desc->action;
86 if (!action || !(action->flags & IRQF_SHARED) || 86 if (!action || !(action->flags & IRQF_SHARED) ||
87 (action->flags & __IRQF_TIMER) || !action->next) 87 (action->flags & __IRQF_TIMER) ||
88 (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
89 !action->next)
88 goto out; 90 goto out;
89 91
90 /* Already running on another processor */ 92 /* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
115 struct irq_desc *desc; 117 struct irq_desc *desc;
116 int i, ok = 0; 118 int i, ok = 0;
117 119
118 if (atomic_inc_return(&irq_poll_active) == 1) 120 if (atomic_inc_return(&irq_poll_active) != 1)
119 goto out; 121 goto out;
120 122
121 irq_poll_cpu = smp_processor_id(); 123 irq_poll_cpu = smp_processor_id();
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b4511b6d3ef..a6b0503574e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -55,6 +55,8 @@ enum {
55 55
56static int hibernation_mode = HIBERNATION_SHUTDOWN; 56static int hibernation_mode = HIBERNATION_SHUTDOWN;
57 57
58static bool freezer_test_done;
59
58static const struct platform_hibernation_ops *hibernation_ops; 60static const struct platform_hibernation_ops *hibernation_ops;
59 61
60/** 62/**
@@ -345,11 +347,24 @@ int hibernation_snapshot(int platform_mode)
345 347
346 error = freeze_kernel_threads(); 348 error = freeze_kernel_threads();
347 if (error) 349 if (error)
348 goto Close; 350 goto Cleanup;
351
352 if (hibernation_test(TEST_FREEZER) ||
353 hibernation_testmode(HIBERNATION_TESTPROC)) {
354
355 /*
356 * Indicate to the caller that we are returning due to a
357 * successful freezer test.
358 */
359 freezer_test_done = true;
360 goto Cleanup;
361 }
349 362
350 error = dpm_prepare(PMSG_FREEZE); 363 error = dpm_prepare(PMSG_FREEZE);
351 if (error) 364 if (error) {
352 goto Complete_devices; 365 dpm_complete(msg);
366 goto Cleanup;
367 }
353 368
354 suspend_console(); 369 suspend_console();
355 pm_restrict_gfp_mask(); 370 pm_restrict_gfp_mask();
@@ -378,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
378 pm_restore_gfp_mask(); 393 pm_restore_gfp_mask();
379 394
380 resume_console(); 395 resume_console();
381
382 Complete_devices:
383 dpm_complete(msg); 396 dpm_complete(msg);
384 397
385 Close: 398 Close:
@@ -389,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
389 Recover_platform: 402 Recover_platform:
390 platform_recover(platform_mode); 403 platform_recover(platform_mode);
391 goto Resume_devices; 404 goto Resume_devices;
405
406 Cleanup:
407 swsusp_free();
408 goto Close;
392} 409}
393 410
394/** 411/**
@@ -641,15 +658,13 @@ int hibernate(void)
641 if (error) 658 if (error)
642 goto Finish; 659 goto Finish;
643 660
644 if (hibernation_test(TEST_FREEZER))
645 goto Thaw;
646
647 if (hibernation_testmode(HIBERNATION_TESTPROC))
648 goto Thaw;
649
650 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); 661 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
651 if (error) 662 if (error)
652 goto Thaw; 663 goto Thaw;
664 if (freezer_test_done) {
665 freezer_test_done = false;
666 goto Thaw;
667 }
653 668
654 if (in_suspend) { 669 if (in_suspend) {
655 unsigned int flags = 0; 670 unsigned int flags = 0;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 71f49fe4377..36e0f0903c3 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
290 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) 290 if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
291 break; 291 break;
292 } 292 }
293 if (state < PM_SUSPEND_MAX && *s) 293 if (state < PM_SUSPEND_MAX && *s) {
294 error = enter_state(state); 294 error = enter_state(state);
295 if (error) { 295 if (error) {
296 suspend_stats.fail++; 296 suspend_stats.fail++;
297 dpm_save_failed_errno(error); 297 dpm_save_failed_errno(error);
298 } else 298 } else
299 suspend_stats.success++; 299 suspend_stats.success++;
300 }
300#endif 301#endif
301 302
302 Exit: 303 Exit:
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e09..cfc65e1eb9f 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
492} 492}
493 493
494/** 494/**
 495 * clocksource_max_adjustment - Returns max adjustment amount
496 * @cs: Pointer to clocksource
497 *
498 */
499static u32 clocksource_max_adjustment(struct clocksource *cs)
500{
501 u64 ret;
502 /*
 503 * We won't try to correct for more than 11% adjustments (110,000 ppm).
504 */
505 ret = (u64)cs->mult * 11;
506 do_div(ret,100);
507 return (u32)ret;
508}
509
510/**
495 * clocksource_max_deferment - Returns max time the clocksource can be deferred 511 * clocksource_max_deferment - Returns max time the clocksource can be deferred
496 * @cs: Pointer to clocksource 512 * @cs: Pointer to clocksource
497 * 513 *
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
503 /* 519 /*
504 * Calculate the maximum number of cycles that we can pass to the 520 * Calculate the maximum number of cycles that we can pass to the
505 * cyc2ns function without overflowing a 64-bit signed result. The 521 * cyc2ns function without overflowing a 64-bit signed result. The
506 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which 522 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
507 * is equivalent to the below. 523 * which is equivalent to the below.
508 * max_cycles < (2^63)/cs->mult 524 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
509 * max_cycles < 2^(log2((2^63)/cs->mult)) 525 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
510 * max_cycles < 2^(log2(2^63) - log2(cs->mult)) 526 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
511 * max_cycles < 2^(63 - log2(cs->mult)) 527 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
512 * max_cycles < 1 << (63 - log2(cs->mult)) 528 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
513 * Please note that we add 1 to the result of the log2 to account for 529 * Please note that we add 1 to the result of the log2 to account for
514 * any rounding errors, ensure the above inequality is satisfied and 530 * any rounding errors, ensure the above inequality is satisfied and
515 * no overflow will occur. 531 * no overflow will occur.
516 */ 532 */
517 max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); 533 max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
518 534
519 /* 535 /*
520 * The actual maximum number of cycles we can defer the clocksource is 536 * The actual maximum number of cycles we can defer the clocksource is
521 * determined by the minimum of max_cycles and cs->mask. 537 * determined by the minimum of max_cycles and cs->mask.
538 * Note: Here we subtract the maxadj to make sure we don't sleep for
539 * too long if there's a large negative adjustment.
522 */ 540 */
523 max_cycles = min_t(u64, max_cycles, (u64) cs->mask); 541 max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
524 max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); 542 max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
543 cs->shift);
525 544
526 /* 545 /*
527 * To ensure that the clocksource does not wrap whilst we are idle, 546 * To ensure that the clocksource does not wrap whilst we are idle,
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
640void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) 659void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
641{ 660{
642 u64 sec; 661 u64 sec;
643
644 /* 662 /*
645 * Calc the maximum number of seconds which we can run before 663 * Calc the maximum number of seconds which we can run before
646 * wrapping around. For clocksources which have a mask > 32bit 664 * wrapping around. For clocksources which have a mask > 32bit
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
661 679
662 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, 680 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
663 NSEC_PER_SEC / scale, sec * scale); 681 NSEC_PER_SEC / scale, sec * scale);
682
683 /*
 683 * For clocksources that have large mults, avoid overflow of mult + maxadj:
 684 * since mult may be adjusted by NTP, keep an extra safety margin
686 *
687 */
688 cs->maxadj = clocksource_max_adjustment(cs);
689 while ((cs->mult + cs->maxadj < cs->mult)
690 || (cs->mult - cs->maxadj > cs->mult)) {
691 cs->mult >>= 1;
692 cs->shift--;
693 cs->maxadj = clocksource_max_adjustment(cs);
694 }
695
664 cs->max_idle_ns = clocksource_max_deferment(cs); 696 cs->max_idle_ns = clocksource_max_deferment(cs);
665} 697}
666EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); 698EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
701 */ 733 */
702int clocksource_register(struct clocksource *cs) 734int clocksource_register(struct clocksource *cs)
703{ 735{
736 /* calculate max adjustment for given mult/shift */
737 cs->maxadj = clocksource_max_adjustment(cs);
738 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
739 "Clocksource %s might overflow on 11%% adjustment\n",
740 cs->name);
741
704 /* calculate max idle time permitted for this clocksource */ 742 /* calculate max idle time permitted for this clocksource */
705 cs->max_idle_ns = clocksource_max_deferment(cs); 743 cs->max_idle_ns = clocksource_max_deferment(cs);
706 744
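The new clocksource_max_adjustment() caps NTP frequency correction at 11% of the clocksource's mult, and both the overflow bound and the idle-deferment conversion above now account for that headroom. A hedged worked example with an illustrative mult value:

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Hedged arithmetic sketch mirroring clocksource_max_adjustment() from
     * the hunk above.  With an illustrative mult of 4194304 (2^22):
     *     maxadj = 4194304 * 11 / 100 = 461373
     * so NTP may swing mult between roughly 3732931 and 4655677, and
     * max_cycles is computed against mult + maxadj to stay clear of
     * 64-bit overflow. */
    static u32 example_max_adjustment(u32 mult)
    {
            u64 ret = (u64)mult * 11;

            do_div(ret, 100);
            return (u32)ret;
    }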
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e850..237841378c0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
249 secs = xtime.tv_sec + wall_to_monotonic.tv_sec; 249 secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
250 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; 250 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
251 nsecs += timekeeping_get_ns(); 251 nsecs += timekeeping_get_ns();
252 /* If arch requires, add in gettimeoffset() */
253 nsecs += arch_gettimeoffset();
252 254
253 } while (read_seqretry(&xtime_lock, seq)); 255 } while (read_seqretry(&xtime_lock, seq));
254 /* 256 /*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
280 *ts = xtime; 282 *ts = xtime;
281 tomono = wall_to_monotonic; 283 tomono = wall_to_monotonic;
282 nsecs = timekeeping_get_ns(); 284 nsecs = timekeeping_get_ns();
285 /* If arch requires, add in gettimeoffset() */
286 nsecs += arch_gettimeoffset();
283 287
284 } while (read_seqretry(&xtime_lock, seq)); 288 } while (read_seqretry(&xtime_lock, seq));
285 289
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
802 s64 error, interval = timekeeper.cycle_interval; 806 s64 error, interval = timekeeper.cycle_interval;
803 int adj; 807 int adj;
804 808
809 /*
 810 * The point of this is to check if the error is greater than half
 811 * an interval.
 812 *
 813 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
 814 *
 815 * Note we subtract one in the shift, so that error is really error*2.
 816 * This "saves" dividing (shifting) interval twice, but keeps the
 817 * (error > interval) comparison as still measuring if error is
 818 * larger than half an interval.
 819 *
 820 * Note: It does not "save" on aggravation when reading the code.
821 */
805 error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); 822 error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
806 if (error > interval) { 823 if (error > interval) {
824 /*
 825 * We now divide error by 4 (via shift), which checks if
 826 * the error is greater than twice the interval.
 827 * If it is greater, we need a bigadjust; if it's smaller,
828 * we can adjust by 1.
829 */
807 error >>= 2; 830 error >>= 2;
831 /*
832 * XXX - In update_wall_time, we round up to the next
833 * nanosecond, and store the amount rounded up into
834 * the error. This causes the likely below to be unlikely.
835 *
 836 * The proper fix is to avoid rounding up by using
837 * the high precision timekeeper.xtime_nsec instead of
838 * xtime.tv_nsec everywhere. Fixing this will take some
839 * time.
840 */
808 if (likely(error <= interval)) 841 if (likely(error <= interval))
809 adj = 1; 842 adj = 1;
810 else 843 else
811 adj = timekeeping_bigadjust(error, &interval, &offset); 844 adj = timekeeping_bigadjust(error, &interval, &offset);
812 } else if (error < -interval) { 845 } else if (error < -interval) {
846 /* See comment above, this is just switched for the negative */
813 error >>= 2; 847 error >>= 2;
814 if (likely(error >= -interval)) { 848 if (likely(error >= -interval)) {
815 adj = -1; 849 adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
817 offset = -offset; 851 offset = -offset;
818 } else 852 } else
819 adj = timekeeping_bigadjust(error, &interval, &offset); 853 adj = timekeeping_bigadjust(error, &interval, &offset);
820 } else 854 } else /* No adjustment needed */
821 return; 855 return;
822 856
857 WARN_ONCE(timekeeper.clock->maxadj &&
858 (timekeeper.mult + adj > timekeeper.clock->mult +
859 timekeeper.clock->maxadj),
860 "Adjusting %s more then 11%% (%ld vs %ld)\n",
861 timekeeper.clock->name, (long)timekeeper.mult + adj,
862 (long)timekeeper.clock->mult +
863 timekeeper.clock->maxadj);
864 /*
865 * So the following can be confusing.
866 *
867 * To keep things simple, lets assume adj == 1 for now.
868 *
869 * When adj != 1, remember that the interval and offset values
870 * have been appropriately scaled so the math is the same.
871 *
872 * The basic idea here is that we're increasing the multiplier
873 * by one, this causes the xtime_interval to be incremented by
874 * one cycle_interval. This is because:
875 * xtime_interval = cycle_interval * mult
876 * So if mult is being incremented by one:
877 * xtime_interval = cycle_interval * (mult + 1)
 878 * It's the same as:
879 * xtime_interval = (cycle_interval * mult) + cycle_interval
880 * Which can be shortened to:
881 * xtime_interval += cycle_interval
882 *
883 * So offset stores the non-accumulated cycles. Thus the current
884 * time (in shifted nanoseconds) is:
885 * now = (offset * adj) + xtime_nsec
886 * Now, even though we're adjusting the clock frequency, we have
887 * to keep time consistent. In other words, we can't jump back
888 * in time, and we also want to avoid jumping forward in time.
889 *
890 * So given the same offset value, we need the time to be the same
891 * both before and after the freq adjustment.
892 * now = (offset * adj_1) + xtime_nsec_1
893 * now = (offset * adj_2) + xtime_nsec_2
894 * So:
895 * (offset * adj_1) + xtime_nsec_1 =
896 * (offset * adj_2) + xtime_nsec_2
897 * And we know:
898 * adj_2 = adj_1 + 1
899 * So:
900 * (offset * adj_1) + xtime_nsec_1 =
901 * (offset * (adj_1+1)) + xtime_nsec_2
902 * (offset * adj_1) + xtime_nsec_1 =
903 * (offset * adj_1) + offset + xtime_nsec_2
904 * Canceling the sides:
905 * xtime_nsec_1 = offset + xtime_nsec_2
906 * Which gives us:
907 * xtime_nsec_2 = xtime_nsec_1 - offset
 908 * Which simplifies to:
909 * xtime_nsec -= offset
910 *
911 * XXX - TODO: Doc ntp_error calculation.
912 */
823 timekeeper.mult += adj; 913 timekeeper.mult += adj;
824 timekeeper.xtime_interval += interval; 914 timekeeper.xtime_interval += interval;
825 timekeeper.xtime_nsec -= offset; 915 timekeeper.xtime_nsec -= offset;
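The long comment added to timekeeping_adjust() derives why xtime_nsec must shrink by offset whenever mult grows by adj, so that the readable time does not jump at the adjustment. A hedged numeric check of that invariant with made-up values:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Hedged sketch: with illustrative numbers, time as computed from the
     * (offset, mult, xtime_nsec) triple must read the same before and
     * after the frequency adjustment. */
    static void example_adjust_check(void)
    {
            u64 offset = 1000;      /* non-accumulated cycles (illustrative) */
            u64 mult = 7;           /* current multiplier (illustrative)     */
            u64 xtime_nsec = 50000; /* shifted nanoseconds (illustrative)    */
            u64 before, after;

            before = offset * mult + xtime_nsec;            /* 7000 + 50000 = 57000 */

            /* adj == 1: bump the multiplier and compensate xtime_nsec */
            mult += 1;
            xtime_nsec -= offset;

            after = offset * mult + xtime_nsec;             /* 8000 + 49000 = 57000 */

            WARN_ON(before != after);   /* time reads the same either way */
    }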
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a0860640378..71034f41a2b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -724,6 +724,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
724 724
725 bdi_unregister(bdi); 725 bdi_unregister(bdi);
726 726
727 /*
728 * If bdi_unregister() had already been called earlier, the
729 * wakeup_timer could still be armed because bdi_prune_sb()
730 * can race with the bdi_wakeup_thread_delayed() calls from
731 * __mark_inode_dirty().
732 */
733 del_timer_sync(&bdi->wb.wakeup_timer);
734
727 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 735 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
728 percpu_counter_destroy(&bdi->bdi_stat[i]); 736 percpu_counter_destroy(&bdi->bdi_stat[i]);
729 737
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dae27ba3be2..bb28a5f9db8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2422,6 +2422,8 @@ retry_avoidcopy:
2422 * anon_vma prepared. 2422 * anon_vma prepared.
2423 */ 2423 */
2424 if (unlikely(anon_vma_prepare(vma))) { 2424 if (unlikely(anon_vma_prepare(vma))) {
2425 page_cache_release(new_page);
2426 page_cache_release(old_page);
2425 /* Caller expects lock to be held */ 2427 /* Caller expects lock to be held */
2426 spin_lock(&mm->page_table_lock); 2428 spin_lock(&mm->page_table_lock);
2427 return VM_FAULT_OOM; 2429 return VM_FAULT_OOM;
diff --git a/mm/nommu.c b/mm/nommu.c
index 73419c55eda..b982290fd96 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -454,7 +454,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
454 * between processes, it syncs the pagetable across all 454 * between processes, it syncs the pagetable across all
455 * processes. 455 * processes.
456 */ 456 */
457struct vm_struct *alloc_vm_area(size_t size) 457struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
458{ 458{
459 BUG(); 459 BUG();
460 return NULL; 460 return NULL;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 471dedb463a..76f2c5ae908 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -185,6 +185,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
185 if (!p) 185 if (!p)
186 return 0; 186 return 0;
187 187
188 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
189 task_unlock(p);
190 return 0;
191 }
192
188 /* 193 /*
189 * The memory controller may have a limit of 0 bytes, so avoid a divide 194 * The memory controller may have a limit of 0 bytes, so avoid a divide
190 * by zero, if necessary. 195 * by zero, if necessary.
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f00523..71252486bc6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
128 * 128 *
129 */ 129 */
130static struct prop_descriptor vm_completions; 130static struct prop_descriptor vm_completions;
131static struct prop_descriptor vm_dirties;
132 131
133/* 132/*
134 * couple the period to the dirty_ratio: 133 * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
154{ 153{
155 int shift = calc_period_shift(); 154 int shift = calc_period_shift();
156 prop_change_shift(&vm_completions, shift); 155 prop_change_shift(&vm_completions, shift);
157 prop_change_shift(&vm_dirties, shift);
158 156
159 writeback_set_ratelimit(); 157 writeback_set_ratelimit();
160} 158}
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
235} 233}
236EXPORT_SYMBOL_GPL(bdi_writeout_inc); 234EXPORT_SYMBOL_GPL(bdi_writeout_inc);
237 235
238void task_dirty_inc(struct task_struct *tsk)
239{
240 prop_inc_single(&vm_dirties, &tsk->dirties);
241}
242
243/* 236/*
244 * Obtain an accurate fraction of the BDI's portion. 237 * Obtain an accurate fraction of the BDI's portion.
245 */ 238 */
@@ -1133,17 +1126,17 @@ pause:
1133 pages_dirtied, 1126 pages_dirtied,
1134 pause, 1127 pause,
1135 start_time); 1128 start_time);
1136 __set_current_state(TASK_UNINTERRUPTIBLE); 1129 __set_current_state(TASK_KILLABLE);
1137 io_schedule_timeout(pause); 1130 io_schedule_timeout(pause);
1138 1131
1139 dirty_thresh = hard_dirty_limit(dirty_thresh);
1140 /* 1132 /*
1141 * max-pause area. If dirty exceeded but still within this 1133 * This is typically equal to (nr_dirty < dirty_thresh) and can
1142 * area, no need to sleep for more than 200ms: (a) 8 pages per 1134 * also keep "1000+ dd on a slow USB stick" under control.
1143 * 200ms is typically more than enough to curb heavy dirtiers;
1144 * (b) the pause time limit makes the dirtiers more responsive.
1145 */ 1135 */
1146 if (nr_dirty < dirty_thresh) 1136 if (task_ratelimit)
1137 break;
1138
1139 if (fatal_signal_pending(current))
1147 break; 1140 break;
1148 } 1141 }
1149 1142
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
1395 1388
1396 shift = calc_period_shift(); 1389 shift = calc_period_shift();
1397 prop_descriptor_init(&vm_completions, shift); 1390 prop_descriptor_init(&vm_completions, shift);
1398 prop_descriptor_init(&vm_dirties, shift);
1399} 1391}
1400 1392
1401/** 1393/**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
1724 __inc_zone_page_state(page, NR_DIRTIED); 1716 __inc_zone_page_state(page, NR_DIRTIED);
1725 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); 1717 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1726 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 1718 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
1727 task_dirty_inc(current);
1728 task_io_account_write(PAGE_CACHE_SIZE); 1719 task_io_account_write(PAGE_CACHE_SIZE);
1729 } 1720 }
1730} 1721}
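
The page-writeback hunks drop the per-task vm_dirties counter and make the dirtier throttling pause killable: the task sleeps, then stops either because it is allowed to dirty again or because a fatal signal arrived. A standalone sketch of that loop shape; the stub functions below are stand-ins, not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins: in the kernel these come from dirty accounting and the
     * signal machinery (task_ratelimit, fatal_signal_pending()). */
    static bool fatal_signal_pending_stub(void) { return false; }
    static long compute_task_ratelimit(int round) { return round >= 3 ? 8 : 0; }

    /* Shape of the balance_dirty_pages() pause loop after the change:
     * sleep in a killable state, break on a nonzero ratelimit or on a
     * pending fatal signal. */
    static void throttle_dirtier(void)
    {
        for (int round = 0; round < 10; round++) {
            /* kernel: __set_current_state(TASK_KILLABLE);
             *         io_schedule_timeout(pause); */
            printf("round %d: sleeping\n", round);

            if (compute_task_ratelimit(round))
                break;
            if (fatal_signal_pending_stub())
                break;
        }
    }

    int main(void)
    {
        throttle_dirtier();
        return 0;
    }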
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea534960a04..12a48a88c0d 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
50 50
51 if (!pages || !bitmap) { 51 if (!pages || !bitmap) {
52 if (may_alloc && !pages) 52 if (may_alloc && !pages)
53 pages = pcpu_mem_alloc(pages_size); 53 pages = pcpu_mem_zalloc(pages_size);
54 if (may_alloc && !bitmap) 54 if (may_alloc && !bitmap)
55 bitmap = pcpu_mem_alloc(bitmap_size); 55 bitmap = pcpu_mem_zalloc(bitmap_size);
56 if (!pages || !bitmap) 56 if (!pages || !bitmap)
57 return NULL; 57 return NULL;
58 } 58 }
59 59
60 memset(pages, 0, pages_size);
61 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); 60 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
62 61
63 *bitmapp = bitmap; 62 *bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
143 int page_start, int page_end) 142 int page_start, int page_end)
144{ 143{
145 flush_cache_vunmap( 144 flush_cache_vunmap(
146 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 145 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
147 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 146 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
148} 147}
149 148
150static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) 149static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
206 int page_start, int page_end) 205 int page_start, int page_end)
207{ 206{
208 flush_tlb_kernel_range( 207 flush_tlb_kernel_range(
209 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 208 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
210 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 209 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
211} 210}
212 211
213static int __pcpu_map_pages(unsigned long addr, struct page **pages, 212static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
284 int page_start, int page_end) 283 int page_start, int page_end)
285{ 284{
286 flush_cache_vmap( 285 flush_cache_vmap(
287 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 286 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
288 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 287 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
289} 288}
290 289
291/** 290/**
diff --git a/mm/percpu.c b/mm/percpu.c
index bf80e55dbed..3bb810a7200 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
116static int pcpu_nr_slots __read_mostly; 116static int pcpu_nr_slots __read_mostly;
117static size_t pcpu_chunk_struct_size __read_mostly; 117static size_t pcpu_chunk_struct_size __read_mostly;
118 118
119/* cpus with the lowest and highest unit numbers */ 119/* cpus with the lowest and highest unit addresses */
120static unsigned int pcpu_first_unit_cpu __read_mostly; 120static unsigned int pcpu_low_unit_cpu __read_mostly;
121static unsigned int pcpu_last_unit_cpu __read_mostly; 121static unsigned int pcpu_high_unit_cpu __read_mostly;
122 122
123/* the address of the first chunk which starts with the kernel static area */ 123/* the address of the first chunk which starts with the kernel static area */
124void *pcpu_base_addr __read_mostly; 124void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
273 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) 273 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
274 274
275/** 275/**
276 * pcpu_mem_alloc - allocate memory 276 * pcpu_mem_zalloc - allocate memory
277 * @size: bytes to allocate 277 * @size: bytes to allocate
278 * 278 *
279 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 279 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
280 * kzalloc() is used; otherwise, vmalloc() is used. The returned 280 * kzalloc() is used; otherwise, vzalloc() is used. The returned
281 * memory is always zeroed. 281 * memory is always zeroed.
282 * 282 *
283 * CONTEXT: 283 * CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
286 * RETURNS: 286 * RETURNS:
287 * Pointer to the allocated area on success, NULL on failure. 287 * Pointer to the allocated area on success, NULL on failure.
288 */ 288 */
289static void *pcpu_mem_alloc(size_t size) 289static void *pcpu_mem_zalloc(size_t size)
290{ 290{
291 if (WARN_ON_ONCE(!slab_is_available())) 291 if (WARN_ON_ONCE(!slab_is_available()))
292 return NULL; 292 return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
302 * @ptr: memory to free 302 * @ptr: memory to free
303 * @size: size of the area 303 * @size: size of the area
304 * 304 *
305 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). 305 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
306 */ 306 */
307static void pcpu_mem_free(void *ptr, size_t size) 307static void pcpu_mem_free(void *ptr, size_t size)
308{ 308{
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
384 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); 384 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
385 unsigned long flags; 385 unsigned long flags;
386 386
387 new = pcpu_mem_alloc(new_size); 387 new = pcpu_mem_zalloc(new_size);
388 if (!new) 388 if (!new)
389 return -ENOMEM; 389 return -ENOMEM;
390 390
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
604{ 604{
605 struct pcpu_chunk *chunk; 605 struct pcpu_chunk *chunk;
606 606
607 chunk = pcpu_mem_alloc(pcpu_chunk_struct_size); 607 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
608 if (!chunk) 608 if (!chunk)
609 return NULL; 609 return NULL;
610 610
611 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); 611 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
612 sizeof(chunk->map[0]));
612 if (!chunk->map) { 613 if (!chunk->map) {
613 kfree(chunk); 614 kfree(chunk);
614 return NULL; 615 return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
977 * address. The caller is responsible for ensuring @addr stays valid 978 * address. The caller is responsible for ensuring @addr stays valid
978 * until this function finishes. 979 * until this function finishes.
979 * 980 *
 981 * percpu allocator has special setup for the first chunk, which currently
 982 * supports either embedding in linear address space or vmalloc mapping,
 983 * and, from the second chunk on, the backing allocator (currently either
 984 * vm or km) provides translation.
 985 *
 986 * The addr can be translated simply without checking if it falls into the
 987 * first chunk. But the current code better reflects how the percpu
 988 * allocator actually works, and the verification can discover bugs both in
 989 * the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we
 990 * keep the current code.
991 *
980 * RETURNS: 992 * RETURNS:
981 * The physical address for @addr. 993 * The physical address for @addr.
982 */ 994 */
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
984{ 996{
985 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 997 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
986 bool in_first_chunk = false; 998 bool in_first_chunk = false;
987 unsigned long first_start, first_end; 999 unsigned long first_low, first_high;
988 unsigned int cpu; 1000 unsigned int cpu;
989 1001
990 /* 1002 /*
991 * The following test on first_start/end isn't strictly 1003 * The following test on unit_low/high isn't strictly
992 * necessary but will speed up lookups of addresses which 1004 * necessary but will speed up lookups of addresses which
993 * aren't in the first chunk. 1005 * aren't in the first chunk.
994 */ 1006 */
995 first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0); 1007 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
996 first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu, 1008 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
997 pcpu_unit_pages); 1009 pcpu_unit_pages);
998 if ((unsigned long)addr >= first_start && 1010 if ((unsigned long)addr >= first_low &&
999 (unsigned long)addr < first_end) { 1011 (unsigned long)addr < first_high) {
1000 for_each_possible_cpu(cpu) { 1012 for_each_possible_cpu(cpu) {
1001 void *start = per_cpu_ptr(base, cpu); 1013 void *start = per_cpu_ptr(base, cpu);
1002 1014
@@ -1233,7 +1245,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1233 1245
1234 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1246 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1235 unit_map[cpu] = UINT_MAX; 1247 unit_map[cpu] = UINT_MAX;
1236 pcpu_first_unit_cpu = NR_CPUS; 1248
1249 pcpu_low_unit_cpu = NR_CPUS;
1250 pcpu_high_unit_cpu = NR_CPUS;
1237 1251
1238 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1252 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1239 const struct pcpu_group_info *gi = &ai->groups[group]; 1253 const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1267,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1253 unit_map[cpu] = unit + i; 1267 unit_map[cpu] = unit + i;
1254 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1268 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1255 1269
1256 if (pcpu_first_unit_cpu == NR_CPUS) 1270 /* determine low/high unit_cpu */
1257 pcpu_first_unit_cpu = cpu; 1271 if (pcpu_low_unit_cpu == NR_CPUS ||
1258 pcpu_last_unit_cpu = cpu; 1272 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1273 pcpu_low_unit_cpu = cpu;
1274 if (pcpu_high_unit_cpu == NR_CPUS ||
1275 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1276 pcpu_high_unit_cpu = cpu;
1259 } 1277 }
1260 } 1278 }
1261 pcpu_nr_units = unit; 1279 pcpu_nr_units = unit;
@@ -1889,7 +1907,7 @@ void __init percpu_init_late(void)
1889 1907
1890 BUILD_BUG_ON(size > PAGE_SIZE); 1908 BUILD_BUG_ON(size > PAGE_SIZE);
1891 1909
1892 map = pcpu_mem_alloc(size); 1910 map = pcpu_mem_zalloc(size);
1893 BUG_ON(!map); 1911 BUG_ON(!map);
1894 1912
1895 spin_lock_irqsave(&pcpu_lock, flags); 1913 spin_lock_irqsave(&pcpu_lock, flags);
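
Besides the pcpu_mem_alloc() -> pcpu_mem_zalloc() rename, the percpu hunks switch from tracking the first/last unit CPUs to the CPUs whose units sit at the lowest and highest addresses, which is what the address-range test in per_cpu_ptr_to_phys() actually needs. A userspace sketch of that selection over a fabricated unit_off[] table:

    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
        /* Hypothetical per-cpu unit offsets; in the kernel these are the
         * unit_off[cpu] values filled in by pcpu_setup_first_chunk(). */
        unsigned long unit_off[NR_CPUS] = { 0x3000, 0x0000, 0x2000, 0x1000 };

        unsigned int low = NR_CPUS, high = NR_CPUS;

        /* Mirror of the loop added above: pick the cpu whose unit has the
         * lowest address and the one with the highest address. */
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (low == NR_CPUS || unit_off[cpu] < unit_off[low])
                low = cpu;
            if (high == NR_CPUS || unit_off[cpu] > unit_off[high])
                high = cpu;
        }

        printf("low unit cpu  = %u (off 0x%lx)\n", low, unit_off[low]);
        printf("high unit cpu = %u (off 0x%lx)\n", high, unit_off[high]);
        return 0;
    }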
diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996c307..ed3334d9b6d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
1862{ 1862{
1863 struct kmem_cache_node *n = NULL; 1863 struct kmem_cache_node *n = NULL;
1864 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); 1864 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1865 struct page *page; 1865 struct page *page, *discard_page = NULL;
1866 1866
1867 while ((page = c->partial)) { 1867 while ((page = c->partial)) {
1868 enum slab_modes { M_PARTIAL, M_FREE }; 1868 enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
1904 if (l == M_PARTIAL) 1904 if (l == M_PARTIAL)
1905 remove_partial(n, page); 1905 remove_partial(n, page);
1906 else 1906 else
1907 add_partial(n, page, 1); 1907 add_partial(n, page,
1908 DEACTIVATE_TO_TAIL);
1908 1909
1909 l = m; 1910 l = m;
1910 } 1911 }
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
1915 "unfreezing slab")); 1916 "unfreezing slab"));
1916 1917
1917 if (m == M_FREE) { 1918 if (m == M_FREE) {
1918 stat(s, DEACTIVATE_EMPTY); 1919 page->next = discard_page;
1919 discard_slab(s, page); 1920 discard_page = page;
1920 stat(s, FREE_SLAB);
1921 } 1921 }
1922 } 1922 }
1923 1923
1924 if (n) 1924 if (n)
1925 spin_unlock(&n->list_lock); 1925 spin_unlock(&n->list_lock);
1926
1927 while (discard_page) {
1928 page = discard_page;
1929 discard_page = discard_page->next;
1930
1931 stat(s, DEACTIVATE_EMPTY);
1932 discard_slab(s, page);
1933 stat(s, FREE_SLAB);
1934 }
1926} 1935}
1927 1936
1928/* 1937/*
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1969 page->pobjects = pobjects; 1978 page->pobjects = pobjects;
1970 page->next = oldpage; 1979 page->next = oldpage;
1971 1980
1972 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 1981 } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1973 stat(s, CPU_PARTIAL_FREE); 1982 stat(s, CPU_PARTIAL_FREE);
1974 return pobjects; 1983 return pobjects;
1975} 1984}
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
4435 4444
4436 for_each_possible_cpu(cpu) { 4445 for_each_possible_cpu(cpu) {
4437 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 4446 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4447 int node = ACCESS_ONCE(c->node);
4438 struct page *page; 4448 struct page *page;
4439 4449
4440 if (!c || c->node < 0) 4450 if (node < 0)
4441 continue; 4451 continue;
4442 4452 page = ACCESS_ONCE(c->page);
4443 if (c->page) { 4453 if (page) {
4444 if (flags & SO_TOTAL) 4454 if (flags & SO_TOTAL)
4445 x = c->page->objects; 4455 x = page->objects;
4446 else if (flags & SO_OBJECTS) 4456 else if (flags & SO_OBJECTS)
4447 x = c->page->inuse; 4457 x = page->inuse;
4448 else 4458 else
4449 x = 1; 4459 x = 1;
4450 4460
4451 total += x; 4461 total += x;
4452 nodes[c->node] += x; 4462 nodes[node] += x;
4453 } 4463 }
4454 page = c->partial; 4464 page = c->partial;
4455 4465
4456 if (page) { 4466 if (page) {
4457 x = page->pobjects; 4467 x = page->pobjects;
4458 total += x; 4468 total += x;
4459 nodes[c->node] += x; 4469 nodes[node] += x;
4460 } 4470 }
4461 per_cpu[c->node]++; 4471 per_cpu[node]++;
4462 } 4472 }
4463 } 4473 }
4464 4474
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6f6ca..3231bf33287 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2141,23 +2141,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
2141 2141
2142static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 2142static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2143{ 2143{
2144 /* apply_to_page_range() does all the hard work. */ 2144 pte_t ***p = data;
2145
2146 if (p) {
2147 *(*p) = pte;
2148 (*p)++;
2149 }
2145 return 0; 2150 return 0;
2146} 2151}
2147 2152
2148/** 2153/**
2149 * alloc_vm_area - allocate a range of kernel address space 2154 * alloc_vm_area - allocate a range of kernel address space
2150 * @size: size of the area 2155 * @size: size of the area
2156 * @ptes: returns the PTEs for the address space
2151 * 2157 *
2152 * Returns: NULL on failure, vm_struct on success 2158 * Returns: NULL on failure, vm_struct on success
2153 * 2159 *
2154 * This function reserves a range of kernel address space, and 2160 * This function reserves a range of kernel address space, and
2155 * allocates pagetables to map that range. No actual mappings 2161 * allocates pagetables to map that range. No actual mappings
2156 * are created. If the kernel address space is not shared 2162 * are created.
2157 * between processes, it syncs the pagetable across all 2163 *
2158 * processes. 2164 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2165 * allocated for the VM area are returned.
2159 */ 2166 */
2160struct vm_struct *alloc_vm_area(size_t size) 2167struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2161{ 2168{
2162 struct vm_struct *area; 2169 struct vm_struct *area;
2163 2170
@@ -2171,19 +2178,11 @@ struct vm_struct *alloc_vm_area(size_t size)
2171 * of kernel virtual address space and mapped into init_mm. 2178 * of kernel virtual address space and mapped into init_mm.
2172 */ 2179 */
2173 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2180 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2174 area->size, f, NULL)) { 2181 size, f, ptes ? &ptes : NULL)) {
2175 free_vm_area(area); 2182 free_vm_area(area);
2176 return NULL; 2183 return NULL;
2177 } 2184 }
2178 2185
2179 /*
2180 * If the allocated address space is passed to a hypercall
2181 * before being used then we cannot rely on a page fault to
2182 * trigger an update of the page tables. So sync all the page
2183 * tables here.
2184 */
2185 vmalloc_sync_all();
2186
2187 return area; 2186 return area;
2188} 2187}
2189EXPORT_SYMBOL_GPL(alloc_vm_area); 2188EXPORT_SYMBOL_GPL(alloc_vm_area);
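
alloc_vm_area() now takes a pte_t **ptes argument: when it is non-NULL, a pointer to each PTE covering the reserved range is handed back, and the unconditional vmalloc_sync_all() is gone, leaving the caller to populate the entries itself. A hedged sketch of how a GPL module might call the new signature; the demo_* names are invented for illustration and this is not the in-tree user of the interface.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static struct vm_struct *demo_area;
    static pte_t **demo_ptes;

    static int __init demo_init(void)
    {
        size_t size = 4 * PAGE_SIZE;

        demo_ptes = kcalloc(size >> PAGE_SHIFT, sizeof(*demo_ptes), GFP_KERNEL);
        if (!demo_ptes)
            return -ENOMEM;

        /* With a non-NULL second argument the PTE pointers for the
         * reserved range come back in demo_ptes[]; the caller can then
         * point them at its own pages before touching the area. */
        demo_area = alloc_vm_area(size, demo_ptes);
        if (!demo_area) {
            kfree(demo_ptes);
            return -ENOMEM;
        }
        return 0;
    }

    static void __exit demo_exit(void)
    {
        free_vm_area(demo_area);
        kfree(demo_ptes);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");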
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c1c597e3e19..e0af7237cd9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
673 goto encrypt; 673 goto encrypt;
674 674
675auth: 675auth:
676 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 676 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
677 return 0; 677 return 0;
678 678
679 if (!hci_conn_auth(conn, sec_level, auth_type)) 679 if (!hci_conn_auth(conn, sec_level, auth_type))
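
The hci_conn change swaps test_bit() for test_and_set_bit() so the pending flag is checked and set in one atomic step, and only the first caller starts authentication. A userspace sketch of that difference using a GCC atomic builtin in place of the kernel bit ops; the names are stand-ins.

    #include <stdio.h>

    static unsigned long pend;          /* stand-in for conn->pend */
    #define ENCRYPT_PEND_BIT 0

    /* Minimal stand-in for the kernel's test_and_set_bit(): atomically
     * set the bit and report whether it was already set. */
    static int test_and_set_bit_stub(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        return (__atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
    }

    static void request_encryption(const char *who)
    {
        if (test_and_set_bit_stub(ENCRYPT_PEND_BIT, &pend)) {
            printf("%s: already pending, nothing to do\n", who);
            return;
        }
        printf("%s: starting authentication\n", who);
    }

    int main(void)
    {
        request_encryption("first caller");
        request_encryption("second caller");   /* sees the pending bit */
        return 0;
    }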
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 8cd12917733..5ea94a1eecf 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -251,7 +251,7 @@ static void l2cap_chan_timeout(unsigned long arg)
251 251
252 if (sock_owned_by_user(sk)) { 252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */ 253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5); 254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
255 bh_unlock_sock(sk); 255 bh_unlock_sock(sk);
256 chan_put(chan); 256 chan_put(chan);
257 return; 257 return;
@@ -2488,7 +2488,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2488 if (sock_owned_by_user(sk)) { 2488 if (sock_owned_by_user(sk)) {
2489 l2cap_state_change(chan, BT_DISCONN); 2489 l2cap_state_change(chan, BT_DISCONN);
2490 __clear_chan_timer(chan); 2490 __clear_chan_timer(chan);
2491 __set_chan_timer(chan, HZ / 5); 2491 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2492 break; 2492 break;
2493 } 2493 }
2494 2494
@@ -2661,7 +2661,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2661 2661
2662 default: 2662 default:
2663 sk->sk_err = ECONNRESET; 2663 sk->sk_err = ECONNRESET;
2664 __set_chan_timer(chan, HZ * 5); 2664 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2665 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2665 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2666 goto done; 2666 goto done;
2667 } 2667 }
@@ -2718,7 +2718,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2718 if (sock_owned_by_user(sk)) { 2718 if (sock_owned_by_user(sk)) {
2719 l2cap_state_change(chan, BT_DISCONN); 2719 l2cap_state_change(chan, BT_DISCONN);
2720 __clear_chan_timer(chan); 2720 __clear_chan_timer(chan);
2721 __set_chan_timer(chan, HZ / 5); 2721 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2722 bh_unlock_sock(sk); 2722 bh_unlock_sock(sk);
2723 return 0; 2723 return 0;
2724 } 2724 }
@@ -2752,7 +2752,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2752 if (sock_owned_by_user(sk)) { 2752 if (sock_owned_by_user(sk)) {
2753 l2cap_state_change(chan,BT_DISCONN); 2753 l2cap_state_change(chan,BT_DISCONN);
2754 __clear_chan_timer(chan); 2754 __clear_chan_timer(chan);
2755 __set_chan_timer(chan, HZ / 5); 2755 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2756 bh_unlock_sock(sk); 2756 bh_unlock_sock(sk);
2757 return 0; 2757 return 0;
2758 } 2758 }
@@ -3998,7 +3998,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3998 if (encrypt == 0x00) { 3998 if (encrypt == 0x00) {
3999 if (chan->sec_level == BT_SECURITY_MEDIUM) { 3999 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4000 __clear_chan_timer(chan); 4000 __clear_chan_timer(chan);
4001 __set_chan_timer(chan, HZ * 5); 4001 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4002 } else if (chan->sec_level == BT_SECURITY_HIGH) 4002 } else if (chan->sec_level == BT_SECURITY_HIGH)
4003 l2cap_chan_close(chan, ECONNREFUSED); 4003 l2cap_chan_close(chan, ECONNREFUSED);
4004 } else { 4004 } else {
@@ -4066,7 +4066,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4066 L2CAP_CONN_REQ, sizeof(req), &req); 4066 L2CAP_CONN_REQ, sizeof(req), &req);
4067 } else { 4067 } else {
4068 __clear_chan_timer(chan); 4068 __clear_chan_timer(chan);
4069 __set_chan_timer(chan, HZ / 10); 4069 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4070 } 4070 }
4071 } else if (chan->state == BT_CONNECT2) { 4071 } else if (chan->state == BT_CONNECT2) {
4072 struct l2cap_conn_rsp rsp; 4072 struct l2cap_conn_rsp rsp;
@@ -4086,7 +4086,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4086 } 4086 }
4087 } else { 4087 } else {
4088 l2cap_state_change(chan, BT_DISCONN); 4088 l2cap_state_change(chan, BT_DISCONN);
4089 __set_chan_timer(chan, HZ / 10); 4089 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4090 res = L2CAP_CR_SEC_BLOCK; 4090 res = L2CAP_CR_SEC_BLOCK;
4091 stat = L2CAP_CS_NO_INFO; 4091 stat = L2CAP_CS_NO_INFO;
4092 } 4092 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 995cbe0ac0b..a5f4e576980 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1501,6 +1501,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1501 1501
1502 __skb_pull(skb2, offset); 1502 __skb_pull(skb2, offset);
1503 skb_reset_transport_header(skb2); 1503 skb_reset_transport_header(skb2);
1504 skb_postpull_rcsum(skb2, skb_network_header(skb2),
1505 skb_network_header_len(skb2));
1504 1506
1505 icmp6_type = icmp6_hdr(skb2)->icmp6_type; 1507 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
1506 1508
@@ -1770,7 +1772,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1770 int err = 0; 1772 int err = 0;
1771 struct net_bridge_mdb_htable *mdb; 1773 struct net_bridge_mdb_htable *mdb;
1772 1774
1773 spin_lock(&br->multicast_lock); 1775 spin_lock_bh(&br->multicast_lock);
1774 if (br->multicast_disabled == !val) 1776 if (br->multicast_disabled == !val)
1775 goto unlock; 1777 goto unlock;
1776 1778
@@ -1806,7 +1808,7 @@ rollback:
1806 } 1808 }
1807 1809
1808unlock: 1810unlock:
1809 spin_unlock(&br->multicast_lock); 1811 spin_unlock_bh(&br->multicast_lock);
1810 1812
1811 return err; 1813 return err;
1812} 1814}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 733e46008b8..f4f3f58f523 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
244 ceph_pagelist_init(req->r_trail); 244 ceph_pagelist_init(req->r_trail);
245 } 245 }
246 /* create request message; allow space for oid */ 246 /* create request message; allow space for oid */
247 msg_size += 40; 247 msg_size += MAX_OBJ_NAME_SIZE;
248 if (snapc) 248 if (snapc)
249 msg_size += sizeof(u64) * snapc->num_snaps; 249 msg_size += sizeof(u64) * snapc->num_snaps;
250 if (use_mempool) 250 if (use_mempool)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index c1f4154552f..36d14406261 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
136 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); 136 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
137 } 137 }
138 138
139 err = ah->nexthdr;
140
141 kfree(AH_SKB_CB(skb)->tmp); 139 kfree(AH_SKB_CB(skb)->tmp);
142 xfrm_output_resume(skb, err); 140 xfrm_output_resume(skb, err);
143} 141}
@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
264 if (err) 262 if (err)
265 goto out; 263 goto out;
266 264
265 err = ah->nexthdr;
266
267 skb->network_header += ah_hlen; 267 skb->network_header += ah_hlen;
268 memcpy(skb_network_header(skb), work_iph, ihl); 268 memcpy(skb_network_header(skb), work_iph, ihl);
269 __skb_pull(skb, ah_hlen + ihl); 269 __skb_pull(skb, ah_hlen + ihl);
270 skb_set_transport_header(skb, -ihl); 270 skb_set_transport_header(skb, -ihl);
271
272 err = ah->nexthdr;
273out: 271out:
274 kfree(AH_SKB_CB(skb)->tmp); 272 kfree(AH_SKB_CB(skb)->tmp);
275 xfrm_input_resume(skb, err); 273 xfrm_input_resume(skb, err);
@@ -371,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
371 if (err == -EINPROGRESS) 369 if (err == -EINPROGRESS)
372 goto out; 370 goto out;
373 371
374 if (err == -EBUSY)
375 err = NET_XMIT_DROP;
376 goto out_free; 372 goto out_free;
377 } 373 }
378 374
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f5e2bdaef94..68e8ac51438 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -133,8 +133,8 @@ static int inet_csk_diag_fill(struct sock *sk,
133 &np->rcv_saddr); 133 &np->rcv_saddr);
134 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 134 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
135 &np->daddr); 135 &np->daddr);
136 if (ext & (1 << (INET_DIAG_TOS - 1))) 136 if (ext & (1 << (INET_DIAG_TCLASS - 1)))
137 RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass); 137 RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
138 } 138 }
139#endif 139#endif
140 140
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ec93335901d..05d20cca9d6 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -640,6 +640,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
640 } 640 }
641 if (srrptr <= srrspace) { 641 if (srrptr <= srrspace) {
642 opt->srr_is_hit = 1; 642 opt->srr_is_hit = 1;
643 iph->daddr = nexthop;
643 opt->is_changed = 1; 644 opt->is_changed = 1;
644 } 645 }
645 return 0; 646 return 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a06f73fdb3c..43d4c3b2236 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -339,7 +339,6 @@ void ping_err(struct sk_buff *skb, u32 info)
339 sk = ping_v4_lookup(net, iph->daddr, iph->saddr, 339 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
340 ntohs(icmph->un.echo.id), skb->dev->ifindex); 340 ntohs(icmph->un.echo.id), skb->dev->ifindex);
341 if (sk == NULL) { 341 if (sk == NULL) {
342 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
343 pr_debug("no socket, dropping\n"); 342 pr_debug("no socket, dropping\n");
344 return; /* No socket for error */ 343 return; /* No socket for error */
345 } 344 }
@@ -679,7 +678,6 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
679 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", 678 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
680 inet_sk(sk), inet_sk(sk)->inet_num, skb); 679 inet_sk(sk), inet_sk(sk)->inet_num, skb);
681 if (sock_queue_rcv_skb(sk, skb) < 0) { 680 if (sock_queue_rcv_skb(sk, skb) < 0) {
682 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_INERRORS);
683 kfree_skb(skb); 681 kfree_skb(skb);
684 pr_debug("ping_queue_rcv_skb -> failed\n"); 682 pr_debug("ping_queue_rcv_skb -> failed\n");
685 return -1; 683 return -1;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 155138d8ec8..0c74da8a047 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1304,16 +1304,42 @@ static void rt_del(unsigned hash, struct rtable *rt)
1304 spin_unlock_bh(rt_hash_lock_addr(hash)); 1304 spin_unlock_bh(rt_hash_lock_addr(hash));
1305} 1305}
1306 1306
1307static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1308{
1309 struct rtable *rt = (struct rtable *) dst;
1310 __be32 orig_gw = rt->rt_gateway;
1311 struct neighbour *n, *old_n;
1312
1313 dst_confirm(&rt->dst);
1314
1315 rt->rt_gateway = peer->redirect_learned.a4;
1316
1317 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1318 if (IS_ERR(n))
1319 return PTR_ERR(n);
1320 old_n = xchg(&rt->dst._neighbour, n);
1321 if (old_n)
1322 neigh_release(old_n);
1323 if (!n || !(n->nud_state & NUD_VALID)) {
1324 if (n)
1325 neigh_event_send(n, NULL);
1326 rt->rt_gateway = orig_gw;
1327 return -EAGAIN;
1328 } else {
1329 rt->rt_flags |= RTCF_REDIRECTED;
1330 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1331 }
1332 return 0;
1333}
1334
1307/* called in rcu_read_lock() section */ 1335/* called in rcu_read_lock() section */
1308void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, 1336void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1309 __be32 saddr, struct net_device *dev) 1337 __be32 saddr, struct net_device *dev)
1310{ 1338{
1311 int s, i; 1339 int s, i;
1312 struct in_device *in_dev = __in_dev_get_rcu(dev); 1340 struct in_device *in_dev = __in_dev_get_rcu(dev);
1313 struct rtable *rt;
1314 __be32 skeys[2] = { saddr, 0 }; 1341 __be32 skeys[2] = { saddr, 0 };
1315 int ikeys[2] = { dev->ifindex, 0 }; 1342 int ikeys[2] = { dev->ifindex, 0 };
1316 struct flowi4 fl4;
1317 struct inet_peer *peer; 1343 struct inet_peer *peer;
1318 struct net *net; 1344 struct net *net;
1319 1345
@@ -1336,33 +1362,42 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1336 goto reject_redirect; 1362 goto reject_redirect;
1337 } 1363 }
1338 1364
1339 memset(&fl4, 0, sizeof(fl4));
1340 fl4.daddr = daddr;
1341 for (s = 0; s < 2; s++) { 1365 for (s = 0; s < 2; s++) {
1342 for (i = 0; i < 2; i++) { 1366 for (i = 0; i < 2; i++) {
1343 fl4.flowi4_oif = ikeys[i]; 1367 unsigned int hash;
1344 fl4.saddr = skeys[s]; 1368 struct rtable __rcu **rthp;
1345 rt = __ip_route_output_key(net, &fl4); 1369 struct rtable *rt;
1346 if (IS_ERR(rt)) 1370
1347 continue; 1371 hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
1348 1372
1349 if (rt->dst.error || rt->dst.dev != dev || 1373 rthp = &rt_hash_table[hash].chain;
1350 rt->rt_gateway != old_gw) { 1374
1351 ip_rt_put(rt); 1375 while ((rt = rcu_dereference(*rthp)) != NULL) {
1352 continue; 1376 rthp = &rt->dst.rt_next;
1353 } 1377
1378 if (rt->rt_key_dst != daddr ||
1379 rt->rt_key_src != skeys[s] ||
1380 rt->rt_oif != ikeys[i] ||
1381 rt_is_input_route(rt) ||
1382 rt_is_expired(rt) ||
1383 !net_eq(dev_net(rt->dst.dev), net) ||
1384 rt->dst.error ||
1385 rt->dst.dev != dev ||
1386 rt->rt_gateway != old_gw)
1387 continue;
1354 1388
1355 if (!rt->peer) 1389 if (!rt->peer)
1356 rt_bind_peer(rt, rt->rt_dst, 1); 1390 rt_bind_peer(rt, rt->rt_dst, 1);
1357 1391
1358 peer = rt->peer; 1392 peer = rt->peer;
1359 if (peer) { 1393 if (peer) {
1360 peer->redirect_learned.a4 = new_gw; 1394 if (peer->redirect_learned.a4 != new_gw) {
1361 atomic_inc(&__rt_peer_genid); 1395 peer->redirect_learned.a4 = new_gw;
1396 atomic_inc(&__rt_peer_genid);
1397 }
1398 check_peer_redir(&rt->dst, peer);
1399 }
1362 } 1400 }
1363
1364 ip_rt_put(rt);
1365 return;
1366 } 1401 }
1367 } 1402 }
1368 return; 1403 return;
@@ -1649,33 +1684,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1649 } 1684 }
1650} 1685}
1651 1686
1652static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1653{
1654 struct rtable *rt = (struct rtable *) dst;
1655 __be32 orig_gw = rt->rt_gateway;
1656 struct neighbour *n, *old_n;
1657
1658 dst_confirm(&rt->dst);
1659
1660 rt->rt_gateway = peer->redirect_learned.a4;
1661
1662 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1663 if (IS_ERR(n))
1664 return PTR_ERR(n);
1665 old_n = xchg(&rt->dst._neighbour, n);
1666 if (old_n)
1667 neigh_release(old_n);
1668 if (!n || !(n->nud_state & NUD_VALID)) {
1669 if (n)
1670 neigh_event_send(n, NULL);
1671 rt->rt_gateway = orig_gw;
1672 return -EAGAIN;
1673 } else {
1674 rt->rt_flags |= RTCF_REDIRECTED;
1675 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1676 }
1677 return 0;
1678}
1679 1687
1680static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1688static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1681{ 1689{
@@ -2845,7 +2853,7 @@ static int rt_fill_info(struct net *net,
2845 struct rtable *rt = skb_rtable(skb); 2853 struct rtable *rt = skb_rtable(skb);
2846 struct rtmsg *r; 2854 struct rtmsg *r;
2847 struct nlmsghdr *nlh; 2855 struct nlmsghdr *nlh;
2848 long expires = 0; 2856 unsigned long expires = 0;
2849 const struct inet_peer *peer = rt->peer; 2857 const struct inet_peer *peer = rt->peer;
2850 u32 id = 0, ts = 0, tsage = 0, error; 2858 u32 id = 0, ts = 0, tsage = 0, error;
2851 2859
@@ -2902,8 +2910,12 @@ static int rt_fill_info(struct net *net,
2902 tsage = get_seconds() - peer->tcp_ts_stamp; 2910 tsage = get_seconds() - peer->tcp_ts_stamp;
2903 } 2911 }
2904 expires = ACCESS_ONCE(peer->pmtu_expires); 2912 expires = ACCESS_ONCE(peer->pmtu_expires);
2905 if (expires) 2913 if (expires) {
2906 expires -= jiffies; 2914 if (time_before(jiffies, expires))
2915 expires -= jiffies;
2916 else
2917 expires = 0;
2918 }
2907 } 2919 }
2908 2920
2909 if (rt_is_input_route(rt)) { 2921 if (rt_is_input_route(rt)) {
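
The rt_fill_info() hunk stops reporting a wrapped-around huge lifetime once peer->pmtu_expires has passed: the remaining time is only computed while the deadline is still ahead, otherwise it is clamped to 0. A tiny userspace illustration of the clamp, with plain integers standing in for jiffies and a simplified time_before():

    #include <stdio.h>

    /* Simplified stand-in for the kernel's time_before(): true if a is
     * earlier than b (wrap-free values assumed for this example). */
    static int time_before_stub(unsigned long a, unsigned long b)
    {
        return (long)(a - b) < 0;
    }

    static unsigned long remaining(unsigned long now, unsigned long expires)
    {
        if (!expires)
            return 0;
        /* The fix: subtract only while the deadline is in the future,
         * otherwise report 0 instead of a huge unsigned difference. */
        if (time_before_stub(now, expires))
            return expires - now;
        return 0;
    }

    int main(void)
    {
        printf("future deadline: %lu\n", remaining(1000, 1500)); /* 500 */
        printf("past deadline:   %lu\n", remaining(2000, 1500)); /* 0   */
        return 0;
    }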
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7443159c40..a9db4b1a221 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1510,6 +1510,7 @@ exit:
1510 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1510 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1511 return NULL; 1511 return NULL;
1512put_and_exit: 1512put_and_exit:
1513 tcp_clear_xmit_timers(newsk);
1513 bh_unlock_sock(newsk); 1514 bh_unlock_sock(newsk);
1514 sock_put(newsk); 1515 sock_put(newsk);
1515 goto exit; 1516 goto exit;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 980b98f6288..63170e29754 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1382,7 +1382,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
1382/* Return 0, if packet can be sent now without violation Nagle's rules: 1382/* Return 0, if packet can be sent now without violation Nagle's rules:
1383 * 1. It is full sized. 1383 * 1. It is full sized.
1384 * 2. Or it contains FIN. (already checked by caller) 1384 * 2. Or it contains FIN. (already checked by caller)
1385 * 3. Or TCP_NODELAY was set. 1385 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1386 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1386 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1387 * With Minshall's modification: all sent small packets are ACKed. 1387 * With Minshall's modification: all sent small packets are ACKed.
1388 */ 1388 */
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2195ae65192..4c0f894d084 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
324#endif 324#endif
325 } 325 }
326 326
327 err = ah->nexthdr;
328
329 kfree(AH_SKB_CB(skb)->tmp); 327 kfree(AH_SKB_CB(skb)->tmp);
330 xfrm_output_resume(skb, err); 328 xfrm_output_resume(skb, err);
331} 329}
@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
466 if (err) 464 if (err)
467 goto out; 465 goto out;
468 466
467 err = ah->nexthdr;
468
469 skb->network_header += ah_hlen; 469 skb->network_header += ah_hlen;
470 memcpy(skb_network_header(skb), work_iph, hdr_len); 470 memcpy(skb_network_header(skb), work_iph, hdr_len);
471 __skb_pull(skb, ah_hlen + hdr_len); 471 __skb_pull(skb, ah_hlen + hdr_len);
472 skb_set_transport_header(skb, -hdr_len); 472 skb_set_transport_header(skb, -hdr_len);
473
474 err = ah->nexthdr;
475out: 473out:
476 kfree(AH_SKB_CB(skb)->tmp); 474 kfree(AH_SKB_CB(skb)->tmp);
477 xfrm_input_resume(skb, err); 475 xfrm_input_resume(skb, err);
@@ -583,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
583 if (err == -EINPROGRESS) 581 if (err == -EINPROGRESS)
584 goto out; 582 goto out;
585 583
586 if (err == -EBUSY)
587 err = NET_XMIT_DROP;
588 goto out_free; 584 goto out_free;
589 } 585 }
590 586
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 027c7ff6f1e..a46c64eb0a6 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -111,6 +111,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
111 ipv6_addr_loopback(&hdr->daddr)) 111 ipv6_addr_loopback(&hdr->daddr))
112 goto err; 112 goto err;
113 113
114 /*
115 * RFC4291 2.7
116 * Multicast addresses must not be used as source addresses in IPv6
117 * packets or appear in any Routing header.
118 */
119 if (ipv6_addr_is_multicast(&hdr->saddr))
120 goto err;
121
114 skb->transport_header = skb->network_header + sizeof(*hdr); 122 skb->transport_header = skb->network_header + sizeof(*hdr);
115 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 123 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
116 124
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bdc15c9003d..4e2e9ff67ef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -289,6 +289,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
289 if ((err = register_netdevice(dev)) < 0) 289 if ((err = register_netdevice(dev)) < 0)
290 goto failed_free; 290 goto failed_free;
291 291
292 strcpy(t->parms.name, dev->name);
293
292 dev_hold(dev); 294 dev_hold(dev);
293 ip6_tnl_link(ip6n, t); 295 ip6_tnl_link(ip6n, t);
294 return t; 296 return t;
@@ -1407,7 +1409,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1407 struct ip6_tnl *t = netdev_priv(dev); 1409 struct ip6_tnl *t = netdev_priv(dev);
1408 1410
1409 t->dev = dev; 1411 t->dev = dev;
1410 strcpy(t->parms.name, dev->name);
1411 dev->tstats = alloc_percpu(struct pcpu_tstats); 1412 dev->tstats = alloc_percpu(struct pcpu_tstats);
1412 if (!dev->tstats) 1413 if (!dev->tstats)
1413 return -ENOMEM; 1414 return -ENOMEM;
@@ -1487,6 +1488,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1487static int __net_init ip6_tnl_init_net(struct net *net) 1488static int __net_init ip6_tnl_init_net(struct net *net)
1488{ 1489{
1489 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1490 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1491 struct ip6_tnl *t = NULL;
1490 int err; 1492 int err;
1491 1493
1492 ip6n->tnls[0] = ip6n->tnls_wc; 1494 ip6n->tnls[0] = ip6n->tnls_wc;
@@ -1507,6 +1509,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1507 err = register_netdev(ip6n->fb_tnl_dev); 1509 err = register_netdev(ip6n->fb_tnl_dev);
1508 if (err < 0) 1510 if (err < 0)
1509 goto err_register; 1511 goto err_register;
1512
1513 t = netdev_priv(ip6n->fb_tnl_dev);
1514
1515 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1510 return 0; 1516 return 0;
1511 1517
1512err_register: 1518err_register:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index bf8d50c6793..cf0f308abf5 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -756,9 +756,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
756 goto error; 756 goto error;
757 } 757 }
758 758
759 /* Point to L2TP header */
760 optr = ptr = skb->data;
761
762 /* Trace packet contents, if enabled */ 759 /* Trace packet contents, if enabled */
763 if (tunnel->debug & L2TP_MSG_DATA) { 760 if (tunnel->debug & L2TP_MSG_DATA) {
764 length = min(32u, skb->len); 761 length = min(32u, skb->len);
@@ -769,12 +766,15 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
769 766
770 offset = 0; 767 offset = 0;
771 do { 768 do {
772 printk(" %02X", ptr[offset]); 769 printk(" %02X", skb->data[offset]);
773 } while (++offset < length); 770 } while (++offset < length);
774 771
775 printk("\n"); 772 printk("\n");
776 } 773 }
777 774
775 /* Point to L2TP header */
776 optr = ptr = skb->data;
777
778 /* Get L2TP header flags */ 778 /* Get L2TP header flags */
779 hdrflags = ntohs(*(__be16 *) ptr); 779 hdrflags = ntohs(*(__be16 *) ptr);
780 780
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 72c8bea81a6..b1b1bb368f7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1487,6 +1487,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1487 int i, j, err; 1487 int i, j, err;
1488 bool have_higher_than_11mbit = false; 1488 bool have_higher_than_11mbit = false;
1489 u16 ap_ht_cap_flags; 1489 u16 ap_ht_cap_flags;
1490 int min_rate = INT_MAX, min_rate_index = -1;
1490 1491
1491 /* AssocResp and ReassocResp have identical structure */ 1492 /* AssocResp and ReassocResp have identical structure */
1492 1493
@@ -1553,6 +1554,10 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1553 rates |= BIT(j); 1554 rates |= BIT(j);
1554 if (is_basic) 1555 if (is_basic)
1555 basic_rates |= BIT(j); 1556 basic_rates |= BIT(j);
1557 if (rate < min_rate) {
1558 min_rate = rate;
1559 min_rate_index = j;
1560 }
1556 break; 1561 break;
1557 } 1562 }
1558 } 1563 }
@@ -1570,11 +1575,25 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1570 rates |= BIT(j); 1575 rates |= BIT(j);
1571 if (is_basic) 1576 if (is_basic)
1572 basic_rates |= BIT(j); 1577 basic_rates |= BIT(j);
1578 if (rate < min_rate) {
1579 min_rate = rate;
1580 min_rate_index = j;
1581 }
1573 break; 1582 break;
1574 } 1583 }
1575 } 1584 }
1576 } 1585 }
1577 1586
1587 /*
1588 * some buggy APs don't advertise basic_rates. use the lowest
1589 * supported rate instead.
1590 */
1591 if (unlikely(!basic_rates) && min_rate_index >= 0) {
1592 printk(KERN_DEBUG "%s: No basic rates in AssocResp. "
1593 "Using min supported rate instead.\n", sdata->name);
1594 basic_rates = BIT(min_rate_index);
1595 }
1596
1578 sta->sta.supp_rates[wk->chan->band] = rates; 1597 sta->sta.supp_rates[wk->chan->band] = rates;
1579 sdata->vif.bss_conf.basic_rates = basic_rates; 1598 sdata->vif.bss_conf.basic_rates = basic_rates;
1580 1599
@@ -2269,6 +2288,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2269 2288
2270 cancel_work_sync(&ifmgd->request_smps_work); 2289 cancel_work_sync(&ifmgd->request_smps_work);
2271 2290
2291 cancel_work_sync(&ifmgd->monitor_work);
2272 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 2292 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
2273 if (del_timer_sync(&ifmgd->timer)) 2293 if (del_timer_sync(&ifmgd->timer))
2274 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 2294 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -2277,7 +2297,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2277 if (del_timer_sync(&ifmgd->chswitch_timer)) 2297 if (del_timer_sync(&ifmgd->chswitch_timer))
2278 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); 2298 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
2279 2299
2280 cancel_work_sync(&ifmgd->monitor_work);
2281 /* these will just be re-established on connection */ 2300 /* these will just be re-established on connection */
2282 del_timer_sync(&ifmgd->conn_mon_timer); 2301 del_timer_sync(&ifmgd->conn_mon_timer);
2283 del_timer_sync(&ifmgd->bcn_mon_timer); 2302 del_timer_sync(&ifmgd->bcn_mon_timer);
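
The mlme.c hunks track the lowest supported rate while parsing the AssocResp rate sets and, if the AP advertised no basic rates at all, fall back to that minimum. A compact userspace sketch of the same scan-and-fallback over a made-up rate table:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical supported rates in 100 kbps units; none is flagged
         * as basic, which is the buggy-AP case the hunk handles. */
        int rates[]    = { 540, 120, 60, 240 };
        int is_basic[] = {   0,   0,  0,   0 };
        int n = 4;

        uint32_t basic_rates = 0;
        int min_rate = INT_MAX, min_rate_index = -1;

        for (int j = 0; j < n; j++) {
            if (is_basic[j])
                basic_rates |= 1u << j;
            if (rates[j] < min_rate) {
                min_rate = rates[j];
                min_rate_index = j;
            }
        }

        /* Fallback added by the patch: use the lowest supported rate when
         * the AP did not mark any rate as basic. */
        if (!basic_rates && min_rate_index >= 0)
            basic_rates = 1u << min_rate_index;

        printf("basic_rates bitmap = 0x%x\n", basic_rates);
        return 0;
    }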
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb53726cb04..fb123e2e081 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -141,8 +141,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
141 pos++; 141 pos++;
142 142
143 /* IEEE80211_RADIOTAP_RATE */ 143 /* IEEE80211_RADIOTAP_RATE */
144 if (status->flag & RX_FLAG_HT) { 144 if (!rate || status->flag & RX_FLAG_HT) {
145 /* 145 /*
 146 * Without rate information don't add it. If we have it,
146 * MCS information is a separate field in radiotap, 147 * MCS information is a separate field in radiotap,
147 * added below. The byte here is needed as padding 148 * added below. The byte here is needed as padding
148 * for the channel though, so initialise it to 0. 149 * for the channel though, so initialise it to 0.
@@ -163,12 +164,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
163 else if (status->flag & RX_FLAG_HT) 164 else if (status->flag & RX_FLAG_HT)
164 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 165 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
165 pos); 166 pos);
166 else if (rate->flags & IEEE80211_RATE_ERP_G) 167 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
167 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 168 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
168 pos); 169 pos);
169 else 170 else if (rate)
170 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 171 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
171 pos); 172 pos);
173 else
174 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
172 pos += 2; 175 pos += 2;
173 176
174 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 177 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ce962d2c878..8eaa746ec7a 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1354,12 +1354,12 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1354 * Use MoreData flag to indicate whether there are 1354 * Use MoreData flag to indicate whether there are
1355 * more buffered frames for this STA 1355 * more buffered frames for this STA
1356 */ 1356 */
1357 if (!more_data) 1357 if (more_data || !skb_queue_empty(&frames))
1358 hdr->frame_control &=
1359 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control |= 1358 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1359 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control &=
1362 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1363 1363
1364 if (ieee80211_is_data_qos(hdr->frame_control) || 1364 if (ieee80211_is_data_qos(hdr->frame_control) ||
1365 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1365 ieee80211_is_qos_nullfunc(hdr->frame_control))
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 51e256c5fb7..eca0fad0970 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -881,6 +881,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
881 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 881 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
882 ssid, ssid_len, 882 ssid, ssid_len,
883 buf, buf_len); 883 buf, buf_len);
884 if (!skb)
885 goto out;
884 886
885 if (dst) { 887 if (dst) {
886 mgmt = (struct ieee80211_mgmt *) skb->data; 888 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -889,6 +891,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
889 } 891 }
890 892
891 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 893 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
894
895 out:
892 kfree(buf); 896 kfree(buf);
893 897
894 return skb; 898 return skb;
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 4cf6dc7910e..ec753b3ae72 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,7 +9,6 @@ config RDS
9 9
10config RDS_RDMA 10config RDS_RDMA
11 tristate "RDS over Infiniband and iWARP" 11 tristate "RDS over Infiniband and iWARP"
12 select LLIST
13 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 12 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
14 ---help--- 13 ---help---
15 Allow RDS to use Infiniband and iWARP as a transport. 14 Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d7f97ef2659..55472c48825 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
496 struct rpc_rqst *req = task->tk_rqstp; 496 struct rpc_rqst *req = task->tk_rqstp;
497 struct rpc_xprt *xprt = req->rq_xprt; 497 struct rpc_xprt *xprt = req->rq_xprt;
498 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 498 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
499 int ret = 0; 499 int ret = -EAGAIN;
500 500
501 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", 501 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
502 task->tk_pid, req->rq_slen - req->rq_bytes_sent, 502 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
508 /* Don't race with disconnect */ 508 /* Don't race with disconnect */
509 if (xprt_connected(xprt)) { 509 if (xprt_connected(xprt)) {
510 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 510 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
511 ret = -EAGAIN;
512 /* 511 /*
513 * Notify TCP that we're limited by the application 512 * Notify TCP that we're limited by the application
514 * window size 513 * window size
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2530 int err; 2529 int err;
2531 err = xs_init_anyaddr(args->dstaddr->sa_family, 2530 err = xs_init_anyaddr(args->dstaddr->sa_family,
2532 (struct sockaddr *)&new->srcaddr); 2531 (struct sockaddr *)&new->srcaddr);
2533 if (err != 0) 2532 if (err != 0) {
2533 xprt_free(xprt);
2534 return ERR_PTR(err); 2534 return ERR_PTR(err);
2535 }
2535 } 2536 }
2536 2537
2537 return xprt; 2538 return xprt;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 48260c2d092..b3a476fe827 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -132,8 +132,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
132 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, 132 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
133 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, 133 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
134 134
135 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 135 [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
136 .len = NL80211_HT_CAPABILITY_LEN },
137 136
138 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, 137 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
139 [NL80211_ATTR_IE] = { .type = NLA_BINARY, 138 [NL80211_ATTR_IE] = { .type = NLA_BINARY,
@@ -1253,6 +1252,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1253 goto bad_res; 1252 goto bad_res;
1254 } 1253 }
1255 1254
1255 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1256 netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
1257 result = -EINVAL;
1258 goto bad_res;
1259 }
1260
1256 nla_for_each_nested(nl_txq_params, 1261 nla_for_each_nested(nl_txq_params,
1257 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], 1262 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
1258 rem_txq_params) { 1263 rem_txq_params) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6acba9d18cc..e71f5a66574 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2265,6 +2265,9 @@ void /* __init_or_exit */ regulatory_exit(void)
2265 2265
2266 kfree(last_request); 2266 kfree(last_request);
2267 2267
2268 last_request = NULL;
2269 dev_set_uevent_suppress(&reg_pdev->dev, true);
2270
2268 platform_device_unregister(reg_pdev); 2271 platform_device_unregister(reg_pdev);
2269 2272
2270 spin_lock_bh(&reg_pending_beacons_lock); 2273 spin_lock_bh(&reg_pending_beacons_lock);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0fb14241040..dc23b31594e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -259,17 +259,20 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
259{ 259{
260 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1); 260 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
261 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2); 261 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
262 int r;
263 262
263 /* equal if both missing */
264 if (!ie1 && !ie2) 264 if (!ie1 && !ie2)
265 return 0; 265 return 0;
266 if (!ie1 || !ie2) 266 /* sort missing IE before (left of) present IE */
267 if (!ie1)
267 return -1; 268 return -1;
269 if (!ie2)
270 return 1;
268 271
269 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); 272 /* sort by length first, then by contents */
270 if (r == 0 && ie1[1] != ie2[1]) 273 if (ie1[1] != ie2[1])
271 return ie2[1] - ie1[1]; 274 return ie2[1] - ie1[1];
272 return r; 275 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
273} 276}
274 277
275static bool is_bss(struct cfg80211_bss *a, 278static bool is_bss(struct cfg80211_bss *a,
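
The rewritten cmp_ies() gives a proper total order: a missing IE always sorts before a present one, equal lengths are required before contents are compared, and content decides ties. A standalone sketch of the comparator over raw IE byte arrays (id, length, payload), mirroring the new logic:

    #include <stdio.h>
    #include <string.h>

    /* ie[0] = element id, ie[1] = payload length, ie[2..] = payload.
     * NULL means the element is absent, like cfg80211_find_ie()
     * returning NULL. */
    static int cmp_ies_sketch(const unsigned char *ie1, const unsigned char *ie2)
    {
        if (!ie1 && !ie2)
            return 0;                       /* equal if both missing */
        if (!ie1)
            return -1;                      /* missing sorts before present */
        if (!ie2)
            return 1;
        if (ie1[1] != ie2[1])
            return ie2[1] - ie1[1];         /* different lengths decide first */
        return memcmp(ie1 + 2, ie2 + 2, ie1[1]);   /* then contents */
    }

    int main(void)
    {
        unsigned char ssid_a[] = { 0, 3, 'f', 'o', 'o' };
        unsigned char ssid_b[] = { 0, 3, 'b', 'a', 'r' };

        printf("a vs b: %d\n", cmp_ies_sketch(ssid_a, ssid_b));
        printf("a vs missing: %d\n", cmp_ies_sketch(ssid_a, NULL));
        printf("missing vs missing: %d\n", cmp_ies_sketch(NULL, NULL));
        return 0;
    }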
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
index 6bc7a86d102..d6f8433250a 100644
--- a/security/keys/encrypted-keys/Makefile
+++ b/security/keys/encrypted-keys/Makefile
@@ -2,5 +2,9 @@
2# Makefile for encrypted keys 2# Makefile for encrypted keys
3# 3#
4 4
5obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o 5obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
6obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o 6
7encrypted-keys-y := encrypted.o ecryptfs_format.o
8masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
9masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
10encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
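
A worked reading of the kbuild trick above (this is an interpretation of the expansion, consistent with the encrypted.h change later in this diff): masterkey-$(CONFIG_TRUSTED_KEYS) covers the built-in case, becoming masterkey-y when CONFIG_TRUSTED_KEYS=y, so $(masterkey-y) pulls masterkey_trusted.o into encrypted-keys.o. The doubly-substituted line handles the all-modular case: with CONFIG_TRUSTED_KEYS=m and CONFIG_ENCRYPTED_KEYS=m it assigns masterkey-m-m, which $(masterkey-m-m) then picks up. With CONFIG_TRUSTED_KEYS=m but CONFIG_ENCRYPTED_KEYS=y the only variables set are masterkey-m and masterkey-m-y, neither of which is referenced, so masterkey_trusted.o is left out, matching the idea that built-in encrypted keys cannot call into a modular trusted-keys implementation.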
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index dcc843cb0f8..41144f71d61 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -444,7 +444,7 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
444 goto out; 444 goto out;
445 445
446 if (IS_ERR(mkey)) { 446 if (IS_ERR(mkey)) {
447 int ret = PTR_ERR(epayload); 447 int ret = PTR_ERR(mkey);
448 448
449 if (ret == -ENOTSUPP) 449 if (ret == -ENOTSUPP)
450 pr_info("encrypted_key: key %s not supported", 450 pr_info("encrypted_key: key %s not supported",
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index b6ade894525..8136a2d44c6 100644
--- a/security/keys/encrypted-keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -2,7 +2,8 @@
2#define __ENCRYPTED_KEY_H 2#define __ENCRYPTED_KEY_H
3 3
4#define ENCRYPTED_DEBUG 0 4#define ENCRYPTED_DEBUG 0
5#ifdef CONFIG_TRUSTED_KEYS 5#if defined(CONFIG_TRUSTED_KEYS) || \
6 (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
6extern struct key *request_trusted_key(const char *trusted_desc, 7extern struct key *request_trusted_key(const char *trusted_desc,
7 u8 **master_key, size_t *master_keylen); 8 u8 **master_key, size_t *master_keylen);
8#else 9#else
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 5b366d7af3c..69ff52c08e9 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -102,7 +102,8 @@ int user_update(struct key *key, const void *data, size_t datalen)
102 key->expiry = 0; 102 key->expiry = 0;
103 } 103 }
104 104
105 kfree_rcu(zap, rcu); 105 if (zap)
106 kfree_rcu(zap, rcu);
106 107
107error: 108error:
108 return ret; 109 return ret;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6aceef518a4..5c32f36ff70 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -102,9 +102,6 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
102 102
103const char *smack_cipso_option = SMACK_CIPSO_OPTION; 103const char *smack_cipso_option = SMACK_CIPSO_OPTION;
104 104
105
106#define SEQ_READ_FINISHED ((loff_t)-1)
107
108/* 105/*
109 * Values for parsing cipso rules 106 * Values for parsing cipso rules
110 * SMK_DIGITLEN: Length of a digit field in a rule. 107 * SMK_DIGITLEN: Length of a digit field in a rule.
@@ -357,10 +354,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
357 354
358 rc = count; 355 rc = count;
359 /* 356 /*
 357 * If this is "load" as opposed to "load-self", and the rule is new,
 358 * it needs to be added to the list used for reporting.
360 * smk_set_access returns true if there was already a rule 359 * smk_set_access returns true if there was already a rule
361 * for the subject/object pair, and false if it was new. 360 * for the subject/object pair, and false if it was new.
362 */ 361 */
363 if (!smk_set_access(rule, rule_list, rule_lock)) { 362 if (load && !smk_set_access(rule, rule_list, rule_lock)) {
364 smlp = kzalloc(sizeof(*smlp), GFP_KERNEL); 363 smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
365 if (smlp != NULL) { 364 if (smlp != NULL) {
366 smlp->smk_rule = rule; 365 smlp->smk_rule = rule;
@@ -377,12 +376,12 @@ out:
377 return rc; 376 return rc;
378} 377}
379 378
380
381/* 379/*
382 * Seq_file read operations for /smack/load 380 * Core logic for smackfs seq list operations.
383 */ 381 */
384 382
385static void *load_seq_start(struct seq_file *s, loff_t *pos) 383static void *smk_seq_start(struct seq_file *s, loff_t *pos,
384 struct list_head *head)
386{ 385{
387 struct list_head *list; 386 struct list_head *list;
388 387
@@ -390,7 +389,7 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
390 * This is 0 the first time through. 389 * This is 0 the first time through.
391 */ 390 */
392 if (s->index == 0) 391 if (s->index == 0)
393 s->private = &smack_rule_list; 392 s->private = head;
394 393
395 if (s->private == NULL) 394 if (s->private == NULL)
396 return NULL; 395 return NULL;
@@ -404,11 +403,12 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
404 return list; 403 return list;
405} 404}
406 405
407static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos) 406static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
407 struct list_head *head)
408{ 408{
409 struct list_head *list = v; 409 struct list_head *list = v;
410 410
411 if (list_is_last(list, &smack_rule_list)) { 411 if (list_is_last(list, head)) {
412 s->private = NULL; 412 s->private = NULL;
413 return NULL; 413 return NULL;
414 } 414 }
@@ -416,6 +416,25 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
416 return list->next; 416 return list->next;
417} 417}
418 418
419static void smk_seq_stop(struct seq_file *s, void *v)
420{
421 /* No-op */
422}
423
424/*
425 * Seq_file read operations for /smack/load
426 */
427
428static void *load_seq_start(struct seq_file *s, loff_t *pos)
429{
430 return smk_seq_start(s, pos, &smack_rule_list);
431}
432
433static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
434{
435 return smk_seq_next(s, v, pos, &smack_rule_list);
436}
437
419static int load_seq_show(struct seq_file *s, void *v) 438static int load_seq_show(struct seq_file *s, void *v)
420{ 439{
421 struct list_head *list = v; 440 struct list_head *list = v;
@@ -446,16 +465,11 @@ static int load_seq_show(struct seq_file *s, void *v)
446 return 0; 465 return 0;
447} 466}
448 467
449static void load_seq_stop(struct seq_file *s, void *v)
450{
451 /* No-op */
452}
453
454static const struct seq_operations load_seq_ops = { 468static const struct seq_operations load_seq_ops = {
455 .start = load_seq_start, 469 .start = load_seq_start,
456 .next = load_seq_next, 470 .next = load_seq_next,
457 .show = load_seq_show, 471 .show = load_seq_show,
458 .stop = load_seq_stop, 472 .stop = smk_seq_stop,
459}; 473};
460 474
461/** 475/**
@@ -574,28 +588,12 @@ static void smk_unlbl_ambient(char *oldambient)
574 588
575static void *cipso_seq_start(struct seq_file *s, loff_t *pos) 589static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
576{ 590{
577 if (*pos == SEQ_READ_FINISHED) 591 return smk_seq_start(s, pos, &smack_known_list);
578 return NULL;
579 if (list_empty(&smack_known_list))
580 return NULL;
581
582 return smack_known_list.next;
583} 592}
584 593
585static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos) 594static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
586{ 595{
587 struct list_head *list = v; 596 return smk_seq_next(s, v, pos, &smack_known_list);
588
589 /*
590 * labels with no associated cipso value wont be printed
591 * in cipso_seq_show
592 */
593 if (list_is_last(list, &smack_known_list)) {
594 *pos = SEQ_READ_FINISHED;
595 return NULL;
596 }
597
598 return list->next;
599} 597}
600 598
601/* 599/*
@@ -634,16 +632,11 @@ static int cipso_seq_show(struct seq_file *s, void *v)
634 return 0; 632 return 0;
635} 633}
636 634
637static void cipso_seq_stop(struct seq_file *s, void *v)
638{
639 /* No-op */
640}
641
642static const struct seq_operations cipso_seq_ops = { 635static const struct seq_operations cipso_seq_ops = {
643 .start = cipso_seq_start, 636 .start = cipso_seq_start,
644 .stop = cipso_seq_stop,
645 .next = cipso_seq_next, 637 .next = cipso_seq_next,
646 .show = cipso_seq_show, 638 .show = cipso_seq_show,
639 .stop = smk_seq_stop,
647}; 640};
648 641
649/** 642/**
@@ -788,23 +781,12 @@ static const struct file_operations smk_cipso_ops = {
788 781
789static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos) 782static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos)
790{ 783{
791 if (*pos == SEQ_READ_FINISHED) 784 return smk_seq_start(s, pos, &smk_netlbladdr_list);
792 return NULL;
793 if (list_empty(&smk_netlbladdr_list))
794 return NULL;
795 return smk_netlbladdr_list.next;
796} 785}
797 786
798static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos) 787static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
799{ 788{
800 struct list_head *list = v; 789 return smk_seq_next(s, v, pos, &smk_netlbladdr_list);
801
802 if (list_is_last(list, &smk_netlbladdr_list)) {
803 *pos = SEQ_READ_FINISHED;
804 return NULL;
805 }
806
807 return list->next;
808} 790}
809#define BEBITS (sizeof(__be32) * 8) 791#define BEBITS (sizeof(__be32) * 8)
810 792
@@ -828,16 +810,11 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
828 return 0; 810 return 0;
829} 811}
830 812
831static void netlbladdr_seq_stop(struct seq_file *s, void *v)
832{
833 /* No-op */
834}
835
836static const struct seq_operations netlbladdr_seq_ops = { 813static const struct seq_operations netlbladdr_seq_ops = {
837 .start = netlbladdr_seq_start, 814 .start = netlbladdr_seq_start,
838 .stop = netlbladdr_seq_stop,
839 .next = netlbladdr_seq_next, 815 .next = netlbladdr_seq_next,
840 .show = netlbladdr_seq_show, 816 .show = netlbladdr_seq_show,
817 .stop = smk_seq_stop,
841}; 818};
842 819
843/** 820/**
@@ -1405,23 +1382,14 @@ static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
1405{ 1382{
1406 struct task_smack *tsp = current_security(); 1383 struct task_smack *tsp = current_security();
1407 1384
1408 if (*pos == SEQ_READ_FINISHED) 1385 return smk_seq_start(s, pos, &tsp->smk_rules);
1409 return NULL;
1410 if (list_empty(&tsp->smk_rules))
1411 return NULL;
1412 return tsp->smk_rules.next;
1413} 1386}
1414 1387
1415static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos) 1388static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
1416{ 1389{
1417 struct task_smack *tsp = current_security(); 1390 struct task_smack *tsp = current_security();
1418 struct list_head *list = v;
1419 1391
1420 if (list_is_last(list, &tsp->smk_rules)) { 1392 return smk_seq_next(s, v, pos, &tsp->smk_rules);
1421 *pos = SEQ_READ_FINISHED;
1422 return NULL;
1423 }
1424 return list->next;
1425} 1393}
1426 1394
1427static int load_self_seq_show(struct seq_file *s, void *v) 1395static int load_self_seq_show(struct seq_file *s, void *v)
@@ -1453,16 +1421,11 @@ static int load_self_seq_show(struct seq_file *s, void *v)
1453 return 0; 1421 return 0;
1454} 1422}
1455 1423
1456static void load_self_seq_stop(struct seq_file *s, void *v)
1457{
1458 /* No-op */
1459}
1460
1461static const struct seq_operations load_self_seq_ops = { 1424static const struct seq_operations load_self_seq_ops = {
1462 .start = load_self_seq_start, 1425 .start = load_self_seq_start,
1463 .next = load_self_seq_next, 1426 .next = load_self_seq_next,
1464 .show = load_self_seq_show, 1427 .show = load_self_seq_show,
1465 .stop = load_self_seq_stop, 1428 .stop = smk_seq_stop,
1466}; 1429};
1467 1430
1468 1431
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index e083122ca55..dbf94b189e7 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
148 struct cs5535audio_dma_desc *desc = 148 struct cs5535audio_dma_desc *desc =
149 &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i]; 149 &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
150 desc->addr = cpu_to_le32(addr); 150 desc->addr = cpu_to_le32(addr);
151 desc->size = cpu_to_le32(period_bytes); 151 desc->size = cpu_to_le16(period_bytes);
152 desc->ctlreserved = cpu_to_le16(PRD_EOP); 152 desc->ctlreserved = cpu_to_le16(PRD_EOP);
153 desc_addr += sizeof(struct cs5535audio_dma_desc); 153 desc_addr += sizeof(struct cs5535audio_dma_desc);
154 addr += period_bytes; 154 addr += period_bytes;
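
The one-line cs5535audio change above switches the period size to a 16-bit little-endian conversion because the descriptor field itself is 16 bits wide; cpu_to_le32() only happened to work on little-endian hosts. A userspace sketch of the rule that the conversion width must match the field width, with a hypothetical descriptor layout and htole*() standing in for cpu_to_le*():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical descriptor layout: 32-bit address, two 16-bit fields. */
struct dma_desc {
    uint32_t addr;
    uint16_t size;
    uint16_t ctlreserved;
};

int main(void)
{
    struct dma_desc d;

    d.addr        = htole32(0x12345678u);
    d.size        = htole16(4096);   /* htole32() here was the old bug:
                                        wrong conversion width for a 16-bit field */
    d.ctlreserved = htole16(0x0100);
    printf("addr=%08x size=%04x ctl=%04x\n",
           (unsigned)d.addr, (unsigned)d.size, (unsigned)d.ctlreserved);
    return 0;
}
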
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e44b107fdc7..4562e9de6a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
4046 4046
4047 /* Search for codec ID */ 4047 /* Search for codec ID */
4048 for (q = tbl; q->subvendor; q++) { 4048 for (q = tbl; q->subvendor; q++) {
4049 unsigned long vendorid = (q->subdevice) | (q->subvendor << 16); 4049 unsigned int mask = 0xffff0000 | q->subdevice_mask;
4050 4050 unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
4051 if (vendorid == codec->subsystem_id) 4051 if ((codec->subsystem_id & mask) == id)
4052 break; 4052 break;
4053 } 4053 }
4054 4054
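
The hda_codec.c hunk above makes the codec-SSID quirk lookup honor each entry's subdevice_mask, so one table row can cover a whole vendor (the new Apple entry added in patch_cirrus.c further down relies on this). A standalone sketch of that masked match, with made-up table contents:

#include <stdint.h>
#include <stdio.h>

struct quirk {
    uint16_t subvendor;
    uint16_t subdevice;
    uint16_t subdevice_mask;  /* 0xffff = exact device, 0x0000 = any device */
    const char *name;
};

static const struct quirk tbl[] = {
    { 0x106b, 0x0000, 0x0000, "any Apple subdevice" },
    { 0x1028, 0x02fe, 0xffff, "one specific Dell board" },
    { 0 }
};

static const struct quirk *lookup(uint32_t subsystem_id)
{
    for (const struct quirk *q = tbl; q->subvendor; q++) {
        uint32_t mask = 0xffff0000u | q->subdevice_mask;
        uint32_t id = (((uint32_t)q->subvendor << 16) | q->subdevice) & mask;
        if ((subsystem_id & mask) == id)
            return q;
    }
    return NULL;
}

int main(void)
{
    const struct quirk *q = lookup(0x106b1234);
    printf("%s\n", q ? q->name : "no match");
    return 0;
}
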
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 1c8ddf547a2..c1da422e085 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -297,10 +297,18 @@ static int hdmi_update_eld(struct hdmi_eld *e,
297 buf + ELD_FIXED_BYTES + mnl + 3 * i); 297 buf + ELD_FIXED_BYTES + mnl + 3 * i);
298 } 298 }
299 299
300 /*
301 * HDMI sink's ELD info cannot always be retrieved for now, e.g.
302 * in console or for audio devices. Assume the highest speakers
303 * configuration, to _not_ prohibit multi-channel audio playback.
304 */
305 if (!e->spk_alloc)
306 e->spk_alloc = 0xffff;
307
308 e->eld_valid = true;
300 return 0; 309 return 0;
301 310
302out_fail: 311out_fail:
303 e->eld_ver = 0;
304 return -EINVAL; 312 return -EINVAL;
305} 313}
306 314
@@ -323,9 +331,6 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
323 * ELD is valid, actual eld_size is assigned in hdmi_update_eld() 331 * ELD is valid, actual eld_size is assigned in hdmi_update_eld()
324 */ 332 */
325 333
326 if (!eld->eld_valid)
327 return -ENOENT;
328
329 size = snd_hdmi_get_eld_size(codec, nid); 334 size = snd_hdmi_get_eld_size(codec, nid);
330 if (size == 0) { 335 if (size == 0) {
331 /* wfg: workaround for ASUS P5E-VM HDMI board */ 336 /* wfg: workaround for ASUS P5E-VM HDMI board */
@@ -342,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
342 347
343 for (i = 0; i < size; i++) { 348 for (i = 0; i < size; i++) {
344 unsigned int val = hdmi_get_eld_data(codec, nid, i); 349 unsigned int val = hdmi_get_eld_data(codec, nid, i);
350 /*
351 * Graphics driver might be writing to ELD buffer right now.
352 * Just abort. The caller will repoll after a while.
353 */
345 if (!(val & AC_ELDD_ELD_VALID)) { 354 if (!(val & AC_ELDD_ELD_VALID)) {
346 if (!i) {
347 snd_printd(KERN_INFO
348 "HDMI: invalid ELD data\n");
349 ret = -EINVAL;
350 goto error;
351 }
352 snd_printd(KERN_INFO 355 snd_printd(KERN_INFO
353 "HDMI: invalid ELD data byte %d\n", i); 356 "HDMI: invalid ELD data byte %d\n", i);
354 val = 0; 357 ret = -EINVAL;
355 } else 358 goto error;
356 val &= AC_ELDD_ELD_DATA; 359 }
360 val &= AC_ELDD_ELD_DATA;
361 /*
362 * The first byte cannot be zero. This can happen on some DVI
363 * connections. Some Intel chips may also need some 250ms delay
364 * to return non-zero ELD data, even when the graphics driver
365 * correctly writes ELD content before setting ELD_valid bit.
366 */
367 if (!val && !i) {
368 snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
369 ret = -EINVAL;
370 goto error;
371 }
357 buf[i] = val; 372 buf[i] = val;
358 } 373 }
359 374
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 6579e0f2bb5..618ddad1723 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -653,6 +653,9 @@ struct hdmi_eld {
653 int spk_alloc; 653 int spk_alloc;
654 int sad_count; 654 int sad_count;
655 struct cea_sad sad[ELD_MAX_SAD]; 655 struct cea_sad sad[ELD_MAX_SAD];
656 /*
657 * all fields above eld_buffer will be cleared before updating ELD
658 */
656 char eld_buffer[ELD_MAX_SIZE]; 659 char eld_buffer[ELD_MAX_SIZE];
657#ifdef CONFIG_PROC_FS 660#ifdef CONFIG_PROC_FS
658 struct snd_info_entry *proc_entry; 661 struct snd_info_entry *proc_entry;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 2a2d8645ba0..70a7abda7e2 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,8 @@ struct cs_spec {
58 unsigned int gpio_mask; 58 unsigned int gpio_mask;
59 unsigned int gpio_dir; 59 unsigned int gpio_dir;
60 unsigned int gpio_data; 60 unsigned int gpio_data;
61 unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
62 unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
61 63
62 struct hda_pcm pcm_rec[2]; /* PCM information */ 64 struct hda_pcm pcm_rec[2]; /* PCM information */
63 65
@@ -76,6 +78,7 @@ enum {
76 CS420X_MBP53, 78 CS420X_MBP53,
77 CS420X_MBP55, 79 CS420X_MBP55,
78 CS420X_IMAC27, 80 CS420X_IMAC27,
81 CS420X_APPLE,
79 CS420X_AUTO, 82 CS420X_AUTO,
80 CS420X_MODELS 83 CS420X_MODELS
81}; 84};
@@ -237,6 +240,15 @@ static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
237 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); 240 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
238} 241}
239 242
243static void cs_update_input_select(struct hda_codec *codec)
244{
245 struct cs_spec *spec = codec->spec;
246 if (spec->cur_adc)
247 snd_hda_codec_write(codec, spec->cur_adc, 0,
248 AC_VERB_SET_CONNECT_SEL,
249 spec->adc_idx[spec->cur_input]);
250}
251
240/* 252/*
241 * Analog capture 253 * Analog capture
242 */ 254 */
@@ -250,6 +262,7 @@ static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
250 spec->cur_adc = spec->adc_nid[spec->cur_input]; 262 spec->cur_adc = spec->adc_nid[spec->cur_input];
251 spec->cur_adc_stream_tag = stream_tag; 263 spec->cur_adc_stream_tag = stream_tag;
252 spec->cur_adc_format = format; 264 spec->cur_adc_format = format;
265 cs_update_input_select(codec);
253 snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format); 266 snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
254 return 0; 267 return 0;
255} 268}
@@ -689,10 +702,8 @@ static int change_cur_input(struct hda_codec *codec, unsigned int idx,
689 spec->cur_adc_stream_tag, 0, 702 spec->cur_adc_stream_tag, 0,
690 spec->cur_adc_format); 703 spec->cur_adc_format);
691 } 704 }
692 snd_hda_codec_write(codec, spec->cur_adc, 0,
693 AC_VERB_SET_CONNECT_SEL,
694 spec->adc_idx[idx]);
695 spec->cur_input = idx; 705 spec->cur_input = idx;
706 cs_update_input_select(codec);
696 return 1; 707 return 1;
697} 708}
698 709
@@ -920,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
920 spdif_present ? 0 : PIN_OUT); 931 spdif_present ? 0 : PIN_OUT);
921 } 932 }
922 } 933 }
923 if (spec->board_config == CS420X_MBP53 || 934 if (spec->gpio_eapd_hp) {
924 spec->board_config == CS420X_MBP55 || 935 unsigned int gpio = hp_present ?
925 spec->board_config == CS420X_IMAC27) { 936 spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
926 unsigned int gpio = hp_present ? 0x02 : 0x08;
927 snd_hda_codec_write(codec, 0x01, 0, 937 snd_hda_codec_write(codec, 0x01, 0,
928 AC_VERB_SET_GPIO_DATA, gpio); 938 AC_VERB_SET_GPIO_DATA, gpio);
929 } 939 }
@@ -973,10 +983,7 @@ static void cs_automic(struct hda_codec *codec)
973 } else { 983 } else {
974 spec->cur_input = spec->last_input; 984 spec->cur_input = spec->last_input;
975 } 985 }
976 986 cs_update_input_select(codec);
977 snd_hda_codec_write_cache(codec, spec->cur_adc, 0,
978 AC_VERB_SET_CONNECT_SEL,
979 spec->adc_idx[spec->cur_input]);
980 } else { 987 } else {
981 if (present) 988 if (present)
982 change_cur_input(codec, spec->automic_idx, 0); 989 change_cur_input(codec, spec->automic_idx, 0);
@@ -1073,9 +1080,7 @@ static void init_input(struct hda_codec *codec)
1073 cs_automic(codec); 1080 cs_automic(codec);
1074 else { 1081 else {
1075 spec->cur_adc = spec->adc_nid[spec->cur_input]; 1082 spec->cur_adc = spec->adc_nid[spec->cur_input];
1076 snd_hda_codec_write(codec, spec->cur_adc, 0, 1083 cs_update_input_select(codec);
1077 AC_VERB_SET_CONNECT_SEL,
1078 spec->adc_idx[spec->cur_input]);
1079 } 1084 }
1080 } else { 1085 } else {
1081 change_cur_input(codec, spec->cur_input, 1); 1086 change_cur_input(codec, spec->cur_input, 1);
@@ -1273,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
1273 [CS420X_MBP53] = "mbp53", 1278 [CS420X_MBP53] = "mbp53",
1274 [CS420X_MBP55] = "mbp55", 1279 [CS420X_MBP55] = "mbp55",
1275 [CS420X_IMAC27] = "imac27", 1280 [CS420X_IMAC27] = "imac27",
1281 [CS420X_APPLE] = "apple",
1276 [CS420X_AUTO] = "auto", 1282 [CS420X_AUTO] = "auto",
1277}; 1283};
1278 1284
@@ -1282,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
1282 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55), 1288 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
1283 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), 1289 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
1284 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), 1290 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
1285 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27), 1291 /* this conflicts with too many other models */
1292 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
1293 {} /* terminator */
1294};
1295
1296static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
1297 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
1286 {} /* terminator */ 1298 {} /* terminator */
1287}; 1299};
1288 1300
@@ -1364,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
1364 spec->board_config = 1376 spec->board_config =
1365 snd_hda_check_board_config(codec, CS420X_MODELS, 1377 snd_hda_check_board_config(codec, CS420X_MODELS,
1366 cs420x_models, cs420x_cfg_tbl); 1378 cs420x_models, cs420x_cfg_tbl);
1379 if (spec->board_config < 0)
1380 spec->board_config =
1381 snd_hda_check_board_codec_sid_config(codec,
1382 CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
1367 if (spec->board_config >= 0) 1383 if (spec->board_config >= 0)
1368 fix_pincfg(codec, spec->board_config, cs_pincfgs); 1384 fix_pincfg(codec, spec->board_config, cs_pincfgs);
1369 1385
@@ -1371,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
1371 case CS420X_IMAC27: 1387 case CS420X_IMAC27:
1372 case CS420X_MBP53: 1388 case CS420X_MBP53:
1373 case CS420X_MBP55: 1389 case CS420X_MBP55:
1374 /* GPIO1 = headphones */ 1390 case CS420X_APPLE:
1375 /* GPIO3 = speakers */ 1391 spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
1376 spec->gpio_mask = 0x0a; 1392 spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
1377 spec->gpio_dir = 0x0a; 1393 spec->gpio_mask = spec->gpio_dir =
1394 spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
1378 break; 1395 break;
1379 } 1396 }
1380 1397
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 81b7b791b3c..c505fd5d338 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -65,7 +65,11 @@ struct hdmi_spec_per_pin {
65 hda_nid_t pin_nid; 65 hda_nid_t pin_nid;
66 int num_mux_nids; 66 int num_mux_nids;
67 hda_nid_t mux_nids[HDA_MAX_CONNECTIONS]; 67 hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
68
69 struct hda_codec *codec;
68 struct hdmi_eld sink_eld; 70 struct hdmi_eld sink_eld;
71 struct delayed_work work;
72 int repoll_count;
69}; 73};
70 74
71struct hdmi_spec { 75struct hdmi_spec {
@@ -745,8 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
745 * Unsolicited events 749 * Unsolicited events
746 */ 750 */
747 751
748static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 752static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
749 struct hdmi_eld *eld);
750 753
751static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 754static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
752{ 755{
@@ -755,7 +758,6 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
755 int pd = !!(res & AC_UNSOL_RES_PD); 758 int pd = !!(res & AC_UNSOL_RES_PD);
756 int eldv = !!(res & AC_UNSOL_RES_ELDV); 759 int eldv = !!(res & AC_UNSOL_RES_ELDV);
757 int pin_idx; 760 int pin_idx;
758 struct hdmi_eld *eld;
759 761
760 printk(KERN_INFO 762 printk(KERN_INFO
761 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 763 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
@@ -764,17 +766,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
764 pin_idx = pin_nid_to_pin_index(spec, pin_nid); 766 pin_idx = pin_nid_to_pin_index(spec, pin_nid);
765 if (pin_idx < 0) 767 if (pin_idx < 0)
766 return; 768 return;
767 eld = &spec->pins[pin_idx].sink_eld;
768
769 hdmi_present_sense(codec, pin_nid, eld);
770 769
771 /* 770 hdmi_present_sense(&spec->pins[pin_idx], 1);
772 * HDMI sink's ELD info cannot always be retrieved for now, e.g.
773 * in console or for audio devices. Assume the highest speakers
774 * configuration, to _not_ prohibit multi-channel audio playback.
775 */
776 if (!eld->spk_alloc)
777 eld->spk_alloc = 0xffff;
778} 771}
779 772
780static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 773static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -968,9 +961,11 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
968 return 0; 961 return 0;
969} 962}
970 963
971static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 964static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
972 struct hdmi_eld *eld)
973{ 965{
966 struct hda_codec *codec = per_pin->codec;
967 struct hdmi_eld *eld = &per_pin->sink_eld;
968 hda_nid_t pin_nid = per_pin->pin_nid;
974 /* 969 /*
975 * Always execute a GetPinSense verb here, even when called from 970 * Always execute a GetPinSense verb here, even when called from
976 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited 971 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
@@ -980,26 +975,42 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
980 * the unsolicited response to avoid custom WARs. 975 * the unsolicited response to avoid custom WARs.
981 */ 976 */
982 int present = snd_hda_pin_sense(codec, pin_nid); 977 int present = snd_hda_pin_sense(codec, pin_nid);
978 bool eld_valid = false;
983 979
984 memset(eld, 0, sizeof(*eld)); 980 memset(eld, 0, offsetof(struct hdmi_eld, eld_buffer));
985 981
986 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); 982 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
987 if (eld->monitor_present) 983 if (eld->monitor_present)
988 eld->eld_valid = !!(present & AC_PINSENSE_ELDV); 984 eld_valid = !!(present & AC_PINSENSE_ELDV);
989 else
990 eld->eld_valid = 0;
991 985
992 printk(KERN_INFO 986 printk(KERN_INFO
993 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 987 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
994 codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); 988 codec->addr, pin_nid, eld->monitor_present, eld_valid);
995 989
996 if (eld->eld_valid) 990 if (eld_valid) {
997 if (!snd_hdmi_get_eld(eld, codec, pin_nid)) 991 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
998 snd_hdmi_show_eld(eld); 992 snd_hdmi_show_eld(eld);
993 else if (repoll) {
994 queue_delayed_work(codec->bus->workq,
995 &per_pin->work,
996 msecs_to_jiffies(300));
997 }
998 }
999 999
1000 snd_hda_input_jack_report(codec, pin_nid); 1000 snd_hda_input_jack_report(codec, pin_nid);
1001} 1001}
1002 1002
1003static void hdmi_repoll_eld(struct work_struct *work)
1004{
1005 struct hdmi_spec_per_pin *per_pin =
1006 container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
1007
1008 if (per_pin->repoll_count++ > 6)
1009 per_pin->repoll_count = 0;
1010
1011 hdmi_present_sense(per_pin, per_pin->repoll_count);
1012}
1013
1003static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) 1014static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
1004{ 1015{
1005 struct hdmi_spec *spec = codec->spec; 1016 struct hdmi_spec *spec = codec->spec;
@@ -1228,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
1228 if (err < 0) 1239 if (err < 0)
1229 return err; 1240 return err;
1230 1241
1231 hdmi_present_sense(codec, per_pin->pin_nid, &per_pin->sink_eld); 1242 hdmi_present_sense(per_pin, 0);
1232 return 0; 1243 return 0;
1233} 1244}
1234 1245
@@ -1279,6 +1290,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
1279 AC_VERB_SET_UNSOLICITED_ENABLE, 1290 AC_VERB_SET_UNSOLICITED_ENABLE,
1280 AC_USRSP_EN | pin_nid); 1291 AC_USRSP_EN | pin_nid);
1281 1292
1293 per_pin->codec = codec;
1294 INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
1282 snd_hda_eld_proc_new(codec, eld, pin_idx); 1295 snd_hda_eld_proc_new(codec, eld, pin_idx);
1283 } 1296 }
1284 return 0; 1297 return 0;
@@ -1293,10 +1306,12 @@ static void generic_hdmi_free(struct hda_codec *codec)
1293 struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx]; 1306 struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
1294 struct hdmi_eld *eld = &per_pin->sink_eld; 1307 struct hdmi_eld *eld = &per_pin->sink_eld;
1295 1308
1309 cancel_delayed_work(&per_pin->work);
1296 snd_hda_eld_proc_free(codec, eld); 1310 snd_hda_eld_proc_free(codec, eld);
1297 } 1311 }
1298 snd_hda_input_jack_free(codec); 1312 snd_hda_input_jack_free(codec);
1299 1313
1314 flush_workqueue(codec->bus->workq);
1300 kfree(spec); 1315 kfree(spec);
1301} 1316}
1302 1317
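
patch_hdmi.c above turns ELD reading into a bounded repoll: when snd_hdmi_get_eld() fails because the graphics driver is still writing the buffer, the pin's delayed work is queued again after 300 ms, and hdmi_repoll_eld() gives up after a handful of attempts. A plain userspace sketch of that retry shape, with read_eld() as a made-up stand-in and a sleep in place of the workqueue:

#include <stdio.h>
#include <time.h>

static int read_eld(void)
{
    static int attempts;
    return ++attempts < 3 ? -1 : 0;   /* pretend the first reads race */
}

int main(void)
{
    int repoll;

    for (repoll = 0; repoll <= 6; repoll++) {
        if (read_eld() == 0) {
            printf("ELD valid after %d repoll(s)\n", repoll);
            return 0;
        }
        /* the kernel code queues delayed work for 300 ms; sleep instead */
        nanosleep(&(struct timespec){ .tv_nsec = 300 * 1000 * 1000 }, NULL);
    }
    printf("giving up\n");
    return 1;
}
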
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 308bb575bc0..cbde019d3d5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
277 return false; 277 return false;
278} 278}
279 279
280static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
281{
282 return spec->capsrc_nids ?
283 spec->capsrc_nids[idx] : spec->adc_nids[idx];
284}
285
280/* select the given imux item; either unmute exclusively or select the route */ 286/* select the given imux item; either unmute exclusively or select the route */
281static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, 287static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
282 unsigned int idx, bool force) 288 unsigned int idx, bool force)
@@ -303,8 +309,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
303 adc_idx = spec->dyn_adc_idx[idx]; 309 adc_idx = spec->dyn_adc_idx[idx];
304 } 310 }
305 311
306 nid = spec->capsrc_nids ? 312 nid = get_capsrc(spec, adc_idx);
307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
308 313
309 /* no selection? */ 314 /* no selection? */
310 num_conns = snd_hda_get_conn_list(codec, nid, NULL); 315 num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1059,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
1054 spec->imux_pins[2] = spec->dock_mic_pin; 1059 spec->imux_pins[2] = spec->dock_mic_pin;
1055 for (i = 0; i < 3; i++) { 1060 for (i = 0; i < 3; i++) {
1056 strcpy(imux->items[i].label, texts[i]); 1061 strcpy(imux->items[i].label, texts[i]);
1057 if (spec->imux_pins[i]) 1062 if (spec->imux_pins[i]) {
1063 hda_nid_t pin = spec->imux_pins[i];
1064 int c;
1065 for (c = 0; c < spec->num_adc_nids; c++) {
1066 hda_nid_t cap = get_capsrc(spec, c);
1067 int idx = get_connection_index(codec, cap, pin);
1068 if (idx >= 0) {
1069 imux->items[i].index = idx;
1070 break;
1071 }
1072 }
1058 imux->num_items = i + 1; 1073 imux->num_items = i + 1;
1074 }
1059 } 1075 }
1060 spec->num_mux_defs = 1; 1076 spec->num_mux_defs = 1;
1061 spec->input_mux = imux; 1077 spec->input_mux = imux;
@@ -1452,7 +1468,7 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
1452 switch (fix->type) { 1468 switch (fix->type) {
1453 case ALC_FIXUP_SKU: 1469 case ALC_FIXUP_SKU:
1454 if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku) 1470 if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku)
1455 break;; 1471 break;
1456 snd_printdd(KERN_INFO "hda_codec: %s: " 1472 snd_printdd(KERN_INFO "hda_codec: %s: "
1457 "Apply sku override for %s\n", 1473 "Apply sku override for %s\n",
1458 codec->chip_name, modelname); 1474 codec->chip_name, modelname);
@@ -1957,10 +1973,8 @@ static int alc_build_controls(struct hda_codec *codec)
1957 if (!kctl) 1973 if (!kctl)
1958 kctl = snd_hda_find_mixer_ctl(codec, "Input Source"); 1974 kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
1959 for (i = 0; kctl && i < kctl->count; i++) { 1975 for (i = 0; kctl && i < kctl->count; i++) {
1960 const hda_nid_t *nids = spec->capsrc_nids; 1976 err = snd_hda_add_nid(codec, kctl, i,
1961 if (!nids) 1977 get_capsrc(spec, i));
1962 nids = spec->adc_nids;
1963 err = snd_hda_add_nid(codec, kctl, i, nids[i]);
1964 if (err < 0) 1978 if (err < 0)
1965 return err; 1979 return err;
1966 } 1980 }
@@ -2747,8 +2761,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
2747 } 2761 }
2748 2762
2749 for (c = 0; c < num_adcs; c++) { 2763 for (c = 0; c < num_adcs; c++) {
2750 hda_nid_t cap = spec->capsrc_nids ? 2764 hda_nid_t cap = get_capsrc(spec, c);
2751 spec->capsrc_nids[c] : spec->adc_nids[c];
2752 idx = get_connection_index(codec, cap, pin); 2765 idx = get_connection_index(codec, cap, pin);
2753 if (idx >= 0) { 2766 if (idx >= 0) {
2754 spec->imux_pins[imux->num_items] = pin; 2767 spec->imux_pins[imux->num_items] = pin;
@@ -3694,8 +3707,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
3694 if (!pin) 3707 if (!pin)
3695 return 0; 3708 return 0;
3696 for (i = 0; i < spec->num_adc_nids; i++) { 3709 for (i = 0; i < spec->num_adc_nids; i++) {
3697 hda_nid_t cap = spec->capsrc_nids ? 3710 hda_nid_t cap = get_capsrc(spec, i);
3698 spec->capsrc_nids[i] : spec->adc_nids[i];
3699 int idx; 3711 int idx;
3700 3712
3701 idx = get_connection_index(codec, cap, pin); 3713 idx = get_connection_index(codec, cap, pin);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index edc2b7bc177..f3658658548 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -227,7 +227,6 @@ struct sigmatel_spec {
227 227
228 /* power management */ 228 /* power management */
229 unsigned int num_pwrs; 229 unsigned int num_pwrs;
230 const unsigned int *pwr_mapping;
231 const hda_nid_t *pwr_nids; 230 const hda_nid_t *pwr_nids;
232 const hda_nid_t *dac_list; 231 const hda_nid_t *dac_list;
233 232
@@ -374,18 +373,15 @@ static const unsigned long stac92hd73xx_capvols[] = {
374 373
375#define STAC92HD83_DAC_COUNT 3 374#define STAC92HD83_DAC_COUNT 3
376 375
377static const hda_nid_t stac92hd83xxx_pwr_nids[4] = { 376static const hda_nid_t stac92hd83xxx_pwr_nids[7] = {
378 0xa, 0xb, 0xd, 0xe, 377 0x0a, 0x0b, 0x0c, 0xd, 0x0e,
378 0x0f, 0x10
379}; 379};
380 380
381static const hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { 381static const hda_nid_t stac92hd83xxx_slave_dig_outs[2] = {
382 0x1e, 0, 382 0x1e, 0,
383}; 383};
384 384
385static const unsigned int stac92hd83xxx_pwr_mapping[4] = {
386 0x03, 0x0c, 0x20, 0x40,
387};
388
389static const hda_nid_t stac92hd83xxx_dmic_nids[] = { 385static const hda_nid_t stac92hd83xxx_dmic_nids[] = {
390 0x11, 0x20, 386 0x11, 0x20,
391}; 387};
@@ -1645,6 +1641,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
1645 "Alienware M17x", STAC_ALIENWARE_M17X), 1641 "Alienware M17x", STAC_ALIENWARE_M17X),
1646 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a, 1642 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
1647 "Alienware M17x", STAC_ALIENWARE_M17X), 1643 "Alienware M17x", STAC_ALIENWARE_M17X),
1644 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
1645 "Alienware M17x", STAC_ALIENWARE_M17X),
1648 {} /* terminator */ 1646 {} /* terminator */
1649}; 1647};
1650 1648
@@ -4470,8 +4468,12 @@ static int stac92xx_init(struct hda_codec *codec)
4470 stac_toggle_power_map(codec, nid, 1); 4468 stac_toggle_power_map(codec, nid, 1);
4471 continue; 4469 continue;
4472 } 4470 }
4473 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) 4471 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
4474 stac_issue_unsol_event(codec, nid); 4472 stac_issue_unsol_event(codec, nid);
4473 continue;
4474 }
4475 /* none of the above, turn the port OFF */
4476 stac_toggle_power_map(codec, nid, 0);
4475 } 4477 }
4476 4478
4477 /* sync mute LED */ 4479 /* sync mute LED */
@@ -4727,11 +4729,7 @@ static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
4727 if (idx >= spec->num_pwrs) 4729 if (idx >= spec->num_pwrs)
4728 return; 4730 return;
4729 4731
4730 /* several codecs have two power down bits */ 4732 idx = 1 << idx;
4731 if (spec->pwr_mapping)
4732 idx = spec->pwr_mapping[idx];
4733 else
4734 idx = 1 << idx;
4735 4733
4736 val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff; 4734 val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff;
4737 if (enable) 4735 if (enable)
@@ -5629,9 +5627,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5629 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); 5627 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
5630 } 5628 }
5631 5629
5632 /* reset pin power-down; Windows may leave these bits after reboot */
5633 snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7EC, 0);
5634 snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
5635 codec->no_trigger_sense = 1; 5630 codec->no_trigger_sense = 1;
5636 codec->spec = spec; 5631 codec->spec = spec;
5637 5632
@@ -5641,7 +5636,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5641 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; 5636 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
5642 spec->digbeep_nid = 0x21; 5637 spec->digbeep_nid = 0x21;
5643 spec->pwr_nids = stac92hd83xxx_pwr_nids; 5638 spec->pwr_nids = stac92hd83xxx_pwr_nids;
5644 spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
5645 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); 5639 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
5646 spec->multiout.dac_nids = spec->dac_nids; 5640 spec->multiout.dac_nids = spec->dac_nids;
5647 spec->init = stac92hd83xxx_core_init; 5641 spec->init = stac92hd83xxx_core_init;
@@ -5658,9 +5652,6 @@ again:
5658 stac92xx_set_config_regs(codec, 5652 stac92xx_set_config_regs(codec,
5659 stac92hd83xxx_brd_tbl[spec->board_config]); 5653 stac92hd83xxx_brd_tbl[spec->board_config]);
5660 5654
5661 if (spec->board_config != STAC_92HD83XXX_PWR_REF)
5662 spec->num_pwrs = 0;
5663
5664 codec->patch_ops = stac92xx_patch_ops; 5655 codec->patch_ops = stac92xx_patch_ops;
5665 5656
5666 if (find_mute_led_gpio(codec, 0)) 5657 if (find_mute_led_gpio(codec, 0))
@@ -5869,8 +5860,6 @@ again:
5869 (codec->revision_id & 0xf) == 1) 5860 (codec->revision_id & 0xf) == 1)
5870 spec->stream_delay = 40; /* 40 milliseconds */ 5861 spec->stream_delay = 40; /* 40 milliseconds */
5871 5862
5872 /* no output amps */
5873 spec->num_pwrs = 0;
5874 /* disable VSW */ 5863 /* disable VSW */
5875 spec->init = stac92hd71bxx_core_init; 5864 spec->init = stac92hd71bxx_core_init;
5876 unmute_init++; 5865 unmute_init++;
@@ -5885,8 +5874,6 @@ again:
5885 if ((codec->revision_id & 0xf) == 1) 5874 if ((codec->revision_id & 0xf) == 1)
5886 spec->stream_delay = 40; /* 40 milliseconds */ 5875 spec->stream_delay = 40; /* 40 milliseconds */
5887 5876
5888 /* no output amps */
5889 spec->num_pwrs = 0;
5890 /* fallthru */ 5877 /* fallthru */
5891 default: 5878 default:
5892 spec->init = stac92hd71bxx_core_init; 5879 spec->init = stac92hd71bxx_core_init;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 431c0d417ee..b5137629f8e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -208,6 +208,7 @@ struct via_spec {
208 /* work to check hp jack state */ 208 /* work to check hp jack state */
209 struct hda_codec *codec; 209 struct hda_codec *codec;
210 struct delayed_work vt1708_hp_work; 210 struct delayed_work vt1708_hp_work;
211 int hp_work_active;
211 int vt1708_jack_detect; 212 int vt1708_jack_detect;
212 int vt1708_hp_present; 213 int vt1708_hp_present;
213 214
@@ -305,27 +306,35 @@ enum {
305static void analog_low_current_mode(struct hda_codec *codec); 306static void analog_low_current_mode(struct hda_codec *codec);
306static bool is_aa_path_mute(struct hda_codec *codec); 307static bool is_aa_path_mute(struct hda_codec *codec);
307 308
308static void vt1708_start_hp_work(struct via_spec *spec) 309#define hp_detect_with_aa(codec) \
310 (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
311 !is_aa_path_mute(codec))
312
313static void vt1708_stop_hp_work(struct via_spec *spec)
309{ 314{
310 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) 315 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
311 return; 316 return;
312 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 317 if (spec->hp_work_active) {
313 !spec->vt1708_jack_detect); 318 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
314 if (!delayed_work_pending(&spec->vt1708_hp_work)) 319 cancel_delayed_work_sync(&spec->vt1708_hp_work);
315 schedule_delayed_work(&spec->vt1708_hp_work, 320 spec->hp_work_active = 0;
316 msecs_to_jiffies(100)); 321 }
317} 322}
318 323
319static void vt1708_stop_hp_work(struct via_spec *spec) 324static void vt1708_update_hp_work(struct via_spec *spec)
320{ 325{
321 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) 326 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
322 return; 327 return;
323 if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1 328 if (spec->vt1708_jack_detect &&
324 && !is_aa_path_mute(spec->codec)) 329 (spec->active_streams || hp_detect_with_aa(spec->codec))) {
325 return; 330 if (!spec->hp_work_active) {
326 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 331 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
327 !spec->vt1708_jack_detect); 332 schedule_delayed_work(&spec->vt1708_hp_work,
328 cancel_delayed_work_sync(&spec->vt1708_hp_work); 333 msecs_to_jiffies(100));
334 spec->hp_work_active = 1;
335 }
336 } else if (!hp_detect_with_aa(spec->codec))
337 vt1708_stop_hp_work(spec);
329} 338}
330 339
331static void set_widgets_power_state(struct hda_codec *codec) 340static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
343 352
344 set_widgets_power_state(codec); 353 set_widgets_power_state(codec);
345 analog_low_current_mode(snd_kcontrol_chip(kcontrol)); 354 analog_low_current_mode(snd_kcontrol_chip(kcontrol));
346 if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) { 355 vt1708_update_hp_work(codec->spec);
347 if (is_aa_path_mute(codec))
348 vt1708_start_hp_work(codec->spec);
349 else
350 vt1708_stop_hp_work(codec->spec);
351 }
352 return change; 356 return change;
353} 357}
354 358
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
1154 spec->cur_dac_stream_tag = stream_tag; 1158 spec->cur_dac_stream_tag = stream_tag;
1155 spec->cur_dac_format = format; 1159 spec->cur_dac_format = format;
1156 mutex_unlock(&spec->config_mutex); 1160 mutex_unlock(&spec->config_mutex);
1157 vt1708_start_hp_work(spec); 1161 vt1708_update_hp_work(spec);
1158 return 0; 1162 return 0;
1159} 1163}
1160 1164
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
1174 spec->cur_hp_stream_tag = stream_tag; 1178 spec->cur_hp_stream_tag = stream_tag;
1175 spec->cur_hp_format = format; 1179 spec->cur_hp_format = format;
1176 mutex_unlock(&spec->config_mutex); 1180 mutex_unlock(&spec->config_mutex);
1177 vt1708_start_hp_work(spec); 1181 vt1708_update_hp_work(spec);
1178 return 0; 1182 return 0;
1179} 1183}
1180 1184
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
1188 snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); 1192 snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
1189 spec->active_streams &= ~STREAM_MULTI_OUT; 1193 spec->active_streams &= ~STREAM_MULTI_OUT;
1190 mutex_unlock(&spec->config_mutex); 1194 mutex_unlock(&spec->config_mutex);
1191 vt1708_stop_hp_work(spec); 1195 vt1708_update_hp_work(spec);
1192 return 0; 1196 return 0;
1193} 1197}
1194 1198
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
1203 snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0); 1207 snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
1204 spec->active_streams &= ~STREAM_INDEP_HP; 1208 spec->active_streams &= ~STREAM_INDEP_HP;
1205 mutex_unlock(&spec->config_mutex); 1209 mutex_unlock(&spec->config_mutex);
1206 vt1708_stop_hp_work(spec); 1210 vt1708_update_hp_work(spec);
1207 return 0; 1211 return 0;
1208} 1212}
1209 1213
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
1645 int nums; 1649 int nums;
1646 struct via_spec *spec = codec->spec; 1650 struct via_spec *spec = codec->spec;
1647 1651
1648 if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0]) 1652 if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
1653 (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
1649 present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]); 1654 present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
1650 1655
1651 if (spec->smart51_enabled) 1656 if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
2612 2617
2613 if (spec->codec_type != VT1708) 2618 if (spec->codec_type != VT1708)
2614 return 0; 2619 return 0;
2615 spec->vt1708_jack_detect =
2616 !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
2617 ucontrol->value.integer.value[0] = spec->vt1708_jack_detect; 2620 ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
2618 return 0; 2621 return 0;
2619} 2622}
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
2623{ 2626{
2624 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 2627 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2625 struct via_spec *spec = codec->spec; 2628 struct via_spec *spec = codec->spec;
2626 int change; 2629 int val;
2627 2630
2628 if (spec->codec_type != VT1708) 2631 if (spec->codec_type != VT1708)
2629 return 0; 2632 return 0;
2630 spec->vt1708_jack_detect = ucontrol->value.integer.value[0]; 2633 val = !!ucontrol->value.integer.value[0];
2631 change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8)) 2634 if (spec->vt1708_jack_detect == val)
2632 == !spec->vt1708_jack_detect; 2635 return 0;
2633 if (spec->vt1708_jack_detect) { 2636 spec->vt1708_jack_detect = val;
2637 if (spec->vt1708_jack_detect &&
2638 snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
2634 mute_aa_path(codec, 1); 2639 mute_aa_path(codec, 1);
2635 notify_aa_path_ctls(codec); 2640 notify_aa_path_ctls(codec);
2636 } 2641 }
2637 return change; 2642 via_hp_automute(codec);
2643 vt1708_update_hp_work(spec);
2644 return 1;
2638} 2645}
2639 2646
2640static const struct snd_kcontrol_new vt1708_jack_detect_ctl = { 2647static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
2771 via_auto_init_unsol_event(codec); 2778 via_auto_init_unsol_event(codec);
2772 2779
2773 via_hp_automute(codec); 2780 via_hp_automute(codec);
2781 vt1708_update_hp_work(spec);
2774 2782
2775 return 0; 2783 return 0;
2776} 2784}
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
2787 spec->vt1708_hp_present ^= 1; 2795 spec->vt1708_hp_present ^= 1;
2788 via_hp_automute(spec->codec); 2796 via_hp_automute(spec->codec);
2789 } 2797 }
2790 vt1708_start_hp_work(spec); 2798 if (spec->vt1708_jack_detect)
2799 schedule_delayed_work(&spec->vt1708_hp_work,
2800 msecs_to_jiffies(100));
2791} 2801}
2792 2802
2793static int get_mux_nids(struct hda_codec *codec) 2803static int get_mux_nids(struct hda_codec *codec)
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5c8717e29ee..8c3e7fcefd9 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
78 return ioread32(address); 78 return ioread32(address);
79} 79}
80 80
81void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len) 81static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
82 u32 len)
82{ 83{
83 void __iomem *address = lx_dsp_register(chip, port); 84 u32 __iomem *address = lx_dsp_register(chip, port);
84 memcpy_fromio(data, address, len*sizeof(u32)); 85 int i;
86
87 /* we cannot use memcpy_fromio */
88 for (i = 0; i != len; ++i)
89 data[i] = ioread32(address + i);
85} 90}
86 91
87 92
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
91 iowrite32(data, address); 96 iowrite32(data, address);
92} 97}
93 98
94void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data, 99static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
95 u32 len) 100 const u32 *data, u32 len)
96{ 101{
97 void __iomem *address = lx_dsp_register(chip, port); 102 u32 __iomem *address = lx_dsp_register(chip, port);
98 memcpy_toio(address, data, len*sizeof(u32)); 103 int i;
104
105 /* we cannot use memcpy_to */
106 for (i = 0; i != len; ++i)
107 iowrite32(data[i], address + i);
99} 108}
100 109
101 110
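
The lx_core.c hunks above replace memcpy_fromio()/memcpy_toio() with explicit ioread32()/iowrite32() loops because the DSP port only tolerates 32-bit accesses, while the memcpy-style helpers are free to use other widths. A userspace analog of the word-at-a-time copy, with a volatile pointer standing in for the MMIO accessors:

#include <stddef.h>
#include <stdint.h>

static void reg_readbuf(const volatile uint32_t *src, uint32_t *dst, size_t len)
{
    for (size_t i = 0; i != len; ++i)
        dst[i] = src[i];          /* one 32-bit load per element */
}

static void reg_writebuf(volatile uint32_t *dst, const uint32_t *src, size_t len)
{
    for (size_t i = 0; i != len; ++i)
        dst[i] = src[i];          /* one 32-bit store per element */
}

int main(void)
{
    uint32_t regs[4] = { 1, 2, 3, 4 }, buf[4];

    reg_readbuf(regs, buf, 4);
    reg_writebuf(regs, buf, 4);
    return 0;
}
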
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 1dd562980b6..4d7ff797a64 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -72,10 +72,7 @@ enum {
72}; 72};
73 73
74unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port); 74unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
75void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
76void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data); 75void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
77void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
78 u32 len);
79 76
80/* plx register access */ 77/* plx register access */
81enum { 78enum {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e760adad952..19ee2203cbb 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
6518 hdspm->io_type = AES32; 6518 hdspm->io_type = AES32;
6519 hdspm->card_name = "RME AES32"; 6519 hdspm->card_name = "RME AES32";
6520 hdspm->midiPorts = 2; 6520 hdspm->midiPorts = 2;
6521 } else if ((hdspm->firmware_rev == 0xd5) || 6521 } else if ((hdspm->firmware_rev == 0xd2) ||
6522 ((hdspm->firmware_rev >= 0xc8) && 6522 ((hdspm->firmware_rev >= 0xc8) &&
6523 (hdspm->firmware_rev <= 0xcf))) { 6523 (hdspm->firmware_rev <= 0xcf))) {
6524 hdspm->io_type = MADI; 6524 hdspm->io_type = MADI;
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ccf8dd4757..45c63028b40 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
245}; 245};
246 246
247static const unsigned int adau1373_bass_tlv[] = { 247static const unsigned int adau1373_bass_tlv[] = {
248 TLV_DB_RANGE_HEAD(4), 248 TLV_DB_RANGE_HEAD(3),
249 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), 249 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
250 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), 250 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
251 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), 251 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
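
This adau1373 hunk and the later rt5631, sgtl5000, wm8962 and wm8993 hunks all correct the same mistake: the count passed to TLV_DB_RANGE_HEAD() must equal the number of (min, max, TLV_DB_SCALE_ITEM(...)) triples that follow, since it sizes the data user space parses, so a mismatched count truncates or over-reads the table. A hypothetical kernel-side fragment (not a standalone program) with two ranges:

#include <sound/tlv.h>

static const unsigned int example_gain_tlv[] = {
    TLV_DB_RANGE_HEAD(2),                    /* exactly two ranges below */
    0, 3, TLV_DB_SCALE_ITEM(-1200, 300, 0),  /* steps 0-3: -12 dB .. -3 dB */
    4, 7, TLV_DB_SCALE_ITEM(0, 100, 0),      /* steps 4-7:   0 dB .. +3 dB */
};
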
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 23d1bd5dadd..69fde1506fe 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
434{ 434{
435 int ret; 435 int ret;
436 /* Set power-down bit */ 436 /* Set power-down bit */
437 ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN); 437 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
438 CS4271_MODE2_PDN);
438 if (ret < 0) 439 if (ret < 0)
439 return ret; 440 return ret;
440 return 0; 441 return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
501 return ret; 502 return ret;
502 } 503 }
503 504
504 ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, 505 ret = snd_soc_update_bits(codec, CS4271_MODE2,
505 CS4271_MODE2_PDN | CS4271_MODE2_CPEN); 506 CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
507 CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
506 if (ret < 0) 508 if (ret < 0)
507 return ret; 509 return ret;
508 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0); 510 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
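
The cs4271 fix above hinges on the snd_soc_update_bits(codec, reg, mask, val) semantics: only the bits set in mask are rewritten, taking their new state from val, so passing mask = 0 (as the old code did) changes nothing. A plain-C model of that read-modify-write, with a stand-in bit value:

#include <stdint.h>
#include <stdio.h>

static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
    return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
    uint8_t mode2 = 0x02;   /* pretend current register contents       */
    uint8_t pdn   = 0x01;   /* hypothetical stand-in for the PDN bit   */

    printf("mask=0:   %#04x\n", update_bits(mode2, 0, pdn));    /* unchanged */
    printf("mask=PDN: %#04x\n", update_bits(mode2, pdn, pdn));  /* bit set   */
    return 0;
}
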
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 27a078cbb6e..4646e808b90 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
177static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); 177static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
178/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */ 178/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
179static unsigned int mic_bst_tlv[] = { 179static unsigned int mic_bst_tlv[] = {
180 TLV_DB_RANGE_HEAD(6), 180 TLV_DB_RANGE_HEAD(7),
181 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 181 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
182 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), 182 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
183 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), 183 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d15695d1c27..bbcf921166f 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
365 365
366/* tlv for mic gain, 0db 20db 30db 40db */ 366/* tlv for mic gain, 0db 20db 30db 40db */
367static const unsigned int mic_gain_tlv[] = { 367static const unsigned int mic_gain_tlv[] = {
368 TLV_DB_RANGE_HEAD(4), 368 TLV_DB_RANGE_HEAD(2),
369 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 369 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
370 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0), 370 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
371}; 371};
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index bb82408ab8e..d2f37152f94 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -76,6 +76,8 @@ struct sta32x_priv {
76 76
77 unsigned int mclk; 77 unsigned int mclk;
78 unsigned int format; 78 unsigned int format;
79
80 u32 coef_shadow[STA32X_COEF_COUNT];
79}; 81};
80 82
81static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1); 83static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
227 struct snd_ctl_elem_value *ucontrol) 229 struct snd_ctl_elem_value *ucontrol)
228{ 230{
229 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 231 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
232 struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
230 int numcoef = kcontrol->private_value >> 16; 233 int numcoef = kcontrol->private_value >> 16;
231 int index = kcontrol->private_value & 0xffff; 234 int index = kcontrol->private_value & 0xffff;
232 unsigned int cfud; 235 unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
239 snd_soc_write(codec, STA32X_CFUD, cfud); 242 snd_soc_write(codec, STA32X_CFUD, cfud);
240 243
241 snd_soc_write(codec, STA32X_CFADDR2, index); 244 snd_soc_write(codec, STA32X_CFADDR2, index);
245 for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
246 sta32x->coef_shadow[index + i] =
247 (ucontrol->value.bytes.data[3 * i] << 16)
248 | (ucontrol->value.bytes.data[3 * i + 1] << 8)
249 | (ucontrol->value.bytes.data[3 * i + 2]);
242 for (i = 0; i < 3 * numcoef; i++) 250 for (i = 0; i < 3 * numcoef; i++)
243 snd_soc_write(codec, STA32X_B1CF1 + i, 251 snd_soc_write(codec, STA32X_B1CF1 + i,
244 ucontrol->value.bytes.data[i]); 252 ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
252 return 0; 260 return 0;
253} 261}
254 262
263int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
264{
265 struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
266 unsigned int cfud;
267 int i;
268
269 /* preserve reserved bits in STA32X_CFUD */
270 cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
271
272 for (i = 0; i < STA32X_COEF_COUNT; i++) {
273 snd_soc_write(codec, STA32X_CFADDR2, i);
274 snd_soc_write(codec, STA32X_B1CF1,
275 (sta32x->coef_shadow[i] >> 16) & 0xff);
276 snd_soc_write(codec, STA32X_B1CF2,
277 (sta32x->coef_shadow[i] >> 8) & 0xff);
278 snd_soc_write(codec, STA32X_B1CF3,
279 (sta32x->coef_shadow[i]) & 0xff);
280 /* chip documentation does not say if the bits are
281 * self-clearing, so do it explicitly */
282 snd_soc_write(codec, STA32X_CFUD, cfud);
283 snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
284 }
285 return 0;
286}
287
288int sta32x_cache_sync(struct snd_soc_codec *codec)
289{
290 unsigned int mute;
291 int rc;
292
293 if (!codec->cache_sync)
294 return 0;
295
296 /* mute during register sync */
297 mute = snd_soc_read(codec, STA32X_MMUTE);
298 snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
299 sta32x_sync_coef_shadow(codec);
300 rc = snd_soc_cache_sync(codec);
301 snd_soc_write(codec, STA32X_MMUTE, mute);
302 return rc;
303}
304
255#define SINGLE_COEF(xname, index) \ 305#define SINGLE_COEF(xname, index) \
256{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 306{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
257 .info = sta32x_coefficient_info, \ 307 .info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
661 return ret; 711 return ret;
662 } 712 }
663 713
664 snd_soc_cache_sync(codec); 714 sta32x_cache_sync(codec);
665 } 715 }
666 716
667 /* Power up to mute */ 717 /* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
790 STA32X_CxCFG_OM_MASK, 840 STA32X_CxCFG_OM_MASK,
791 2 << STA32X_CxCFG_OM_SHIFT); 841 2 << STA32X_CxCFG_OM_SHIFT);
792 842
843 /* initialize coefficient shadow RAM with reset values */
844 for (i = 4; i <= 49; i += 5)
845 sta32x->coef_shadow[i] = 0x400000;
846 for (i = 50; i <= 54; i++)
847 sta32x->coef_shadow[i] = 0x7fffff;
848 sta32x->coef_shadow[55] = 0x5a9df7;
849 sta32x->coef_shadow[56] = 0x7fffff;
850 sta32x->coef_shadow[59] = 0x7fffff;
851 sta32x->coef_shadow[60] = 0x400000;
852 sta32x->coef_shadow[61] = 0x400000;
853
793 sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 854 sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
794 /* Bias level configuration will have done an extra enable */ 855 /* Bias level configuration will have done an extra enable */
795 regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies); 856 regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
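
The sta32x changes above add a driver-side shadow of the DSP coefficient RAM: the hardware copy is write-only and lost on power-down, so every write is mirrored and replayed on resume (with outputs muted) before the normal register cache sync. A userspace sketch of that shadow-and-replay idea, with a fake device_write() standing in for the I2C accesses:

#include <stdint.h>
#include <stdio.h>

#define COEF_COUNT 62

static uint32_t coef_shadow[COEF_COUNT];

static void device_write(int addr, uint32_t val)  /* stand-in for the codec write */
{
    printf("write coef[%d] = 0x%06x\n", addr, val);
}

static void coef_write(int addr, uint32_t val)
{
    coef_shadow[addr] = val;      /* remember what the write-only RAM holds */
    device_write(addr, val);
}

static void coef_resync(void)     /* called on resume, outputs muted */
{
    for (int i = 0; i < COEF_COUNT; i++)
        device_write(i, coef_shadow[i]);
}

int main(void)
{
    coef_write(55, 0x5a9df7);
    coef_resync();
    return 0;
}
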
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index b97ee5a7566..d8e32a6262e 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -19,6 +19,7 @@
19/* STA326 register addresses */ 19/* STA326 register addresses */
20 20
21#define STA32X_REGISTER_COUNT 0x2d 21#define STA32X_REGISTER_COUNT 0x2d
22#define STA32X_COEF_COUNT 62
22 23
23#define STA32X_CONFA 0x00 24#define STA32X_CONFA 0x00
24#define STA32X_CONFB 0x01 25#define STA32X_CONFB 0x01
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7e5ec03f6f8..a7c9ae17fc7 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
453 snd_soc_write(codec, WM8731_PWR, 0xffff); 453 snd_soc_write(codec, WM8731_PWR, 0xffff);
454 regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), 454 regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
455 wm8731->supplies); 455 wm8731->supplies);
456 codec->cache_sync = 1;
456 break; 457 break;
457 } 458 }
458 codec->dapm.bias_level = level; 459 codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a9504710bb6..3a629d0d690 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
190 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 190 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
191 u16 ioctl; 191 u16 ioctl;
192 192
193 if (wm8753->dai_func == ucontrol->value.integer.value[0])
194 return 0;
195
193 if (codec->active) 196 if (codec->active)
194 return -EBUSY; 197 return -EBUSY;
195 198
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 91d3c6dbeba..53edd9a8c75 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
1973static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0); 1973static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
1974static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0); 1974static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
1975static const unsigned int mixinpga_tlv[] = { 1975static const unsigned int mixinpga_tlv[] = {
1976 TLV_DB_RANGE_HEAD(7), 1976 TLV_DB_RANGE_HEAD(5),
1977 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), 1977 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
1978 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0), 1978 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
1979 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0), 1979 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
1988static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); 1988static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
1989static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0); 1989static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
1990static const unsigned int classd_tlv[] = { 1990static const unsigned int classd_tlv[] = {
1991 TLV_DB_RANGE_HEAD(7), 1991 TLV_DB_RANGE_HEAD(2),
1992 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 1992 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
1993 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 1993 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
1994}; 1994};
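The TLV changes in this and the following codec files all correct the same mistake: the argument to TLV_DB_RANGE_HEAD() must equal the number of range entries that follow it, and the old declarations over-stated that count. A correct two-range table, modelled on the classd_tlv entry above, looks like this:

#include <sound/tlv.h>

/* Two ranges, so the head is declared with a count of 2:
 * steps 0-6 run from 0 dB upwards in 1.5 dB increments,
 * step 7 is a fixed +12 dB.
 */
static const unsigned int example_boost_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};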
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index eec8e143511..d1a142f48b0 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
512static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0); 512static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
513static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0); 513static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
514static const unsigned int drc_max_tlv[] = { 514static const unsigned int drc_max_tlv[] = {
515 TLV_DB_RANGE_HEAD(4), 515 TLV_DB_RANGE_HEAD(2),
516 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0), 516 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
517 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0), 517 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
518}; 518};
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3cd35a02c28..4a398c3bfe8 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
807 mdelay(100); 807 mdelay(100);
808 808
809 /* Normal bias enable & soft start off */ 809 /* Normal bias enable & soft start off */
810 reg |= WM9081_BIAS_ENA;
811 reg &= ~WM9081_VMID_RAMP; 810 reg &= ~WM9081_VMID_RAMP;
812 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 811 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
813 812
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
818 } 817 }
819 818
820 /* VMID 2*240k */ 819 /* VMID 2*240k */
821 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); 820 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
822 reg &= ~WM9081_VMID_SEL_MASK; 821 reg &= ~WM9081_VMID_SEL_MASK;
823 reg |= 0x04; 822 reg |= 0x04;
824 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 823 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
830 break; 829 break;
831 830
832 case SND_SOC_BIAS_OFF: 831 case SND_SOC_BIAS_OFF:
833 /* Startup bias source */ 832 /* Startup bias source and disable bias */
834 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); 833 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
835 reg |= WM9081_BIAS_SRC; 834 reg |= WM9081_BIAS_SRC;
835 reg &= ~WM9081_BIAS_ENA;
836 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg); 836 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
837 837
838 /* Disable VMID and biases with soft ramping */ 838 /* Disable VMID with soft ramping */
839 reg = snd_soc_read(codec, WM9081_VMID_CONTROL); 839 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
840 reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA); 840 reg &= ~WM9081_VMID_SEL_MASK;
841 reg |= WM9081_VMID_RAMP; 841 reg |= WM9081_VMID_RAMP;
842 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 842 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
843 843
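The wm9081 hunks fix the power-down path so that the bias enable bit is cleared in BIAS_CONTROL_1 rather than being dragged into the VMID ramp write, and fix a read of the wrong register in the "VMID 2*240k" step. Each step is a read-modify-write of a single register; the same sequences could also be expressed with snd_soc_update_bits(), as in this sketch (register and bit names come from the hunks above, but this is an illustration, not what the driver does):

/* Sketch of the BIAS_OFF sequence using snd_soc_update_bits(). */
#include <sound/soc.h>
#include "wm9081.h"

static void example_power_down(struct snd_soc_codec *codec)
{
	/* switch to the start-up bias source and turn the bias off */
	snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1,
			    WM9081_BIAS_SRC | WM9081_BIAS_ENA,
			    WM9081_BIAS_SRC);

	/* disable VMID with a soft ramp */
	snd_soc_update_bits(codec, WM9081_VMID_CONTROL,
			    WM9081_VMID_SEL_MASK | WM9081_VMID_RAMP,
			    WM9081_VMID_RAMP);
}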
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 2b5252c9e37..f94c06057c6 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
177} 177}
178 178
179static const unsigned int in_tlv[] = { 179static const unsigned int in_tlv[] = {
180 TLV_DB_RANGE_HEAD(6), 180 TLV_DB_RANGE_HEAD(3),
181 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0), 181 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
182 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0), 182 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
183 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0), 183 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
184}; 184};
185static const unsigned int mix_tlv[] = { 185static const unsigned int mix_tlv[] = {
186 TLV_DB_RANGE_HEAD(4), 186 TLV_DB_RANGE_HEAD(2),
187 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0), 187 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
188 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0), 188 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
189}; 189};
190static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); 190static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
191static const unsigned int spkboost_tlv[] = { 191static const unsigned int spkboost_tlv[] = {
192 TLV_DB_RANGE_HEAD(7), 192 TLV_DB_RANGE_HEAD(2),
193 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 193 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
194 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 194 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
195}; 195};
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 84f33d4ea2c..48e61e91240 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
40static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1); 40static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
41static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0); 41static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
42static const unsigned int spkboost_tlv[] = { 42static const unsigned int spkboost_tlv[] = {
43 TLV_DB_RANGE_HEAD(7), 43 TLV_DB_RANGE_HEAD(2),
44 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 44 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
45 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 45 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
46}; 46};
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0268cf98973..83c4bd5b2dd 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
694 694
695 /* Initialize the device_attribute structure */ 695 /* Initialize the device_attribute structure */

696 dev_attr = &ssi_private->dev_attr; 696 dev_attr = &ssi_private->dev_attr;
697 sysfs_attr_init(&dev_attr->attr);
697 dev_attr->attr.name = "statistics"; 698 dev_attr->attr.name = "statistics";
698 dev_attr->attr.mode = S_IRUGO; 699 dev_attr->attr.mode = S_IRUGO;
699 dev_attr->show = fsl_sysfs_ssi_show; 700 dev_attr->show = fsl_sysfs_ssi_show;
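The one-line fsl_ssi fix calls sysfs_attr_init() on the dynamically built device attribute so that lockdep gets a valid lock class key for it; statically declared attributes get that key from DEVICE_ATTR(), runtime-built ones do not. The usual pattern for such an attribute looks roughly like this (the names here are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

/* Sketch: registering a device attribute that is filled in at probe time. */
static int example_create_stats_file(struct device *dev,
				     struct device_attribute *attr,
				     ssize_t (*show)(struct device *,
						     struct device_attribute *,
						     char *))
{
	sysfs_attr_init(&attr->attr);
	attr->attr.name = "statistics";
	attr->attr.mode = S_IRUGO;
	attr->show = show;
	attr->store = NULL;

	return device_create_file(dev, attr);
}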
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 9c0edad90d8..a4e3237956e 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
365 if (ret) 365 if (ret)
366 goto out3; 366 goto out3;
367 367
368 mfp_set_groupg(nuc900_audio->dev); /* enable ac97 multifunction pin*/ 368 /* enable ac97 multifunction pin */
369 mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
369 370
370 return 0; 371 return 0;
371 372
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 30e2befd6f2..8b4c2535b26 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -747,6 +747,18 @@ sub __eval_option {
747 # Add space to evaluate the character before $ 747 # Add space to evaluate the character before $
748 $option = " $option"; 748 $option = " $option";
749 my $retval = ""; 749 my $retval = "";
750 my $repeated = 0;
751 my $parent = 0;
752
753 foreach my $test (keys %repeat_tests) {
754 if ($i >= $test &&
755 $i < $test + $repeat_tests{$test}) {
756
757 $repeated = 1;
758 $parent = $test;
759 last;
760 }
761 }
750 762
751 while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { 763 while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
752 my $start = $1; 764 my $start = $1;
@@ -760,10 +772,14 @@ sub __eval_option {
760 # otherwise see if the default OPT (without [$i]) exists. 772 # otherwise see if the default OPT (without [$i]) exists.
761 773
762 my $o = "$var\[$i\]"; 774 my $o = "$var\[$i\]";
775 my $parento = "$var\[$parent\]";
763 776
764 if (defined($opt{$o})) { 777 if (defined($opt{$o})) {
765 $o = $opt{$o}; 778 $o = $opt{$o};
766 $retval = "$retval$o"; 779 $retval = "$retval$o";
780 } elsif ($repeated && defined($opt{$parento})) {
781 $o = $opt{$parento};
782 $retval = "$retval$o";
767 } elsif (defined($opt{$var})) { 783 } elsif (defined($opt{$var})) {
768 $o = $opt{$var}; 784 $o = $opt{$var};
769 $retval = "$retval$o"; 785 $retval = "$retval$o";