-rw-r--r--Documentation/DocBook/media/Makefile4
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/ti/apll.txt24
-rw-r--r--Documentation/devicetree/bindings/clock/ti/dpll.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/ti/dra7-atl.txt96
-rw-r--r--Documentation/devicetree/bindings/clock/ti/gate.txt29
-rw-r--r--Documentation/devicetree/bindings/clock/ti/interface.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.txt42
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt41
-rw-r--r--Documentation/hwmon/shtc143
-rw-r--r--Documentation/kbuild/makefiles.txt2
-rw-r--r--Documentation/kernel-parameters.txt14
-rw-r--r--Documentation/thermal/nouveau_thermal7
-rw-r--r--Documentation/vDSO/parse_vdso.c67
-rw-r--r--Documentation/vDSO/vdso_standalone_test_x86.c128
-rw-r--r--Documentation/vDSO/vdso_test.c107
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile4
-rw-r--r--arch/arm/Kconfig20
-rw-r--r--arch/arm/boot/dts/armada-375-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-385-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts2
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi24
-rw-r--r--arch/arm/boot/dts/omap54xx-clocks.dtsi2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/include/asm/ftrace.h2
-rw-r--r--arch/arm/mach-bcm/Kconfig7
-rw-r--r--arch/arm/mach-berlin/Kconfig6
-rw-r--r--arch/arm/mach-cns3xxx/Kconfig7
-rw-r--r--arch/arm/mach-davinci/Kconfig1
-rw-r--r--arch/arm/mach-exynos/Kconfig7
-rw-r--r--arch/arm/mach-exynos/common.h1
-rw-r--r--arch/arm/mach-exynos/exynos.c31
-rw-r--r--arch/arm/mach-exynos/platsmp.c26
-rw-r--r--arch/arm/mach-highbank/Kconfig1
-rw-r--r--arch/arm/mach-imx/Kconfig10
-rw-r--r--arch/arm/mach-integrator/Kconfig2
-rw-r--r--arch/arm/mach-integrator/impd1.c12
-rw-r--r--arch/arm/mach-keystone/Kconfig1
-rw-r--r--arch/arm/mach-moxart/Kconfig2
-rw-r--r--arch/arm/mach-mvebu/Kconfig7
-rw-r--r--arch/arm/mach-nomadik/Kconfig4
-rw-r--r--arch/arm/mach-omap2/Kconfig7
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c53
-rw-r--r--arch/arm/mach-omap2/clock.h13
-rw-r--r--arch/arm/mach-omap2/clock2xxx.h4
-rw-r--r--arch/arm/mach-omap2/common.h7
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c9
-rw-r--r--arch/arm/mach-prima2/Kconfig6
-rw-r--r--arch/arm/mach-qcom/Kconfig6
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig4
-rw-r--r--arch/arm/mach-s5p64x0/Kconfig6
-rw-r--r--arch/arm/mach-s5pc100/Kconfig3
-rw-r--r--arch/arm/mach-s5pv210/Kconfig3
-rw-r--r--arch/arm/mach-shmobile/Kconfig6
-rw-r--r--arch/arm/mach-spear/Kconfig1
-rw-r--r--arch/arm/mach-sti/Kconfig2
-rw-r--r--arch/arm/mach-tegra/Kconfig8
-rw-r--r--arch/arm/mach-u300/Kconfig6
-rw-r--r--arch/arm/mach-ux500/Kconfig7
-rw-r--r--arch/arm/mach-vexpress/Kconfig8
-rw-r--r--arch/arm/mach-vt8500/Kconfig1
-rw-r--r--arch/arm/mach-zynq/Kconfig1
-rw-r--r--arch/arm/plat-samsung/Kconfig28
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/boot/dts/apm-mustang.dts4
-rw-r--r--arch/arm64/boot/dts/apm-storm.dtsi36
-rw-r--r--arch/arm64/configs/defconfig15
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S92
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c5
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/dma-mapping.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/include/uapi/asm/posix_types.h10
-rw-r--r--arch/arm64/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/arm64/kernel/entry-ftrace.S2
-rw-r--r--arch/arm64/kernel/entry.S1
-rw-r--r--arch/arm64/kernel/ptrace.c32
-rw-r--r--arch/arm64/mm/init.c10
-rw-r--r--arch/ia64/hp/common/sba_iommu.c64
-rw-r--r--arch/s390/configs/default_defconfig5
-rw-r--r--arch/s390/configs/gcov_defconfig5
-rw-r--r--arch/s390/configs/performance_defconfig5
-rw-r--r--arch/s390/configs/zfcpdump_defconfig3
-rw-r--r--arch/s390/defconfig8
-rw-r--r--arch/s390/include/asm/mmu_context.h33
-rw-r--r--arch/s390/include/asm/switch_to.h4
-rw-r--r--arch/s390/include/uapi/asm/ucontext.h8
-rw-r--r--arch/s390/kernel/compat_linux.h4
-rw-r--r--arch/sparc/crypto/aes_glue.c6
-rw-r--r--arch/sparc/include/asm/atomic_32.h8
-rw-r--r--arch/sparc/include/asm/atomic_64.h18
-rw-r--r--arch/sparc/include/asm/auxio.h7
-rw-r--r--arch/sparc/include/asm/auxio_32.h6
-rw-r--r--arch/sparc/include/asm/auxio_64.h6
-rw-r--r--arch/sparc/include/asm/bitext.h6
-rw-r--r--arch/sparc/include/asm/bitops_32.h6
-rw-r--r--arch/sparc/include/asm/bitops_64.h24
-rw-r--r--arch/sparc/include/asm/btext.h2
-rw-r--r--arch/sparc/include/asm/bug.h4
-rw-r--r--arch/sparc/include/asm/cacheflush_32.h8
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h24
-rw-r--r--arch/sparc/include/asm/checksum_32.h4
-rw-r--r--arch/sparc/include/asm/checksum_64.h32
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h6
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h4
-rw-r--r--arch/sparc/include/asm/cpudata.h10
-rw-r--r--arch/sparc/include/asm/cpudata_64.h5
-rw-r--r--arch/sparc/include/asm/delay_32.h4
-rw-r--r--arch/sparc/include/asm/delay_64.h4
-rw-r--r--arch/sparc/include/asm/device.h2
-rw-r--r--arch/sparc/include/asm/dma-mapping.h2
-rw-r--r--arch/sparc/include/asm/ebus_dma.h16
-rw-r--r--arch/sparc/include/asm/floppy_32.h14
-rw-r--r--arch/sparc/include/asm/floppy_64.h2
-rw-r--r--arch/sparc/include/asm/ftrace.h6
-rw-r--r--arch/sparc/include/asm/highmem.h10
-rw-r--r--arch/sparc/include/asm/hvtramp.h2
-rw-r--r--arch/sparc/include/asm/hypervisor.h325
-rw-r--r--arch/sparc/include/asm/idprom.h2
-rw-r--r--arch/sparc/include/asm/io-unit.h2
-rw-r--r--arch/sparc/include/asm/io_32.h299
-rw-r--r--arch/sparc/include/asm/io_64.h21
-rw-r--r--arch/sparc/include/asm/iommu_32.h10
-rw-r--r--arch/sparc/include/asm/iommu_64.h6
-rw-r--r--arch/sparc/include/asm/irq_32.h3
-rw-r--r--arch/sparc/include/asm/irq_64.h44
-rw-r--r--arch/sparc/include/asm/irqflags_32.h6
-rw-r--r--arch/sparc/include/asm/kdebug_64.h2
-rw-r--r--arch/sparc/include/asm/kgdb.h5
-rw-r--r--arch/sparc/include/asm/kprobes.h8
-rw-r--r--arch/sparc/include/asm/ldc.h66
-rw-r--r--arch/sparc/include/asm/leon.h54
-rw-r--r--arch/sparc/include/asm/leon_pci.h4
-rw-r--r--arch/sparc/include/asm/mc146818rtc.h5
-rw-r--r--arch/sparc/include/asm/mdesc.h32
-rw-r--r--arch/sparc/include/asm/mmu_64.h6
-rw-r--r--arch/sparc/include/asm/mmu_context_64.h24
-rw-r--r--arch/sparc/include/asm/nmi.h10
-rw-r--r--arch/sparc/include/asm/oplib_32.h68
-rw-r--r--arch/sparc/include/asm/oplib_64.h112
-rw-r--r--arch/sparc/include/asm/page.h3
-rw-r--r--arch/sparc/include/asm/page_64.h8
-rw-r--r--arch/sparc/include/asm/pci_64.h14
-rw-r--r--arch/sparc/include/asm/pcic.h8
-rw-r--r--arch/sparc/include/asm/pcr.h6
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h2
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h16
-rw-r--r--arch/sparc/include/asm/pgtable_32.h11
-rw-r--r--arch/sparc/include/asm/pgtable_64.h58
-rw-r--r--arch/sparc/include/asm/processor_32.h5
-rw-r--r--arch/sparc/include/asm/processor_64.h6
-rw-r--r--arch/sparc/include/asm/prom.h24
-rw-r--r--arch/sparc/include/asm/ptrace.h2
-rw-r--r--arch/sparc/include/asm/setup.h39
-rw-r--r--arch/sparc/include/asm/sfp-machine_32.h28
-rw-r--r--arch/sparc/include/asm/smp_32.h6
-rw-r--r--arch/sparc/include/asm/smp_64.h24
-rw-r--r--arch/sparc/include/asm/spitfire.h2
-rw-r--r--arch/sparc/include/asm/stacktrace.h2
-rw-r--r--arch/sparc/include/asm/starfire.h8
-rw-r--r--arch/sparc/include/asm/string_32.h12
-rw-r--r--arch/sparc/include/asm/string_64.h12
-rw-r--r--arch/sparc/include/asm/switch_to_32.h6
-rw-r--r--arch/sparc/include/asm/switch_to_64.h4
-rw-r--r--arch/sparc/include/asm/syscalls.h8
-rw-r--r--arch/sparc/include/asm/timer_32.h6
-rw-r--r--arch/sparc/include/asm/timer_64.h6
-rw-r--r--arch/sparc/include/asm/tlb_64.h8
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h22
-rw-r--r--arch/sparc/include/asm/topology_64.h2
-rw-r--r--arch/sparc/include/asm/trap_block.h6
-rw-r--r--arch/sparc/include/asm/uaccess.h2
-rw-r--r--arch/sparc/include/asm/uaccess_32.h14
-rw-r--r--arch/sparc/include/asm/uaccess_64.h50
-rw-r--r--arch/sparc/include/asm/vio.h36
-rw-r--r--arch/sparc/include/asm/visasm.h3
-rw-r--r--arch/sparc/include/asm/xor_64.h28
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/audit.c8
-rw-r--r--arch/sparc/kernel/auxio_32.c9
-rw-r--r--arch/sparc/kernel/btext.c2
-rw-r--r--arch/sparc/kernel/compat_audit.c1
-rw-r--r--arch/sparc/kernel/cpu.c1
-rw-r--r--arch/sparc/kernel/cpumap.h4
-rw-r--r--arch/sparc/kernel/devices.c12
-rw-r--r--arch/sparc/kernel/entry.h259
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/iommu_common.h14
-rw-r--r--arch/sparc/kernel/ioport.c6
-rw-r--r--arch/sparc/kernel/irq.h11
-rw-r--r--arch/sparc/kernel/irq_32.c1
-rw-r--r--arch/sparc/kernel/kernel.h124
-rw-r--r--arch/sparc/kernel/kgdb_64.c2
-rw-r--r--arch/sparc/kernel/kprobes.c5
-rw-r--r--arch/sparc/kernel/leon_kernel.c10
-rw-r--r--arch/sparc/kernel/leon_pci.c79
-rw-r--r--arch/sparc/kernel/leon_pci_grpci1.c16
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c22
-rw-r--r--arch/sparc/kernel/leon_pmc.c8
-rw-r--r--arch/sparc/kernel/leon_smp.c13
-rw-r--r--arch/sparc/kernel/of_device_common.c4
-rw-r--r--arch/sparc/kernel/pci.c1
-rw-r--r--arch/sparc/kernel/pci_impl.h30
-rw-r--r--arch/sparc/kernel/pci_sun4v.h156
-rw-r--r--arch/sparc/kernel/pcic.c116
-rw-r--r--arch/sparc/kernel/perf_event.c23
-rw-r--r--arch/sparc/kernel/process_32.c12
-rw-r--r--arch/sparc/kernel/process_64.c2
-rw-r--r--arch/sparc/kernel/prom.h2
-rw-r--r--arch/sparc/kernel/prom_64.c5
-rw-r--r--arch/sparc/kernel/psycho_common.h22
-rw-r--r--arch/sparc/kernel/ptrace_32.c2
-rw-r--r--arch/sparc/kernel/setup_32.c4
-rw-r--r--arch/sparc/kernel/signal32.c56
-rw-r--r--arch/sparc/kernel/signal_32.c11
-rw-r--r--arch/sparc/kernel/signal_64.c6
-rw-r--r--arch/sparc/kernel/smp_32.c13
-rw-r--r--arch/sparc/kernel/smp_64.c16
-rw-r--r--arch/sparc/kernel/sun4d_irq.c17
-rw-r--r--arch/sparc/kernel/sys_sparc32.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c10
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c1
-rw-r--r--arch/sparc/kernel/systbls.h124
-rw-r--r--arch/sparc/kernel/tadpole.c126
-rw-r--r--arch/sparc/kernel/time_32.c8
-rw-r--r--arch/sparc/kernel/traps_32.c4
-rw-r--r--arch/sparc/kernel/traps_64.c11
-rw-r--r--arch/sparc/kernel/unaligned_32.c4
-rw-r--r--arch/sparc/kernel/unaligned_64.c2
-rw-r--r--arch/sparc/kernel/windows.c3
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/math-emu/sfp-util_32.h20
-rw-r--r--arch/sparc/math-emu/sfp-util_64.h12
-rw-r--r--arch/sparc/mm/fault_32.c9
-rw-r--r--arch/sparc/mm/fault_64.c4
-rw-r--r--arch/sparc/mm/init_32.c7
-rw-r--r--arch/sparc/mm/init_64.c9
-rw-r--r--arch/sparc/mm/init_64.h4
-rw-r--r--arch/sparc/mm/io-unit.c21
-rw-r--r--arch/sparc/mm/iommu.c25
-rw-r--r--arch/sparc/mm/leon_mm.c4
-rw-r--r--arch/sparc/mm/mm_32.h24
-rw-r--r--arch/sparc/mm/srmmu.c13
-rw-r--r--arch/sparc/mm/srmmu.h4
-rw-r--r--arch/sparc/mm/tsb.c1
-rw-r--r--arch/sparc/prom/misc_64.c5
-rw-r--r--arch/unicore32/Kconfig6
-rw-r--r--arch/unicore32/include/asm/io.h27
-rw-r--r--arch/unicore32/include/asm/pgtable.h10
-rw-r--r--arch/unicore32/include/asm/ptrace.h1
-rw-r--r--arch/unicore32/kernel/clock.c8
-rw-r--r--arch/unicore32/kernel/ksyms.c41
-rw-r--r--arch/unicore32/kernel/ksyms.h2
-rw-r--r--arch/unicore32/kernel/module.c11
-rw-r--r--arch/unicore32/kernel/process.c1
-rw-r--r--arch/unicore32/kernel/setup.c4
-rw-r--r--arch/unicore32/mm/alignment.c1
-rw-r--r--arch/unicore32/mm/proc-syms.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/compressed/aslr.c9
-rw-r--r--arch/x86/vdso/Makefile40
-rw-r--r--arch/x86/vdso/vdso-fakesections.c32
-rw-r--r--arch/x86/vdso/vdso2c.c19
-rw-r--r--arch/x86/vdso/vdso2c.h23
-rw-r--r--arch/x86/xen/enlighten.c5
-rw-r--r--arch/x86/xen/setup.c60
-rw-r--r--arch/x86/xen/xen-ops.h1
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-flush.c38
-rw-r--r--block/blk-mq-tag.c59
-rw-r--r--block/blk-mq-tag.h2
-rw-r--r--block/blk-mq.c11
-rw-r--r--block/blk.h1
-rw-r--r--block/elevator.c20
-rw-r--r--drivers/acpi/acpi_lpss.c15
-rw-r--r--drivers/acpi/battery.c39
-rw-r--r--drivers/acpi/osl.c3
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/block/null_blk.c7
-rw-r--r--drivers/block/nvme-core.c203
-rw-r--r--drivers/block/nvme-scsi.c36
-rw-r--r--drivers/block/rbd.c242
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/clk/sunxi/Makefile4
-rw-r--r--drivers/clk/sunxi/clk-a10-hosc.c73
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c119
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c99
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c77
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c233
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c239
-rw-r--r--drivers/clk/ti/Makefile4
-rw-r--r--drivers/clk/ti/apll.c181
-rw-r--r--drivers/clk/ti/clk-2xxx.c256
-rw-r--r--drivers/clk/ti/clk-54xx.c6
-rw-r--r--drivers/clk/ti/clk-7xx.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c312
-rw-r--r--drivers/clk/ti/dpll.c138
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/interface.c11
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpuidle/cpuidle-armada-370-xp.c4
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c47
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c18
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c5
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c23
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc18
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h460
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h460
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c118
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c22
-rw-r--r--drivers/hsi/clients/Kconfig2
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c4
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/atxp1.c42
-rw-r--r--drivers/hwmon/ina2xx.c7
-rw-r--r--drivers/hwmon/lm85.c33
-rw-r--r--drivers/hwmon/ltc4151.c51
-rw-r--r--drivers/hwmon/shtc1.c251
-rw-r--r--drivers/hwmon/vexpress.c82
-rw-r--r--drivers/i2c/busses/Kconfig23
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c763
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c345
-rw-r--r--drivers/iio/adc/at91_adc.c16
-rw-r--r--drivers/iio/adc/men_z188_adc.c4
-rw-r--r--drivers/iio/adc/twl4030-madc.c1
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c3
-rw-r--r--drivers/iio/magnetometer/ak8975.c9
-rw-r--r--drivers/iio/pressure/mpl3115.c6
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/omap3isp/Makefile2
-rw-r--r--drivers/media/platform/omap3isp/isp.c108
-rw-r--r--drivers/media/platform/omap3isp/isp.h8
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c107
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h16
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c4
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c4
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c8
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c1161
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.h188
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c8
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c197
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c325
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h29
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c24
-rw-r--r--drivers/misc/vexpress-syscfg.c12
-rw-r--r--drivers/misc/vmw_balloon.c3
-rw-r--r--drivers/net/vxlan.c18
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/palmas-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/vmwatchdog.c338
-rw-r--r--drivers/s390/cio/airq.c13
-rw-r--r--drivers/s390/cio/ccwgroup.c28
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/device.c71
-rw-r--r--drivers/s390/cio/qdio_debug.c79
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/cio/qdio_main.c16
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/scsi/hpsa.c266
-rw-r--r--drivers/scsi/hpsa.h42
-rw-r--r--drivers/scsi/hpsa_cmd.h49
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c258
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c297
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h58
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h16
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h16
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c2
-rw-r--r--drivers/staging/android/timed_output.c1
-rw-r--r--drivers/staging/comedi/Kconfig1
-rw-r--r--drivers/staging/iio/Kconfig9
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c12
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/imx-drm/parallel-display.c7
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c4
-rw-r--r--drivers/tty/n_tty.c19
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c5
-rw-r--r--drivers/tty/serial/altera_uart.c6
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/bfin_uart.c2
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/earlycon.c2
-rw-r--r--drivers/tty/serial/efm32-uart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/mfd.c2
-rw-r--r--drivers/tty/serial/mpsc.c2
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/netx-serial.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/samsung.c2
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sccnxp.c2
-rw-r--r--drivers/tty/serial/serial_ks8695.c2
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c2
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c2
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/vt/vt.c24
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/core/hub.c33
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/port.c89
-rw-r--r--drivers/usb/host/pci-quirks.c19
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/misc/usbtest.c16
-rw-r--r--drivers/video/console/dummycon.c1
-rw-r--r--drivers/video/console/vgacon.c1
-rw-r--r--drivers/video/fbdev/offb.c11
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/diag288_wdt.c316
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--fs/aio.c70
-rw-r--r--fs/btrfs/ctree.h13
-rw-r--r--fs/btrfs/extent-tree.c143
-rw-r--r--fs/btrfs/extent_io.c39
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/extent_map.h1
-rw-r--r--fs/btrfs/free-space-cache.c192
-rw-r--r--fs/btrfs/inode.c41
-rw-r--r--fs/btrfs/ioctl.c147
-rw-r--r--fs/btrfs/locking.c80
-rw-r--r--fs/btrfs/qgroup.c4
-rw-r--r--fs/btrfs/reada.c9
-rw-r--r--fs/btrfs/scrub.c19
-rw-r--r--fs/btrfs/tests/btrfs-tests.c2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c2
-rw-r--r--fs/btrfs/transaction.c12
-rw-r--r--fs/btrfs/volumes.c36
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/ceph/acl.c6
-rw-r--r--fs/ceph/addr.c17
-rw-r--r--fs/ceph/caps.c246
-rw-r--r--fs/ceph/export.c2
-rw-r--r--fs/ceph/inode.c247
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h13
-rw-r--r--fs/dlm/lowcomms.c5
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/locks.c2
-rw-r--r--fs/nfsd/nfs4state.c78
-rw-r--r--fs/nfsd/nfs4xdr.c1
-rw-r--r--include/acpi/processor.h10
-rw-r--r--include/drm/i915_powerwell.h4
-rw-r--r--include/dt-bindings/clk/ti-dra7-atl.h40
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/ceph/ceph_fs.h2
-rw-r--r--include/linux/ceph/mon_client.h11
-rw-r--r--include/linux/clk/ti.h35
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/netdev_features.h1
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/nvme.h14
-rw-r--r--include/linux/platform_data/shtc1.h23
-rw-r--r--include/linux/profile.h1
-rw-r--r--include/linux/regulator/consumer.h5
-rw-r--r--include/linux/skbuff.h23
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/media/videobuf2-core.h1
-rw-r--r--include/net/udp.h4
-rw-r--r--include/sound/core.h2
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/uapi/linux/btrfs.h10
-rw-r--r--include/uapi/linux/nvme.h50
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--kernel/locking/rtmutex-debug.h5
-rw-r--r--kernel/locking/rtmutex.c243
-rw-r--r--kernel/locking/rtmutex.h5
-rw-r--r--kernel/power/hibernate.c37
-rw-r--r--kernel/power/main.c6
-rw-r--r--kernel/power/user.c3
-rw-r--r--kernel/sysctl.c4
-rw-r--r--mm/page_io.c2
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/debugfs.c8
-rw-r--r--net/ceph/mon_client.c150
-rw-r--r--net/core/datagram.c36
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/sctp/sysctl.c32
-rw-r--r--scripts/package/builddeb14
-rw-r--r--scripts/package/buildtar3
-rw-r--r--security/integrity/evm/Kconfig42
-rw-r--r--security/integrity/evm/evm.h5
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/evm/evm_main.c29
-rw-r--r--security/integrity/ima/ima_appraise.c10
-rw-r--r--security/integrity/ima/ima_crypto.c32
-rw-r--r--security/integrity/ima/ima_main.c22
-rw-r--r--sound/core/control.c78
-rw-r--r--sound/core/init.c1
-rw-r--r--sound/core/seq/seq_clientmgr.c36
-rw-r--r--sound/core/seq/seq_fifo.c2
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/firewire/bebob/bebob.h2
-rw-r--r--sound/firewire/bebob/bebob_stream.c4
-rw-r--r--sound/firewire/fireworks/fireworks.c1
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c2
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c4
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c18
-rw-r--r--sound/pci/hda/hda_i915.c12
-rw-r--r--sound/pci/hda/hda_i915.h4
-rw-r--r--sound/pci/hda/hda_intel.c21
-rw-r--r--sound/pci/hda/patch_hdmi.c10
-rw-r--r--sound/pci/hda/patch_realtek.c91
-rw-r--r--sound/pci/intel8x0.c10
-rw-r--r--sound/soc/codecs/Kconfig12
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/sigmadsp-i2c.c35
-rw-r--r--sound/soc/codecs/sigmadsp-regmap.c36
-rw-r--r--sound/soc/codecs/sigmadsp.c65
-rw-r--r--sound/soc/codecs/sigmadsp.h20
-rw-r--r--sound/soc/fsl/fsl_dma.c4
-rw-r--r--sound/soc/fsl/fsl_spdif.c6
-rw-r--r--sound/soc/pxa/Kconfig11
-rw-r--r--sound/soc/sh/rcar/core.c2
-rw-r--r--sound/soc/soc-dapm.c29
-rw-r--r--tools/perf/ui/browsers/hists.c21
-rw-r--r--tools/perf/util/machine.c54
617 files changed, 11626 insertions, 7454 deletions
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index 1d27f0a1abd1..639e74857968 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -202,8 +202,8 @@ $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 
 $(MEDIA_OBJ_DIR)/v4l2.xml: $(OBJIMGFILES)
 	@$($(quiet)gen_xml)
-	@(ln -sf $(MEDIA_SRC_DIR)/v4l/*xml $(MEDIA_OBJ_DIR)/)
-	@(ln -sf $(MEDIA_SRC_DIR)/dvb/*xml $(MEDIA_OBJ_DIR)/)
+	@(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/v4l/*xml $(MEDIA_OBJ_DIR)/)
+	@(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/dvb/*xml $(MEDIA_OBJ_DIR)/)
 
 $(MEDIA_OBJ_DIR)/videodev2.h.xml: $(srctree)/include/uapi/linux/videodev2.h $(MEDIA_OBJ_DIR)/v4l2.xml
 	@$($(quiet)gen_xml)
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index a5160d8cbb5f..b9ec668bfe62 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -20,12 +20,15 @@ Required properties:
 	"allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
 	"allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
 	"allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
+	"allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
 	"allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
 	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
 	"allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
+	"allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
 	"allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
 	"allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
 	"allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
+	"allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
 	"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
 	"allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
 	"allwinner,sun4i-a10-apb1-mux-clk" - for the APB1 clock muxing
@@ -41,6 +44,7 @@ Required properties:
 	"allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
 	"allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20
 	"allwinner,sun5i-a13-usb-clk" - for usb gates + resets on A13
+	"allwinner,sun6i-a31-usb-clk" - for usb gates + resets on A31
 
 Required properties for all clocks:
 - reg : shall be the control register address for the clock.
diff --git a/Documentation/devicetree/bindings/clock/ti/apll.txt b/Documentation/devicetree/bindings/clock/ti/apll.txt
index 7faf5a68b3be..ade4dd4c30f0 100644
--- a/Documentation/devicetree/bindings/clock/ti/apll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/apll.txt
@@ -14,18 +14,32 @@ a subtype of a DPLL [2], although a simplified one at that.
 [2] Documentation/devicetree/bindings/clock/ti/dpll.txt
 
 Required properties:
-- compatible : shall be "ti,dra7-apll-clock"
+- compatible : shall be "ti,dra7-apll-clock" or "ti,omap2-apll-clock"
 - #clock-cells : from common clock binding; shall be set to 0.
 - clocks : link phandles of parent clocks (clk-ref and clk-bypass)
 - reg : address and length of the register set for controlling the APLL.
   It contains the information of registers in the following order:
-	"control" - contains the control register base address
-	"idlest" - contains the idlest register base address
+	"control" - contains the control register offset
+	"idlest" - contains the idlest register offset
+	"autoidle" - contains the autoidle register offset (OMAP2 only)
+- ti,clock-frequency : static clock frequency for the clock (OMAP2 only)
+- ti,idlest-shift : bit-shift for the idlest field (OMAP2 only)
+- ti,bit-shift : bit-shift for enable and autoidle fields (OMAP2 only)
 
 Examples:
-	apll_pcie_ck: apll_pcie_ck@4a008200 {
+	apll_pcie_ck: apll_pcie_ck {
 		#clock-cells = <0>;
 		clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>;
-		reg = <0x4a00821c 0x4>, <0x4a008220 0x4>;
+		reg = <0x021c>, <0x0220>;
 		compatible = "ti,dra7-apll-clock";
 	};
+
+	apll96_ck: apll96_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap2-apll-clock";
+		clocks = <&sys_ck>;
+		ti,bit-shift = <2>;
+		ti,idlest-shift = <8>;
+		ti,clock-frequency = <96000000>;
+		reg = <0x0500>, <0x0530>, <0x0520>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/ti/dpll.txt b/Documentation/devicetree/bindings/clock/ti/dpll.txt
index 30bfdb7c9f18..df57009ff8e7 100644
--- a/Documentation/devicetree/bindings/clock/ti/dpll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/dpll.txt
@@ -24,12 +24,14 @@ Required properties:
 			"ti,omap4-dpll-core-clock",
 			"ti,omap4-dpll-m4xen-clock",
 			"ti,omap4-dpll-j-type-clock",
+			"ti,omap5-mpu-dpll-clock",
 			"ti,am3-dpll-no-gate-clock",
 			"ti,am3-dpll-j-type-clock",
 			"ti,am3-dpll-no-gate-j-type-clock",
 			"ti,am3-dpll-clock",
 			"ti,am3-dpll-core-clock",
 			"ti,am3-dpll-x2-clock",
+			"ti,omap2-dpll-core-clock",
 
 - #clock-cells : from common clock binding; shall be set to 0.
 - clocks : link phandles of parent clocks, first entry lists reference clock
@@ -41,6 +43,7 @@ Required properties:
 	"mult-div1" - contains the multiplier / divider register base address
 	"autoidle" - contains the autoidle register base address (optional)
   ti,am3-* dpll types do not have autoidle register
+  ti,omap2-* dpll type does not support idlest / autoidle registers
 
 Optional properties:
 - DPLL mode setting - defining any one or more of the following overrides
@@ -73,3 +76,10 @@ Examples:
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x90>, <0x5c>, <0x68>;
 	};
+
+	dpll_ck: dpll_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap2-dpll-core-clock";
+		clocks = <&sys_ck>, <&sys_ck>;
+		reg = <0x0500>, <0x0540>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
new file mode 100644
index 000000000000..585e8c191f50
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
@@ -0,0 +1,96 @@
+Device Tree Clock bindings for ATL (Audio Tracking Logic) of DRA7 SoC.
+
+The ATL IP is used to generate clock to be used to synchronize baseband and
+audio codec. A single ATL IP provides four ATL clock instances sharing the same
+functional clock but can be configured to provide different clocks.
+ATL can maintain a clock averages to some desired frequency based on the bws/aws
+signals - can compensate the drift between the two ws signal.
+
+In order to provide the support for ATL and it's output clocks (which can be used
+internally within the SoC or external components) two sets of bindings is needed:
+
+Clock tree binding:
+This binding uses the common clock binding[1].
+To be able to integrate the ATL clocks with DT clock tree.
+Provides ccf level representation of the ATL clocks to be used by drivers.
+Since the clock instances are part of a single IP this binding is used as a node
+for the DT clock tree, the IP driver is needed to handle the actual configuration
+of the IP.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dra7-atl-clock"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles to functional clock of ATL
+
+Binding for the IP driver:
+This binding is used to configure the IP driver which is going to handle the
+configuration of the IP for the ATL clock instances.
+
+Required properties:
+- compatible : shall be "ti,dra7-atl"
+- reg : base address for the ATL IP
+- ti,provided-clocks : List of phandles to the clocks associated with the ATL
+- clocks : link phandles to functional clock of ATL
+- clock-names : Shall be set to "fck"
+- ti,hwmods : Shall be set to "atl"
+
+Optional properties:
+Configuration of ATL instances:
+- atl{0/1/2/3} {
+	- bws : Baseband word select signal selection
+	- aws : Audio word select signal selection
+};
+
+For valid word select signals, see the dt-bindings/clk/ti-dra7-atl.h include
+file.
+
+Examples:
+/* clock bindings for atl provided clocks */
+atl_clkin0_ck: atl_clkin0_ck {
+	#clock-cells = <0>;
+	compatible = "ti,dra7-atl-clock";
+	clocks = <&atl_gfclk_mux>;
+};
+
+atl_clkin1_ck: atl_clkin1_ck {
+	#clock-cells = <0>;
+	compatible = "ti,dra7-atl-clock";
+	clocks = <&atl_gfclk_mux>;
+};
+
+atl_clkin2_ck: atl_clkin2_ck {
+	#clock-cells = <0>;
+	compatible = "ti,dra7-atl-clock";
+	clocks = <&atl_gfclk_mux>;
+};
+
+atl_clkin3_ck: atl_clkin3_ck {
+	#clock-cells = <0>;
+	compatible = "ti,dra7-atl-clock";
+	clocks = <&atl_gfclk_mux>;
+};
+
+/* binding for the IP */
+atl: atl@4843c000 {
+	compatible = "ti,dra7-atl";
+	reg = <0x4843c000 0x3ff>;
+	ti,hwmods = "atl";
+	ti,provided-clocks = <&atl_clkin0_ck>, <&atl_clkin1_ck>,
+			     <&atl_clkin2_ck>, <&atl_clkin3_ck>;
+	clocks = <&atl_gfclk_mux>;
+	clock-names = "fck";
+	status = "disabled";
+};
+
+#include <dt-bindings/clk/ti-dra7-atl.h>
+
+&atl {
+	status = "okay";
+
+	atl2 {
+		bws = <DRA7_ATL_WS_MCASP2_FSX>;
+		aws = <DRA7_ATL_WS_MCASP3_FSX>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt
index 125281aaa4ca..03f8fdee62a7 100644
--- a/Documentation/devicetree/bindings/clock/ti/gate.txt
+++ b/Documentation/devicetree/bindings/clock/ti/gate.txt
@@ -25,6 +25,11 @@ Required properties:
 		to map clockdomains properly
 	"ti,hsdiv-gate-clock" - gate clock with OMAP36xx specific hardware handling,
 		required for a hardware errata
+	"ti,composite-gate-clock" - composite gate clock, to be part of composite
+		clock
+	"ti,composite-no-wait-gate-clock" - composite gate clock that does not wait
+		for clock to be active before returning
+		from clk_enable()
 - #clock-cells : from common clock binding; shall be set to 0
 - clocks : link to phandle of parent clock
 - reg : offset for register controlling adjustable gate, not needed for
@@ -41,7 +46,7 @@ Examples:
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&core_96m_fck>;
-		reg = <0x48004a00 0x4>;
+		reg = <0x0a00>;
 		ti,bit-shift = <25>;
 	};
 
@@ -57,7 +62,7 @@ Examples:
 		#clock-cells = <0>;
 		compatible = "ti,dss-gate-clock";
 		clocks = <&dpll4_m4x2_ck>;
-		reg = <0x48004e00 0x4>;
+		reg = <0x0e00>;
 		ti,bit-shift = <0>;
 	};
 
@@ -65,7 +70,7 @@ Examples:
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-gate-clock";
 		clocks = <&ipss_ick>;
-		reg = <0x4800259c 0x4>;
+		reg = <0x059c>;
 		ti,bit-shift = <1>;
 	};
 
@@ -80,6 +85,22 @@ Examples:
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll4_m2x2_mul_ck>;
 		ti,bit-shift = <0x1b>;
-		reg = <0x48004d00 0x4>;
+		reg = <0x0d00>;
 		ti,set-bit-to-disable;
 	};
+
+	vlynq_gate_fck: vlynq_gate_fck {
+		#clock-cells = <0>;
+		compatible = "ti,composite-gate-clock";
+		clocks = <&core_ck>;
+		ti,bit-shift = <3>;
+		reg = <0x0200>;
+	};
+
+	sys_clkout2_src_gate: sys_clkout2_src_gate {
+		#clock-cells = <0>;
+		compatible = "ti,composite-no-wait-gate-clock";
+		clocks = <&core_ck>;
+		ti,bit-shift = <15>;
+		reg = <0x0070>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt
index 064e8caccac3..3111a409fea6 100644
--- a/Documentation/devicetree/bindings/clock/ti/interface.txt
+++ b/Documentation/devicetree/bindings/clock/ti/interface.txt
@@ -21,6 +21,8 @@ Required properties:
 	"ti,omap3-dss-interface-clock" - interface clock with DSS specific HW handling
 	"ti,omap3-ssi-interface-clock" - interface clock with SSI specific HW handling
 	"ti,am35xx-interface-clock" - interface clock with AM35xx specific HW handling
+	"ti,omap2430-interface-clock" - interface clock with OMAP2430 specific HW
+				  handling
 - #clock-cells : from common clock binding; shall be set to 0
 - clocks : link to phandle of parent clock
 - reg : base address for the control register
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
new file mode 100644
index 000000000000..dde6c22ce91a
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -0,0 +1,42 @@
+* Rockchip RK3xxx I2C controller
+
+This driver interfaces with the native I2C controller present in Rockchip
+RK3xxx SoCs.
+
+Required properties :
+
+ - reg : Offset and length of the register set for the device
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
+   "rockchip,rk3288-i2c".
+ - interrupts : interrupt number
+ - clocks : parent clock
+
+Required on RK3066, RK3188 :
+
+ - rockchip,grf : the phandle of the syscon node for the general register
+   file (GRF)
+ - on those SoCs an alias with the correct I2C bus ID (bit offset in the GRF)
+   is also required.
+
+Optional properties :
+
+ - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+
+Example:
+
+aliases {
+	i2c0 = &i2c0;
+}
+
+i2c0: i2c@2002d000 {
+	compatible = "rockchip,rk3188-i2c";
+	reg = <0x2002d000 0x1000>;
+	interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	rockchip,grf = <&grf>;
+
+	clock-names = "i2c";
+	clocks = <&cru PCLK_I2C0>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
new file mode 100644
index 000000000000..6b765485af7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
@@ -0,0 +1,41 @@
+
+* Allwinner P2WI (Push/Pull 2 Wire Interface) controller
+
+Required properties :
+
+ - reg : Offset and length of the register set for the device.
+ - compatible : Should one of the following:
+    - "allwinner,sun6i-a31-p2wi"
+ - interrupts : The interrupt line connected to the P2WI peripheral.
+ - clocks : The gate clk connected to the P2WI peripheral.
+ - resets : The reset line connected to the P2WI peripheral.
+
+Optional properties :
+
+ - clock-frequency : Desired P2WI bus clock frequency in Hz. If not set the
+default frequency is 100kHz
+
+A P2WI may contain one child node encoding a P2WI slave device.
+
+Slave device properties:
+  Required properties:
+   - reg : the I2C slave address used during the initialization
+	   process to switch from I2C to P2WI mode
+
+Example:
+
+	p2wi@01f03400 {
+		compatible = "allwinner,sun6i-a31-p2wi";
+		reg = <0x01f03400 0x400>;
+		interrupts = <0 39 4>;
+		clocks = <&apb0_gates 3>;
+		clock-frequency = <6000000>;
+		resets = <&apb0_rst 3>;
+
+		axp221: pmic@68 {
+			compatible = "x-powers,axp221";
+			reg = <0x68>;
+
+			/* ... */
+		};
+	};
diff --git a/Documentation/hwmon/shtc1 b/Documentation/hwmon/shtc1
new file mode 100644
index 000000000000..6b1e05458f0f
--- /dev/null
+++ b/Documentation/hwmon/shtc1
@@ -0,0 +1,43 @@
+Kernel driver shtc1
+===================
+
+Supported chips:
+  * Sensirion SHTC1
+    Prefix: 'shtc1'
+    Addresses scanned: none
+    Datasheet: http://www.sensirion.com/file/datasheet_shtc1
+
+  * Sensirion SHTW1
+    Prefix: 'shtw1'
+    Addresses scanned: none
+    Datasheet: Not publicly available
+
+Author:
+  Johannes Winkelmann <johannes.winkelmann@sensirion.com>
+
+Description
+-----------
+
+This driver implements support for the Sensirion SHTC1 chip, a humidity and
+temperature sensor. Temperature is measured in degrees celsius, relative
+humidity is expressed as a percentage. Driver can be used as well for SHTW1
+chip, which has the same electrical interface.
+
+The device communicates with the I2C protocol. All sensors are set to I2C
+address 0x70. See Documentation/i2c/instantiating-devices for methods to
+instantiate the device.
+
+There are two options configurable by means of shtc1_platform_data:
+1. blocking (pull the I2C clock line down while performing the measurement) or
+   non-blocking mode. Blocking mode will guarantee the fastest result but
+   the I2C bus will be busy during that time. By default, non-blocking mode
+   is used. Make sure clock-stretching works properly on your device if you
+   want to use blocking mode.
+2. high or low accuracy. High accuracy is used by default and using it is
+   strongly recommended.
+
+sysfs-Interface
+---------------
+
+temp1_input - temperature input
+humidity1_input - humidity input
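
The two shtc1_platform_data options described above are passed through the usual I2C board-info mechanism. The following C sketch is illustrative only: the field names (blocking_io, high_precision) are assumed from the platform-data header added alongside this driver (include/linux/platform_data/shtc1.h) and should be verified against your tree, and the bus number is a placeholder.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_data/shtc1.h>

/* Assumed field names; check include/linux/platform_data/shtc1.h. */
static struct shtc1_platform_data board_shtc1_pdata = {
	.blocking_io	= false,	/* default: do not stretch the I2C clock */
	.high_precision	= true,		/* default and recommended */
};

static struct i2c_board_info board_i2c1_devs[] __initdata = {
	{
		I2C_BOARD_INFO("shtc1", 0x70),	/* all SHTC1/SHTW1 parts use address 0x70 */
		.platform_data = &board_shtc1_pdata,
	},
};

static int __init board_declare_shtc1(void)
{
	/* Bus number 1 is a placeholder for the adapter the sensor hangs off. */
	return i2c_register_board_info(1, board_i2c1_devs,
				       ARRAY_SIZE(board_i2c1_devs));
}

When no platform data is supplied, the driver's documented defaults (non-blocking I/O, high accuracy) apply.
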
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index d567a7cc552b..c600e2f44a62 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1171,7 +1171,7 @@ When kbuild executes, the following steps are followed (roughly):
 	obvious reason.
 
     dtc
-	Create flattend device tree blob object suitable for linking
+	Create flattened device tree blob object suitable for linking
 	into vmlinux. Device tree blobs linked into vmlinux are placed
 	in an init section in the image. Platform code *must* copy the
 	blob to non-init memory prior to calling unflatten_device_tree().
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6eaa9cdb7094..884904975d0b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1474,6 +1474,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	js=		[HW,JOY] Analog joystick
 			See Documentation/input/joystick.txt.
 
+	kaslr/nokaslr	[X86]
+			Enable/disable kernel and module base offset ASLR
+			(Address Space Layout Randomization) if built into
+			the kernel. When CONFIG_HIBERNATION is selected,
+			kASLR is disabled by default. When kASLR is enabled,
+			hibernation will be disabled.
+
 	keepinitrd	[HW,ARM]
 
 	kernelcore=nn[KMG]	[KNL,X86,IA-64,PPC] This parameter
@@ -2110,10 +2117,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
 
-	nokaslr		[X86]
-			Disable kernel and module base offset ASLR (Address
-			Space Layout Randomization) if built into the kernel.
-
 	noautogroup	Disable scheduler automatic task group creation.
 
 	nobats		[PPC] Do not use BATs for mapping kernel lowmem
@@ -2184,6 +2187,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			in certain environments such as networked servers or
 			real-time systems.
 
+	nohibernate	[HIBERNATION] Disable hibernation and resume.
+
 	nohz=		[KNL] Boottime enable/disable dynamic ticks
 			Valid arguments: on, off
 			Default: on
@@ -2980,6 +2985,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			noresume	Don't check if there's a hibernation image
 					present during boot.
 			nocompress	Don't compress/decompress hibernation images.
+			no		Disable hibernation and resume.
 
 	retain_initrd	[RAM] Keep initrd memory after extraction
 
diff --git a/Documentation/thermal/nouveau_thermal b/Documentation/thermal/nouveau_thermal
index efceb7828f54..60bc29357ac3 100644
--- a/Documentation/thermal/nouveau_thermal
+++ b/Documentation/thermal/nouveau_thermal
@@ -4,7 +4,7 @@ Kernel driver nouveau
 Supported chips:
 * NV43+
 
-Authors: Martin Peres (mupuf) <martin.peres@labri.fr>
+Authors: Martin Peres (mupuf) <martin.peres@free.fr>
 
 Description
 ---------
@@ -68,8 +68,9 @@ Your fan can be driven in different modes:
 
 NOTE: Be sure to use the manual mode if you want to drive the fan speed manually
 
-NOTE2: Not all fan management modes may be supported on all chipsets. We are
-working on it.
+NOTE2: When operating in manual mode outside the vbios-defined
+[PWM_min, PWM_max] range, the reported fan speed (RPM) may not be accurate
+depending on your hardware.
 
 Bug reports
 ---------
diff --git a/Documentation/vDSO/parse_vdso.c b/Documentation/vDSO/parse_vdso.c
index 85870208edcf..1dbb4b87268f 100644
--- a/Documentation/vDSO/parse_vdso.c
+++ b/Documentation/vDSO/parse_vdso.c
@@ -1,6 +1,6 @@
 /*
  * parse_vdso.c: Linux reference vDSO parser
- * Written by Andrew Lutomirski, 2011.
+ * Written by Andrew Lutomirski, 2011-2014.
  *
  * This code is meant to be linked in to various programs that run on Linux.
  * As such, it is available with as few restrictions as possible.  This file
@@ -11,13 +11,14 @@
  * it starts a program.  It works equally well in statically and dynamically
  * linked binaries.
  *
- * This code is tested on x86_64.  In principle it should work on any 64-bit
+ * This code is tested on x86.  In principle it should work on any
  * architecture that has a vDSO.
  */
 
 #include <stdbool.h>
 #include <stdint.h>
 #include <string.h>
+#include <limits.h>
 #include <elf.h>
 
 /*
@@ -45,11 +46,18 @@ extern void *vdso_sym(const char *version, const char *name);
 
 
 /* And here's the code. */
-
-#ifndef __x86_64__
-# error Not yet ported to non-x86_64 architectures
+#ifndef ELF_BITS
+# if ULONG_MAX > 0xffffffffUL
+#  define ELF_BITS 64
+# else
+#  define ELF_BITS 32
+# endif
 #endif
 
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
 static struct vdso_info
 {
 	bool valid;
@@ -59,14 +67,14 @@ static struct vdso_info
 	uintptr_t load_offset;  /* load_addr - recorded vaddr */
 
 	/* Symbol table */
-	Elf64_Sym *symtab;
+	ELF(Sym) *symtab;
 	const char *symstrings;
-	Elf64_Word *bucket, *chain;
-	Elf64_Word nbucket, nchain;
+	ELF(Word) *bucket, *chain;
+	ELF(Word) nbucket, nchain;
 
 	/* Version table */
-	Elf64_Versym *versym;
-	Elf64_Verdef *verdef;
+	ELF(Versym) *versym;
+	ELF(Verdef) *verdef;
 } vdso_info;
 
 /* Straight from the ELF specification. */
@@ -92,9 +100,14 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 
 	vdso_info.load_addr = base;
 
-	Elf64_Ehdr *hdr = (Elf64_Ehdr*)base;
-	Elf64_Phdr *pt = (Elf64_Phdr*)(vdso_info.load_addr + hdr->e_phoff);
-	Elf64_Dyn *dyn = 0;
+	ELF(Ehdr) *hdr = (ELF(Ehdr)*)base;
+	if (hdr->e_ident[EI_CLASS] !=
+	    (ELF_BITS == 32 ? ELFCLASS32 : ELFCLASS64)) {
+		return;  /* Wrong ELF class -- check ELF_BITS */
+	}
+
+	ELF(Phdr) *pt = (ELF(Phdr)*)(vdso_info.load_addr + hdr->e_phoff);
+	ELF(Dyn) *dyn = 0;
 
 	/*
 	 * We need two things from the segment table: the load offset
@@ -108,7 +121,7 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 			    + (uintptr_t)pt[i].p_offset
 			    - (uintptr_t)pt[i].p_vaddr;
 		} else if (pt[i].p_type == PT_DYNAMIC) {
-			dyn = (Elf64_Dyn*)(base + pt[i].p_offset);
+			dyn = (ELF(Dyn)*)(base + pt[i].p_offset);
 		}
 	}
 
@@ -118,7 +131,7 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 	/*
 	 * Fish out the useful bits of the dynamic table.
 	 */
-	Elf64_Word *hash = 0;
+	ELF(Word) *hash = 0;
 	vdso_info.symstrings = 0;
 	vdso_info.symtab = 0;
 	vdso_info.versym = 0;
@@ -131,22 +144,22 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 				((uintptr_t)dyn[i].d_un.d_ptr
 				 + vdso_info.load_offset);
 			break;
 		case DT_SYMTAB:
-			vdso_info.symtab = (Elf64_Sym *)
+			vdso_info.symtab = (ELF(Sym) *)
 				((uintptr_t)dyn[i].d_un.d_ptr
 				 + vdso_info.load_offset);
 			break;
 		case DT_HASH:
-			hash = (Elf64_Word *)
+			hash = (ELF(Word) *)
 				((uintptr_t)dyn[i].d_un.d_ptr
 				 + vdso_info.load_offset);
 			break;
 		case DT_VERSYM:
-			vdso_info.versym = (Elf64_Versym *)
+			vdso_info.versym = (ELF(Versym) *)
 				((uintptr_t)dyn[i].d_un.d_ptr
 				 + vdso_info.load_offset);
 			break;
 		case DT_VERDEF:
-			vdso_info.verdef = (Elf64_Verdef *)
+			vdso_info.verdef = (ELF(Verdef) *)
 				((uintptr_t)dyn[i].d_un.d_ptr
 				 + vdso_info.load_offset);
 			break;
@@ -168,8 +181,8 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 	vdso_info.valid = true;
 }
 
-static bool vdso_match_version(Elf64_Versym ver,
-			       const char *name, Elf64_Word hash)
+static bool vdso_match_version(ELF(Versym) ver,
+			       const char *name, ELF(Word) hash)
 {
 	/*
 	 * This is a helper function to check if the version indexed by
@@ -188,7 +201,7 @@ static bool vdso_match_version(Elf64_Versym ver,
188 201
189 /* First step: find the version definition */ 202 /* First step: find the version definition */
190 ver &= 0x7fff; /* Apparently bit 15 means "hidden" */ 203 ver &= 0x7fff; /* Apparently bit 15 means "hidden" */
191 Elf64_Verdef *def = vdso_info.verdef; 204 ELF(Verdef) *def = vdso_info.verdef;
192 while(true) { 205 while(true) {
193 if ((def->vd_flags & VER_FLG_BASE) == 0 206 if ((def->vd_flags & VER_FLG_BASE) == 0
194 && (def->vd_ndx & 0x7fff) == ver) 207 && (def->vd_ndx & 0x7fff) == ver)
@@ -197,11 +210,11 @@ static bool vdso_match_version(Elf64_Versym ver,
197 if (def->vd_next == 0) 210 if (def->vd_next == 0)
198 return false; /* No definition. */ 211 return false; /* No definition. */
199 212
200 def = (Elf64_Verdef *)((char *)def + def->vd_next); 213 def = (ELF(Verdef) *)((char *)def + def->vd_next);
201 } 214 }
202 215
203 /* Now figure out whether it matches. */ 216 /* Now figure out whether it matches. */
204 Elf64_Verdaux *aux = (Elf64_Verdaux*)((char *)def + def->vd_aux); 217 ELF(Verdaux) *aux = (ELF(Verdaux)*)((char *)def + def->vd_aux);
205 return def->vd_hash == hash 218 return def->vd_hash == hash
206 && !strcmp(name, vdso_info.symstrings + aux->vda_name); 219 && !strcmp(name, vdso_info.symstrings + aux->vda_name);
207} 220}
@@ -213,10 +226,10 @@ void *vdso_sym(const char *version, const char *name)
213 return 0; 226 return 0;
214 227
215 ver_hash = elf_hash(version); 228 ver_hash = elf_hash(version);
216 Elf64_Word chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket]; 229 ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];
217 230
218 for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) { 231 for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) {
219 Elf64_Sym *sym = &vdso_info.symtab[chain]; 232 ELF(Sym) *sym = &vdso_info.symtab[chain];
220 233
221 /* Check for a defined global or weak function w/ right name. */ 234 /* Check for a defined global or weak function w/ right name. */
222 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) 235 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
@@ -243,7 +256,7 @@ void *vdso_sym(const char *version, const char *name)
243 256
244void vdso_init_from_auxv(void *auxv) 257void vdso_init_from_auxv(void *auxv)
245{ 258{
246 Elf64_auxv_t *elf_auxv = auxv; 259 ELF(auxv_t) *elf_auxv = auxv;
247 for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++) 260 for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++)
248 { 261 {
249 if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) { 262 if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
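
[Editor's note on the hunks above] The parse_vdso.c changes replace hard-coded Elf64_* types with an ELF() wrapper so one parser source handles both 32-bit and 64-bit vDSO images; the ELF_BITS class check added to vdso_init_from_sysinfo_ehdr() relies on the same macro. The macro definition itself sits earlier in the file and is not part of the hunks shown here, so the following is only a plausible sketch of such a token-pasting wrapper; the ULONG_MAX-based default for ELF_BITS is an assumption, not necessarily what the file uses.

#include <elf.h>
#include <limits.h>

/* Sketch only: pick the ELF class from the native word size unless the
 * builder overrides ELF_BITS (e.g. -DELF_BITS=32 when parsing a 32-bit
 * vDSO from a 64-bit build). */
#ifndef ELF_BITS
# if ULONG_MAX > 0xffffffffUL
#  define ELF_BITS 64
# else
#  define ELF_BITS 32
# endif
#endif

/* Token-paste ELF_BITS into the <elf.h> type names, so ELF(Sym) expands
 * to Elf64_Sym or Elf32_Sym, ELF(Ehdr) to Elf64_Ehdr, and so on. */
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x)  ELF_BITS_XFORM2(bits, x)
#define ELF(x)                   ELF_BITS_XFORM(ELF_BITS, x)
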
diff --git a/Documentation/vDSO/vdso_standalone_test_x86.c b/Documentation/vDSO/vdso_standalone_test_x86.c
new file mode 100644
index 000000000000..d46240265c50
--- /dev/null
+++ b/Documentation/vDSO/vdso_standalone_test_x86.c
@@ -0,0 +1,128 @@
1/*
2 * vdso_standalone_test_x86.c: Sample code to test parse_vdso.c on x86
3 * Copyright (c) 2011-2014 Andy Lutomirski
4 * Subject to the GNU General Public License, version 2
5 *
6 * You can amuse yourself by compiling with:
7 * gcc -std=gnu99 -nostdlib
8 * -Os -fno-asynchronous-unwind-tables -flto -lgcc_s
9 * vdso_standalone_test_x86.c parse_vdso.c
10 * to generate a small binary. On x86_64, you can omit -lgcc_s
11 * if you want the binary to be completely standalone.
12 */
13
14#include <sys/syscall.h>
15#include <sys/time.h>
16#include <unistd.h>
17#include <stdint.h>
18
19extern void *vdso_sym(const char *version, const char *name);
20extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
21extern void vdso_init_from_auxv(void *auxv);
22
23/* We need a few libc functions... */
24int strcmp(const char *a, const char *b)
25{
26 /* This implementation is buggy: it never returns -1. */
27 while (*a || *b) {
28 if (*a != *b)
29 return 1;
30 if (*a == 0 || *b == 0)
31 return 1;
32 a++;
33 b++;
34 }
35
36 return 0;
37}
38
39/* ...and two syscalls. This is x86-specific. */
40static inline long x86_syscall3(long nr, long a0, long a1, long a2)
41{
42 long ret;
43#ifdef __x86_64__
44 asm volatile ("syscall" : "=a" (ret) : "a" (nr),
45 "D" (a0), "S" (a1), "d" (a2) :
46 "cc", "memory", "rcx",
47 "r8", "r9", "r10", "r11" );
48#else
49 asm volatile ("int $0x80" : "=a" (ret) : "a" (nr),
50 "b" (a0), "c" (a1), "d" (a2) :
51 "cc", "memory" );
52#endif
53 return ret;
54}
55
56static inline long linux_write(int fd, const void *data, size_t len)
57{
58 return x86_syscall3(__NR_write, fd, (long)data, (long)len);
59}
60
61static inline void linux_exit(int code)
62{
63 x86_syscall3(__NR_exit, code, 0, 0);
64}
65
66void to_base10(char *lastdig, uint64_t n)
67{
68 while (n) {
69 *lastdig = (n % 10) + '0';
70 n /= 10;
71 lastdig--;
72 }
73}
74
75__attribute__((externally_visible)) void c_main(void **stack)
76{
77 /* Parse the stack */
78 long argc = (long)*stack;
79 stack += argc + 2;
80
81 /* Now we're pointing at the environment. Skip it. */
82 while(*stack)
83 stack++;
84 stack++;
85
86 /* Now we're pointing at auxv. Initialize the vDSO parser. */
87 vdso_init_from_auxv((void *)stack);
88
89 /* Find gettimeofday. */
90 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
91 gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
92
93 if (!gtod)
94 linux_exit(1);
95
96 struct timeval tv;
97 long ret = gtod(&tv, 0);
98
99 if (ret == 0) {
100 char buf[] = "The time is .000000\n";
101 to_base10(buf + 31, tv.tv_sec);
102 to_base10(buf + 38, tv.tv_usec);
103 linux_write(1, buf, sizeof(buf) - 1);
104 } else {
105 linux_exit(ret);
106 }
107
108 linux_exit(0);
109}
110
111/*
112 * This is the real entry point. It passes the initial stack into
113 * the C entry point.
114 */
115asm (
116 ".text\n"
117 ".global _start\n"
118 ".type _start,@function\n"
119 "_start:\n\t"
120#ifdef __x86_64__
121 "mov %rsp,%rdi\n\t"
122 "jmp c_main"
123#else
124 "push %esp\n\t"
125 "call c_main\n\t"
126 "int $3"
127#endif
128 );
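
[Editor's note on the file above] c_main() has to locate the auxiliary vector by walking the raw initial stack handed over by _start, since there is no libc to do it. The helper below is an illustration only, not part of the patch: the function name stack_to_auxv is hypothetical, and it simply restates the walk c_main() performs, assuming the System V x86 process start-up layout.

/*
 * Initial stack layout assumed here:
 *   stack[0]             argc
 *   stack[1 .. argc]     argv[0] .. argv[argc-1]
 *   stack[argc + 1]      NULL  (argv terminator)
 *   stack[argc + 2] ...  envp[] pointers, then NULL
 *   ...                  Elf{32,64}_auxv_t entries, ending with AT_NULL
 */
static void **stack_to_auxv(void **stack)
{
	long argc = (long)*stack;

	stack += argc + 2;	/* skip argc, argv[] and argv's NULL */
	while (*stack)		/* skip the environment pointers */
		stack++;
	stack++;		/* step past envp's NULL terminator */

	return stack;		/* first auxv entry, as handed to
				 * vdso_init_from_auxv() */
}
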
diff --git a/Documentation/vDSO/vdso_test.c b/Documentation/vDSO/vdso_test.c
index fff633432dff..8daeb7d7032c 100644
--- a/Documentation/vDSO/vdso_test.c
+++ b/Documentation/vDSO/vdso_test.c
@@ -1,111 +1,52 @@
1/* 1/*
2 * vdso_test.c: Sample code to test parse_vdso.c on x86_64 2 * vdso_test.c: Sample code to test parse_vdso.c
3 * Copyright (c) 2011 Andy Lutomirski 3 * Copyright (c) 2014 Andy Lutomirski
4 * Subject to the GNU General Public License, version 2 4 * Subject to the GNU General Public License, version 2
5 * 5 *
6 * You can amuse yourself by compiling with: 6 * Compile with:
7 * gcc -std=gnu99 -nostdlib 7 * gcc -std=gnu99 vdso_test.c parse_vdso.c
8 * -Os -fno-asynchronous-unwind-tables -flto 8 *
9 * vdso_test.c parse_vdso.c -o vdso_test 9 * Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
10 * to generate a small binary with no dependencies at all.
11 */ 10 */
12 11
13#include <sys/syscall.h>
14#include <sys/time.h>
15#include <unistd.h>
16#include <stdint.h> 12#include <stdint.h>
13#include <elf.h>
14#include <stdio.h>
15#include <sys/auxv.h>
16#include <sys/time.h>
17 17
18extern void *vdso_sym(const char *version, const char *name); 18extern void *vdso_sym(const char *version, const char *name);
19extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); 19extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
20extern void vdso_init_from_auxv(void *auxv); 20extern void vdso_init_from_auxv(void *auxv);
21 21
22/* We need a libc functions... */ 22int main(int argc, char **argv)
23int strcmp(const char *a, const char *b)
24{ 23{
25 /* This implementation is buggy: it never returns -1. */ 24 unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
26 while (*a || *b) { 25 if (!sysinfo_ehdr) {
27 if (*a != *b) 26 printf("AT_SYSINFO_EHDR is not present!\n");
28 return 1; 27 return 0;
29 if (*a == 0 || *b == 0)
30 return 1;
31 a++;
32 b++;
33 } 28 }
34 29
35 return 0; 30 vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
36}
37
38/* ...and two syscalls. This is x86_64-specific. */
39static inline long linux_write(int fd, const void *data, size_t len)
40{
41
42 long ret;
43 asm volatile ("syscall" : "=a" (ret) : "a" (__NR_write),
44 "D" (fd), "S" (data), "d" (len) :
45 "cc", "memory", "rcx",
46 "r8", "r9", "r10", "r11" );
47 return ret;
48}
49
50static inline void linux_exit(int code)
51{
52 asm volatile ("syscall" : : "a" (__NR_exit), "D" (code));
53}
54
55void to_base10(char *lastdig, uint64_t n)
56{
57 while (n) {
58 *lastdig = (n % 10) + '0';
59 n /= 10;
60 lastdig--;
61 }
62}
63
64__attribute__((externally_visible)) void c_main(void **stack)
65{
66 /* Parse the stack */
67 long argc = (long)*stack;
68 stack += argc + 2;
69
70 /* Now we're pointing at the environment. Skip it. */
71 while(*stack)
72 stack++;
73 stack++;
74
75 /* Now we're pointing at auxv. Initialize the vDSO parser. */
76 vdso_init_from_auxv((void *)stack);
77 31
78 /* Find gettimeofday. */ 32 /* Find gettimeofday. */
79 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); 33 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
80 gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday"); 34 gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
81 35
82 if (!gtod) 36 if (!gtod) {
83 linux_exit(1); 37 printf("Could not find __vdso_gettimeofday\n");
38 return 1;
39 }
84 40
85 struct timeval tv; 41 struct timeval tv;
86 long ret = gtod(&tv, 0); 42 long ret = gtod(&tv, 0);
87 43
88 if (ret == 0) { 44 if (ret == 0) {
89 char buf[] = "The time is .000000\n"; 45 printf("The time is %lld.%06lld\n",
90 to_base10(buf + 31, tv.tv_sec); 46 (long long)tv.tv_sec, (long long)tv.tv_usec);
91 to_base10(buf + 38, tv.tv_usec);
92 linux_write(1, buf, sizeof(buf) - 1);
93 } else { 47 } else {
94 linux_exit(ret); 48 printf("__vdso_gettimeofday failed\n");
95 } 49 }
96 50
97 linux_exit(0); 51 return 0;
98} 52}
99
100/*
101 * This is the real entry point. It passes the initial stack into
102 * the C entry point.
103 */
104asm (
105 ".text\n"
106 ".global _start\n"
107 ".type _start,@function\n"
108 "_start:\n\t"
109 "mov %rsp,%rdi\n\t"
110 "jmp c_main"
111 );
diff --git a/MAINTAINERS b/MAINTAINERS
index 055f95238d88..3f2e171047b9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2594,7 +2594,7 @@ S: Supported
2594F: drivers/infiniband/hw/cxgb3/ 2594F: drivers/infiniband/hw/cxgb3/
2595 2595
2596CXGB4 ETHERNET DRIVER (CXGB4) 2596CXGB4 ETHERNET DRIVER (CXGB4)
2597M: Dimitris Michailidis <dm@chelsio.com> 2597M: Hariprasad S <hariprasad@chelsio.com>
2598L: netdev@vger.kernel.org 2598L: netdev@vger.kernel.org
2599W: http://www.chelsio.com 2599W: http://www.chelsio.com
2600S: Supported 2600S: Supported
@@ -6960,7 +6960,7 @@ PKUNITY SOC DRIVERS
6960M: Guan Xuetao <gxt@mprc.pku.edu.cn> 6960M: Guan Xuetao <gxt@mprc.pku.edu.cn>
6961W: http://mprc.pku.edu.cn/~guanxuetao/linux 6961W: http://mprc.pku.edu.cn/~guanxuetao/linux
6962S: Maintained 6962S: Maintained
6963T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git 6963T: git git://github.com/gxt/linux.git
6964F: drivers/input/serio/i8042-unicore32io.h 6964F: drivers/input/serio/i8042-unicore32io.h
6965F: drivers/i2c/busses/i2c-puv3.c 6965F: drivers/i2c/busses/i2c-puv3.c
6966F: drivers/video/fb-puv3.c 6966F: drivers/video/fb-puv3.c
@@ -7948,6 +7948,7 @@ F: drivers/mmc/host/sdhci-spear.c
7948 7948
7949SECURITY SUBSYSTEM 7949SECURITY SUBSYSTEM
7950M: James Morris <james.l.morris@oracle.com> 7950M: James Morris <james.l.morris@oracle.com>
7951M: Serge E. Hallyn <serge@hallyn.com>
7951L: linux-security-module@vger.kernel.org (suggested Cc:) 7952L: linux-security-module@vger.kernel.org (suggested Cc:)
7952T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git 7953T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
7953W: http://kernsec.org/ 7954W: http://kernsec.org/
@@ -9276,7 +9277,7 @@ UNICORE32 ARCHITECTURE:
9276M: Guan Xuetao <gxt@mprc.pku.edu.cn> 9277M: Guan Xuetao <gxt@mprc.pku.edu.cn>
9277W: http://mprc.pku.edu.cn/~guanxuetao/linux 9278W: http://mprc.pku.edu.cn/~guanxuetao/linux
9278S: Maintained 9279S: Maintained
9279T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git 9280T: git git://github.com/gxt/linux.git
9280F: arch/unicore32/ 9281F: arch/unicore32/
9281 9282
9282UNIFDEF 9283UNIFDEF
@@ -9743,6 +9744,14 @@ L: virtualization@lists.linux-foundation.org
9743S: Supported 9744S: Supported
9744F: arch/x86/kernel/cpu/vmware.c 9745F: arch/x86/kernel/cpu/vmware.c
9745 9746
9747VMWARE BALLOON DRIVER
9748M: Xavier Deguillard <xdeguillard@vmware.com>
9749M: Philip Moltmann <moltmann@vmware.com>
9750M: "VMware, Inc." <pv-drivers@vmware.com>
9751L: linux-kernel@vger.kernel.org
9752S: Maintained
9753F: drivers/misc/vmw_balloon.c
9754
9746VMWARE VMXNET3 ETHERNET DRIVER 9755VMWARE VMXNET3 ETHERNET DRIVER
9747M: Shreyas Bhatewara <sbhatewara@vmware.com> 9756M: Shreyas Bhatewara <sbhatewara@vmware.com>
9748M: "VMware, Inc." <pv-drivers@vmware.com> 9757M: "VMware, Inc." <pv-drivers@vmware.com>
diff --git a/Makefile b/Makefile
index 7680d7c70730..b11e2d504a00 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 15 2PATCHLEVEL = 16
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc2
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 87b63fde06d7..245058b3b0ef 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -175,13 +175,6 @@ config ARCH_HAS_ILOG2_U32
175config ARCH_HAS_ILOG2_U64 175config ARCH_HAS_ILOG2_U64
176 bool 176 bool
177 177
178config ARCH_HAS_CPUFREQ
179 bool
180 help
181 Internal node to signify that the ARCH has CPUFREQ support
182 and that the relevant menu configurations are displayed for
183 it.
184
185config ARCH_HAS_BANDGAP 178config ARCH_HAS_BANDGAP
186 bool 179 bool
187 180
@@ -318,7 +311,6 @@ config ARCH_MULTIPLATFORM
318 311
319config ARCH_INTEGRATOR 312config ARCH_INTEGRATOR
320 bool "ARM Ltd. Integrator family" 313 bool "ARM Ltd. Integrator family"
321 select ARCH_HAS_CPUFREQ
322 select ARM_AMBA 314 select ARM_AMBA
323 select ARM_PATCH_PHYS_VIRT 315 select ARM_PATCH_PHYS_VIRT
324 select AUTO_ZRELADDR 316 select AUTO_ZRELADDR
@@ -538,7 +530,6 @@ config ARCH_DOVE
538 530
539config ARCH_KIRKWOOD 531config ARCH_KIRKWOOD
540 bool "Marvell Kirkwood" 532 bool "Marvell Kirkwood"
541 select ARCH_HAS_CPUFREQ
542 select ARCH_REQUIRE_GPIOLIB 533 select ARCH_REQUIRE_GPIOLIB
543 select CPU_FEROCEON 534 select CPU_FEROCEON
544 select GENERIC_CLOCKEVENTS 535 select GENERIC_CLOCKEVENTS
@@ -637,7 +628,6 @@ config ARCH_LPC32XX
637config ARCH_PXA 628config ARCH_PXA
638 bool "PXA2xx/PXA3xx-based" 629 bool "PXA2xx/PXA3xx-based"
639 depends on MMU 630 depends on MMU
640 select ARCH_HAS_CPUFREQ
641 select ARCH_MTD_XIP 631 select ARCH_MTD_XIP
642 select ARCH_REQUIRE_GPIOLIB 632 select ARCH_REQUIRE_GPIOLIB
643 select ARM_CPU_SUSPEND if PM 633 select ARM_CPU_SUSPEND if PM
@@ -707,7 +697,6 @@ config ARCH_RPC
707 697
708config ARCH_SA1100 698config ARCH_SA1100
709 bool "SA1100-based" 699 bool "SA1100-based"
710 select ARCH_HAS_CPUFREQ
711 select ARCH_MTD_XIP 700 select ARCH_MTD_XIP
712 select ARCH_REQUIRE_GPIOLIB 701 select ARCH_REQUIRE_GPIOLIB
713 select ARCH_SPARSEMEM_ENABLE 702 select ARCH_SPARSEMEM_ENABLE
@@ -725,7 +714,6 @@ config ARCH_SA1100
725 714
726config ARCH_S3C24XX 715config ARCH_S3C24XX
727 bool "Samsung S3C24XX SoCs" 716 bool "Samsung S3C24XX SoCs"
728 select ARCH_HAS_CPUFREQ
729 select ARCH_REQUIRE_GPIOLIB 717 select ARCH_REQUIRE_GPIOLIB
730 select ATAGS 718 select ATAGS
731 select CLKDEV_LOOKUP 719 select CLKDEV_LOOKUP
@@ -746,7 +734,6 @@ config ARCH_S3C24XX
746 734
747config ARCH_S3C64XX 735config ARCH_S3C64XX
748 bool "Samsung S3C64XX" 736 bool "Samsung S3C64XX"
749 select ARCH_HAS_CPUFREQ
750 select ARCH_REQUIRE_GPIOLIB 737 select ARCH_REQUIRE_GPIOLIB
751 select ARM_AMBA 738 select ARM_AMBA
752 select ARM_VIC 739 select ARM_VIC
@@ -809,7 +796,6 @@ config ARCH_S5PC100
809 796
810config ARCH_S5PV210 797config ARCH_S5PV210
811 bool "Samsung S5PV210/S5PC110" 798 bool "Samsung S5PV210/S5PC110"
812 select ARCH_HAS_CPUFREQ
813 select ARCH_HAS_HOLES_MEMORYMODEL 799 select ARCH_HAS_HOLES_MEMORYMODEL
814 select ARCH_SPARSEMEM_ENABLE 800 select ARCH_SPARSEMEM_ENABLE
815 select ATAGS 801 select ATAGS
@@ -845,7 +831,6 @@ config ARCH_DAVINCI
845config ARCH_OMAP1 831config ARCH_OMAP1
846 bool "TI OMAP1" 832 bool "TI OMAP1"
847 depends on MMU 833 depends on MMU
848 select ARCH_HAS_CPUFREQ
849 select ARCH_HAS_HOLES_MEMORYMODEL 834 select ARCH_HAS_HOLES_MEMORYMODEL
850 select ARCH_OMAP 835 select ARCH_OMAP
851 select ARCH_REQUIRE_GPIOLIB 836 select ARCH_REQUIRE_GPIOLIB
@@ -1009,8 +994,6 @@ source "arch/arm/mach-rockchip/Kconfig"
1009 994
1010source "arch/arm/mach-sa1100/Kconfig" 995source "arch/arm/mach-sa1100/Kconfig"
1011 996
1012source "arch/arm/plat-samsung/Kconfig"
1013
1014source "arch/arm/mach-socfpga/Kconfig" 997source "arch/arm/mach-socfpga/Kconfig"
1015 998
1016source "arch/arm/mach-spear/Kconfig" 999source "arch/arm/mach-spear/Kconfig"
@@ -1028,6 +1011,7 @@ source "arch/arm/mach-s5pc100/Kconfig"
1028source "arch/arm/mach-s5pv210/Kconfig" 1011source "arch/arm/mach-s5pv210/Kconfig"
1029 1012
1030source "arch/arm/mach-exynos/Kconfig" 1013source "arch/arm/mach-exynos/Kconfig"
1014source "arch/arm/plat-samsung/Kconfig"
1031 1015
1032source "arch/arm/mach-shmobile/Kconfig" 1016source "arch/arm/mach-shmobile/Kconfig"
1033 1017
@@ -2109,9 +2093,7 @@ endmenu
2109 2093
2110menu "CPU Power Management" 2094menu "CPU Power Management"
2111 2095
2112if ARCH_HAS_CPUFREQ
2113source "drivers/cpufreq/Kconfig" 2096source "drivers/cpufreq/Kconfig"
2114endif
2115 2097
2116source "drivers/cpuidle/Kconfig" 2098source "drivers/cpuidle/Kconfig"
2117 2099
diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts
index 772fec2d26ce..1e2919d43d78 100644
--- a/arch/arm/boot/dts/armada-375-db.dts
+++ b/arch/arm/boot/dts/armada-375-db.dts
@@ -91,6 +91,8 @@
91 marvell,nand-keep-config; 91 marvell,nand-keep-config;
92 marvell,nand-enable-arbiter; 92 marvell,nand-enable-arbiter;
93 nand-on-flash-bbt; 93 nand-on-flash-bbt;
94 nand-ecc-strength = <4>;
95 nand-ecc-step-size = <512>;
94 96
95 partition@0 { 97 partition@0 {
96 label = "U-Boot"; 98 label = "U-Boot";
diff --git a/arch/arm/boot/dts/armada-385-db.dts b/arch/arm/boot/dts/armada-385-db.dts
index ff9637dd8d0f..5bae4731828b 100644
--- a/arch/arm/boot/dts/armada-385-db.dts
+++ b/arch/arm/boot/dts/armada-385-db.dts
@@ -98,6 +98,8 @@
98 marvell,nand-keep-config; 98 marvell,nand-keep-config;
99 marvell,nand-enable-arbiter; 99 marvell,nand-enable-arbiter;
100 nand-on-flash-bbt; 100 nand-on-flash-bbt;
101 nand-ecc-strength = <4>;
102 nand-ecc-step-size = <512>;
101 103
102 partition@0 { 104 partition@0 {
103 label = "U-Boot"; 105 label = "U-Boot";
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index e5c6a0492ca0..4e5a59ee1501 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -25,7 +25,7 @@
25 25
26 memory { 26 memory {
27 device_type = "memory"; 27 device_type = "memory";
28 reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */ 28 reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
29 }; 29 };
30 30
31 soc { 31 soc {
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index c7676871d9c0..b03cfe49d22b 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -26,7 +26,7 @@
26 clock-frequency = <0>; 26 clock-frequency = <0>;
27 }; 27 };
28 28
29 atlclkin3_ck: atlclkin3_ck { 29 atl_clkin3_ck: atl_clkin3_ck {
30 #clock-cells = <0>; 30 #clock-cells = <0>;
31 compatible = "fixed-clock"; 31 compatible = "fixed-clock";
32 clock-frequency = <0>; 32 clock-frequency = <0>;
@@ -277,7 +277,7 @@
277 277
278 dpll_mpu_ck: dpll_mpu_ck { 278 dpll_mpu_ck: dpll_mpu_ck {
279 #clock-cells = <0>; 279 #clock-cells = <0>;
280 compatible = "ti,omap4-dpll-clock"; 280 compatible = "ti,omap5-mpu-dpll-clock";
281 clocks = <&sys_clkin1>, <&mpu_dpll_hs_clk_div>; 281 clocks = <&sys_clkin1>, <&mpu_dpll_hs_clk_div>;
282 reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>; 282 reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
283 }; 283 };
@@ -730,7 +730,7 @@
730 mcasp1_ahclkr_mux: mcasp1_ahclkr_mux { 730 mcasp1_ahclkr_mux: mcasp1_ahclkr_mux {
731 #clock-cells = <0>; 731 #clock-cells = <0>;
732 compatible = "ti,mux-clock"; 732 compatible = "ti,mux-clock";
733 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 733 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
734 ti,bit-shift = <28>; 734 ti,bit-shift = <28>;
735 reg = <0x0550>; 735 reg = <0x0550>;
736 }; 736 };
@@ -738,7 +738,7 @@
738 mcasp1_ahclkx_mux: mcasp1_ahclkx_mux { 738 mcasp1_ahclkx_mux: mcasp1_ahclkx_mux {
739 #clock-cells = <0>; 739 #clock-cells = <0>;
740 compatible = "ti,mux-clock"; 740 compatible = "ti,mux-clock";
741 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 741 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
742 ti,bit-shift = <24>; 742 ti,bit-shift = <24>;
743 reg = <0x0550>; 743 reg = <0x0550>;
744 }; 744 };
@@ -1639,7 +1639,7 @@
1639 mcasp2_ahclkr_mux: mcasp2_ahclkr_mux { 1639 mcasp2_ahclkr_mux: mcasp2_ahclkr_mux {
1640 #clock-cells = <0>; 1640 #clock-cells = <0>;
1641 compatible = "ti,mux-clock"; 1641 compatible = "ti,mux-clock";
1642 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1642 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1643 ti,bit-shift = <28>; 1643 ti,bit-shift = <28>;
1644 reg = <0x1860>; 1644 reg = <0x1860>;
1645 }; 1645 };
@@ -1647,7 +1647,7 @@
1647 mcasp2_ahclkx_mux: mcasp2_ahclkx_mux { 1647 mcasp2_ahclkx_mux: mcasp2_ahclkx_mux {
1648 #clock-cells = <0>; 1648 #clock-cells = <0>;
1649 compatible = "ti,mux-clock"; 1649 compatible = "ti,mux-clock";
1650 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1650 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1651 ti,bit-shift = <24>; 1651 ti,bit-shift = <24>;
1652 reg = <0x1860>; 1652 reg = <0x1860>;
1653 }; 1653 };
@@ -1663,7 +1663,7 @@
1663 mcasp3_ahclkx_mux: mcasp3_ahclkx_mux { 1663 mcasp3_ahclkx_mux: mcasp3_ahclkx_mux {
1664 #clock-cells = <0>; 1664 #clock-cells = <0>;
1665 compatible = "ti,mux-clock"; 1665 compatible = "ti,mux-clock";
1666 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1666 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1667 ti,bit-shift = <24>; 1667 ti,bit-shift = <24>;
1668 reg = <0x1868>; 1668 reg = <0x1868>;
1669 }; 1669 };
@@ -1679,7 +1679,7 @@
1679 mcasp4_ahclkx_mux: mcasp4_ahclkx_mux { 1679 mcasp4_ahclkx_mux: mcasp4_ahclkx_mux {
1680 #clock-cells = <0>; 1680 #clock-cells = <0>;
1681 compatible = "ti,mux-clock"; 1681 compatible = "ti,mux-clock";
1682 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1682 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1683 ti,bit-shift = <24>; 1683 ti,bit-shift = <24>;
1684 reg = <0x1898>; 1684 reg = <0x1898>;
1685 }; 1685 };
@@ -1695,7 +1695,7 @@
1695 mcasp5_ahclkx_mux: mcasp5_ahclkx_mux { 1695 mcasp5_ahclkx_mux: mcasp5_ahclkx_mux {
1696 #clock-cells = <0>; 1696 #clock-cells = <0>;
1697 compatible = "ti,mux-clock"; 1697 compatible = "ti,mux-clock";
1698 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1698 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1699 ti,bit-shift = <24>; 1699 ti,bit-shift = <24>;
1700 reg = <0x1878>; 1700 reg = <0x1878>;
1701 }; 1701 };
@@ -1711,7 +1711,7 @@
1711 mcasp6_ahclkx_mux: mcasp6_ahclkx_mux { 1711 mcasp6_ahclkx_mux: mcasp6_ahclkx_mux {
1712 #clock-cells = <0>; 1712 #clock-cells = <0>;
1713 compatible = "ti,mux-clock"; 1713 compatible = "ti,mux-clock";
1714 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1714 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1715 ti,bit-shift = <24>; 1715 ti,bit-shift = <24>;
1716 reg = <0x1904>; 1716 reg = <0x1904>;
1717 }; 1717 };
@@ -1727,7 +1727,7 @@
1727 mcasp7_ahclkx_mux: mcasp7_ahclkx_mux { 1727 mcasp7_ahclkx_mux: mcasp7_ahclkx_mux {
1728 #clock-cells = <0>; 1728 #clock-cells = <0>;
1729 compatible = "ti,mux-clock"; 1729 compatible = "ti,mux-clock";
1730 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1730 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1731 ti,bit-shift = <24>; 1731 ti,bit-shift = <24>;
1732 reg = <0x1908>; 1732 reg = <0x1908>;
1733 }; 1733 };
@@ -1743,7 +1743,7 @@
1743 mcasp8_ahclk_mux: mcasp8_ahclk_mux { 1743 mcasp8_ahclk_mux: mcasp8_ahclk_mux {
1744 #clock-cells = <0>; 1744 #clock-cells = <0>;
1745 compatible = "ti,mux-clock"; 1745 compatible = "ti,mux-clock";
1746 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1746 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1747 ti,bit-shift = <22>; 1747 ti,bit-shift = <22>;
1748 reg = <0x1890>; 1748 reg = <0x1890>;
1749 }; 1749 };
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index aeb142ce8e9d..e67a23b5d788 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -335,7 +335,7 @@
335 335
336 dpll_mpu_ck: dpll_mpu_ck { 336 dpll_mpu_ck: dpll_mpu_ck {
337 #clock-cells = <0>; 337 #clock-cells = <0>;
338 compatible = "ti,omap4-dpll-clock"; 338 compatible = "ti,omap5-mpu-dpll-clock";
339 clocks = <&sys_clkin>, <&mpu_dpll_hs_clk_div>; 339 clocks = <&sys_clkin>, <&mpu_dpll_hs_clk_div>;
340 reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>; 340 reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
341 }; 341 };
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index e2d62048e198..17d9462b9fb9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -300,6 +300,7 @@ CONFIG_MMC=y
300CONFIG_MMC_BLOCK_MINORS=16 300CONFIG_MMC_BLOCK_MINORS=16
301CONFIG_MMC_ARMMMCI=y 301CONFIG_MMC_ARMMMCI=y
302CONFIG_MMC_SDHCI=y 302CONFIG_MMC_SDHCI=y
303CONFIG_MMC_SDHCI_PLTFM=y
303CONFIG_MMC_SDHCI_OF_ARASAN=y 304CONFIG_MMC_SDHCI_OF_ARASAN=y
304CONFIG_MMC_SDHCI_ESDHC_IMX=y 305CONFIG_MMC_SDHCI_ESDHC_IMX=y
305CONFIG_MMC_SDHCI_DOVE=y 306CONFIG_MMC_SDHCI_DOVE=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 59066cf0271a..536a137863cb 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -32,6 +32,7 @@ CONFIG_SOC_OMAP5=y
32CONFIG_SOC_AM33XX=y 32CONFIG_SOC_AM33XX=y
33CONFIG_SOC_AM43XX=y 33CONFIG_SOC_AM43XX=y
34CONFIG_SOC_DRA7XX=y 34CONFIG_SOC_DRA7XX=y
35CONFIG_CACHE_L2X0=y
35CONFIG_ARM_THUMBEE=y 36CONFIG_ARM_THUMBEE=y
36CONFIG_ARM_ERRATA_411920=y 37CONFIG_ARM_ERRATA_411920=y
37CONFIG_SMP=y 38CONFIG_SMP=y
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index eb577f4f5f70..39eb16b0066f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -52,7 +52,7 @@ extern inline void *return_address(unsigned int level)
52 52
53#endif 53#endif
54 54
55#define ftrace_return_addr(n) return_address(n) 55#define ftrace_return_address(n) return_address(n)
56 56
57#endif /* ifndef __ASSEMBLY__ */ 57#endif /* ifndef __ASSEMBLY__ */
58 58
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 9bc6db1c1348..41c839167e87 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -1,10 +1,9 @@
1config ARCH_BCM 1menuconfig ARCH_BCM
2 bool "Broadcom SoC Support" if ARCH_MULTI_V6_V7 2 bool "Broadcom SoC Support" if ARCH_MULTI_V6_V7
3 help 3 help
4 This enables support for Broadcom ARM based SoC chips 4 This enables support for Broadcom ARM based SoC chips
5 5
6menu "Broadcom SoC Selection" 6if ARCH_BCM
7 depends on ARCH_BCM
8 7
9config ARCH_BCM_MOBILE 8config ARCH_BCM_MOBILE
10 bool "Broadcom Mobile SoC Support" if ARCH_MULTI_V7 9 bool "Broadcom Mobile SoC Support" if ARCH_MULTI_V7
@@ -88,4 +87,4 @@ config ARCH_BCM_5301X
88 different SoC or with the older BCM47XX and BCM53XX based 87 different SoC or with the older BCM47XX and BCM53XX based
89 network SoC using a MIPS CPU, they are supported by arch/mips/bcm47xx 88 network SoC using a MIPS CPU, they are supported by arch/mips/bcm47xx
90 89
91endmenu 90endif
diff --git a/arch/arm/mach-berlin/Kconfig b/arch/arm/mach-berlin/Kconfig
index 101e0f356730..2631cfc5ab0d 100644
--- a/arch/arm/mach-berlin/Kconfig
+++ b/arch/arm/mach-berlin/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_BERLIN 1menuconfig ARCH_BERLIN
2 bool "Marvell Berlin SoCs" if ARCH_MULTI_V7 2 bool "Marvell Berlin SoCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARM_GIC 4 select ARM_GIC
@@ -9,8 +9,6 @@ config ARCH_BERLIN
9 9
10if ARCH_BERLIN 10if ARCH_BERLIN
11 11
12menu "Marvell Berlin SoC variants"
13
14config MACH_BERLIN_BG2 12config MACH_BERLIN_BG2
15 bool "Marvell Armada 1500 (BG2)" 13 bool "Marvell Armada 1500 (BG2)"
16 select CACHE_L2X0 14 select CACHE_L2X0
@@ -30,6 +28,4 @@ config MACH_BERLIN_BG2Q
30 select HAVE_ARM_TWD if SMP 28 select HAVE_ARM_TWD if SMP
31 select PINCTRL_BERLIN_BG2Q 29 select PINCTRL_BERLIN_BG2Q
32 30
33endmenu
34
35endif 31endif
diff --git a/arch/arm/mach-cns3xxx/Kconfig b/arch/arm/mach-cns3xxx/Kconfig
index 66838f42037f..3c22a1990ecd 100644
--- a/arch/arm/mach-cns3xxx/Kconfig
+++ b/arch/arm/mach-cns3xxx/Kconfig
@@ -1,12 +1,11 @@
1config ARCH_CNS3XXX 1menuconfig ARCH_CNS3XXX
2 bool "Cavium Networks CNS3XXX family" if ARCH_MULTI_V6 2 bool "Cavium Networks CNS3XXX family" if ARCH_MULTI_V6
3 select ARM_GIC 3 select ARM_GIC
4 select PCI_DOMAINS if PCI 4 select PCI_DOMAINS if PCI
5 help 5 help
6 Support for Cavium Networks CNS3XXX platform. 6 Support for Cavium Networks CNS3XXX platform.
7 7
8menu "CNS3XXX platform type" 8if ARCH_CNS3XXX
9 depends on ARCH_CNS3XXX
10 9
11config MACH_CNS3420VB 10config MACH_CNS3420VB
12 bool "Support for CNS3420 Validation Board" 11 bool "Support for CNS3420 Validation Board"
@@ -17,4 +16,4 @@ config MACH_CNS3420VB
17 This is a platform with an on-board ARM11 MPCore and has support 16 This is a platform with an on-board ARM11 MPCore and has support
18 for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc. 17 for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc.
19 18
20endmenu 19endif
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index db18ef866593..584e8d4e2892 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -39,7 +39,6 @@ config ARCH_DAVINCI_DA830
39config ARCH_DAVINCI_DA850 39config ARCH_DAVINCI_DA850
40 bool "DA850/OMAP-L138/AM18x based system" 40 bool "DA850/OMAP-L138/AM18x based system"
41 select ARCH_DAVINCI_DA8XX 41 select ARCH_DAVINCI_DA8XX
42 select ARCH_HAS_CPUFREQ
43 select CP_INTC 42 select CP_INTC
44 43
45config ARCH_DAVINCI_DA8XX 44config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index d58995c9a95a..8f9b66c4ac78 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -7,10 +7,9 @@
7 7
8# Configuration options for the EXYNOS4 8# Configuration options for the EXYNOS4
9 9
10config ARCH_EXYNOS 10menuconfig ARCH_EXYNOS
11 bool "Samsung EXYNOS" if ARCH_MULTI_V7 11 bool "Samsung EXYNOS" if ARCH_MULTI_V7
12 select ARCH_HAS_BANDGAP 12 select ARCH_HAS_BANDGAP
13 select ARCH_HAS_CPUFREQ
14 select ARCH_HAS_HOLES_MEMORYMODEL 13 select ARCH_HAS_HOLES_MEMORYMODEL
15 select ARCH_REQUIRE_GPIOLIB 14 select ARCH_REQUIRE_GPIOLIB
16 select ARM_AMBA 15 select ARM_AMBA
@@ -30,8 +29,6 @@ config ARCH_EXYNOS
30 29
31if ARCH_EXYNOS 30if ARCH_EXYNOS
32 31
33menu "SAMSUNG EXYNOS SoCs Support"
34
35config ARCH_EXYNOS3 32config ARCH_EXYNOS3
36 bool "SAMSUNG EXYNOS3" 33 bool "SAMSUNG EXYNOS3"
37 select ARM_CPU_SUSPEND if PM 34 select ARM_CPU_SUSPEND if PM
@@ -118,8 +115,6 @@ config SOC_EXYNOS5800
118 default y 115 default y
119 depends on SOC_EXYNOS5420 116 depends on SOC_EXYNOS5420
120 117
121endmenu
122
123config EXYNOS5420_MCPM 118config EXYNOS5420_MCPM
124 bool "Exynos5420 Multi-Cluster PM support" 119 bool "Exynos5420 Multi-Cluster PM support"
125 depends on MCPM && SOC_EXYNOS5420 120 depends on MCPM && SOC_EXYNOS5420
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 16617bdb37a9..1ee91763fa7c 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -118,6 +118,7 @@ extern void __iomem *sysram_ns_base_addr;
118extern void __iomem *sysram_base_addr; 118extern void __iomem *sysram_base_addr;
119void exynos_init_io(void); 119void exynos_init_io(void);
120void exynos_restart(enum reboot_mode mode, const char *cmd); 120void exynos_restart(enum reboot_mode mode, const char *cmd);
121void exynos_sysram_init(void);
121void exynos_cpuidle_init(void); 122void exynos_cpuidle_init(void);
122void exynos_cpufreq_init(void); 123void exynos_cpufreq_init(void);
123void exynos_init_late(void); 124void exynos_init_late(void);
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 90aab4d75d08..f38cf7c110cc 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -184,6 +184,28 @@ void __init exynos_cpufreq_init(void)
184 platform_device_register_simple("exynos-cpufreq", -1, NULL, 0); 184 platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
185} 185}
186 186
187void __iomem *sysram_base_addr;
188void __iomem *sysram_ns_base_addr;
189
190void __init exynos_sysram_init(void)
191{
192 struct device_node *node;
193
194 for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
195 if (!of_device_is_available(node))
196 continue;
197 sysram_base_addr = of_iomap(node, 0);
198 break;
199 }
200
201 for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
202 if (!of_device_is_available(node))
203 continue;
204 sysram_ns_base_addr = of_iomap(node, 0);
205 break;
206 }
207}
208
187void __init exynos_init_late(void) 209void __init exynos_init_late(void)
188{ 210{
189 if (of_machine_is_compatible("samsung,exynos5440")) 211 if (of_machine_is_compatible("samsung,exynos5440"))
@@ -198,7 +220,7 @@ static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
198 int depth, void *data) 220 int depth, void *data)
199{ 221{
200 struct map_desc iodesc; 222 struct map_desc iodesc;
201 __be32 *reg; 223 const __be32 *reg;
202 int len; 224 int len;
203 225
204 if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid") && 226 if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid") &&
@@ -271,6 +293,13 @@ static void __init exynos_dt_machine_init(void)
271 } 293 }
272 } 294 }
273 295
296 /*
297 * This is called from smp_prepare_cpus if we've built for SMP, but
298 * we still need to set it up for PM and firmware ops if not.
299 */
300 if (!IS_ENABLED(CONFIG_SMP))
301 exynos_sysram_init();
302
274 exynos_cpuidle_init(); 303 exynos_cpuidle_init();
275 exynos_cpufreq_init(); 304 exynos_cpufreq_init();
276 305
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index ec02422e8499..1c8d31e39520 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -32,28 +32,6 @@
32 32
33extern void exynos4_secondary_startup(void); 33extern void exynos4_secondary_startup(void);
34 34
35void __iomem *sysram_base_addr;
36void __iomem *sysram_ns_base_addr;
37
38static void __init exynos_smp_prepare_sysram(void)
39{
40 struct device_node *node;
41
42 for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
43 if (!of_device_is_available(node))
44 continue;
45 sysram_base_addr = of_iomap(node, 0);
46 break;
47 }
48
49 for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
50 if (!of_device_is_available(node))
51 continue;
52 sysram_ns_base_addr = of_iomap(node, 0);
53 break;
54 }
55}
56
57static inline void __iomem *cpu_boot_reg_base(void) 35static inline void __iomem *cpu_boot_reg_base(void)
58{ 36{
59 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1) 37 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
@@ -234,11 +212,11 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
234{ 212{
235 int i; 213 int i;
236 214
215 exynos_sysram_init();
216
237 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 217 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
238 scu_enable(scu_base_addr()); 218 scu_enable(scu_base_addr());
239 219
240 exynos_smp_prepare_sysram();
241
242 /* 220 /*
243 * Write the address of secondary startup into the 221 * Write the address of secondary startup into the
244 * system-wide flags register. The boot monitor waits 222 * system-wide flags register. The boot monitor waits
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 830b76e70250..a5960e2ac090 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,7 +1,6 @@
1config ARCH_HIGHBANK 1config ARCH_HIGHBANK
2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
3 select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE 3 select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
4 select ARCH_HAS_CPUFREQ
5 select ARCH_HAS_HOLES_MEMORYMODEL 4 select ARCH_HAS_HOLES_MEMORYMODEL
6 select ARCH_HAS_OPP 5 select ARCH_HAS_OPP
7 select ARCH_SUPPORTS_BIG_ENDIAN 6 select ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 8d42eab76d53..28fa2fa49e5d 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,6 +1,5 @@
1config ARCH_MXC 1menuconfig ARCH_MXC
2 bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 2 bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
3 select ARCH_HAS_CPUFREQ
4 select ARCH_HAS_OPP 3 select ARCH_HAS_OPP
5 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
6 select ARM_CPU_SUSPEND if PM 5 select ARM_CPU_SUSPEND if PM
@@ -13,8 +12,7 @@ config ARCH_MXC
13 help 12 help
14 Support for Freescale MXC/iMX-based family of processors 13 Support for Freescale MXC/iMX-based family of processors
15 14
16menu "Freescale i.MX support" 15if ARCH_MXC
17 depends on ARCH_MXC
18 16
19config MXC_TZIC 17config MXC_TZIC
20 bool 18 bool
@@ -99,7 +97,6 @@ config SOC_IMX25
99 97
100config SOC_IMX27 98config SOC_IMX27
101 bool 99 bool
102 select ARCH_HAS_CPUFREQ
103 select ARCH_HAS_OPP 100 select ARCH_HAS_OPP
104 select CPU_ARM926T 101 select CPU_ARM926T
105 select IMX_HAVE_IOMUX_V1 102 select IMX_HAVE_IOMUX_V1
@@ -124,7 +121,6 @@ config SOC_IMX35
124 121
125config SOC_IMX5 122config SOC_IMX5
126 bool 123 bool
127 select ARCH_HAS_CPUFREQ
128 select ARCH_HAS_OPP 124 select ARCH_HAS_OPP
129 select ARCH_MXC_IOMUX_V3 125 select ARCH_MXC_IOMUX_V3
130 select MXC_TZIC 126 select MXC_TZIC
@@ -786,4 +782,4 @@ endif
786 782
787source "arch/arm/mach-imx/devices/Kconfig" 783source "arch/arm/mach-imx/devices/Kconfig"
788 784
789endmenu 785endif
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index ba43321001d8..64f8e2564a37 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -28,7 +28,7 @@ config ARCH_CINTEGRATOR
28 bool 28 bool
29 29
30config INTEGRATOR_IMPD1 30config INTEGRATOR_IMPD1
31 tristate "Include support for Integrator/IM-PD1" 31 bool "Include support for Integrator/IM-PD1"
32 depends on ARCH_INTEGRATOR_AP 32 depends on ARCH_INTEGRATOR_AP
33 select ARCH_REQUIRE_GPIOLIB 33 select ARCH_REQUIRE_GPIOLIB
34 select ARM_VIC 34 select ARM_VIC
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 0e870ea818c4..3ce880729cff 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -308,7 +308,12 @@ static struct impd1_device impd1_devs[] = {
308 */ 308 */
309#define IMPD1_VALID_IRQS 0x00000bffU 309#define IMPD1_VALID_IRQS 0x00000bffU
310 310
311static int __init impd1_probe(struct lm_device *dev) 311/*
312 * As this module is bool, it is OK to have this as __init_refok() - no
313 * probe calls will be done after the initial system bootup, as devices
314 * are discovered as part of the machine startup.
315 */
316static int __init_refok impd1_probe(struct lm_device *dev)
312{ 317{
313 struct impd1_module *impd1; 318 struct impd1_module *impd1;
314 int irq_base; 319 int irq_base;
@@ -397,6 +402,11 @@ static void impd1_remove(struct lm_device *dev)
397static struct lm_driver impd1_driver = { 402static struct lm_driver impd1_driver = {
398 .drv = { 403 .drv = {
399 .name = "impd1", 404 .name = "impd1",
405 /*
406 * As we're dropping the probe() function, suppress driver
407 * binding from sysfs.
408 */
409 .suppress_bind_attrs = true,
400 }, 410 },
401 .probe = impd1_probe, 411 .probe = impd1_probe,
402 .remove = impd1_remove, 412 .remove = impd1_remove,
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index f50bc936cb84..98a156afaa94 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -1,6 +1,7 @@
1config ARCH_KEYSTONE 1config ARCH_KEYSTONE
2 bool "Texas Instruments Keystone Devices" 2 bool "Texas Instruments Keystone Devices"
3 depends on ARCH_MULTI_V7 3 depends on ARCH_MULTI_V7
4 depends on ARM_PATCH_PHYS_VIRT
4 select ARM_GIC 5 select ARM_GIC
5 select HAVE_ARM_ARCH_TIMER 6 select HAVE_ARM_ARCH_TIMER
6 select CLKSRC_MMIO 7 select CLKSRC_MMIO
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index 82a4ba8578a2..f49328c39bef 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_MOXART 1menuconfig ARCH_MOXART
2 bool "MOXA ART SoC" if ARCH_MULTI_V4 2 bool "MOXA ART SoC" if ARCH_MULTI_V4
3 select CPU_FA526 3 select CPU_FA526
4 select ARM_DMA_MEM_BUFFERABLE 4 select ARM_DMA_MEM_BUFFERABLE
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 6090b9eb00c8..4a7c250c9a30 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_MVEBU 1menuconfig ARCH_MVEBU
2 bool "Marvell Engineering Business Unit (MVEBU) SoCs" if (ARCH_MULTI_V7 || ARCH_MULTI_V5) 2 bool "Marvell Engineering Business Unit (MVEBU) SoCs" if (ARCH_MULTI_V7 || ARCH_MULTI_V5)
3 select ARCH_SUPPORTS_BIG_ENDIAN 3 select ARCH_SUPPORTS_BIG_ENDIAN
4 select CLKSRC_MMIO 4 select CLKSRC_MMIO
@@ -13,8 +13,6 @@ config ARCH_MVEBU
13 13
14if ARCH_MVEBU 14if ARCH_MVEBU
15 15
16menu "Marvell EBU SoC variants"
17
18config MACH_MVEBU_V7 16config MACH_MVEBU_V7
19 bool 17 bool
20 select ARMADA_370_XP_TIMER 18 select ARMADA_370_XP_TIMER
@@ -84,7 +82,6 @@ config MACH_DOVE
84 82
85config MACH_KIRKWOOD 83config MACH_KIRKWOOD
86 bool "Marvell Kirkwood boards" if ARCH_MULTI_V5 84 bool "Marvell Kirkwood boards" if ARCH_MULTI_V5
87 select ARCH_HAS_CPUFREQ
88 select ARCH_REQUIRE_GPIOLIB 85 select ARCH_REQUIRE_GPIOLIB
89 select CPU_FEROCEON 86 select CPU_FEROCEON
90 select KIRKWOOD_CLK 87 select KIRKWOOD_CLK
@@ -97,6 +94,4 @@ config MACH_KIRKWOOD
97 Say 'Y' here if you want your kernel to support boards based 94 Say 'Y' here if you want your kernel to support boards based
98 on the Marvell Kirkwood device tree. 95 on the Marvell Kirkwood device tree.
99 96
100endmenu
101
102endif 97endif
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 486d301f43fd..3c61096c8627 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_NOMADIK 1menuconfig ARCH_NOMADIK
2 bool "ST-Ericsson Nomadik" 2 bool "ST-Ericsson Nomadik"
3 depends on ARCH_MULTI_V5 3 depends on ARCH_MULTI_V5
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
@@ -15,7 +15,6 @@ config ARCH_NOMADIK
15 Support for the Nomadik platform by ST-Ericsson 15 Support for the Nomadik platform by ST-Ericsson
16 16
17if ARCH_NOMADIK 17if ARCH_NOMADIK
18menu "Nomadik boards"
19 18
20config MACH_NOMADIK_8815NHK 19config MACH_NOMADIK_8815NHK
21 bool "ST 8815 Nomadik Hardware Kit (evaluation board)" 20 bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
@@ -24,7 +23,6 @@ config MACH_NOMADIK_8815NHK
24 select I2C_ALGOBIT 23 select I2C_ALGOBIT
25 select I2C_NOMADIK 24 select I2C_NOMADIK
26 25
27endmenu
28endif 26endif
29 27
30config NOMADIK_8815 28config NOMADIK_8815
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 0ba482638ebf..062505345c95 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -1,3 +1,6 @@
1menu "TI OMAP/AM/DM/DRA Family"
2 depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
3
1config ARCH_OMAP 4config ARCH_OMAP
2 bool 5 bool
3 6
@@ -28,7 +31,6 @@ config ARCH_OMAP4
28 select ARM_CPU_SUSPEND if PM 31 select ARM_CPU_SUSPEND if PM
29 select ARM_ERRATA_720789 32 select ARM_ERRATA_720789
30 select ARM_GIC 33 select ARM_GIC
31 select CACHE_L2X0
32 select HAVE_ARM_SCU if SMP 34 select HAVE_ARM_SCU if SMP
33 select HAVE_ARM_TWD if SMP 35 select HAVE_ARM_TWD if SMP
34 select OMAP_INTERCONNECT 36 select OMAP_INTERCONNECT
@@ -80,7 +82,6 @@ config SOC_DRA7XX
80config ARCH_OMAP2PLUS 82config ARCH_OMAP2PLUS
81 bool 83 bool
82 select ARCH_HAS_BANDGAP 84 select ARCH_HAS_BANDGAP
83 select ARCH_HAS_CPUFREQ
84 select ARCH_HAS_HOLES_MEMORYMODEL 85 select ARCH_HAS_HOLES_MEMORYMODEL
85 select ARCH_OMAP 86 select ARCH_OMAP
86 select ARCH_REQUIRE_GPIOLIB 87 select ARCH_REQUIRE_GPIOLIB
@@ -343,3 +344,5 @@ config OMAP4_ERRATA_I688
343endmenu 344endmenu
344 345
345endif 346endif
347
348endmenu
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index b935ed2922d8..85e0b0c06718 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -208,3 +208,56 @@ void omap2xxx_clkt_vps_late_init(void)
208 clk_put(c); 208 clk_put(c);
209 } 209 }
210} 210}
211
212#ifdef CONFIG_OF
213#include <linux/clk-provider.h>
214#include <linux/clkdev.h>
215
216static const struct clk_ops virt_prcm_set_ops = {
217 .recalc_rate = &omap2_table_mpu_recalc,
218 .set_rate = &omap2_select_table_rate,
219 .round_rate = &omap2_round_to_table_rate,
220};
221
222/**
223 * omap2xxx_clkt_vps_init - initialize virt_prcm_set clock
224 *
225 * Does a manual init for the virtual prcm DVFS clock for OMAP2. This
226 * function is called only from omap2 DT clock init, as the virtual
227 * node is not modelled in the DT clock data.
228 */
229void omap2xxx_clkt_vps_init(void)
230{
231 struct clk_init_data init = { NULL };
232 struct clk_hw_omap *hw = NULL;
233 struct clk *clk;
234 const char *parent_name = "mpu_ck";
235 struct clk_lookup *lookup = NULL;
236
237 omap2xxx_clkt_vps_late_init();
238 omap2xxx_clkt_vps_check_bootloader_rates();
239
240 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
241 lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
242 if (!hw || !lookup)
243 goto cleanup;
244 init.name = "virt_prcm_set";
245 init.ops = &virt_prcm_set_ops;
246 init.parent_names = &parent_name;
247 init.num_parents = 1;
248
249 hw->hw.init = &init;
250
251 clk = clk_register(NULL, &hw->hw);
252
253 lookup->dev_id = NULL;
254 lookup->con_id = "cpufreq_ck";
255 lookup->clk = clk;
256
257 clkdev_add(lookup);
258 return;
259cleanup:
260 kfree(hw);
261 kfree(lookup);
262}
263#endif
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index bda767a9dea8..12f54d428d7c 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -178,17 +178,6 @@ struct clksel {
178 const struct clksel_rate *rates; 178 const struct clksel_rate *rates;
179}; 179};
180 180
181struct clk_hw_omap_ops {
182 void (*find_idlest)(struct clk_hw_omap *oclk,
183 void __iomem **idlest_reg,
184 u8 *idlest_bit, u8 *idlest_val);
185 void (*find_companion)(struct clk_hw_omap *oclk,
186 void __iomem **other_reg,
187 u8 *other_bit);
188 void (*allow_idle)(struct clk_hw_omap *oclk);
189 void (*deny_idle)(struct clk_hw_omap *oclk);
190};
191
192unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw, 181unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
193 unsigned long parent_rate); 182 unsigned long parent_rate);
194 183
@@ -279,8 +268,6 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait;
279extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; 268extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
280extern const struct clk_hw_omap_ops clkhwops_apll54; 269extern const struct clk_hw_omap_ops clkhwops_apll54;
281extern const struct clk_hw_omap_ops clkhwops_apll96; 270extern const struct clk_hw_omap_ops clkhwops_apll96;
282extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
283extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
284 271
285/* clksel_rate blocks shared between OMAP44xx and AM33xx */ 272/* clksel_rate blocks shared between OMAP44xx and AM33xx */
286extern const struct clksel_rate div_1_0_rates[]; 273extern const struct clksel_rate div_1_0_rates[];
diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h
index 539dc08afbba..45f41a411603 100644
--- a/arch/arm/mach-omap2/clock2xxx.h
+++ b/arch/arm/mach-omap2/clock2xxx.h
@@ -21,10 +21,6 @@ unsigned long omap2xxx_sys_clk_recalc(struct clk_hw *clk,
21 unsigned long parent_rate); 21 unsigned long parent_rate);
22unsigned long omap2_osc_clk_recalc(struct clk_hw *clk, 22unsigned long omap2_osc_clk_recalc(struct clk_hw *clk,
23 unsigned long parent_rate); 23 unsigned long parent_rate);
24unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
25 unsigned long parent_rate);
26int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
27 unsigned long parent_rate);
28void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); 24void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
29unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw, 25unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw,
30 unsigned long parent_rate); 26 unsigned long parent_rate);
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index ff029737c8f0..a373d508799a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -91,7 +91,14 @@ extern void omap3_sync32k_timer_init(void);
91extern void omap3_secure_sync32k_timer_init(void); 91extern void omap3_secure_sync32k_timer_init(void);
92extern void omap3_gptimer_timer_init(void); 92extern void omap3_gptimer_timer_init(void);
93extern void omap4_local_timer_init(void); 93extern void omap4_local_timer_init(void);
94#ifdef CONFIG_CACHE_L2X0
94int omap_l2_cache_init(void); 95int omap_l2_cache_init(void);
96#else
97static inline int omap_l2_cache_init(void)
98{
99 return 0;
100}
101#endif
95extern void omap5_realtime_timer_init(void); 102extern void omap5_realtime_timer_init(void);
96 103
97void omap2420_init_early(void); 104void omap2420_init_early(void);
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index fcd8036af910..6d7ba37e2257 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -319,6 +319,15 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
319 319
320 /* Set DPLL multiplier, divider */ 320 /* Set DPLL multiplier, divider */
321 v = omap2_clk_readl(clk, dd->mult_div1_reg); 321 v = omap2_clk_readl(clk, dd->mult_div1_reg);
322
323 /* Handle Duty Cycle Correction */
324 if (dd->dcc_mask) {
325 if (dd->last_rounded_rate >= dd->dcc_rate)
326 v |= dd->dcc_mask; /* Enable DCC */
327 else
328 v &= ~dd->dcc_mask; /* Disable DCC */
329 }
330
322 v &= ~(dd->mult_mask | dd->div1_mask); 331 v &= ~(dd->mult_mask | dd->div1_mask);
323 v |= dd->last_rounded_m << __ffs(dd->mult_mask); 332 v |= dd->last_rounded_m << __ffs(dd->mult_mask);
324 v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); 333 v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index e4e505f52ba0..042f693ef423 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_SIRF 1menuconfig ARCH_SIRF
2 bool "CSR SiRF" if ARCH_MULTI_V7 2 bool "CSR SiRF" if ARCH_MULTI_V7
3 select ARCH_HAS_RESET_CONTROLLER 3 select ARCH_HAS_RESET_CONTROLLER
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
@@ -11,7 +11,7 @@ config ARCH_SIRF
11 11
12if ARCH_SIRF 12if ARCH_SIRF
13 13
14menu "CSR SiRF atlas6/primaII/Marco/Polo Specific Features" 14comment "CSR SiRF atlas6/primaII/Marco/Polo Specific Features"
15 15
16config ARCH_ATLAS6 16config ARCH_ATLAS6
17 bool "CSR SiRFSoC ATLAS6 ARM Cortex A9 Platform" 17 bool "CSR SiRFSoC ATLAS6 ARM Cortex A9 Platform"
@@ -37,8 +37,6 @@ config ARCH_MARCO
37 help 37 help
38 Support for CSR SiRFSoC ARM Cortex A9 Platform 38 Support for CSR SiRFSoC ARM Cortex A9 Platform
39 39
40endmenu
41
42config SIRF_IRQ 40config SIRF_IRQ
43 bool 41 bool
44 42
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index fd2b99dceb89..ee5697ba05bc 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_QCOM 1menuconfig ARCH_QCOM
2 bool "Qualcomm Support" if ARCH_MULTI_V7 2 bool "Qualcomm Support" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARM_GIC 4 select ARM_GIC
@@ -11,8 +11,6 @@ config ARCH_QCOM
11 11
12if ARCH_QCOM 12if ARCH_QCOM
13 13
14menu "Qualcomm SoC Selection"
15
16config ARCH_MSM8X60 14config ARCH_MSM8X60
17 bool "Enable support for MSM8X60" 15 bool "Enable support for MSM8X60"
18 select CLKSRC_QCOM 16 select CLKSRC_QCOM
@@ -25,8 +23,6 @@ config ARCH_MSM8974
25 bool "Enable support for MSM8974" 23 bool "Enable support for MSM8974"
26 select HAVE_ARM_ARCH_TIMER 24 select HAVE_ARM_ARCH_TIMER
27 25
28endmenu
29
30config QCOM_SCM 26config QCOM_SCM
31 bool 27 bool
32 28
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 04284de7aca5..ad5316ae524e 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -117,7 +117,7 @@ config S3C24XX_SETUP_TS
117 Compile in platform device definition for Samsung TouchScreen. 117 Compile in platform device definition for Samsung TouchScreen.
118 118
119config S3C24XX_DMA 119config S3C24XX_DMA
120 bool "S3C2410 DMA support" 120 bool "S3C2410 DMA support (deprecated)"
121 select S3C_DMA 121 select S3C_DMA
122 help 122 help
123 S3C2410 DMA support. This is needed for drivers like sound which 123 S3C2410 DMA support. This is needed for drivers like sound which
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 3136d86b0d6e..26ca2427e53d 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -18,9 +18,9 @@ config CPU_S3C6410
18 Enable S3C6410 CPU support 18 Enable S3C6410 CPU support
19 19
20config S3C64XX_PL080 20config S3C64XX_PL080
21 bool "S3C64XX DMA using generic PL08x driver" 21 def_bool DMADEVICES
22 select ARM_AMBA
22 select AMBA_PL08X 23 select AMBA_PL08X
23 select SAMSUNG_DMADEV
24 24
25config S3C64XX_SETUP_SDHCI 25config S3C64XX_SETUP_SDHCI
26 bool 26 bool
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index bb2111b3751e..26003e23796d 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -9,16 +9,18 @@ if ARCH_S5P64X0
9 9
10config CPU_S5P6440 10config CPU_S5P6440
11 bool 11 bool
12 select ARM_AMBA
13 select PL330_DMA if DMADEVICES
12 select S5P_SLEEP if PM 14 select S5P_SLEEP if PM
13 select SAMSUNG_DMADEV
14 select SAMSUNG_WAKEMASK if PM 15 select SAMSUNG_WAKEMASK if PM
15 help 16 help
16 Enable S5P6440 CPU support 17 Enable S5P6440 CPU support
17 18
18config CPU_S5P6450 19config CPU_S5P6450
19 bool 20 bool
21 select ARM_AMBA
22 select PL330_DMA if DMADEVICES
20 select S5P_SLEEP if PM 23 select S5P_SLEEP if PM
21 select SAMSUNG_DMADEV
22 select SAMSUNG_WAKEMASK if PM 24 select SAMSUNG_WAKEMASK if PM
23 help 25 help
24 Enable S5P6450 CPU support 26 Enable S5P6450 CPU support
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index 15170be97a74..c5e3a969b063 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -9,8 +9,9 @@ if ARCH_S5PC100
9 9
10config CPU_S5PC100 10config CPU_S5PC100
11 bool 11 bool
12 select ARM_AMBA
13 select PL330_DMA if DMADEVICES
12 select S5P_EXT_INT 14 select S5P_EXT_INT
13 select SAMSUNG_DMADEV
14 help 15 help
15 Enable S5PC100 CPU support 16 Enable S5PC100 CPU support
16 17
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 8c3abe521757..f60f2862856d 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -11,10 +11,11 @@ if ARCH_S5PV210
11 11
12config CPU_S5PV210 12config CPU_S5PV210
13 bool 13 bool
14 select ARM_AMBA
15 select PL330_DMA if DMADEVICES
14 select S5P_EXT_INT 16 select S5P_EXT_INT
15 select S5P_PM if PM 17 select S5P_PM if PM
16 select S5P_SLEEP if PM 18 select S5P_SLEEP if PM
17 select SAMSUNG_DMADEV
18 help 19 help
19 Enable S5PV210 CPU support 20 Enable S5PV210 CPU support
20 21
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index dbd954e61aa7..798073057e51 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -1,7 +1,7 @@
1config ARCH_SHMOBILE 1config ARCH_SHMOBILE
2 bool 2 bool
3 3
4config ARCH_SHMOBILE_MULTI 4menuconfig ARCH_SHMOBILE_MULTI
5 bool "Renesas ARM SoCs" if ARCH_MULTI_V7 5 bool "Renesas ARM SoCs" if ARCH_MULTI_V7
6 depends on MMU 6 depends on MMU
7 select ARCH_SHMOBILE 7 select ARCH_SHMOBILE
@@ -15,7 +15,7 @@ config ARCH_SHMOBILE_MULTI
15 15
16if ARCH_SHMOBILE_MULTI 16if ARCH_SHMOBILE_MULTI
17 17
18comment "Renesas ARM SoCs System Type" 18#comment "Renesas ARM SoCs System Type"
19 19
20config ARCH_EMEV2 20config ARCH_EMEV2
21 bool "Emma Mobile EV2" 21 bool "Emma Mobile EV2"
@@ -85,7 +85,6 @@ config ARCH_R8A73A4
85 select CPU_V7 85 select CPU_V7
86 select SH_CLK_CPG 86 select SH_CLK_CPG
87 select RENESAS_IRQC 87 select RENESAS_IRQC
88 select ARCH_HAS_CPUFREQ
89 select ARCH_HAS_OPP 88 select ARCH_HAS_OPP
90 select SYS_SUPPORTS_SH_CMT 89 select SYS_SUPPORTS_SH_CMT
91 select SYS_SUPPORTS_SH_TMU 90 select SYS_SUPPORTS_SH_TMU
@@ -264,7 +263,6 @@ config MACH_KOELSCH
264config MACH_KZM9G 263config MACH_KZM9G
265 bool "KZM-A9-GT board" 264 bool "KZM-A9-GT board"
266 depends on ARCH_SH73A0 265 depends on ARCH_SH73A0
267 select ARCH_HAS_CPUFREQ
268 select ARCH_HAS_OPP 266 select ARCH_HAS_OPP
269 select ARCH_REQUIRE_GPIOLIB 267 select ARCH_REQUIRE_GPIOLIB
270 select REGULATOR_FIXED_VOLTAGE if REGULATOR 268 select REGULATOR_FIXED_VOLTAGE if REGULATOR
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index 0786249b2832..90df2022276a 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -14,7 +14,6 @@ if PLAT_SPEAR
14config ARCH_SPEAR13XX 14config ARCH_SPEAR13XX
15 bool "ST SPEAr13xx" 15 bool "ST SPEAr13xx"
16 depends on ARCH_MULTI_V7 || PLAT_SPEAR_SINGLE 16 depends on ARCH_MULTI_V7 || PLAT_SPEAR_SINGLE
17 select ARCH_HAS_CPUFREQ
18 select ARM_GIC 17 select ARM_GIC
19 select GPIO_SPEAR_SPICS 18 select GPIO_SPEAR_SPICS
20 select HAVE_ARM_SCU if SMP 19 select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index abf9ee9bbc3f..7e33e9d2c42e 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -1,5 +1,5 @@
1menuconfig ARCH_STI 1menuconfig ARCH_STI
2 bool "STMicroelectronics Consumer Electronics SOCs with Device Trees" if ARCH_MULTI_V7 2 bool "STMicroelectronics Consumer Electronics SOCs" if ARCH_MULTI_V7
3 select ARM_GIC 3 select ARM_GIC
4 select ARM_GLOBAL_TIMER 4 select ARM_GLOBAL_TIMER
5 select PINCTRL 5 select PINCTRL
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index e16999e5b735..095399618ca5 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -1,6 +1,5 @@
1config ARCH_TEGRA 1menuconfig ARCH_TEGRA
2 bool "NVIDIA Tegra" if ARCH_MULTI_V7 2 bool "NVIDIA Tegra" if ARCH_MULTI_V7
3 select ARCH_HAS_CPUFREQ
4 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
5 select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS 4 select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
6 select ARM_GIC 5 select ARM_GIC
@@ -16,8 +15,7 @@ config ARCH_TEGRA
16 help 15 help
17 This enables support for NVIDIA Tegra based systems. 16 This enables support for NVIDIA Tegra based systems.
18 17
19menu "NVIDIA Tegra options" 18if ARCH_TEGRA
20 depends on ARCH_TEGRA
21 19
22config ARCH_TEGRA_2x_SOC 20config ARCH_TEGRA_2x_SOC
23 bool "Enable support for Tegra20 family" 21 bool "Enable support for Tegra20 family"
@@ -69,4 +67,4 @@ config TEGRA_AHB
69 which controls AHB bus master arbitration and some 67 which controls AHB bus master arbitration and some
70 performance parameters(priority, prefech size). 68 performance parameters(priority, prefech size).
71 69
72endmenu 70endif
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index e3a96d7302e9..bc51a71394af 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_U300 1menuconfig ARCH_U300
2 bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5 2 bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
3 depends on MMU 3 depends on MMU
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
@@ -16,8 +16,6 @@ config ARCH_U300
16 16
17if ARCH_U300 17if ARCH_U300
18 18
19menu "ST-Ericsson AB U300/U335 Platform"
20
21config MACH_U300 19config MACH_U300
22 depends on ARCH_U300 20 depends on ARCH_U300
23 bool "U300" 21 bool "U300"
@@ -43,6 +41,4 @@ config MACH_U300_SPIDUMMY
43 you don't need it. Selecting this will activate the 41 you don't need it. Selecting this will activate the
44 SPI framework and ARM PL022 support. 42 SPI framework and ARM PL022 support.
45 43
46endmenu
47
48endif 44endif
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index b41a42da1505..5be7c4583a93 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -1,9 +1,8 @@
1config ARCH_U8500 1menuconfig ARCH_U8500
2 bool "ST-Ericsson U8500 Series" if ARCH_MULTI_V7 2 bool "ST-Ericsson U8500 Series" if ARCH_MULTI_V7
3 depends on MMU 3 depends on MMU
4 select AB8500_CORE 4 select AB8500_CORE
5 select ABX500_CORE 5 select ABX500_CORE
6 select ARCH_HAS_CPUFREQ
7 select ARCH_REQUIRE_GPIOLIB 6 select ARCH_REQUIRE_GPIOLIB
8 select ARM_AMBA 7 select ARM_AMBA
9 select ARM_ERRATA_754322 8 select ARM_ERRATA_754322
@@ -34,8 +33,6 @@ config UX500_SOC_DB8500
34 select REGULATOR 33 select REGULATOR
35 select REGULATOR_DB8500_PRCMU 34 select REGULATOR_DB8500_PRCMU
36 35
37menu "Ux500 target platform (boards)"
38
39config MACH_MOP500 36config MACH_MOP500
40 bool "U8500 Development platform, MOP500 versions" 37 bool "U8500 Development platform, MOP500 versions"
41 select I2C 38 select I2C
@@ -68,8 +65,6 @@ config UX500_AUTO_PLATFORM
68 a working kernel. If everything else is disabled, this 65 a working kernel. If everything else is disabled, this
69 automatically enables MACH_MOP500. 66 automatically enables MACH_MOP500.
70 67
71endmenu
72
73config UX500_DEBUG_UART 68config UX500_DEBUG_UART
74 int "Ux500 UART to use for low-level debug" 69 int "Ux500 UART to use for low-level debug"
75 default 2 70 default 2
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 90249cfc37b3..99c1f151c403 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,4 +1,4 @@
1config ARCH_VEXPRESS 1menuconfig ARCH_VEXPRESS
2 bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7 2 bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARCH_SUPPORTS_BIG_ENDIAN 4 select ARCH_SUPPORTS_BIG_ENDIAN
@@ -37,8 +37,7 @@ config ARCH_VEXPRESS
37 platforms. The traditional (ATAGs) boot method is not usable on 37 platforms. The traditional (ATAGs) boot method is not usable on
38 these boards with this option. 38 these boards with this option.
39 39
40menu "Versatile Express platform type" 40if ARCH_VEXPRESS
41 depends on ARCH_VEXPRESS
42 41
43config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA 42config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
44 bool "Enable A5 and A9 only errata work-arounds" 43 bool "Enable A5 and A9 only errata work-arounds"
@@ -65,7 +64,6 @@ config ARCH_VEXPRESS_DCSCB
65 64
66config ARCH_VEXPRESS_SPC 65config ARCH_VEXPRESS_SPC
67 bool "Versatile Express Serial Power Controller (SPC)" 66 bool "Versatile Express Serial Power Controller (SPC)"
68 select ARCH_HAS_CPUFREQ
69 select ARCH_HAS_OPP 67 select ARCH_HAS_OPP
70 select PM_OPP 68 select PM_OPP
71 help 69 help
@@ -83,4 +81,4 @@ config ARCH_VEXPRESS_TC2_PM
83 Support for CPU and cluster power management on Versatile Express 81 Support for CPU and cluster power management on Versatile Express
84 with a TC2 (A15x2 A7x3) big.LITTLE core tile. 82 with a TC2 (A15x2 A7x3) big.LITTLE core tile.
85 83
86endmenu 84endif
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index 08f56a41cb55..aaaa24fe4d71 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -1,6 +1,5 @@
1config ARCH_VT8500 1config ARCH_VT8500
2 bool 2 bool
3 select ARCH_HAS_CPUFREQ
4 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
5 select CLKDEV_LOOKUP 4 select CLKDEV_LOOKUP
6 select VT8500_TIMER 5 select VT8500_TIMER
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 573e0db1d0f0..0c164f81e72d 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -1,6 +1,5 @@
1config ARCH_ZYNQ 1config ARCH_ZYNQ
2 bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7 2 bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
3 select ARCH_HAS_CPUFREQ
4 select ARCH_HAS_OPP 3 select ARCH_HAS_OPP
5 select ARCH_SUPPORTS_BIG_ENDIAN 4 select ARCH_SUPPORTS_BIG_ENDIAN
6 select ARM_AMBA 5 select ARM_AMBA
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 243dfcb2ca0e..301b892d97d9 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -35,27 +35,15 @@ config SAMSUNG_PM
35 Base platform power management code for samsung code 35 Base platform power management code for samsung code
36 36
37if PLAT_SAMSUNG 37if PLAT_SAMSUNG
38menu "Samsung Common options"
38 39
39# boot configurations 40# boot configurations
40 41
41comment "Boot options" 42comment "Boot options"
42 43
43config S3C_BOOT_ERROR_RESET
44 bool "S3C Reboot on decompression error"
45 help
46 Say y here to use the watchdog to reset the system if the
47 kernel decompressor detects an error during decompression.
48
49config S3C_BOOT_UART_FORCE_FIFO
50 bool "Force UART FIFO on during boot process"
51 default y
52 help
53 Say Y here to force the UART FIFOs on during the kernel
54 uncompressor
55
56
57config S3C_LOWLEVEL_UART_PORT 44config S3C_LOWLEVEL_UART_PORT
58 int "S3C UART to use for low-level messages" 45 int "S3C UART to use for low-level messages"
46 depends on ARCH_S3C64XX
59 default 0 47 default 0
60 help 48 help
61 Choice of which UART port to use for the low-level messages, 49 Choice of which UART port to use for the low-level messages,
@@ -407,17 +395,16 @@ config SAMSUNG_PM_GPIO
407 Include legacy GPIO power management code for platforms not using 395 Include legacy GPIO power management code for platforms not using
408 pinctrl-samsung driver. 396 pinctrl-samsung driver.
409 397
410endif
411
412config SAMSUNG_DMADEV 398config SAMSUNG_DMADEV
413 bool 399 bool "Use legacy Samsung DMA abstraction"
414 select ARM_AMBA 400 depends on CPU_S5PV210 || CPU_S5PC100 || ARCH_S5P64X0 || ARCH_S3C64XX
415 select DMADEVICES 401 select DMADEVICES
416 select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \ 402 default y
417 CPU_S5P6450 || CPU_S5P6440)
418 help 403 help
419 Use DMA device engine for PL330 DMAC. 404 Use DMA device engine for PL330 DMAC.
420 405
406endif
407
421config S5P_DEV_MFC 408config S5P_DEV_MFC
422 bool 409 bool
423 help 410 help
@@ -503,4 +490,5 @@ config DEBUG_S3C_UART
503 default "2" if DEBUG_S3C_UART2 490 default "2" if DEBUG_S3C_UART2
504 default "3" if DEBUG_S3C_UART3 491 default "3" if DEBUG_S3C_UART3
505 492
493endmenu
506endif 494endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7295419165e1..a474de346be6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,8 +1,9 @@
1config ARM64 1config ARM64
2 def_bool y 2 def_bool y
3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
4 select ARCH_USE_CMPXCHG_LOCKREF 4 select ARCH_HAS_OPP
5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
6 select ARCH_USE_CMPXCHG_LOCKREF
6 select ARCH_WANT_OPTIONAL_GPIOLIB 7 select ARCH_WANT_OPTIONAL_GPIOLIB
7 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 8 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
8 select ARCH_WANT_FRAME_POINTERS 9 select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index 1247ca1200b1..6541962f5d70 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -24,3 +24,7 @@
24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ 24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
25 }; 25 };
26}; 26};
27
28&serial0 {
29 status = "ok";
30};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index c5f0a47a1375..40aa96ce13c4 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -273,8 +273,9 @@
273 }; 273 };
274 274
275 serial0: serial@1c020000 { 275 serial0: serial@1c020000 {
276 status = "disabled";
276 device_type = "serial"; 277 device_type = "serial";
277 compatible = "ns16550"; 278 compatible = "ns16550a";
278 reg = <0 0x1c020000 0x0 0x1000>; 279 reg = <0 0x1c020000 0x0 0x1000>;
279 reg-shift = <2>; 280 reg-shift = <2>;
280 clock-frequency = <10000000>; /* Updated by bootloader */ 281 clock-frequency = <10000000>; /* Updated by bootloader */
@@ -282,6 +283,39 @@
282 interrupts = <0x0 0x4c 0x4>; 283 interrupts = <0x0 0x4c 0x4>;
283 }; 284 };
284 285
286 serial1: serial@1c021000 {
287 status = "disabled";
288 device_type = "serial";
289 compatible = "ns16550a";
290 reg = <0 0x1c021000 0x0 0x1000>;
291 reg-shift = <2>;
292 clock-frequency = <10000000>; /* Updated by bootloader */
293 interrupt-parent = <&gic>;
294 interrupts = <0x0 0x4d 0x4>;
295 };
296
297 serial2: serial@1c022000 {
298 status = "disabled";
299 device_type = "serial";
300 compatible = "ns16550a";
301 reg = <0 0x1c022000 0x0 0x1000>;
302 reg-shift = <2>;
303 clock-frequency = <10000000>; /* Updated by bootloader */
304 interrupt-parent = <&gic>;
305 interrupts = <0x0 0x4e 0x4>;
306 };
307
308 serial3: serial@1c023000 {
309 status = "disabled";
310 device_type = "serial";
311 compatible = "ns16550a";
312 reg = <0 0x1c023000 0x0 0x1000>;
313 reg-shift = <2>;
314 clock-frequency = <10000000>; /* Updated by bootloader */
315 interrupt-parent = <&gic>;
316 interrupts = <0x0 0x4f 0x4>;
317 };
318
285 phy1: phy@1f21a000 { 319 phy1: phy@1f21a000 {
286 compatible = "apm,xgene-phy"; 320 compatible = "apm,xgene-phy";
287 reg = <0x0 0x1f21a000 0x0 0x100>; 321 reg = <0x0 0x1f21a000 0x0 0x100>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 157e1d8d9a47..3421f316f5dc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -6,9 +6,18 @@ CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 8CONFIG_BSD_PROCESS_ACCT_V3=y
9CONFIG_TASKSTATS=y
10CONFIG_TASK_DELAY_ACCT=y
11CONFIG_TASK_XACCT=y
12CONFIG_TASK_IO_ACCOUNTING=y
9CONFIG_IKCONFIG=y 13CONFIG_IKCONFIG=y
10CONFIG_IKCONFIG_PROC=y 14CONFIG_IKCONFIG_PROC=y
11CONFIG_LOG_BUF_SHIFT=14 15CONFIG_LOG_BUF_SHIFT=14
16CONFIG_RESOURCE_COUNTERS=y
17CONFIG_MEMCG=y
18CONFIG_MEMCG_SWAP=y
19CONFIG_MEMCG_KMEM=y
20CONFIG_CGROUP_HUGETLB=y
12# CONFIG_UTS_NS is not set 21# CONFIG_UTS_NS is not set
13# CONFIG_IPC_NS is not set 22# CONFIG_IPC_NS is not set
14# CONFIG_PID_NS is not set 23# CONFIG_PID_NS is not set
@@ -27,6 +36,7 @@ CONFIG_ARCH_VEXPRESS=y
27CONFIG_ARCH_XGENE=y 36CONFIG_ARCH_XGENE=y
28CONFIG_SMP=y 37CONFIG_SMP=y
29CONFIG_PREEMPT=y 38CONFIG_PREEMPT=y
39CONFIG_KSM=y
30CONFIG_TRANSPARENT_HUGEPAGE=y 40CONFIG_TRANSPARENT_HUGEPAGE=y
31CONFIG_CMA=y 41CONFIG_CMA=y
32CONFIG_CMDLINE="console=ttyAMA0" 42CONFIG_CMDLINE="console=ttyAMA0"
@@ -45,6 +55,7 @@ CONFIG_IP_PNP_BOOTP=y
45CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 55CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
46CONFIG_DEVTMPFS=y 56CONFIG_DEVTMPFS=y
47CONFIG_DMA_CMA=y 57CONFIG_DMA_CMA=y
58CONFIG_BLK_DEV_LOOP=y
48CONFIG_VIRTIO_BLK=y 59CONFIG_VIRTIO_BLK=y
49# CONFIG_SCSI_PROC_FS is not set 60# CONFIG_SCSI_PROC_FS is not set
50CONFIG_BLK_DEV_SD=y 61CONFIG_BLK_DEV_SD=y
@@ -53,6 +64,7 @@ CONFIG_ATA=y
53CONFIG_PATA_PLATFORM=y 64CONFIG_PATA_PLATFORM=y
54CONFIG_PATA_OF_PLATFORM=y 65CONFIG_PATA_OF_PLATFORM=y
55CONFIG_NETDEVICES=y 66CONFIG_NETDEVICES=y
67CONFIG_TUN=y
56CONFIG_SMC91X=y 68CONFIG_SMC91X=y
57CONFIG_SMSC911X=y 69CONFIG_SMSC911X=y
58# CONFIG_WLAN is not set 70# CONFIG_WLAN is not set
@@ -85,6 +97,8 @@ CONFIG_EXT3_FS=y
85# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 97# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
86# CONFIG_EXT3_FS_XATTR is not set 98# CONFIG_EXT3_FS_XATTR is not set
87CONFIG_EXT4_FS=y 99CONFIG_EXT4_FS=y
100CONFIG_FANOTIFY=y
101CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
88CONFIG_FUSE_FS=y 102CONFIG_FUSE_FS=y
89CONFIG_CUSE=y 103CONFIG_CUSE=y
90CONFIG_VFAT_FS=y 104CONFIG_VFAT_FS=y
@@ -104,6 +118,7 @@ CONFIG_DEBUG_KERNEL=y
104CONFIG_LOCKUP_DETECTOR=y 118CONFIG_LOCKUP_DETECTOR=y
105# CONFIG_SCHED_DEBUG is not set 119# CONFIG_SCHED_DEBUG is not set
106# CONFIG_FTRACE is not set 120# CONFIG_FTRACE is not set
121CONFIG_SECURITY=y
107CONFIG_CRYPTO_ANSI_CPRNG=y 122CONFIG_CRYPTO_ANSI_CPRNG=y
108CONFIG_ARM64_CRYPTO=y 123CONFIG_ARM64_CRYPTO=y
109CONFIG_CRYPTO_SHA1_ARM64_CE=y 124CONFIG_CRYPTO_SHA1_ARM64_CE=y
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index b9e6eaf41c9b..dc457015884e 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -3,14 +3,6 @@
3 * 3 *
4 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> 4 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
5 * 5 *
6 * Based on arch/x86/crypto/ghash-pmullni-intel_asm.S
7 *
8 * Copyright (c) 2009 Intel Corp.
9 * Author: Huang Ying <ying.huang@intel.com>
10 * Vinodh Gopal
11 * Erdinc Ozturk
12 * Deniz Karakoyunlu
13 *
14 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
16 * by the Free Software Foundation. 8 * by the Free Software Foundation.
@@ -19,13 +11,15 @@
19#include <linux/linkage.h> 11#include <linux/linkage.h>
20#include <asm/assembler.h> 12#include <asm/assembler.h>
21 13
22 DATA .req v0 14 SHASH .req v0
23 SHASH .req v1 15 SHASH2 .req v1
24 IN1 .req v2
25 T1 .req v2 16 T1 .req v2
26 T2 .req v3 17 T2 .req v3
27 T3 .req v4 18 MASK .req v4
28 VZR .req v5 19 XL .req v5
20 XM .req v6
21 XH .req v7
22 IN1 .req v7
29 23
30 .text 24 .text
31 .arch armv8-a+crypto 25 .arch armv8-a+crypto
@@ -35,61 +29,51 @@
35 * struct ghash_key const *k, const char *head) 29 * struct ghash_key const *k, const char *head)
36 */ 30 */
37ENTRY(pmull_ghash_update) 31ENTRY(pmull_ghash_update)
38 ld1 {DATA.16b}, [x1]
39 ld1 {SHASH.16b}, [x3] 32 ld1 {SHASH.16b}, [x3]
40 eor VZR.16b, VZR.16b, VZR.16b 33 ld1 {XL.16b}, [x1]
34 movi MASK.16b, #0xe1
35 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
36 shl MASK.2d, MASK.2d, #57
37 eor SHASH2.16b, SHASH2.16b, SHASH.16b
41 38
42 /* do the head block first, if supplied */ 39 /* do the head block first, if supplied */
43 cbz x4, 0f 40 cbz x4, 0f
44 ld1 {IN1.2d}, [x4] 41 ld1 {T1.2d}, [x4]
45 b 1f 42 b 1f
46 43
470: ld1 {IN1.2d}, [x2], #16 440: ld1 {T1.2d}, [x2], #16
48 sub w0, w0, #1 45 sub w0, w0, #1
491: ext IN1.16b, IN1.16b, IN1.16b, #8
50CPU_LE( rev64 IN1.16b, IN1.16b )
51 eor DATA.16b, DATA.16b, IN1.16b
52 46
53 /* multiply DATA by SHASH in GF(2^128) */ 471: /* multiply XL by SHASH in GF(2^128) */
54 ext T2.16b, DATA.16b, DATA.16b, #8 48CPU_LE( rev64 T1.16b, T1.16b )
55 ext T3.16b, SHASH.16b, SHASH.16b, #8
56 eor T2.16b, T2.16b, DATA.16b
57 eor T3.16b, T3.16b, SHASH.16b
58 49
59 pmull2 T1.1q, SHASH.2d, DATA.2d // a1 * b1 50 ext T2.16b, XL.16b, XL.16b, #8
60 pmull DATA.1q, SHASH.1d, DATA.1d // a0 * b0 51 ext IN1.16b, T1.16b, T1.16b, #8
61 pmull T2.1q, T2.1d, T3.1d // (a1 + a0)(b1 + b0) 52 eor T1.16b, T1.16b, T2.16b
62 eor T2.16b, T2.16b, T1.16b // (a0 * b1) + (a1 * b0) 53 eor XL.16b, XL.16b, IN1.16b
63 eor T2.16b, T2.16b, DATA.16b
64 54
65 ext T3.16b, VZR.16b, T2.16b, #8 55 pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
66 ext T2.16b, T2.16b, VZR.16b, #8 56 eor T1.16b, T1.16b, XL.16b
67 eor DATA.16b, DATA.16b, T3.16b 57 pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
68 eor T1.16b, T1.16b, T2.16b // <T1:DATA> is result of 58 pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
69 // carry-less multiplication
70 59
71 /* first phase of the reduction */ 60 ext T1.16b, XL.16b, XH.16b, #8
72 shl T3.2d, DATA.2d, #1 61 eor T2.16b, XL.16b, XH.16b
73 eor T3.16b, T3.16b, DATA.16b 62 eor XM.16b, XM.16b, T1.16b
74 shl T3.2d, T3.2d, #5 63 eor XM.16b, XM.16b, T2.16b
75 eor T3.16b, T3.16b, DATA.16b 64 pmull T2.1q, XL.1d, MASK.1d
76 shl T3.2d, T3.2d, #57
77 ext T2.16b, VZR.16b, T3.16b, #8
78 ext T3.16b, T3.16b, VZR.16b, #8
79 eor DATA.16b, DATA.16b, T2.16b
80 eor T1.16b, T1.16b, T3.16b
81 65
82 /* second phase of the reduction */ 66 mov XH.d[0], XM.d[1]
83 ushr T2.2d, DATA.2d, #5 67 mov XM.d[1], XL.d[0]
84 eor T2.16b, T2.16b, DATA.16b 68
85 ushr T2.2d, T2.2d, #1 69 eor XL.16b, XM.16b, T2.16b
86 eor T2.16b, T2.16b, DATA.16b 70 ext T2.16b, XL.16b, XL.16b, #8
87 ushr T2.2d, T2.2d, #1 71 pmull XL.1q, XL.1d, MASK.1d
88 eor T1.16b, T1.16b, T2.16b 72 eor T2.16b, T2.16b, XH.16b
89 eor DATA.16b, DATA.16b, T1.16b 73 eor XL.16b, XL.16b, T2.16b
90 74
91 cbnz w0, 0b 75 cbnz w0, 0b
92 76
93 st1 {DATA.16b}, [x1] 77 st1 {XL.16b}, [x1]
94 ret 78 ret
95ENDPROC(pmull_ghash_update) 79ENDPROC(pmull_ghash_update)
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index b92baf3f68c7..833ec1e3f3e9 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -67,11 +67,12 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
67 blocks = len / GHASH_BLOCK_SIZE; 67 blocks = len / GHASH_BLOCK_SIZE;
68 len %= GHASH_BLOCK_SIZE; 68 len %= GHASH_BLOCK_SIZE;
69 69
70 kernel_neon_begin_partial(6); 70 kernel_neon_begin_partial(8);
71 pmull_ghash_update(blocks, ctx->digest, src, key, 71 pmull_ghash_update(blocks, ctx->digest, src, key,
72 partial ? ctx->buf : NULL); 72 partial ? ctx->buf : NULL);
73 kernel_neon_end(); 73 kernel_neon_end();
74 src += blocks * GHASH_BLOCK_SIZE; 74 src += blocks * GHASH_BLOCK_SIZE;
75 partial = 0;
75 } 76 }
76 if (len) 77 if (len)
77 memcpy(ctx->buf + partial, src, len); 78 memcpy(ctx->buf + partial, src, len);
@@ -88,7 +89,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
88 89
89 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); 90 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
90 91
91 kernel_neon_begin_partial(6); 92 kernel_neon_begin_partial(8);
92 pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL); 93 pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
93 kernel_neon_end(); 94 kernel_neon_end();
94 } 95 }
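Editor's note: besides widening the preserved NEON register range from 6 to 8 (the rewritten assembly now uses v0-v7), the glue change adds `partial = 0;` after the block pass. That reset matters for the buffering in ghash_update(): once the previously buffered partial block has been consumed together with the full blocks, any leftover tail bytes must land at the start of ctx->buf rather than at the stale offset. The toy model below mirrors only that bookkeeping; the names and the 16-byte block size are illustrative and no real hashing is performed.

#include <stdio.h>
#include <string.h>

#define BLOCK 16                        /* stand-in for GHASH_BLOCK_SIZE */

struct toy_ctx {
        unsigned char buf[BLOCK];
        unsigned int partial;           /* valid bytes currently buffered */
        unsigned int blocks;            /* full blocks "hashed" so far    */
};

static void toy_update(struct toy_ctx *c, const unsigned char *src, unsigned int len)
{
        unsigned int partial = c->partial;

        if (partial + len >= BLOCK) {
                if (partial) {
                        unsigned int p = BLOCK - partial;  /* top up the buffered block */

                        memcpy(c->buf + partial, src, p);
                        src += p;
                        len -= p;
                        c->blocks++;
                }
                c->blocks += len / BLOCK;        /* whole blocks taken straight from src */
                src += (len / BLOCK) * BLOCK;
                len %= BLOCK;
                partial = 0;                     /* the upstream one-liner: start over   */
        }
        if (len)
                memcpy(c->buf + partial, src, len);
        c->partial = partial + len;
}

int main(void)
{
        struct toy_ctx c = { .partial = 0 };
        unsigned char data[40] = { 0 };

        toy_update(&c, data, 10);       /* buffers 10 bytes                  */
        toy_update(&c, data, 30);       /* 2 full blocks plus an 8-byte tail */
        printf("blocks=%u partial=%u\n", c.blocks, c.partial);   /* 2 and 8  */
        return 0;
}

Without the reset, the final memcpy() in this model would write at buf[10] and report 18 buffered bytes, overrunning the 16-byte buffer, which is the kind of situation the one-line fix avoids.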
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 42c7eecd2bb6..0b3fcf86e6ba 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -30,7 +30,6 @@ generic-y += msgbuf.h
30generic-y += mutex.h 30generic-y += mutex.h
31generic-y += pci.h 31generic-y += pci.h
32generic-y += poll.h 32generic-y += poll.h
33generic-y += posix_types.h
34generic-y += preempt.h 33generic-y += preempt.h
35generic-y += resource.h 34generic-y += resource.h
36generic-y += rwsem.h 35generic-y += rwsem.h
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 3a4572ec3273..dc82e52acdb3 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -26,8 +26,6 @@
26#include <xen/xen.h> 26#include <xen/xen.h>
27#include <asm/xen/hypervisor.h> 27#include <asm/xen/hypervisor.h>
28 28
29#define ARCH_HAS_DMA_GET_REQUIRED_MASK
30
31#define DMA_ERROR_CODE (~(dma_addr_t)0) 29#define DMA_ERROR_CODE (~(dma_addr_t)0)
32extern struct dma_map_ops *dma_ops; 30extern struct dma_map_ops *dma_ops;
33extern struct dma_map_ops coherent_swiotlb_dma_ops; 31extern struct dma_map_ops coherent_swiotlb_dma_ops;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 598cc384fc1c..579702086488 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -246,7 +246,7 @@ static inline pmd_t pte_pmd(pte_t pte)
246#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) 246#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
247#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) 247#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
248#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) 248#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
249#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK)) 249#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
250 250
251#define __HAVE_ARCH_PMD_WRITE 251#define __HAVE_ARCH_PMD_WRITE
252#define pmd_write(pmd) pte_write(pmd_pte(pmd)) 252#define pmd_write(pmd) pte_write(pmd_pte(pmd))
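Editor's note: the one-character pgtable.h fix is about a side effect. `pmd_val(pmd) &= ~PMD_TYPE_MASK` is a compound assignment that clobbers the macro argument, while `pmd_val(pmd) & ~PMD_TYPE_MASK` merely computes the value handed to __pmd(). A tiny user-space demonstration of the difference, with plain unsigned integers standing in for pmd_t:

#include <stdio.h>

#define TYPE_MASK 0x3u

/* Side-effecting version: clobbers its argument. */
#define mknotpresent_bad(x)  ((x) &= ~TYPE_MASK)
/* Pure version: leaves the argument untouched.   */
#define mknotpresent_good(x) ((x) & ~TYPE_MASK)

int main(void)
{
        unsigned int a = 0x1007, b = 0x1007;
        unsigned int ra = mknotpresent_bad(a);
        unsigned int rb = mknotpresent_good(b);

        printf("bad : result=0x%x, original now 0x%x\n", ra, a); /* a was modified  */
        printf("good: result=0x%x, original now 0x%x\n", rb, b); /* b is unchanged  */
        return 0;
}

With the old form, any caller that still used its pmd variable after pmd_mknotpresent() would silently see the already-cleared value.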
diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000000..7985ff60ca3f
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/posix_types.h
@@ -0,0 +1,10 @@
1#ifndef __ASM_POSIX_TYPES_H
2#define __ASM_POSIX_TYPES_H
3
4typedef unsigned short __kernel_old_uid_t;
5typedef unsigned short __kernel_old_gid_t;
6#define __kernel_old_uid_t __kernel_old_uid_t
7
8#include <asm-generic/posix_types.h>
9
10#endif /* __ASM_POSIX_TYPES_H */
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index b72cf405b3fe..ee469be1ae1d 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -58,7 +58,7 @@ struct fpsimd_context {
58 58
59struct esr_context { 59struct esr_context {
60 struct _aarch64_ctx head; 60 struct _aarch64_ctx head;
61 u64 esr; 61 __u64 esr;
62}; 62};
63 63
64#endif /* _UAPI__ASM_SIGCONTEXT_H */ 64#endif /* _UAPI__ASM_SIGCONTEXT_H */
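Editor's note: switching `u64` to `__u64` reflects sigcontext.h being a UAPI header; the double-underscore fixed-width types are the ones exported to user space via <linux/types.h>, whereas plain `u64` exists only inside the kernel. A small user-space sketch, assuming the distribution's kernel headers are installed; the struct name and the ESR value are arbitrary:

#include <stdio.h>
#include <linux/types.h>   /* exported UAPI types: __u8 .. __u64 */

struct demo_context {
        __u64 esr;         /* would not compile against the kernel-only 'u64' */
};

int main(void)
{
        struct demo_context c = { .esr = 0x96000046ULL };  /* arbitrary example value */

        printf("sizeof(__u64) = %zu, esr = %#llx\n",
               sizeof(__u64), (unsigned long long)c.esr);
        return 0;
}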
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b051871f2965..aa5f9fcbf9ee 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -205,7 +205,7 @@ ENDPROC(ftrace_graph_caller)
205 * 205 *
206 * Run ftrace_return_to_handler() before going back to parent. 206 * Run ftrace_return_to_handler() before going back to parent.
207 * @fp is checked against the value passed by ftrace_graph_caller() 207 * @fp is checked against the value passed by ftrace_graph_caller()
208 * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled. 208 * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
209 */ 209 */
210ENTRY(return_to_handler) 210ENTRY(return_to_handler)
211 str x0, [sp, #-16]! 211 str x0, [sp, #-16]!
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index bf017f4ffb4f..9ce04ba6bcb0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -279,7 +279,6 @@ el1_sp_pc:
279 */ 279 */
280 mrs x0, far_el1 280 mrs x0, far_el1
281 enable_dbg 281 enable_dbg
282 mov x1, x25
283 mov x2, sp 282 mov x2, sp
284 b do_sp_pc_abort 283 b do_sp_pc_abort
285el1_undef: 284el1_undef:
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 3e926b9c0641..9fde010c945f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -655,11 +655,16 @@ static int compat_gpr_get(struct task_struct *target,
655 reg = task_pt_regs(target)->regs[idx]; 655 reg = task_pt_regs(target)->regs[idx];
656 } 656 }
657 657
658 ret = copy_to_user(ubuf, &reg, sizeof(reg)); 658 if (kbuf) {
659 if (ret) 659 memcpy(kbuf, &reg, sizeof(reg));
660 break; 660 kbuf += sizeof(reg);
661 661 } else {
662 ubuf += sizeof(reg); 662 ret = copy_to_user(ubuf, &reg, sizeof(reg));
663 if (ret)
664 break;
665
666 ubuf += sizeof(reg);
667 }
663 } 668 }
664 669
665 return ret; 670 return ret;
@@ -689,11 +694,16 @@ static int compat_gpr_set(struct task_struct *target,
689 unsigned int idx = start + i; 694 unsigned int idx = start + i;
690 compat_ulong_t reg; 695 compat_ulong_t reg;
691 696
692 ret = copy_from_user(&reg, ubuf, sizeof(reg)); 697 if (kbuf) {
693 if (ret) 698 memcpy(&reg, kbuf, sizeof(reg));
694 return ret; 699 kbuf += sizeof(reg);
700 } else {
701 ret = copy_from_user(&reg, ubuf, sizeof(reg));
702 if (ret)
703 return ret;
695 704
696 ubuf += sizeof(reg); 705 ubuf += sizeof(reg);
706 }
697 707
698 switch (idx) { 708 switch (idx) {
699 case 15: 709 case 15:
@@ -827,6 +837,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
827 compat_ulong_t val) 837 compat_ulong_t val)
828{ 838{
829 int ret; 839 int ret;
840 mm_segment_t old_fs = get_fs();
830 841
831 if (off & 3 || off >= COMPAT_USER_SZ) 842 if (off & 3 || off >= COMPAT_USER_SZ)
832 return -EIO; 843 return -EIO;
@@ -834,10 +845,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
834 if (off >= sizeof(compat_elf_gregset_t)) 845 if (off >= sizeof(compat_elf_gregset_t))
835 return 0; 846 return 0;
836 847
848 set_fs(KERNEL_DS);
837 ret = copy_regset_from_user(tsk, &user_aarch32_view, 849 ret = copy_regset_from_user(tsk, &user_aarch32_view,
838 REGSET_COMPAT_GPR, off, 850 REGSET_COMPAT_GPR, off,
839 sizeof(compat_ulong_t), 851 sizeof(compat_ulong_t),
840 &val); 852 &val);
853 set_fs(old_fs);
854
841 return ret; 855 return ret;
842} 856}
843 857
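Editor's note: both ptrace.c hunks deal with the regset core handing compat_gpr_get()/compat_gpr_set() either a kernel buffer (kbuf) or a user buffer (ubuf); the kernel-buffer case must use memcpy() rather than copy_to_user()/copy_from_user(). A stripped-down model of that dual-buffer convention follows; the names are illustrative and this is not the kernel's regset API.

#include <stdio.h>
#include <string.h>

/* Pretend user copy: in the kernel this would be copy_to_user(). */
static int pretend_copy_to_user(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;                       /* 0 = success, like "0 bytes not copied" */
}

/* Emit one register into whichever destination was supplied. */
static int emit_reg(unsigned int reg, void **kbuf, void **ubuf)
{
        if (*kbuf) {                            /* kernel-space destination */
                memcpy(*kbuf, &reg, sizeof(reg));
                *kbuf = (char *)*kbuf + sizeof(reg);
                return 0;
        }
        if (pretend_copy_to_user(*ubuf, &reg, sizeof(reg)))
                return -1;                      /* -EFAULT in the kernel    */
        *ubuf = (char *)*ubuf + sizeof(reg);
        return 0;
}

int main(void)
{
        unsigned int kdst[4] = { 0 }, udst[4] = { 0 };
        void *k = kdst, *u = udst, *none = NULL;

        for (unsigned int i = 0; i < 4; i++)
                emit_reg(100 + i, &k, &u);      /* kbuf set: memcpy path   */
        for (unsigned int i = 0; i < 4; i++)
                emit_reg(200 + i, &none, &u);   /* no kbuf: "user" path    */

        printf("kernel buf: %u..%u, user buf: %u..%u\n",
               kdst[0], kdst[3], udst[0], udst[3]);
        return 0;
}

The set_fs(KERNEL_DS)/set_fs(old_fs) bracket added to compat_ptrace_write_user() serves the same end from the other direction: it lets a kernel pointer (&val) travel safely through a path that otherwise expects user memory.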
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 091d428d64ac..f43db8a69262 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -71,7 +71,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
71 /* 4GB maximum for 32-bit only capable devices */ 71 /* 4GB maximum for 32-bit only capable devices */
72 if (IS_ENABLED(CONFIG_ZONE_DMA)) { 72 if (IS_ENABLED(CONFIG_ZONE_DMA)) {
73 unsigned long max_dma_phys = 73 unsigned long max_dma_phys =
74 (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1); 74 (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
75 max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT)); 75 max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
76 zone_size[ZONE_DMA] = max_dma - min; 76 zone_size[ZONE_DMA] = max_dma - min;
77 } 77 }
@@ -126,6 +126,8 @@ static void arm64_memory_present(void)
126 126
127void __init arm64_memblock_init(void) 127void __init arm64_memblock_init(void)
128{ 128{
129 phys_addr_t dma_phys_limit = 0;
130
129 /* Register the kernel text, kernel data and initrd with memblock */ 131 /* Register the kernel text, kernel data and initrd with memblock */
130 memblock_reserve(__pa(_text), _end - _text); 132 memblock_reserve(__pa(_text), _end - _text);
131#ifdef CONFIG_BLK_DEV_INITRD 133#ifdef CONFIG_BLK_DEV_INITRD
@@ -141,7 +143,11 @@ void __init arm64_memblock_init(void)
141 memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE); 143 memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
142 144
143 early_init_fdt_scan_reserved_mem(); 145 early_init_fdt_scan_reserved_mem();
144 dma_contiguous_reserve(0); 146
147 /* 4GB maximum for 32-bit only capable devices */
148 if (IS_ENABLED(CONFIG_ZONE_DMA))
149 dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
150 dma_contiguous_reserve(dma_phys_limit);
145 151
146 memblock_allow_resize(); 152 memblock_allow_resize();
147 memblock_dump_all(); 153 memblock_dump_all();
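Editor's note: the init.c change moves the `+ 1` outside the dma_to_phys() call. One arithmetic difference, at least, shows up when the DMA address type is only 32 bits wide: DMA_BIT_MASK(32) is 0xffffffff, and adding 1 before the conversion wraps to 0, while converting the highest 32-bit address first and adding 1 in a wider physical-address type yields the intended 4 GB limit. The snippet below only illustrates that wrap-around under an assumed 32-bit dma_addr_t and a zero bus offset; the kernel's own motivation for the change is not spelled out in this hunk.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t toy_dma_addr_t;   /* assume a 32-bit DMA address type */

#define DMA_BIT_MASK32 ((toy_dma_addr_t)0xffffffffu)

/* Toy dma_to_phys() with a zero bus offset: phys == dma. */
static uint64_t toy_dma_to_phys(toy_dma_addr_t dma)
{
        return (uint64_t)dma;
}

int main(void)
{
        /* Old expression: the +1 wraps inside the 32-bit type.          */
        uint64_t old_limit = toy_dma_to_phys(DMA_BIT_MASK32 + 1);
        /* New expression: convert first, then add 1 as a 64-bit value.  */
        uint64_t new_limit = toy_dma_to_phys(DMA_BIT_MASK32) + 1;

        printf("old: %#llx  new: %#llx\n",
               (unsigned long long)old_limit, (unsigned long long)new_limit);
        /* prints: old: 0  new: 0x100000000 */
        return 0;
}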
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 1a871b78e570..344387a55406 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -242,7 +242,7 @@ struct ioc {
242 struct pci_dev *sac_only_dev; 242 struct pci_dev *sac_only_dev;
243}; 243};
244 244
245static struct ioc *ioc_list; 245static struct ioc *ioc_list, *ioc_found;
246static int reserve_sba_gart = 1; 246static int reserve_sba_gart = 1;
247 247
248static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); 248static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
@@ -1809,20 +1809,13 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
1809 { SX2000_IOC_ID, "sx2000", NULL }, 1809 { SX2000_IOC_ID, "sx2000", NULL },
1810}; 1810};
1811 1811
1812static struct ioc * 1812static void ioc_init(unsigned long hpa, struct ioc *ioc)
1813ioc_init(unsigned long hpa, void *handle)
1814{ 1813{
1815 struct ioc *ioc;
1816 struct ioc_iommu *info; 1814 struct ioc_iommu *info;
1817 1815
1818 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1819 if (!ioc)
1820 return NULL;
1821
1822 ioc->next = ioc_list; 1816 ioc->next = ioc_list;
1823 ioc_list = ioc; 1817 ioc_list = ioc;
1824 1818
1825 ioc->handle = handle;
1826 ioc->ioc_hpa = ioremap(hpa, 0x1000); 1819 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1827 1820
1828 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); 1821 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
@@ -1863,8 +1856,6 @@ ioc_init(unsigned long hpa, void *handle)
1863 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", 1856 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1864 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, 1857 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1865 hpa, ioc->iov_size >> 20, ioc->ibase); 1858 hpa, ioc->iov_size >> 20, ioc->ibase);
1866
1867 return ioc;
1868} 1859}
1869 1860
1870 1861
@@ -2031,22 +2022,21 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2031#endif 2022#endif
2032} 2023}
2033 2024
2034static int 2025static void acpi_sba_ioc_add(struct ioc *ioc)
2035acpi_sba_ioc_add(struct acpi_device *device,
2036 const struct acpi_device_id *not_used)
2037{ 2026{
2038 struct ioc *ioc; 2027 acpi_handle handle = ioc->handle;
2039 acpi_status status; 2028 acpi_status status;
2040 u64 hpa, length; 2029 u64 hpa, length;
2041 struct acpi_device_info *adi; 2030 struct acpi_device_info *adi;
2042 2031
2043 status = hp_acpi_csr_space(device->handle, &hpa, &length); 2032 ioc_found = ioc->next;
2033 status = hp_acpi_csr_space(handle, &hpa, &length);
2044 if (ACPI_FAILURE(status)) 2034 if (ACPI_FAILURE(status))
2045 return 1; 2035 goto err;
2046 2036
2047 status = acpi_get_object_info(device->handle, &adi); 2037 status = acpi_get_object_info(handle, &adi);
2048 if (ACPI_FAILURE(status)) 2038 if (ACPI_FAILURE(status))
2049 return 1; 2039 goto err;
2050 2040
2051 /* 2041 /*
2052 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI 2042 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
@@ -2067,13 +2057,13 @@ acpi_sba_ioc_add(struct acpi_device *device,
2067 if (!iovp_shift) 2057 if (!iovp_shift)
2068 iovp_shift = 12; 2058 iovp_shift = 12;
2069 2059
2070 ioc = ioc_init(hpa, device->handle); 2060 ioc_init(hpa, ioc);
2071 if (!ioc)
2072 return 1;
2073
2074 /* setup NUMA node association */ 2061 /* setup NUMA node association */
2075 sba_map_ioc_to_node(ioc, device->handle); 2062 sba_map_ioc_to_node(ioc, handle);
2076 return 0; 2063 return;
2064
2065 err:
2066 kfree(ioc);
2077} 2067}
2078 2068
2079static const struct acpi_device_id hp_ioc_iommu_device_ids[] = { 2069static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
@@ -2081,9 +2071,26 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2081 {"HWP0004", 0}, 2071 {"HWP0004", 0},
2082 {"", 0}, 2072 {"", 0},
2083}; 2073};
2074
2075static int acpi_sba_ioc_attach(struct acpi_device *device,
2076 const struct acpi_device_id *not_used)
2077{
2078 struct ioc *ioc;
2079
2080 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2081 if (!ioc)
2082 return -ENOMEM;
2083
2084 ioc->next = ioc_found;
2085 ioc_found = ioc;
2086 ioc->handle = device->handle;
2087 return 1;
2088}
2089
2090
2084static struct acpi_scan_handler acpi_sba_ioc_handler = { 2091static struct acpi_scan_handler acpi_sba_ioc_handler = {
2085 .ids = hp_ioc_iommu_device_ids, 2092 .ids = hp_ioc_iommu_device_ids,
2086 .attach = acpi_sba_ioc_add, 2093 .attach = acpi_sba_ioc_attach,
2087}; 2094};
2088 2095
2089static int __init acpi_sba_ioc_init_acpi(void) 2096static int __init acpi_sba_ioc_init_acpi(void)
@@ -2118,9 +2125,12 @@ sba_init(void)
2118#endif 2125#endif
2119 2126
2120 /* 2127 /*
2121 * ioc_list should be populated by the acpi_sba_ioc_handler's .attach() 2128 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
2122 * routine, but that only happens if acpi_scan_init() has already run. 2129 * routine, but that only happens if acpi_scan_init() has already run.
2123 */ 2130 */
2131 while (ioc_found)
2132 acpi_sba_ioc_add(ioc_found);
2133
2124 if (!ioc_list) { 2134 if (!ioc_list) {
2125#ifdef CONFIG_IA64_GENERIC 2135#ifdef CONFIG_IA64_GENERIC
2126 /* 2136 /*
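Editor's note: the sba_iommu.c rework splits discovery from initialisation. The ACPI .attach() callback now only allocates an ioc and pushes it onto the singly linked ioc_found list, and sba_init() later drains that list and performs the real setup. A generic, self-contained sketch of that defer-then-drain pattern, not the driver's actual code:

#include <stdio.h>
#include <stdlib.h>

struct item {
        struct item *next;
        int id;
};

static struct item *found;      /* filled during "discovery"          */
static struct item *ready;      /* filled during deferred "init" pass */

/* Discovery phase: just record the object, no heavy setup yet. */
static int discover(int id)
{
        struct item *it = calloc(1, sizeof(*it));

        if (!it)
                return -1;
        it->id = id;
        it->next = found;
        found = it;
        return 0;
}

/* Deferred phase: unlink each discovered item and finish initialising it. */
static void init_one(struct item *it)
{
        found = it->next;       /* pop from the pending list     */
        it->next = ready;       /* move to the initialised list  */
        ready = it;
        printf("initialised item %d\n", it->id);
}

int main(void)
{
        discover(1);
        discover(2);
        discover(3);

        while (found)           /* mirrors: while (ioc_found) acpi_sba_ioc_add(ioc_found); */
                init_one(found);
        return 0;
}

Deferring the work until the drain loop runs keeps the .attach() callback cheap and lets the expensive part happen once the rest of the subsystem is ready, which is the shape of the change above.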
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 8df022c43af7..fd09a10a2b53 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
45CONFIG_UNIXWARE_DISKLABEL=y 45CONFIG_UNIXWARE_DISKLABEL=y
46CONFIG_CFQ_GROUP_IOSCHED=y 46CONFIG_CFQ_GROUP_IOSCHED=y
47CONFIG_DEFAULT_DEADLINE=y 47CONFIG_DEFAULT_DEADLINE=y
48CONFIG_MARCH_Z9_109=y 48CONFIG_MARCH_Z196=y
49CONFIG_TUNE_ZEC12=y
49CONFIG_NR_CPUS=256 50CONFIG_NR_CPUS=256
50CONFIG_PREEMPT=y 51CONFIG_PREEMPT=y
51CONFIG_HZ_100=y 52CONFIG_HZ_100=y
@@ -240,7 +241,6 @@ CONFIG_IP_VS_PE_SIP=m
240CONFIG_NF_CONNTRACK_IPV4=m 241CONFIG_NF_CONNTRACK_IPV4=m
241# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 242# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
242CONFIG_NF_TABLES_IPV4=m 243CONFIG_NF_TABLES_IPV4=m
243CONFIG_NFT_REJECT_IPV4=m
244CONFIG_NFT_CHAIN_ROUTE_IPV4=m 244CONFIG_NFT_CHAIN_ROUTE_IPV4=m
245CONFIG_NFT_CHAIN_NAT_IPV4=m 245CONFIG_NFT_CHAIN_NAT_IPV4=m
246CONFIG_NF_TABLES_ARP=m 246CONFIG_NF_TABLES_ARP=m
@@ -456,6 +456,7 @@ CONFIG_TN3270_FS=y
456CONFIG_WATCHDOG=y 456CONFIG_WATCHDOG=y
457CONFIG_WATCHDOG_NOWAYOUT=y 457CONFIG_WATCHDOG_NOWAYOUT=y
458CONFIG_SOFT_WATCHDOG=m 458CONFIG_SOFT_WATCHDOG=m
459CONFIG_DIAG288_WATCHDOG=m
459# CONFIG_HID is not set 460# CONFIG_HID is not set
460# CONFIG_USB_SUPPORT is not set 461# CONFIG_USB_SUPPORT is not set
461CONFIG_INFINIBAND=m 462CONFIG_INFINIBAND=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index c81a74e3e25a..b061180d3544 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
45CONFIG_UNIXWARE_DISKLABEL=y 45CONFIG_UNIXWARE_DISKLABEL=y
46CONFIG_CFQ_GROUP_IOSCHED=y 46CONFIG_CFQ_GROUP_IOSCHED=y
47CONFIG_DEFAULT_DEADLINE=y 47CONFIG_DEFAULT_DEADLINE=y
48CONFIG_MARCH_Z9_109=y 48CONFIG_MARCH_Z196=y
49CONFIG_TUNE_ZEC12=y
49CONFIG_NR_CPUS=256 50CONFIG_NR_CPUS=256
50CONFIG_HZ_100=y 51CONFIG_HZ_100=y
51CONFIG_MEMORY_HOTPLUG=y 52CONFIG_MEMORY_HOTPLUG=y
@@ -238,7 +239,6 @@ CONFIG_IP_VS_PE_SIP=m
238CONFIG_NF_CONNTRACK_IPV4=m 239CONFIG_NF_CONNTRACK_IPV4=m
239# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 240# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
240CONFIG_NF_TABLES_IPV4=m 241CONFIG_NF_TABLES_IPV4=m
241CONFIG_NFT_REJECT_IPV4=m
242CONFIG_NFT_CHAIN_ROUTE_IPV4=m 242CONFIG_NFT_CHAIN_ROUTE_IPV4=m
243CONFIG_NFT_CHAIN_NAT_IPV4=m 243CONFIG_NFT_CHAIN_NAT_IPV4=m
244CONFIG_NF_TABLES_ARP=m 244CONFIG_NF_TABLES_ARP=m
@@ -453,6 +453,7 @@ CONFIG_TN3270_FS=y
453CONFIG_WATCHDOG=y 453CONFIG_WATCHDOG=y
454CONFIG_WATCHDOG_NOWAYOUT=y 454CONFIG_WATCHDOG_NOWAYOUT=y
455CONFIG_SOFT_WATCHDOG=m 455CONFIG_SOFT_WATCHDOG=m
456CONFIG_DIAG288_WATCHDOG=m
456# CONFIG_HID is not set 457# CONFIG_HID is not set
457# CONFIG_USB_SUPPORT is not set 458# CONFIG_USB_SUPPORT is not set
458CONFIG_INFINIBAND=m 459CONFIG_INFINIBAND=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index b5ba8fe1cc64..d279baa08014 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -43,7 +43,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
43CONFIG_UNIXWARE_DISKLABEL=y 43CONFIG_UNIXWARE_DISKLABEL=y
44CONFIG_CFQ_GROUP_IOSCHED=y 44CONFIG_CFQ_GROUP_IOSCHED=y
45CONFIG_DEFAULT_DEADLINE=y 45CONFIG_DEFAULT_DEADLINE=y
46CONFIG_MARCH_Z9_109=y 46CONFIG_MARCH_Z196=y
47CONFIG_TUNE_ZEC12=y
47CONFIG_NR_CPUS=256 48CONFIG_NR_CPUS=256
48CONFIG_HZ_100=y 49CONFIG_HZ_100=y
49CONFIG_MEMORY_HOTPLUG=y 50CONFIG_MEMORY_HOTPLUG=y
@@ -236,7 +237,6 @@ CONFIG_IP_VS_PE_SIP=m
236CONFIG_NF_CONNTRACK_IPV4=m 237CONFIG_NF_CONNTRACK_IPV4=m
237# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 238# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
238CONFIG_NF_TABLES_IPV4=m 239CONFIG_NF_TABLES_IPV4=m
239CONFIG_NFT_REJECT_IPV4=m
240CONFIG_NFT_CHAIN_ROUTE_IPV4=m 240CONFIG_NFT_CHAIN_ROUTE_IPV4=m
241CONFIG_NFT_CHAIN_NAT_IPV4=m 241CONFIG_NFT_CHAIN_NAT_IPV4=m
242CONFIG_NF_TABLES_ARP=m 242CONFIG_NF_TABLES_ARP=m
@@ -451,6 +451,7 @@ CONFIG_TN3270_FS=y
451CONFIG_WATCHDOG=y 451CONFIG_WATCHDOG=y
452CONFIG_WATCHDOG_NOWAYOUT=y 452CONFIG_WATCHDOG_NOWAYOUT=y
453CONFIG_SOFT_WATCHDOG=m 453CONFIG_SOFT_WATCHDOG=m
454CONFIG_DIAG288_WATCHDOG=m
454# CONFIG_HID is not set 455# CONFIG_HID is not set
455# CONFIG_USB_SUPPORT is not set 456# CONFIG_USB_SUPPORT is not set
456CONFIG_INFINIBAND=m 457CONFIG_INFINIBAND=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index cef073ca1f07..948e0e057a23 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -8,7 +8,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8CONFIG_PARTITION_ADVANCED=y 8CONFIG_PARTITION_ADVANCED=y
9CONFIG_IBM_PARTITION=y 9CONFIG_IBM_PARTITION=y
10CONFIG_DEFAULT_DEADLINE=y 10CONFIG_DEFAULT_DEADLINE=y
11CONFIG_MARCH_Z9_109=y 11CONFIG_MARCH_Z196=y
12CONFIG_TUNE_ZEC12=y
12# CONFIG_COMPAT is not set 13# CONFIG_COMPAT is not set
13CONFIG_NR_CPUS=2 14CONFIG_NR_CPUS=2
14# CONFIG_HOTPLUG_CPU is not set 15# CONFIG_HOTPLUG_CPU is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 4557cb7ffddf..2e56498a40df 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -135,8 +135,8 @@ CONFIG_PROVE_LOCKING=y
135CONFIG_LOCK_STAT=y 135CONFIG_LOCK_STAT=y
136CONFIG_DEBUG_LOCKDEP=y 136CONFIG_DEBUG_LOCKDEP=y
137CONFIG_DEBUG_ATOMIC_SLEEP=y 137CONFIG_DEBUG_ATOMIC_SLEEP=y
138CONFIG_DEBUG_WRITECOUNT=y
139CONFIG_DEBUG_LIST=y 138CONFIG_DEBUG_LIST=y
139CONFIG_DEBUG_PI_LIST=y
140CONFIG_DEBUG_SG=y 140CONFIG_DEBUG_SG=y
141CONFIG_DEBUG_NOTIFIERS=y 141CONFIG_DEBUG_NOTIFIERS=y
142CONFIG_PROVE_RCU=y 142CONFIG_PROVE_RCU=y
@@ -199,4 +199,10 @@ CONFIG_CRYPTO_SHA512_S390=m
199CONFIG_CRYPTO_DES_S390=m 199CONFIG_CRYPTO_DES_S390=m
200CONFIG_CRYPTO_AES_S390=m 200CONFIG_CRYPTO_AES_S390=m
201CONFIG_CRC7=m 201CONFIG_CRC7=m
202# CONFIG_XZ_DEC_X86 is not set
203# CONFIG_XZ_DEC_POWERPC is not set
204# CONFIG_XZ_DEC_IA64 is not set
205# CONFIG_XZ_DEC_ARM is not set
206# CONFIG_XZ_DEC_ARMTHUMB is not set
207# CONFIG_XZ_DEC_SPARC is not set
202CONFIG_CMM=m 208CONFIG_CMM=m
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c28f32a45af5..3815bfea1b2d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
33 33
34static inline void set_user_asce(struct mm_struct *mm) 34static inline void set_user_asce(struct mm_struct *mm)
35{ 35{
36 pgd_t *pgd = mm->pgd; 36 S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
37 37 if (current->thread.mm_segment.ar4)
38 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 38 __ctl_load(S390_lowcore.user_asce, 7, 7);
39 set_fs(current->thread.mm_segment);
40 set_cpu_flag(CIF_ASCE); 39 set_cpu_flag(CIF_ASCE);
41} 40}
42 41
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
70 /* Clear old ASCE by loading the kernel ASCE. */ 69 /* Clear old ASCE by loading the kernel ASCE. */
71 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 70 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
72 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 71 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
73 /* Delay loading of the new ASCE to control registers CR1 & CR7 */
74 set_cpu_flag(CIF_ASCE);
75 atomic_inc(&next->context.attach_count); 72 atomic_inc(&next->context.attach_count);
76 atomic_dec(&prev->context.attach_count); 73 atomic_dec(&prev->context.attach_count);
77 if (MACHINE_HAS_TLB_LC) 74 if (MACHINE_HAS_TLB_LC)
78 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 75 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
76 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
79} 77}
80 78
81#define finish_arch_post_lock_switch finish_arch_post_lock_switch 79#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
84 struct task_struct *tsk = current; 82 struct task_struct *tsk = current;
85 struct mm_struct *mm = tsk->mm; 83 struct mm_struct *mm = tsk->mm;
86 84
87 if (!mm) 85 load_kernel_asce();
88 return; 86 if (mm) {
89 preempt_disable(); 87 preempt_disable();
90 while (atomic_read(&mm->context.attach_count) >> 16) 88 while (atomic_read(&mm->context.attach_count) >> 16)
91 cpu_relax(); 89 cpu_relax();
92 90
93 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 91 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
94 set_user_asce(mm); 92 if (mm->context.flush_mm)
95 if (mm->context.flush_mm) 93 __tlb_flush_mm(mm);
96 __tlb_flush_mm(mm); 94 preempt_enable();
97 preempt_enable(); 95 }
96 set_fs(current->thread.mm_segment);
98} 97}
99 98
100#define enter_lazy_tlb(mm,tsk) do { } while (0) 99#define enter_lazy_tlb(mm,tsk) do { } while (0)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..df38c70cd59e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
134 prev = __switch_to(prev,next); \ 134 prev = __switch_to(prev,next); \
135} while (0) 135} while (0)
136 136
137#define finish_arch_switch(prev) do { \
138 set_fs(current->thread.mm_segment); \
139} while (0)
140
141#endif /* __ASM_SWITCH_TO_H */ 137#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h
index 200e06325c6a..3e077b2a4705 100644
--- a/arch/s390/include/uapi/asm/ucontext.h
+++ b/arch/s390/include/uapi/asm/ucontext.h
@@ -16,7 +16,9 @@ struct ucontext_extended {
16 struct ucontext *uc_link; 16 struct ucontext *uc_link;
17 stack_t uc_stack; 17 stack_t uc_stack;
18 _sigregs uc_mcontext; 18 _sigregs uc_mcontext;
19 unsigned long uc_sigmask[2]; 19 sigset_t uc_sigmask;
20 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
21 unsigned char __unused[128 - sizeof(sigset_t)];
20 unsigned long uc_gprs_high[16]; 22 unsigned long uc_gprs_high[16];
21}; 23};
22 24
@@ -27,7 +29,9 @@ struct ucontext {
27 struct ucontext *uc_link; 29 struct ucontext *uc_link;
28 stack_t uc_stack; 30 stack_t uc_stack;
29 _sigregs uc_mcontext; 31 _sigregs uc_mcontext;
30 sigset_t uc_sigmask; /* mask last for extensibility */ 32 sigset_t uc_sigmask;
33 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
34 unsigned char __unused[128 - sizeof(sigset_t)];
31}; 35};
32 36
33#endif /* !_ASM_S390_UCONTEXT_H */ 37#endif /* !_ASM_S390_UCONTEXT_H */
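Editor's note: the ucontext changes reserve room after uc_sigmask so the structure keeps space for a 1024-bit signal set (128 bytes), the size glibc uses, independent of how large the kernel's sigset_t happens to be; the `__unused` pad absorbs the difference. A quick user-space check of that arithmetic, using a stand-in signal-set type whose size is purely illustrative:

#include <stdio.h>

/* Stand-in signal-set type; the real kernel sigset_t may differ in size. */
typedef struct {
        unsigned long long sig[2];
} toy_sigset_t;

struct toy_ucontext_tail {
        toy_sigset_t  uc_sigmask;
        /* Pad the field out to 128 bytes, the size of a 1024-bit set. */
        unsigned char pad[128 - sizeof(toy_sigset_t)];
};

int main(void)
{
        printf("sizeof(toy_sigset_t) = %zu\n", sizeof(toy_sigset_t));             /* 16  */
        printf("mask + pad           = %zu\n", sizeof(struct toy_ucontext_tail)); /* 128 */
        return 0;
}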
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 39ddfdb40ae8..70d4b7c4beaa 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -69,7 +69,9 @@ struct ucontext32 {
69 __u32 uc_link; /* pointer */ 69 __u32 uc_link; /* pointer */
70 compat_stack_t uc_stack; 70 compat_stack_t uc_stack;
71 _sigregs32 uc_mcontext; 71 _sigregs32 uc_mcontext;
72 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 72 compat_sigset_t uc_sigmask;
73 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
74 unsigned char __unused[128 - sizeof(compat_sigset_t)];
73}; 75};
74 76
75struct stat64_emu31; 77struct stat64_emu31;
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 503e6d96ad4e..df922f52d76d 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -124,7 +124,7 @@ extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
124 u64 *output, unsigned int len, 124 u64 *output, unsigned int len,
125 u64 *iv); 125 u64 *iv);
126 126
127struct aes_ops aes128_ops = { 127static struct aes_ops aes128_ops = {
128 .encrypt = aes_sparc64_encrypt_128, 128 .encrypt = aes_sparc64_encrypt_128,
129 .decrypt = aes_sparc64_decrypt_128, 129 .decrypt = aes_sparc64_decrypt_128,
130 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_128, 130 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_128,
@@ -136,7 +136,7 @@ struct aes_ops aes128_ops = {
136 .ctr_crypt = aes_sparc64_ctr_crypt_128, 136 .ctr_crypt = aes_sparc64_ctr_crypt_128,
137}; 137};
138 138
139struct aes_ops aes192_ops = { 139static struct aes_ops aes192_ops = {
140 .encrypt = aes_sparc64_encrypt_192, 140 .encrypt = aes_sparc64_encrypt_192,
141 .decrypt = aes_sparc64_decrypt_192, 141 .decrypt = aes_sparc64_decrypt_192,
142 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_192, 142 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_192,
@@ -148,7 +148,7 @@ struct aes_ops aes192_ops = {
148 .ctr_crypt = aes_sparc64_ctr_crypt_192, 148 .ctr_crypt = aes_sparc64_ctr_crypt_192,
149}; 149};
150 150
151struct aes_ops aes256_ops = { 151static struct aes_ops aes256_ops = {
152 .encrypt = aes_sparc64_encrypt_256, 152 .encrypt = aes_sparc64_encrypt_256,
153 .decrypt = aes_sparc64_decrypt_256, 153 .decrypt = aes_sparc64_decrypt_256,
154 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_256, 154 .load_encrypt_keys = aes_sparc64_load_encrypt_keys_256,
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index f08fe51b264d..7aed2be45b44 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,11 +20,11 @@
20 20
21#define ATOMIC_INIT(i) { (i) } 21#define ATOMIC_INIT(i) { (i) }
22 22
23extern int __atomic_add_return(int, atomic_t *); 23int __atomic_add_return(int, atomic_t *);
24extern int atomic_cmpxchg(atomic_t *, int, int); 24int atomic_cmpxchg(atomic_t *, int, int);
25#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 25#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
26extern int __atomic_add_unless(atomic_t *, int, int); 26int __atomic_add_unless(atomic_t *, int, int);
27extern void atomic_set(atomic_t *, int); 27void atomic_set(atomic_t *, int);
28 28
29#define atomic_read(v) (*(volatile int *)&(v)->counter) 29#define atomic_read(v) (*(volatile int *)&(v)->counter)
30 30
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 8b2f1bde2889..bb894c8bec56 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -20,15 +20,15 @@
20#define atomic_set(v, i) (((v)->counter) = i) 20#define atomic_set(v, i) (((v)->counter) = i)
21#define atomic64_set(v, i) (((v)->counter) = i) 21#define atomic64_set(v, i) (((v)->counter) = i)
22 22
23extern void atomic_add(int, atomic_t *); 23void atomic_add(int, atomic_t *);
24extern void atomic64_add(long, atomic64_t *); 24void atomic64_add(long, atomic64_t *);
25extern void atomic_sub(int, atomic_t *); 25void atomic_sub(int, atomic_t *);
26extern void atomic64_sub(long, atomic64_t *); 26void atomic64_sub(long, atomic64_t *);
27 27
28extern int atomic_add_ret(int, atomic_t *); 28int atomic_add_ret(int, atomic_t *);
29extern long atomic64_add_ret(long, atomic64_t *); 29long atomic64_add_ret(long, atomic64_t *);
30extern int atomic_sub_ret(int, atomic_t *); 30int atomic_sub_ret(int, atomic_t *);
31extern long atomic64_sub_ret(long, atomic64_t *); 31long atomic64_sub_ret(long, atomic64_t *);
32 32
33#define atomic_dec_return(v) atomic_sub_ret(1, v) 33#define atomic_dec_return(v) atomic_sub_ret(1, v)
34#define atomic64_dec_return(v) atomic64_sub_ret(1, v) 34#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -107,6 +107,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
107 107
108#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 108#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
109 109
110extern long atomic64_dec_if_positive(atomic64_t *v); 110long atomic64_dec_if_positive(atomic64_t *v);
111 111
112#endif /* !(__ARCH_SPARC64_ATOMIC__) */ 112#endif /* !(__ARCH_SPARC64_ATOMIC__) */
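Editor's note: this hunk, like the sparc auxio/bitext/bitops ones that follow, drops the redundant `extern` keyword from function prototypes. For functions (unlike objects) `extern` is implied on declarations, so the two spellings are interchangeable and only the header style changes. A short demonstration:

#include <stdio.h>

/* These two declarations are equivalent: 'extern' is implicit for functions. */
extern int add_one(int x);
int add_one(int x);             /* same linkage, no 'extern' keyword needed */

int add_one(int x)
{
        return x + 1;
}

int main(void)
{
        printf("%d\n", add_one(41));    /* 42 */
        return 0;
}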
diff --git a/arch/sparc/include/asm/auxio.h b/arch/sparc/include/asm/auxio.h
index 13dc67f03011..3e09a07b77e9 100644
--- a/arch/sparc/include/asm/auxio.h
+++ b/arch/sparc/include/asm/auxio.h
@@ -1,5 +1,12 @@
1#ifndef ___ASM_SPARC_AUXIO_H 1#ifndef ___ASM_SPARC_AUXIO_H
2#define ___ASM_SPARC_AUXIO_H 2#define ___ASM_SPARC_AUXIO_H
3
4#ifndef __ASSEMBLY__
5
6extern void __iomem *auxio_register;
7
8#endif /* ifndef __ASSEMBLY__ */
9
3#if defined(__sparc__) && defined(__arch64__) 10#if defined(__sparc__) && defined(__arch64__)
4#include <asm/auxio_64.h> 11#include <asm/auxio_64.h>
5#else 12#else
diff --git a/arch/sparc/include/asm/auxio_32.h b/arch/sparc/include/asm/auxio_32.h
index 3a319775ae37..5d685df427b4 100644
--- a/arch/sparc/include/asm/auxio_32.h
+++ b/arch/sparc/include/asm/auxio_32.h
@@ -34,8 +34,8 @@
34 * NOTE: these routines are implementation dependent-- 34 * NOTE: these routines are implementation dependent--
35 * understand the hardware you are querying! 35 * understand the hardware you are querying!
36 */ 36 */
37extern void set_auxio(unsigned char bits_on, unsigned char bits_off); 37void set_auxio(unsigned char bits_on, unsigned char bits_off);
38extern unsigned char get_auxio(void); /* .../asm/floppy.h */ 38unsigned char get_auxio(void); /* .../asm/floppy.h */
39 39
40/* 40/*
41 * The following routines are provided for driver-compatibility 41 * The following routines are provided for driver-compatibility
@@ -78,7 +78,7 @@ do { \
78 78
79 79
80/* AUXIO2 (Power Off Control) */ 80/* AUXIO2 (Power Off Control) */
81extern __volatile__ unsigned char * auxio_power_register; 81extern volatile u8 __iomem *auxio_power_register;
82 82
83#define AUXIO_POWER_DETECT_FAILURE 32 83#define AUXIO_POWER_DETECT_FAILURE 32
84#define AUXIO_POWER_CLEAR_FAILURE 2 84#define AUXIO_POWER_CLEAR_FAILURE 2
diff --git a/arch/sparc/include/asm/auxio_64.h b/arch/sparc/include/asm/auxio_64.h
index f61cd1e3e395..6079e59a7ad1 100644
--- a/arch/sparc/include/asm/auxio_64.h
+++ b/arch/sparc/include/asm/auxio_64.h
@@ -75,8 +75,6 @@
75 75
76#ifndef __ASSEMBLY__ 76#ifndef __ASSEMBLY__
77 77
78extern void __iomem *auxio_register;
79
80#define AUXIO_LTE_ON 1 78#define AUXIO_LTE_ON 1
81#define AUXIO_LTE_OFF 0 79#define AUXIO_LTE_OFF 0
82 80
@@ -84,7 +82,7 @@ extern void __iomem *auxio_register;
84 * 82 *
85 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF 83 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
86 */ 84 */
87extern void auxio_set_lte(int on); 85void auxio_set_lte(int on);
88 86
89#define AUXIO_LED_ON 1 87#define AUXIO_LED_ON 1
90#define AUXIO_LED_OFF 0 88#define AUXIO_LED_OFF 0
@@ -93,7 +91,7 @@ extern void auxio_set_lte(int on);
93 * 91 *
94 * on - AUXIO_LED_ON or AUXIO_LED_OFF 92 * on - AUXIO_LED_ON or AUXIO_LED_OFF
95 */ 93 */
96extern void auxio_set_led(int on); 94void auxio_set_led(int on);
97 95
98#endif /* ifndef __ASSEMBLY__ */ 96#endif /* ifndef __ASSEMBLY__ */
99 97
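The two auxio hunks above move the auxio_register declaration out of the 64-bit header and into the common wrapper, guarded by #ifndef __ASSEMBLY__ so assembly files that include asm/auxio.h still build, while the word-size specific parts stay in the _32/_64 variants. A minimal sketch of that wrapper-header layout, with hypothetical file and symbol names, purely for illustration:

    /* example.h -- common wrapper header */
    #ifndef _EXAMPLE_H
    #define _EXAMPLE_H

    #ifndef __ASSEMBLY__
    extern void __iomem *example_register;	/* C-only declaration shared by both variants */
    #endif /* !__ASSEMBLY__ */

    #if defined(__sparc__) && defined(__arch64__)
    #include "example_64.h"		/* 64-bit specifics */
    #else
    #include "example_32.h"		/* 32-bit specifics */
    #endif

    #endif /* _EXAMPLE_H */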
diff --git a/arch/sparc/include/asm/bitext.h b/arch/sparc/include/asm/bitext.h
index 297b2f2fcb49..9c988bf3adb6 100644
--- a/arch/sparc/include/asm/bitext.h
+++ b/arch/sparc/include/asm/bitext.h
@@ -20,8 +20,8 @@ struct bit_map {
20 int num_colors; 20 int num_colors;
21}; 21};
22 22
23extern int bit_map_string_get(struct bit_map *t, int len, int align); 23int bit_map_string_get(struct bit_map *t, int len, int align);
24extern void bit_map_clear(struct bit_map *t, int offset, int len); 24void bit_map_clear(struct bit_map *t, int offset, int len);
25extern void bit_map_init(struct bit_map *t, unsigned long *map, int size); 25void bit_map_init(struct bit_map *t, unsigned long *map, int size);
26 26
27#endif /* defined(_SPARC_BITEXT_H) */ 27#endif /* defined(_SPARC_BITEXT_H) */
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 88c9a962502c..600ed1d9c8c8 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -18,9 +18,9 @@
18#error only <linux/bitops.h> can be included directly 18#error only <linux/bitops.h> can be included directly
19#endif 19#endif
20 20
21extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask); 21unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
22extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); 22unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
23extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask); 23unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
24 24
25/* 25/*
26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' 26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index f1a051ca301a..2d522402a937 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -15,12 +15,12 @@
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16#include <asm/barrier.h> 16#include <asm/barrier.h>
17 17
18extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); 18int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
19extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); 19int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
20extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr); 20int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
21extern void set_bit(unsigned long nr, volatile unsigned long *addr); 21void set_bit(unsigned long nr, volatile unsigned long *addr);
22extern void clear_bit(unsigned long nr, volatile unsigned long *addr); 22void clear_bit(unsigned long nr, volatile unsigned long *addr);
23extern void change_bit(unsigned long nr, volatile unsigned long *addr); 23void change_bit(unsigned long nr, volatile unsigned long *addr);
24 24
25#include <asm-generic/bitops/non-atomic.h> 25#include <asm-generic/bitops/non-atomic.h>
26 26
@@ -30,8 +30,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32 32
33extern int ffs(int x); 33int ffs(int x);
34extern unsigned long __ffs(unsigned long); 34unsigned long __ffs(unsigned long);
35 35
36#include <asm-generic/bitops/ffz.h> 36#include <asm-generic/bitops/ffz.h>
37#include <asm-generic/bitops/sched.h> 37#include <asm-generic/bitops/sched.h>
@@ -41,10 +41,10 @@ extern unsigned long __ffs(unsigned long);
41 * of bits set) of a N-bit word 41 * of bits set) of a N-bit word
42 */ 42 */
43 43
44extern unsigned long __arch_hweight64(__u64 w); 44unsigned long __arch_hweight64(__u64 w);
45extern unsigned int __arch_hweight32(unsigned int w); 45unsigned int __arch_hweight32(unsigned int w);
46extern unsigned int __arch_hweight16(unsigned int w); 46unsigned int __arch_hweight16(unsigned int w);
47extern unsigned int __arch_hweight8(unsigned int w); 47unsigned int __arch_hweight8(unsigned int w);
48 48
49#include <asm-generic/bitops/const_hweight.h> 49#include <asm-generic/bitops/const_hweight.h>
50#include <asm-generic/bitops/lock.h> 50#include <asm-generic/bitops/lock.h>
diff --git a/arch/sparc/include/asm/btext.h b/arch/sparc/include/asm/btext.h
index 9b2bc6b6ed0a..75a32b109e15 100644
--- a/arch/sparc/include/asm/btext.h
+++ b/arch/sparc/include/asm/btext.h
@@ -1,6 +1,6 @@
1#ifndef _SPARC_BTEXT_H 1#ifndef _SPARC_BTEXT_H
2#define _SPARC_BTEXT_H 2#define _SPARC_BTEXT_H
3 3
4extern int btext_find_display(void); 4int btext_find_display(void);
5 5
6#endif /* _SPARC_BTEXT_H */ 6#endif /* _SPARC_BTEXT_H */
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index 6bd9f43cb5a5..eaa8f8d38125 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -5,7 +5,7 @@
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6 6
7#ifdef CONFIG_DEBUG_BUGVERBOSE 7#ifdef CONFIG_DEBUG_BUGVERBOSE
8extern void do_BUG(const char *file, int line); 8void do_BUG(const char *file, int line);
9#define BUG() do { \ 9#define BUG() do { \
10 do_BUG(__FILE__, __LINE__); \ 10 do_BUG(__FILE__, __LINE__); \
11 __builtin_trap(); \ 11 __builtin_trap(); \
@@ -20,6 +20,6 @@ extern void do_BUG(const char *file, int line);
20#include <asm-generic/bug.h> 20#include <asm-generic/bug.h>
21 21
22struct pt_regs; 22struct pt_regs;
23extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn)); 23void __noreturn die_if_kernel(char *str, struct pt_regs *regs);
24 24
25#endif 25#endif
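The bug.h hunk also switches die_if_kernel() from a trailing __attribute__((noreturn)) to the kernel's __noreturn marker placed before the name; both spellings tell the compiler the call never returns. A user-space sketch of why the annotation is useful (illustrative only; the #define below mirrors what the kernel macro roughly expands to):

    #include <stdio.h>
    #include <stdlib.h>

    #define __noreturn __attribute__((__noreturn__))

    static void __noreturn fail(const char *msg)
    {
    	fprintf(stderr, "fatal: %s\n", msg);
    	exit(EXIT_FAILURE);
    }

    static int checked_div(int a, int b)
    {
    	if (b == 0)
    		fail("division by zero");
    	return a / b;	/* no "missing return" warning: fail() cannot return */
    }

    int main(void)
    {
    	printf("%d\n", checked_div(10, 2));
    	return 0;
    }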
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index bb014c24f318..12164006181c 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -36,7 +36,7 @@
36#define flush_page_for_dma(addr) \ 36#define flush_page_for_dma(addr) \
37 sparc32_cachetlb_ops->page_for_dma(addr) 37 sparc32_cachetlb_ops->page_for_dma(addr)
38 38
39extern void sparc_flush_page_to_ram(struct page *page); 39void sparc_flush_page_to_ram(struct page *page);
40 40
41#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 41#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
42#define flush_dcache_page(page) sparc_flush_page_to_ram(page) 42#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
@@ -51,8 +51,8 @@ extern void sparc_flush_page_to_ram(struct page *page);
51 * way the windows are all clean for the next process and the stack 51 * way the windows are all clean for the next process and the stack
52 * frames are up to date. 52 * frames are up to date.
53 */ 53 */
54extern void flush_user_windows(void); 54void flush_user_windows(void);
55extern void kill_user_windows(void); 55void kill_user_windows(void);
56extern void flushw_all(void); 56void flushw_all(void);
57 57
58#endif /* _SPARC_CACHEFLUSH_H */ 58#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 301736d9e7a1..38965379e350 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -10,7 +10,7 @@
10/* Cache flush operations. */ 10/* Cache flush operations. */
11#define flushw_all() __asm__ __volatile__("flushw") 11#define flushw_all() __asm__ __volatile__("flushw")
12 12
13extern void __flushw_user(void); 13void __flushw_user(void);
14#define flushw_user() __flushw_user() 14#define flushw_user() __flushw_user()
15 15
16#define flush_user_windows flushw_user 16#define flush_user_windows flushw_user
@@ -30,29 +30,29 @@ extern void __flushw_user(void);
30 * use block commit stores (which invalidate icache lines) during 30 * use block commit stores (which invalidate icache lines) during
31 * module load, so we need this. 31 * module load, so we need this.
32 */ 32 */
33extern void flush_icache_range(unsigned long start, unsigned long end); 33void flush_icache_range(unsigned long start, unsigned long end);
34extern void __flush_icache_page(unsigned long); 34void __flush_icache_page(unsigned long);
35 35
36extern void __flush_dcache_page(void *addr, int flush_icache); 36void __flush_dcache_page(void *addr, int flush_icache);
37extern void flush_dcache_page_impl(struct page *page); 37void flush_dcache_page_impl(struct page *page);
38#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
39extern void smp_flush_dcache_page_impl(struct page *page, int cpu); 39void smp_flush_dcache_page_impl(struct page *page, int cpu);
40extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page); 40void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
41#else 41#else
42#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page) 42#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
43#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page) 43#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
44#endif 44#endif
45 45
46extern void __flush_dcache_range(unsigned long start, unsigned long end); 46void __flush_dcache_range(unsigned long start, unsigned long end);
47#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 47#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
48extern void flush_dcache_page(struct page *page); 48void flush_dcache_page(struct page *page);
49 49
50#define flush_icache_page(vma, pg) do { } while(0) 50#define flush_icache_page(vma, pg) do { } while(0)
51#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) 51#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
52 52
53extern void flush_ptrace_access(struct vm_area_struct *, struct page *, 53void flush_ptrace_access(struct vm_area_struct *, struct page *,
54 unsigned long uaddr, void *kaddr, 54 unsigned long uaddr, void *kaddr,
55 unsigned long len, int write); 55 unsigned long len, int write);
56 56
57#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 57#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
58 do { \ 58 do { \
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index 04471dc64847..426b2389a1c2 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -29,7 +29,7 @@
29 * 29 *
30 * it's best to have buff aligned on a 32-bit boundary 30 * it's best to have buff aligned on a 32-bit boundary
31 */ 31 */
32extern __wsum csum_partial(const void *buff, int len, __wsum sum); 32__wsum csum_partial(const void *buff, int len, __wsum sum);
33 33
34/* the same as csum_partial, but copies from fs:src while it 34/* the same as csum_partial, but copies from fs:src while it
35 * checksums 35 * checksums
@@ -38,7 +38,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
38 * better 64-bit) boundary 38 * better 64-bit) boundary
39 */ 39 */
40 40
41extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); 41unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
42 42
43static inline __wsum 43static inline __wsum
44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) 44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 2ff81ae8f3af..b8779a6a5911 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -29,7 +29,7 @@
29 * 29 *
30 * it's best to have buff aligned on a 32-bit boundary 30 * it's best to have buff aligned on a 32-bit boundary
31 */ 31 */
32extern __wsum csum_partial(const void * buff, int len, __wsum sum); 32__wsum csum_partial(const void * buff, int len, __wsum sum);
33 33
34/* the same as csum_partial, but copies from user space while it 34/* the same as csum_partial, but copies from user space while it
35 * checksums 35 * checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
37 * here even more important to align src and dst on a 32-bit (or even 37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary 38 * better 64-bit) boundary
39 */ 39 */
40extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, 40__wsum csum_partial_copy_nocheck(const void *src, void *dst,
41 int len, __wsum sum); 41 int len, __wsum sum);
42 42
43extern long __csum_partial_copy_from_user(const void __user *src, 43long __csum_partial_copy_from_user(const void __user *src,
44 void *dst, int len, 44 void *dst, int len,
45 __wsum sum); 45 __wsum sum);
46 46
47static inline __wsum 47static inline __wsum
48csum_partial_copy_from_user(const void __user *src, 48csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
59 * Copy and checksum to user 59 * Copy and checksum to user
60 */ 60 */
61#define HAVE_CSUM_COPY_USER 61#define HAVE_CSUM_COPY_USER
62extern long __csum_partial_copy_to_user(const void *src, 62long __csum_partial_copy_to_user(const void *src,
63 void __user *dst, int len, 63 void __user *dst, int len,
64 __wsum sum); 64 __wsum sum);
65 65
66static inline __wsum 66static inline __wsum
67csum_and_copy_to_user(const void *src, 67csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
77/* ihl is always 5 or greater, almost always is 5, and iph is word aligned 77/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
78 * the majority of the time. 78 * the majority of the time.
79 */ 79 */
80extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 80__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
81 81
82/* Fold a partial checksum without adding pseudo headers. */ 82/* Fold a partial checksum without adding pseudo headers. */
83static inline __sum16 csum_fold(__wsum sum) 83static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
96} 96}
97 97
98static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 98static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
99 unsigned int len, 99 unsigned int len,
100 unsigned short proto, 100 unsigned short proto,
101 __wsum sum) 101 __wsum sum)
102{ 102{
103 __asm__ __volatile__( 103 __asm__ __volatile__(
104" addcc %1, %0, %0\n" 104" addcc %1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
116 * returns a 16-bit checksum, already complemented 116 * returns a 16-bit checksum, already complemented
117 */ 117 */
118static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 118static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
119 unsigned short len, 119 unsigned short len,
120 unsigned short proto, 120 unsigned short proto,
121 __wsum sum) 121 __wsum sum)
122{ 122{
123 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 123 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
124} 124}
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 1fae1a02e3c2..32c29a133f9d 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -20,7 +20,7 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
20 return val; 20 return val;
21} 21}
22 22
23extern void __xchg_called_with_bad_pointer(void); 23void __xchg_called_with_bad_pointer(void);
24 24
25static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) 25static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
26{ 26{
@@ -45,9 +45,9 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
45#define __HAVE_ARCH_CMPXCHG 1 45#define __HAVE_ARCH_CMPXCHG 1
46 46
47/* bug catcher for when unsupported size is used - won't link */ 47/* bug catcher for when unsupported size is used - won't link */
48extern void __cmpxchg_called_with_bad_pointer(void); 48void __cmpxchg_called_with_bad_pointer(void);
49/* we only need to support cmpxchg of a u32 on sparc */ 49/* we only need to support cmpxchg of a u32 on sparc */
50extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); 50unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
51 51
52/* don't worry...optimizer will get rid of most of this */ 52/* don't worry...optimizer will get rid of most of this */
53static inline unsigned long 53static inline unsigned long
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 4adefe8e2885..0e1ed6cfbf68 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -42,7 +42,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
42 42
43#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 43#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
44 44
45extern void __xchg_called_with_bad_pointer(void); 45void __xchg_called_with_bad_pointer(void);
46 46
47static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, 47static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
48 int size) 48 int size)
@@ -91,7 +91,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
91 91
92/* This function doesn't exist, so you'll get a linker error 92/* This function doesn't exist, so you'll get a linker error
93 if something tries to do an invalid cmpxchg(). */ 93 if something tries to do an invalid cmpxchg(). */
94extern void __cmpxchg_called_with_bad_pointer(void); 94void __cmpxchg_called_with_bad_pointer(void);
95 95
96static inline unsigned long 96static inline unsigned long
97__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) 97__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
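The __xchg_called_with_bad_pointer() and __cmpxchg_called_with_bad_pointer() helpers above are deliberately declared but never defined: if an unsupported operand size ever reaches them, the build fails at link time instead of misbehaving at run time. A user-space rendition of the trick, with hypothetical names; like the kernel version it relies on the dead branch being folded away, as it is in a normal optimized build:

    #include <stdio.h>

    void __example_bad_size(void);	/* intentionally has no definition anywhere */

    #define example_check(ptr)					\
    	(sizeof(*(ptr)) == sizeof(int) ? (void)0		\
    				       : __example_bad_size())

    int main(void)
    {
    	int ok = 1;
    	short bad = 2;

    	example_check(&ok);		/* size matches: no reference is emitted */
    	/* example_check(&bad); */	/* would fail to link: undefined reference */
    	printf("%d %d\n", ok, bad);
    	return 0;
    }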
diff --git a/arch/sparc/include/asm/cpudata.h b/arch/sparc/include/asm/cpudata.h
index b5976de7cacd..128b56b08676 100644
--- a/arch/sparc/include/asm/cpudata.h
+++ b/arch/sparc/include/asm/cpudata.h
@@ -1,5 +1,15 @@
1#ifndef ___ASM_SPARC_CPUDATA_H 1#ifndef ___ASM_SPARC_CPUDATA_H
2#define ___ASM_SPARC_CPUDATA_H 2#define ___ASM_SPARC_CPUDATA_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/threads.h>
7#include <linux/percpu.h>
8
9extern const struct seq_operations cpuinfo_op;
10
11#endif /* !(__ASSEMBLY__) */
12
3#if defined(__sparc__) && defined(__arch64__) 13#if defined(__sparc__) && defined(__arch64__)
4#include <asm/cpudata_64.h> 14#include <asm/cpudata_64.h>
5#else 15#else
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 050ef35b9dcf..0e594076912c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -8,9 +8,6 @@
8 8
9#ifndef __ASSEMBLY__ 9#ifndef __ASSEMBLY__
10 10
11#include <linux/percpu.h>
12#include <linux/threads.h>
13
14typedef struct { 11typedef struct {
15 /* Dcache line 1 */ 12 /* Dcache line 1 */
16 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ 13 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
@@ -35,8 +32,6 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
35#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) 32#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
36#define local_cpu_data() __get_cpu_var(__cpu_data) 33#define local_cpu_data() __get_cpu_var(__cpu_data)
37 34
38extern const struct seq_operations cpuinfo_op;
39
40#endif /* !(__ASSEMBLY__) */ 35#endif /* !(__ASSEMBLY__) */
41 36
42#include <asm/trap_block.h> 37#include <asm/trap_block.h>
diff --git a/arch/sparc/include/asm/delay_32.h b/arch/sparc/include/asm/delay_32.h
index bc9aba2bead6..3fb8ca144b4f 100644
--- a/arch/sparc/include/asm/delay_32.h
+++ b/arch/sparc/include/asm/delay_32.h
@@ -20,8 +20,8 @@ static inline void __delay(unsigned long loops)
20} 20}
21 21
22/* This is too messy with inline asm on the Sparc. */ 22/* This is too messy with inline asm on the Sparc. */
23extern void __udelay(unsigned long usecs, unsigned long lpj); 23void __udelay(unsigned long usecs, unsigned long lpj);
24extern void __ndelay(unsigned long nsecs, unsigned long lpj); 24void __ndelay(unsigned long nsecs, unsigned long lpj);
25 25
26#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
27#define __udelay_val cpu_data(smp_processor_id()).udelay_val 27#define __udelay_val cpu_data(smp_processor_id()).udelay_val
diff --git a/arch/sparc/include/asm/delay_64.h b/arch/sparc/include/asm/delay_64.h
index a77aa622d762..0ba5424856d8 100644
--- a/arch/sparc/include/asm/delay_64.h
+++ b/arch/sparc/include/asm/delay_64.h
@@ -8,8 +8,8 @@
8 8
9#ifndef __ASSEMBLY__ 9#ifndef __ASSEMBLY__
10 10
11extern void __delay(unsigned long loops); 11void __delay(unsigned long loops);
12extern void udelay(unsigned long usecs); 12void udelay(unsigned long usecs);
13#define mdelay(n) udelay((n) * 1000) 13#define mdelay(n) udelay((n) * 1000)
14 14
15#endif /* !__ASSEMBLY__ */ 15#endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index daa6a8a5e9cd..bb3f0b0c6754 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -19,7 +19,7 @@ struct dev_archdata {
19 int numa_node; 19 int numa_node;
20}; 20};
21 21
22extern void of_propagate_archdata(struct platform_device *bus); 22void of_propagate_archdata(struct platform_device *bus);
23 23
24struct pdev_archdata { 24struct pdev_archdata {
25 struct resource resource[PROMREG_MAX]; 25 struct resource resource[PROMREG_MAX];
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 05fe53f5346e..1ee02710b2dc 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -7,7 +7,7 @@
7 7
8#define DMA_ERROR_CODE (~(dma_addr_t)0x0) 8#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
9 9
10extern int dma_supported(struct device *dev, u64 mask); 10int dma_supported(struct device *dev, u64 mask);
11 11
12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/arch/sparc/include/asm/ebus_dma.h b/arch/sparc/include/asm/ebus_dma.h
index f07a5b541c98..fcfb4948147f 100644
--- a/arch/sparc/include/asm/ebus_dma.h
+++ b/arch/sparc/include/asm/ebus_dma.h
@@ -22,14 +22,14 @@ struct ebus_dma_info {
22 unsigned char name[64]; 22 unsigned char name[64];
23}; 23};
24 24
25extern int ebus_dma_register(struct ebus_dma_info *p); 25int ebus_dma_register(struct ebus_dma_info *p);
26extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on); 26int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
27extern void ebus_dma_unregister(struct ebus_dma_info *p); 27void ebus_dma_unregister(struct ebus_dma_info *p);
28extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, 28int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
29 size_t len); 29 size_t len);
30extern void ebus_dma_prepare(struct ebus_dma_info *p, int write); 30void ebus_dma_prepare(struct ebus_dma_info *p, int write);
31extern unsigned int ebus_dma_residue(struct ebus_dma_info *p); 31unsigned int ebus_dma_residue(struct ebus_dma_info *p);
32extern unsigned int ebus_dma_addr(struct ebus_dma_info *p); 32unsigned int ebus_dma_addr(struct ebus_dma_info *p);
33extern void ebus_dma_enable(struct ebus_dma_info *p, int on); 33void ebus_dma_enable(struct ebus_dma_info *p, int on);
34 34
35#endif /* __ASM_SPARC_EBUS_DMA_H */ 35#endif /* __ASM_SPARC_EBUS_DMA_H */
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index fb3f16954c69..071b83e52f15 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -9,11 +9,12 @@
9#include <linux/of.h> 9#include <linux/of.h>
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11 11
12#include <asm/page.h>
13#include <asm/pgtable.h> 12#include <asm/pgtable.h>
14#include <asm/idprom.h> 13#include <asm/idprom.h>
15#include <asm/oplib.h> 14#include <asm/oplib.h>
16#include <asm/auxio.h> 15#include <asm/auxio.h>
16#include <asm/setup.h>
17#include <asm/page.h>
17#include <asm/irq.h> 18#include <asm/irq.h>
18 19
19/* We don't need no stinkin' I/O port allocation crap. */ 20/* We don't need no stinkin' I/O port allocation crap. */
@@ -49,7 +50,6 @@ struct sun_flpy_controller {
49 50
50/* You'll only ever find one controller on a SparcStation anyways. */ 51/* You'll only ever find one controller on a SparcStation anyways. */
51static struct sun_flpy_controller *sun_fdc = NULL; 52static struct sun_flpy_controller *sun_fdc = NULL;
52extern volatile unsigned char *fdc_status;
53 53
54struct sun_floppy_ops { 54struct sun_floppy_ops {
55 unsigned char (*fd_inb)(int port); 55 unsigned char (*fd_inb)(int port);
@@ -212,13 +212,6 @@ static void sun_82077_fd_outb(unsigned char value, int port)
212 * underruns. If non-zero, doing_pdma encodes the direction of 212 * underruns. If non-zero, doing_pdma encodes the direction of
213 * the transfer for debugging. 1=read 2=write 213 * the transfer for debugging. 1=read 2=write
214 */ 214 */
215extern char *pdma_vaddr;
216extern unsigned long pdma_size;
217extern volatile int doing_pdma;
218
219/* This is software state */
220extern char *pdma_base;
221extern unsigned long pdma_areasize;
222 215
223/* Common routines to all controller types on the Sparc. */ 216/* Common routines to all controller types on the Sparc. */
224static inline void virtual_dma_init(void) 217static inline void virtual_dma_init(void)
@@ -263,8 +256,7 @@ static inline void sun_fd_enable_dma(void)
263 pdma_areasize = pdma_size; 256 pdma_areasize = pdma_size;
264} 257}
265 258
266extern int sparc_floppy_request_irq(unsigned int irq, 259int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
267 irq_handler_t irq_handler);
268 260
269static int sun_fd_request_irq(void) 261static int sun_fd_request_irq(void)
270{ 262{
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 7c90c50c200d..625756406a7e 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -296,7 +296,7 @@ struct sun_pci_dma_op {
296static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL}; 296static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
297static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL}; 297static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
298 298
299extern irqreturn_t floppy_interrupt(int irq, void *dev_id); 299irqreturn_t floppy_interrupt(int irq, void *dev_id);
300 300
301static unsigned char sun_pci_fd_inb(unsigned long port) 301static unsigned char sun_pci_fd_inb(unsigned long port)
302{ 302{
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index b0f18e9893db..9ec94ad116fb 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -6,7 +6,7 @@
6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
7 7
8#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
9extern void _mcount(void); 9void _mcount(void);
10#endif 10#endif
11 11
12#endif 12#endif
@@ -22,4 +22,8 @@ struct dyn_arch_ftrace {
22}; 22};
23#endif /* CONFIG_DYNAMIC_FTRACE */ 23#endif /* CONFIG_DYNAMIC_FTRACE */
24 24
25unsigned long prepare_ftrace_return(unsigned long parent,
26 unsigned long self_addr,
27 unsigned long frame_pointer);
28
25#endif /* _ASM_SPARC64_FTRACE */ 29#endif /* _ASM_SPARC64_FTRACE */
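The new prepare_ftrace_return() prototype gives the C definition a visible declaration even though its only caller is the mcount assembly stub; that is presumably what keeps sparse and -Wmissing-prototypes from complaining about it. A generic illustration of the same situation, with hypothetical names:

    /* header shared with the assembly caller's authors */
    unsigned long asm_called_hook(unsigned long parent, unsigned long self);

    /* C file: without the declaration above, -Wmissing-prototypes would warn,
     * because no prototype is otherwise visible at the definition.
     */
    unsigned long asm_called_hook(unsigned long parent, unsigned long self)
    {
    	return parent ^ self;	/* placeholder work */
    }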
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 4f9e15c757e2..92ded294a4ec 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -31,7 +31,7 @@ extern unsigned long highstart_pfn, highend_pfn;
31extern pgprot_t kmap_prot; 31extern pgprot_t kmap_prot;
32extern pte_t *pkmap_page_table; 32extern pte_t *pkmap_page_table;
33 33
34extern void kmap_init(void) __init; 34void kmap_init(void) __init;
35 35
36/* 36/*
37 * Right now we initialize only a single pte table. It can be extended 37 * Right now we initialize only a single pte table. It can be extended
@@ -49,8 +49,8 @@ extern void kmap_init(void) __init;
49 49
50#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP)) 50#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
51 51
52extern void *kmap_high(struct page *page); 52void *kmap_high(struct page *page);
53extern void kunmap_high(struct page *page); 53void kunmap_high(struct page *page);
54 54
55static inline void *kmap(struct page *page) 55static inline void *kmap(struct page *page)
56{ 56{
@@ -68,8 +68,8 @@ static inline void kunmap(struct page *page)
68 kunmap_high(page); 68 kunmap_high(page);
69} 69}
70 70
71extern void *kmap_atomic(struct page *page); 71void *kmap_atomic(struct page *page);
72extern void __kunmap_atomic(void *kvaddr); 72void __kunmap_atomic(void *kvaddr);
73 73
74#define flush_cache_kmaps() flush_cache_all() 74#define flush_cache_kmaps() flush_cache_all()
75 75
diff --git a/arch/sparc/include/asm/hvtramp.h b/arch/sparc/include/asm/hvtramp.h
index b2b9b947b3a4..04b56f862bbe 100644
--- a/arch/sparc/include/asm/hvtramp.h
+++ b/arch/sparc/include/asm/hvtramp.h
@@ -19,7 +19,7 @@ struct hvtramp_descr {
19 struct hvtramp_mapping maps[1]; 19 struct hvtramp_mapping maps[1];
20}; 20};
21 21
22extern void hv_cpu_startup(unsigned long hvdescr_pa); 22void hv_cpu_startup(unsigned long hvdescr_pa);
23 23
24#endif 24#endif
25 25
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index ca121f0fa3ec..94b39caea3eb 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -98,7 +98,7 @@
98#define HV_FAST_MACH_EXIT 0x00 98#define HV_FAST_MACH_EXIT 0x00
99 99
100#ifndef __ASSEMBLY__ 100#ifndef __ASSEMBLY__
101extern void sun4v_mach_exit(unsigned long exit_code); 101void sun4v_mach_exit(unsigned long exit_code);
102#endif 102#endif
103 103
104/* Domain services. */ 104/* Domain services. */
@@ -127,9 +127,9 @@ extern void sun4v_mach_exit(unsigned long exit_code);
127#define HV_FAST_MACH_DESC 0x01 127#define HV_FAST_MACH_DESC 0x01
128 128
129#ifndef __ASSEMBLY__ 129#ifndef __ASSEMBLY__
130extern unsigned long sun4v_mach_desc(unsigned long buffer_pa, 130unsigned long sun4v_mach_desc(unsigned long buffer_pa,
131 unsigned long buf_len, 131 unsigned long buf_len,
132 unsigned long *real_buf_len); 132 unsigned long *real_buf_len);
133#endif 133#endif
134 134
135/* mach_sir() 135/* mach_sir()
@@ -148,7 +148,7 @@ extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
148#define HV_FAST_MACH_SIR 0x02 148#define HV_FAST_MACH_SIR 0x02
149 149
150#ifndef __ASSEMBLY__ 150#ifndef __ASSEMBLY__
151extern void sun4v_mach_sir(void); 151void sun4v_mach_sir(void);
152#endif 152#endif
153 153
154/* mach_set_watchdog() 154/* mach_set_watchdog()
@@ -204,8 +204,8 @@ extern void sun4v_mach_sir(void);
204#define HV_FAST_MACH_SET_WATCHDOG 0x05 204#define HV_FAST_MACH_SET_WATCHDOG 0x05
205 205
206#ifndef __ASSEMBLY__ 206#ifndef __ASSEMBLY__
207extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout, 207unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
208 unsigned long *orig_timeout); 208 unsigned long *orig_timeout);
209#endif 209#endif
210 210
211/* CPU services. 211/* CPU services.
@@ -250,10 +250,10 @@ extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
250#define HV_FAST_CPU_START 0x10 250#define HV_FAST_CPU_START 0x10
251 251
252#ifndef __ASSEMBLY__ 252#ifndef __ASSEMBLY__
253extern unsigned long sun4v_cpu_start(unsigned long cpuid, 253unsigned long sun4v_cpu_start(unsigned long cpuid,
254 unsigned long pc, 254 unsigned long pc,
255 unsigned long rtba, 255 unsigned long rtba,
256 unsigned long arg0); 256 unsigned long arg0);
257#endif 257#endif
258 258
259/* cpu_stop() 259/* cpu_stop()
@@ -278,7 +278,7 @@ extern unsigned long sun4v_cpu_start(unsigned long cpuid,
278#define HV_FAST_CPU_STOP 0x11 278#define HV_FAST_CPU_STOP 0x11
279 279
280#ifndef __ASSEMBLY__ 280#ifndef __ASSEMBLY__
281extern unsigned long sun4v_cpu_stop(unsigned long cpuid); 281unsigned long sun4v_cpu_stop(unsigned long cpuid);
282#endif 282#endif
283 283
284/* cpu_yield() 284/* cpu_yield()
@@ -295,7 +295,7 @@ extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
295#define HV_FAST_CPU_YIELD 0x12 295#define HV_FAST_CPU_YIELD 0x12
296 296
297#ifndef __ASSEMBLY__ 297#ifndef __ASSEMBLY__
298extern unsigned long sun4v_cpu_yield(void); 298unsigned long sun4v_cpu_yield(void);
299#endif 299#endif
300 300
301/* cpu_qconf() 301/* cpu_qconf()
@@ -341,9 +341,9 @@ extern unsigned long sun4v_cpu_yield(void);
341#define HV_CPU_QUEUE_NONRES_ERROR 0x3f 341#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
342 342
343#ifndef __ASSEMBLY__ 343#ifndef __ASSEMBLY__
344extern unsigned long sun4v_cpu_qconf(unsigned long type, 344unsigned long sun4v_cpu_qconf(unsigned long type,
345 unsigned long queue_paddr, 345 unsigned long queue_paddr,
346 unsigned long num_queue_entries); 346 unsigned long num_queue_entries);
347#endif 347#endif
348 348
349/* cpu_qinfo() 349/* cpu_qinfo()
@@ -394,7 +394,9 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
394#define HV_FAST_CPU_MONDO_SEND 0x42 394#define HV_FAST_CPU_MONDO_SEND 0x42
395 395
396#ifndef __ASSEMBLY__ 396#ifndef __ASSEMBLY__
397extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa); 397unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
398 unsigned long cpu_list_pa,
399 unsigned long mondo_block_pa);
398#endif 400#endif
399 401
400/* cpu_myid() 402/* cpu_myid()
@@ -425,7 +427,7 @@ extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long
425#define HV_CPU_STATE_ERROR 0x03 427#define HV_CPU_STATE_ERROR 0x03
426 428
427#ifndef __ASSEMBLY__ 429#ifndef __ASSEMBLY__
428extern long sun4v_cpu_state(unsigned long cpuid); 430long sun4v_cpu_state(unsigned long cpuid);
429#endif 431#endif
430 432
431/* cpu_set_rtba() 433/* cpu_set_rtba()
@@ -625,8 +627,8 @@ struct hv_fault_status {
625#define HV_FAST_MMU_TSB_CTX0 0x20 627#define HV_FAST_MMU_TSB_CTX0 0x20
626 628
627#ifndef __ASSEMBLY__ 629#ifndef __ASSEMBLY__
628extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions, 630unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
629 unsigned long tsb_desc_ra); 631 unsigned long tsb_desc_ra);
630#endif 632#endif
631 633
632/* mmu_tsb_ctxnon0() 634/* mmu_tsb_ctxnon0()
@@ -710,7 +712,7 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
710#define HV_FAST_MMU_DEMAP_ALL 0x24 712#define HV_FAST_MMU_DEMAP_ALL 0x24
711 713
712#ifndef __ASSEMBLY__ 714#ifndef __ASSEMBLY__
713extern void sun4v_mmu_demap_all(void); 715void sun4v_mmu_demap_all(void);
714#endif 716#endif
715 717
716/* mmu_map_perm_addr() 718/* mmu_map_perm_addr()
@@ -740,10 +742,10 @@ extern void sun4v_mmu_demap_all(void);
740#define HV_FAST_MMU_MAP_PERM_ADDR 0x25 742#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
741 743
742#ifndef __ASSEMBLY__ 744#ifndef __ASSEMBLY__
743extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr, 745unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
744 unsigned long set_to_zero, 746 unsigned long set_to_zero,
745 unsigned long tte, 747 unsigned long tte,
746 unsigned long flags); 748 unsigned long flags);
747#endif 749#endif
748 750
749/* mmu_fault_area_conf() 751/* mmu_fault_area_conf()
@@ -945,7 +947,7 @@ extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
945#define HV_FAST_TOD_GET 0x50 947#define HV_FAST_TOD_GET 0x50
946 948
947#ifndef __ASSEMBLY__ 949#ifndef __ASSEMBLY__
948extern unsigned long sun4v_tod_get(unsigned long *time); 950unsigned long sun4v_tod_get(unsigned long *time);
949#endif 951#endif
950 952
951/* tod_set() 953/* tod_set()
@@ -962,7 +964,7 @@ extern unsigned long sun4v_tod_get(unsigned long *time);
962#define HV_FAST_TOD_SET 0x51 964#define HV_FAST_TOD_SET 0x51
963 965
964#ifndef __ASSEMBLY__ 966#ifndef __ASSEMBLY__
965extern unsigned long sun4v_tod_set(unsigned long time); 967unsigned long sun4v_tod_set(unsigned long time);
966#endif 968#endif
967 969
968/* Console services */ 970/* Console services */
@@ -1038,14 +1040,14 @@ extern unsigned long sun4v_tod_set(unsigned long time);
1038#define HV_FAST_CONS_WRITE 0x63 1040#define HV_FAST_CONS_WRITE 0x63
1039 1041
1040#ifndef __ASSEMBLY__ 1042#ifndef __ASSEMBLY__
1041extern long sun4v_con_getchar(long *status); 1043long sun4v_con_getchar(long *status);
1042extern long sun4v_con_putchar(long c); 1044long sun4v_con_putchar(long c);
1043extern long sun4v_con_read(unsigned long buffer, 1045long sun4v_con_read(unsigned long buffer,
1044 unsigned long size, 1046 unsigned long size,
1045 unsigned long *bytes_read); 1047 unsigned long *bytes_read);
1046extern unsigned long sun4v_con_write(unsigned long buffer, 1048unsigned long sun4v_con_write(unsigned long buffer,
1047 unsigned long size, 1049 unsigned long size,
1048 unsigned long *bytes_written); 1050 unsigned long *bytes_written);
1049#endif 1051#endif
1050 1052
1051/* mach_set_soft_state() 1053/* mach_set_soft_state()
@@ -1080,8 +1082,8 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
1080#define HV_SOFT_STATE_TRANSITION 0x02 1082#define HV_SOFT_STATE_TRANSITION 0x02
1081 1083
1082#ifndef __ASSEMBLY__ 1084#ifndef __ASSEMBLY__
1083extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state, 1085unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
1084 unsigned long msg_string_ra); 1086 unsigned long msg_string_ra);
1085#endif 1087#endif
1086 1088
1087/* mach_get_soft_state() 1089/* mach_get_soft_state()
@@ -1159,20 +1161,20 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
1159#define HV_FAST_SVC_CLRSTATUS 0x84 1161#define HV_FAST_SVC_CLRSTATUS 0x84
1160 1162
1161#ifndef __ASSEMBLY__ 1163#ifndef __ASSEMBLY__
1162extern unsigned long sun4v_svc_send(unsigned long svc_id, 1164unsigned long sun4v_svc_send(unsigned long svc_id,
1163 unsigned long buffer, 1165 unsigned long buffer,
1164 unsigned long buffer_size, 1166 unsigned long buffer_size,
1165 unsigned long *sent_bytes); 1167 unsigned long *sent_bytes);
1166extern unsigned long sun4v_svc_recv(unsigned long svc_id, 1168unsigned long sun4v_svc_recv(unsigned long svc_id,
1167 unsigned long buffer, 1169 unsigned long buffer,
1168 unsigned long buffer_size, 1170 unsigned long buffer_size,
1169 unsigned long *recv_bytes); 1171 unsigned long *recv_bytes);
1170extern unsigned long sun4v_svc_getstatus(unsigned long svc_id, 1172unsigned long sun4v_svc_getstatus(unsigned long svc_id,
1171 unsigned long *status_bits); 1173 unsigned long *status_bits);
1172extern unsigned long sun4v_svc_setstatus(unsigned long svc_id, 1174unsigned long sun4v_svc_setstatus(unsigned long svc_id,
1173 unsigned long status_bits); 1175 unsigned long status_bits);
1174extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id, 1176unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
1175 unsigned long status_bits); 1177 unsigned long status_bits);
1176#endif 1178#endif
1177 1179
1178/* Trap trace services. 1180/* Trap trace services.
@@ -1458,8 +1460,8 @@ struct hv_trap_trace_entry {
1458#define HV_FAST_INTR_DEVINO2SYSINO 0xa0 1460#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
1459 1461
1460#ifndef __ASSEMBLY__ 1462#ifndef __ASSEMBLY__
1461extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, 1463unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
1462 unsigned long devino); 1464 unsigned long devino);
1463#endif 1465#endif
1464 1466
1465/* intr_getenabled() 1467/* intr_getenabled()
@@ -1476,7 +1478,7 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
1476#define HV_FAST_INTR_GETENABLED 0xa1 1478#define HV_FAST_INTR_GETENABLED 0xa1
1477 1479
1478#ifndef __ASSEMBLY__ 1480#ifndef __ASSEMBLY__
1479extern unsigned long sun4v_intr_getenabled(unsigned long sysino); 1481unsigned long sun4v_intr_getenabled(unsigned long sysino);
1480#endif 1482#endif
1481 1483
1482/* intr_setenabled() 1484/* intr_setenabled()
@@ -1492,7 +1494,8 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
1492#define HV_FAST_INTR_SETENABLED 0xa2 1494#define HV_FAST_INTR_SETENABLED 0xa2
1493 1495
1494#ifndef __ASSEMBLY__ 1496#ifndef __ASSEMBLY__
1495extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); 1497unsigned long sun4v_intr_setenabled(unsigned long sysino,
1498 unsigned long intr_enabled);
1496#endif 1499#endif
1497 1500
1498/* intr_getstate() 1501/* intr_getstate()
@@ -1508,7 +1511,7 @@ extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long i
1508#define HV_FAST_INTR_GETSTATE 0xa3 1511#define HV_FAST_INTR_GETSTATE 0xa3
1509 1512
1510#ifndef __ASSEMBLY__ 1513#ifndef __ASSEMBLY__
1511extern unsigned long sun4v_intr_getstate(unsigned long sysino); 1514unsigned long sun4v_intr_getstate(unsigned long sysino);
1512#endif 1515#endif
1513 1516
1514/* intr_setstate() 1517/* intr_setstate()
@@ -1528,7 +1531,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino);
1528#define HV_FAST_INTR_SETSTATE 0xa4 1531#define HV_FAST_INTR_SETSTATE 0xa4
1529 1532
1530#ifndef __ASSEMBLY__ 1533#ifndef __ASSEMBLY__
1531extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); 1534unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
1532#endif 1535#endif
1533 1536
1534/* intr_gettarget() 1537/* intr_gettarget()
@@ -1546,7 +1549,7 @@ extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long int
1546#define HV_FAST_INTR_GETTARGET 0xa5 1549#define HV_FAST_INTR_GETTARGET 0xa5
1547 1550
1548#ifndef __ASSEMBLY__ 1551#ifndef __ASSEMBLY__
1549extern unsigned long sun4v_intr_gettarget(unsigned long sysino); 1552unsigned long sun4v_intr_gettarget(unsigned long sysino);
1550#endif 1553#endif
1551 1554
1552/* intr_settarget() 1555/* intr_settarget()
@@ -1563,7 +1566,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
1563#define HV_FAST_INTR_SETTARGET 0xa6 1566#define HV_FAST_INTR_SETTARGET 0xa6
1564 1567
1565#ifndef __ASSEMBLY__ 1568#ifndef __ASSEMBLY__
1566extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); 1569unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
1567#endif 1570#endif
1568 1571
1569/* vintr_get_cookie() 1572/* vintr_get_cookie()
@@ -1647,30 +1650,30 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
1647#define HV_FAST_VINTR_SET_TARGET 0xae 1650#define HV_FAST_VINTR_SET_TARGET 0xae
1648 1651
1649#ifndef __ASSEMBLY__ 1652#ifndef __ASSEMBLY__
1650extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle, 1653unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
1651 unsigned long dev_ino, 1654 unsigned long dev_ino,
1652 unsigned long *cookie); 1655 unsigned long *cookie);
1653extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle, 1656unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
1654 unsigned long dev_ino, 1657 unsigned long dev_ino,
1655 unsigned long cookie); 1658 unsigned long cookie);
1656extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle, 1659unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
1657 unsigned long dev_ino, 1660 unsigned long dev_ino,
1658 unsigned long *valid); 1661 unsigned long *valid);
1659extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle, 1662unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
1660 unsigned long dev_ino, 1663 unsigned long dev_ino,
1661 unsigned long valid); 1664 unsigned long valid);
1662extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle, 1665unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
1663 unsigned long dev_ino, 1666 unsigned long dev_ino,
1664 unsigned long *state); 1667 unsigned long *state);
1665extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle, 1668unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
1666 unsigned long dev_ino, 1669 unsigned long dev_ino,
1667 unsigned long state); 1670 unsigned long state);
1668extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle, 1671unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
1669 unsigned long dev_ino, 1672 unsigned long dev_ino,
1670 unsigned long *cpuid); 1673 unsigned long *cpuid);
1671extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle, 1674unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
1672 unsigned long dev_ino, 1675 unsigned long dev_ino,
1673 unsigned long cpuid); 1676 unsigned long cpuid);
1674#endif 1677#endif
1675 1678
1676/* PCI IO services. 1679/* PCI IO services.
@@ -2627,50 +2630,50 @@ struct ldc_mtable_entry {
2627#define HV_FAST_LDC_REVOKE 0xef 2630#define HV_FAST_LDC_REVOKE 0xef
2628 2631
2629#ifndef __ASSEMBLY__ 2632#ifndef __ASSEMBLY__
2630extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel, 2633unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
2631 unsigned long ra, 2634 unsigned long ra,
2632 unsigned long num_entries); 2635 unsigned long num_entries);
2633extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel, 2636unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
2634 unsigned long *ra, 2637 unsigned long *ra,
2635 unsigned long *num_entries); 2638 unsigned long *num_entries);
2636extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel, 2639unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
2637 unsigned long *head_off, 2640 unsigned long *head_off,
2638 unsigned long *tail_off, 2641 unsigned long *tail_off,
2639 unsigned long *chan_state); 2642 unsigned long *chan_state);
2640extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel, 2643unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
2641 unsigned long tail_off); 2644 unsigned long tail_off);
2642extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel, 2645unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
2643 unsigned long ra, 2646 unsigned long ra,
2644 unsigned long num_entries); 2647 unsigned long num_entries);
2645extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel, 2648unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
2646 unsigned long *ra, 2649 unsigned long *ra,
2647 unsigned long *num_entries); 2650 unsigned long *num_entries);
2648extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel, 2651unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
2649 unsigned long *head_off, 2652 unsigned long *head_off,
2650 unsigned long *tail_off, 2653 unsigned long *tail_off,
2651 unsigned long *chan_state); 2654 unsigned long *chan_state);
2652extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel, 2655unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
2653 unsigned long head_off); 2656 unsigned long head_off);
2654extern unsigned long sun4v_ldc_set_map_table(unsigned long channel, 2657unsigned long sun4v_ldc_set_map_table(unsigned long channel,
2655 unsigned long ra, 2658 unsigned long ra,
2656 unsigned long num_entries); 2659 unsigned long num_entries);
2657extern unsigned long sun4v_ldc_get_map_table(unsigned long channel, 2660unsigned long sun4v_ldc_get_map_table(unsigned long channel,
2658 unsigned long *ra, 2661 unsigned long *ra,
2659 unsigned long *num_entries); 2662 unsigned long *num_entries);
2660extern unsigned long sun4v_ldc_copy(unsigned long channel, 2663unsigned long sun4v_ldc_copy(unsigned long channel,
2661 unsigned long dir_code, 2664 unsigned long dir_code,
2662 unsigned long tgt_raddr, 2665 unsigned long tgt_raddr,
2663 unsigned long lcl_raddr, 2666 unsigned long lcl_raddr,
2664 unsigned long len, 2667 unsigned long len,
2665 unsigned long *actual_len); 2668 unsigned long *actual_len);
2666extern unsigned long sun4v_ldc_mapin(unsigned long channel, 2669unsigned long sun4v_ldc_mapin(unsigned long channel,
2667 unsigned long cookie, 2670 unsigned long cookie,
2668 unsigned long *ra, 2671 unsigned long *ra,
2669 unsigned long *perm); 2672 unsigned long *perm);
2670extern unsigned long sun4v_ldc_unmap(unsigned long ra); 2673unsigned long sun4v_ldc_unmap(unsigned long ra);
2671extern unsigned long sun4v_ldc_revoke(unsigned long channel, 2674unsigned long sun4v_ldc_revoke(unsigned long channel,
2672 unsigned long cookie, 2675 unsigned long cookie,
2673 unsigned long mte_cookie); 2676 unsigned long mte_cookie);
2674#endif 2677#endif
2675 2678
2676/* Performance counter services. */ 2679/* Performance counter services. */
@@ -2727,14 +2730,14 @@ extern unsigned long sun4v_ldc_revoke(unsigned long channel,
2727#define HV_FAST_N2_SET_PERFREG 0x105 2730#define HV_FAST_N2_SET_PERFREG 0x105
2728 2731
2729#ifndef __ASSEMBLY__ 2732#ifndef __ASSEMBLY__
2730extern unsigned long sun4v_niagara_getperf(unsigned long reg, 2733unsigned long sun4v_niagara_getperf(unsigned long reg,
2731 unsigned long *val); 2734 unsigned long *val);
2732extern unsigned long sun4v_niagara_setperf(unsigned long reg, 2735unsigned long sun4v_niagara_setperf(unsigned long reg,
2733 unsigned long val); 2736 unsigned long val);
2734extern unsigned long sun4v_niagara2_getperf(unsigned long reg, 2737unsigned long sun4v_niagara2_getperf(unsigned long reg,
2735 unsigned long *val); 2738 unsigned long *val);
2736extern unsigned long sun4v_niagara2_setperf(unsigned long reg, 2739unsigned long sun4v_niagara2_setperf(unsigned long reg,
2737 unsigned long val); 2740 unsigned long val);
2738#endif 2741#endif
2739 2742
2740/* MMU statistics services. 2743/* MMU statistics services.
@@ -2829,8 +2832,8 @@ struct hv_mmu_statistics {
2829#define HV_FAST_MMUSTAT_INFO 0x103 2832#define HV_FAST_MMUSTAT_INFO 0x103
2830 2833
2831#ifndef __ASSEMBLY__ 2834#ifndef __ASSEMBLY__
2832extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra); 2835unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
2833extern unsigned long sun4v_mmustat_info(unsigned long *ra); 2836unsigned long sun4v_mmustat_info(unsigned long *ra);
2834#endif 2837#endif
2835 2838
2836/* NCS crypto services */ 2839/* NCS crypto services */
@@ -2919,9 +2922,9 @@ struct hv_ncs_qtail_update_arg {
2919#define HV_FAST_NCS_REQUEST 0x110 2922#define HV_FAST_NCS_REQUEST 0x110
2920 2923
2921#ifndef __ASSEMBLY__ 2924#ifndef __ASSEMBLY__
2922extern unsigned long sun4v_ncs_request(unsigned long request, 2925unsigned long sun4v_ncs_request(unsigned long request,
2923 unsigned long arg_ra, 2926 unsigned long arg_ra,
2924 unsigned long arg_size); 2927 unsigned long arg_size);
2925#endif 2928#endif
2926 2929
2927#define HV_FAST_FIRE_GET_PERFREG 0x120 2930#define HV_FAST_FIRE_GET_PERFREG 0x120
@@ -2930,18 +2933,18 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
2930#define HV_FAST_REBOOT_DATA_SET 0x172 2933#define HV_FAST_REBOOT_DATA_SET 0x172
2931 2934
2932#ifndef __ASSEMBLY__ 2935#ifndef __ASSEMBLY__
2933extern unsigned long sun4v_reboot_data_set(unsigned long ra, 2936unsigned long sun4v_reboot_data_set(unsigned long ra,
2934 unsigned long len); 2937 unsigned long len);
2935#endif 2938#endif
2936 2939
2937#define HV_FAST_VT_GET_PERFREG 0x184 2940#define HV_FAST_VT_GET_PERFREG 0x184
2938#define HV_FAST_VT_SET_PERFREG 0x185 2941#define HV_FAST_VT_SET_PERFREG 0x185
2939 2942
2940#ifndef __ASSEMBLY__ 2943#ifndef __ASSEMBLY__
2941extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num, 2944unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
2942 unsigned long *reg_val); 2945 unsigned long *reg_val);
2943extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num, 2946unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
2944 unsigned long reg_val); 2947 unsigned long reg_val);
2945#endif 2948#endif
2946 2949
2947/* Function numbers for HV_CORE_TRAP. */ 2950/* Function numbers for HV_CORE_TRAP. */
@@ -2978,21 +2981,21 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
2978#define HV_GRP_DIAG 0x0300 2981#define HV_GRP_DIAG 0x0300
2979 2982
2980#ifndef __ASSEMBLY__ 2983#ifndef __ASSEMBLY__
2981extern unsigned long sun4v_get_version(unsigned long group, 2984unsigned long sun4v_get_version(unsigned long group,
2982 unsigned long *major, 2985 unsigned long *major,
2983 unsigned long *minor); 2986 unsigned long *minor);
2984extern unsigned long sun4v_set_version(unsigned long group, 2987unsigned long sun4v_set_version(unsigned long group,
2985 unsigned long major, 2988 unsigned long major,
2986 unsigned long minor, 2989 unsigned long minor,
2987 unsigned long *actual_minor); 2990 unsigned long *actual_minor);
2988 2991
2989extern int sun4v_hvapi_register(unsigned long group, unsigned long major, 2992int sun4v_hvapi_register(unsigned long group, unsigned long major,
2990 unsigned long *minor); 2993 unsigned long *minor);
2991extern void sun4v_hvapi_unregister(unsigned long group); 2994void sun4v_hvapi_unregister(unsigned long group);
2992extern int sun4v_hvapi_get(unsigned long group, 2995int sun4v_hvapi_get(unsigned long group,
2993 unsigned long *major, 2996 unsigned long *major,
2994 unsigned long *minor); 2997 unsigned long *minor);
2995extern void sun4v_hvapi_init(void); 2998void sun4v_hvapi_init(void);
2996#endif 2999#endif
2997 3000
2998#endif /* !(_SPARC64_HYPERVISOR_H) */ 3001#endif /* !(_SPARC64_HYPERVISOR_H) */
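Most of the sun4v_*() prototypes above are thin C wrappers around hypervisor traps and return a status word, with HV_EOK indicating success. A usage sketch in kernel context, using the sun4v_tod_get() signature from the hunk; the error handling is illustrative and not taken from the patch:

    #include <linux/printk.h>
    #include <asm/hypervisor.h>

    static void report_hv_tod(void)
    {
    	unsigned long seconds;
    	unsigned long status = sun4v_tod_get(&seconds);

    	if (status == HV_EOK)
    		pr_info("sun4v TOD: %lu seconds since the epoch\n", seconds);
    	else
    		pr_warn("sun4v_tod_get() failed, status %lu\n", status);
    }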
diff --git a/arch/sparc/include/asm/idprom.h b/arch/sparc/include/asm/idprom.h
index 6976aa2439c6..3793f7f91c42 100644
--- a/arch/sparc/include/asm/idprom.h
+++ b/arch/sparc/include/asm/idprom.h
@@ -20,6 +20,6 @@ struct idprom {
20}; 20};
21 21
22extern struct idprom *idprom; 22extern struct idprom *idprom;
23extern void idprom_init(void); 23void idprom_init(void);
24 24
25#endif /* !(_SPARC_IDPROM_H) */ 25#endif /* !(_SPARC_IDPROM_H) */
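Note that the sweep keeps "extern" on object declarations such as idprom above and drops it only from function prototypes: for data, the keyword is what separates a declaration from a (tentative) definition. A stand-alone illustration:

    #include <stdio.h>

    extern int shared_counter;	/* declaration only: storage lives elsewhere */
    int shared_counter = 3;		/* the single real definition */

    void bump_counter(void);	/* "extern" here would add nothing */

    void bump_counter(void)
    {
    	shared_counter++;
    }

    int main(void)
    {
    	bump_counter();
    	printf("%d\n", shared_counter);	/* prints 4 */
    	return 0;
    }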
diff --git a/arch/sparc/include/asm/io-unit.h b/arch/sparc/include/asm/io-unit.h
index 01ab2f613e91..04a9701e7202 100644
--- a/arch/sparc/include/asm/io-unit.h
+++ b/arch/sparc/include/asm/io-unit.h
@@ -43,7 +43,7 @@
 struct iounit_struct {
         unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
         spinlock_t lock;
-        iopte_t *page_table;
+        iopte_t __iomem *page_table;
         unsigned long rotor[3];
         unsigned long limit[4];
 };
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index c1acbd891cbc..9f532902627c 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -2,191 +2,94 @@
 #define __SPARC_IO_H

 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/ioport.h>  /* struct resource */

-#include <asm/page.h>      /* IO address mapping routines need this */
-#include <asm-generic/pci_iomap.h>
-
-#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
-
-static inline u32 flip_dword (u32 l)
-{
-        return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
-}
-
-static inline u16 flip_word (u16 w)
-{
-        return ((w&0xff) << 8) | ((w>>8)&0xff);
-}
-
-#define mmiowb()
-
-/*
- * Memory mapped I/O to PCI
- */
-
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
-        return *(__force volatile u8 *)addr;
-}
-
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
-        return *(__force volatile u16 *)addr;
-}
-
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
-        return *(__force volatile u32 *)addr;
-}
+#define readb_relaxed(__addr)   readb(__addr)
+#define readw_relaxed(__addr)   readw(__addr)
+#define readl_relaxed(__addr)   readl(__addr)

-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
-{
-        *(__force volatile u8 *)addr = b;
-}
+#define IO_SPACE_LIMIT 0xffffffff

-static inline void __raw_writew(u16 w, volatile void __iomem *addr)
-{
-        *(__force volatile u16 *)addr = w;
-}
+#define memset_io(d,c,sz)     _memset_io(d,c,sz)
+#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
+#define memcpy_toio(d,s,sz)   _memcpy_toio(d,s,sz)

-static inline void __raw_writel(u32 l, volatile void __iomem *addr)
-{
-        *(__force volatile u32 *)addr = l;
-}
+#include <asm-generic/io.h>

-static inline u8 __readb(const volatile void __iomem *addr)
+static inline void _memset_io(volatile void __iomem *dst,
+                              int c, __kernel_size_t n)
 {
-        return *(__force volatile u8 *)addr;
-}
+        volatile void __iomem *d = dst;

-static inline u16 __readw(const volatile void __iomem *addr)
-{
-        return flip_word(*(__force volatile u16 *)addr);
+        while (n--) {
+                writeb(c, d);
+                d++;
+        }
 }

-static inline u32 __readl(const volatile void __iomem *addr)
+static inline void _memcpy_fromio(void *dst, const volatile void __iomem *src,
+                                  __kernel_size_t n)
 {
-        return flip_dword(*(__force volatile u32 *)addr);
-}
+        char *d = dst;

-static inline void __writeb(u8 b, volatile void __iomem *addr)
-{
-        *(__force volatile u8 *)addr = b;
+        while (n--) {
+                char tmp = readb(src);
+                *d++ = tmp;
+                src++;
+        }
 }

-static inline void __writew(u16 w, volatile void __iomem *addr)
+static inline void _memcpy_toio(volatile void __iomem *dst, const void *src,
+                                __kernel_size_t n)
 {
-        *(__force volatile u16 *)addr = flip_word(w);
-}
+        const char *s = src;
+        volatile void __iomem *d = dst;

-static inline void __writel(u32 l, volatile void __iomem *addr)
-{
-        *(__force volatile u32 *)addr = flip_dword(l);
+        while (n--) {
+                char tmp = *s++;
+                writeb(tmp, d);
+                d++;
+        }
 }

-#define readb(__addr)           __readb(__addr)
-#define readw(__addr)           __readw(__addr)
-#define readl(__addr)           __readl(__addr)
-#define readb_relaxed(__addr)   readb(__addr)
-#define readw_relaxed(__addr)   readw(__addr)
-#define readl_relaxed(__addr)   readl(__addr)
-
-#define writeb(__b, __addr)     __writeb((__b),(__addr))
-#define writew(__w, __addr)     __writew((__w),(__addr))
-#define writel(__l, __addr)     __writel((__l),(__addr))
-
-/*
- * I/O space operations
- *
- * Arrangement on a Sun is somewhat complicated.
- *
- * First of all, we want to use standard Linux drivers
- * for keyboard, PC serial, etc. These drivers think
- * they access I/O space and use inb/outb.
- * On the other hand, EBus bridge accepts PCI *memory*
- * cycles and converts them into ISA *I/O* cycles.
- * Ergo, we want inb & outb to generate PCI memory cycles.
- *
- * If we want to issue PCI *I/O* cycles, we do this
- * with a low 64K fixed window in PCIC. This window gets
- * mapped somewhere into virtual kernel space and we
- * can use inb/outb again.
- */
-#define inb_local(__addr)       __readb((void __iomem *)(unsigned long)(__addr))
-#define inb(__addr)             __readb((void __iomem *)(unsigned long)(__addr))
-#define inw(__addr)             __readw((void __iomem *)(unsigned long)(__addr))
-#define inl(__addr)             __readl((void __iomem *)(unsigned long)(__addr))
-
-#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outb(__b, __addr)       __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outw(__w, __addr)       __writew(__w, (void __iomem *)(unsigned long)(__addr))
-#define outl(__l, __addr)       __writel(__l, (void __iomem *)(unsigned long)(__addr))
-
-#define inb_p(__addr)           inb(__addr)
-#define outb_p(__b, __addr)     outb(__b, __addr)
-#define inw_p(__addr)           inw(__addr)
-#define outw_p(__w, __addr)     outw(__w, __addr)
-#define inl_p(__addr)           inl(__addr)
-#define outl_p(__l, __addr)     outl(__l, __addr)
-
-void outsb(unsigned long addr, const void *src, unsigned long cnt);
-void outsw(unsigned long addr, const void *src, unsigned long cnt);
-void outsl(unsigned long addr, const void *src, unsigned long cnt);
-void insb(unsigned long addr, void *dst, unsigned long count);
-void insw(unsigned long addr, void *dst, unsigned long count);
-void insl(unsigned long addr, void *dst, unsigned long count);
-
-#define IO_SPACE_LIMIT 0xffffffff
-
 /*
  * SBus accessors.
  *
  * SBus has only one, memory mapped, I/O space.
  * We do not need to flip bytes for SBus of course.
  */
-static inline u8 _sbus_readb(const volatile void __iomem *addr)
+static inline u8 sbus_readb(const volatile void __iomem *addr)
 {
         return *(__force volatile u8 *)addr;
 }

-static inline u16 _sbus_readw(const volatile void __iomem *addr)
+static inline u16 sbus_readw(const volatile void __iomem *addr)
 {
         return *(__force volatile u16 *)addr;
 }

-static inline u32 _sbus_readl(const volatile void __iomem *addr)
+static inline u32 sbus_readl(const volatile void __iomem *addr)
 {
         return *(__force volatile u32 *)addr;
 }

-static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
+static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
 {
         *(__force volatile u8 *)addr = b;
 }

-static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
+static inline void sbus_writew(u16 w, volatile void __iomem *addr)
 {
         *(__force volatile u16 *)addr = w;
 }

-static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
+static inline void sbus_writel(u32 l, volatile void __iomem *addr)
 {
         *(__force volatile u32 *)addr = l;
 }

-/*
- * The only reason for #define's is to hide casts to unsigned long.
- */
-#define sbus_readb(__addr)              _sbus_readb(__addr)
-#define sbus_readw(__addr)              _sbus_readw(__addr)
-#define sbus_readl(__addr)              _sbus_readl(__addr)
-#define sbus_writeb(__b, __addr)        _sbus_writeb(__b, __addr)
-#define sbus_writew(__w, __addr)        _sbus_writew(__w, __addr)
-#define sbus_writel(__l, __addr)        _sbus_writel(__l, __addr)
-
-static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
+static inline void sbus_memset_io(volatile void __iomem *__dst, int c,
+                                  __kernel_size_t n)
 {
         while(n--) {
                 sbus_writeb(c, __dst);
@@ -194,22 +97,9 @@ static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_
         }
 }

-static inline void
-_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
-{
-        volatile void __iomem *d = dst;
-
-        while (n--) {
-                writeb(c, d);
-                d++;
-        }
-}
-
-#define memset_io(d,c,sz)     _memset_io(d,c,sz)
-
-static inline void
-_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
-                    __kernel_size_t n)
+static inline void sbus_memcpy_fromio(void *dst,
+                                      const volatile void __iomem *src,
+                                      __kernel_size_t n)
 {
         char *d = dst;

@@ -220,25 +110,9 @@ _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
         }
 }

-#define sbus_memcpy_fromio(d, s, sz)    _sbus_memcpy_fromio(d, s, sz)
-
-static inline void
-_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
-{
-        char *d = dst;
-
-        while (n--) {
-                char tmp = readb(src);
-                *d++ = tmp;
-                src++;
-        }
-}
-
-#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
-
-static inline void
-_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
-                  __kernel_size_t n)
+static inline void sbus_memcpy_toio(volatile void __iomem *dst,
+                                    const void *src,
+                                    __kernel_size_t n)
 {
         const char *s = src;
         volatile void __iomem *d = dst;
@@ -250,81 +124,26 @@ _sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
         }
 }

-#define sbus_memcpy_toio(d, s, sz)      _sbus_memcpy_toio(d, s, sz)
-
-static inline void
-_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
-{
-        const char *s = src;
-        volatile void __iomem *d = dst;
-
-        while (n--) {
-                char tmp = *s++;
-                writeb(tmp, d);
-                d++;
-        }
-}
-
-#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
-
 #ifdef __KERNEL__

 /*
  * Bus number may be embedded in the higher bits of the physical address.
  * This is why we have no bus number argument to ioremap().
  */
-extern void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(X,Y)    ioremap((X),(Y))
 #define ioremap_wc(X,Y)         ioremap((X),(Y))
-extern void iounmap(volatile void __iomem *addr);
-
-#define ioread8(X)              readb(X)
-#define ioread16(X)             readw(X)
-#define ioread16be(X)           __raw_readw(X)
-#define ioread32(X)             readl(X)
-#define ioread32be(X)           __raw_readl(X)
-#define iowrite8(val,X)         writeb(val,X)
-#define iowrite16(val,X)        writew(val,X)
-#define iowrite16be(val,X)      __raw_writew(val,X)
-#define iowrite32(val,X)        writel(val,X)
-#define iowrite32be(val,X)      __raw_writel(val,X)
-
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
-{
-        insb((unsigned long __force)port, buf, count);
-}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
-{
-        insw((unsigned long __force)port, buf, count);
-}
-
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
-{
-        insl((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-        outsb((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-        outsw((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-        outsl((unsigned long __force)port, buf, count);
-}
+void iounmap(volatile void __iomem *addr);

 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);

 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+

 /*
  * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
@@ -343,21 +162,11 @@ static inline int sbus_can_burst64(void)
         return 0; /* actually, sparc_cpu_model==sun4d */
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);

 #endif

 #define __ARCH_HAS_NO_PAGE_ZERO_MAPPED          1

-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)    __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)   p

 #endif /* !(__SPARC_IO_H) */
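With the byte-flipping helpers and the private __readX/__writeX wrappers gone, sparc32 now relies on asm-generic/io.h plus the small set of SBus accessors kept above. A minimal usage sketch of those accessors follows; the physical address and register offsets are invented purely for illustration:

    /* Minimal sketch: map a (hypothetical) register window and poke it. */
    static int example_poke_device(void)
    {
            void __iomem *regs = ioremap(0xf0000000, 0x100); /* illustrative base/size */

            if (!regs)
                    return -ENOMEM;

            sbus_writel(0x1, regs + 0x04);   /* hypothetical control register */
            memset_io(regs + 0x10, 0, 0x20); /* clear a small on-device buffer */

            iounmap(regs);
            return 0;
    }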
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 09b0b88aeb2a..05381c3a4228 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -15,7 +15,6 @@

 /* BIO layer definitions. */
 extern unsigned long kern_base, kern_size;
-#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

 static inline u8 _inb(unsigned long addr)
 {
@@ -91,12 +90,12 @@ static inline void _outl(u32 l, unsigned long addr)
 #define inl_p(__addr)           inl(__addr)
 #define outl_p(__l, __addr)     outl(__l, __addr)

-extern void outsb(unsigned long, const void *, unsigned long);
-extern void outsw(unsigned long, const void *, unsigned long);
-extern void outsl(unsigned long, const void *, unsigned long);
-extern void insb(unsigned long, void *, unsigned long);
-extern void insw(unsigned long, void *, unsigned long);
-extern void insl(unsigned long, void *, unsigned long);
+void outsb(unsigned long, const void *, unsigned long);
+void outsw(unsigned long, const void *, unsigned long);
+void outsl(unsigned long, const void *, unsigned long);
+void insb(unsigned long, void *, unsigned long);
+void insw(unsigned long, void *, unsigned long);
+void insl(unsigned long, void *, unsigned long);

 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
 {
@@ -509,12 +508,12 @@ static inline void iounmap(volatile void __iomem *addr)
 #define iowrite32be(val,X)      __raw_writel(val,X)

 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);

 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);

 static inline int sbus_can_dma_64bit(void)
 {
@@ -525,7 +524,7 @@ static inline int sbus_can_burst64(void)
         return 1;
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);

 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/sparc/include/asm/iommu_32.h b/arch/sparc/include/asm/iommu_32.h
index 70c589c05a10..f6c066b52fd6 100644
--- a/arch/sparc/include/asm/iommu_32.h
+++ b/arch/sparc/include/asm/iommu_32.h
@@ -99,7 +99,7 @@ struct iommu_regs {
 #define IOPTE_WAZ           0x00000001 /* Write as zeros */

 struct iommu_struct {
-        struct iommu_regs *regs;
+        struct iommu_regs __iomem *regs;
         iopte_t *page_table;
         /* For convenience */
         unsigned long start; /* First managed virtual address */
@@ -108,14 +108,14 @@ struct iommu_struct {
         struct bit_map usemap;
 };

-static inline void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs __iomem *regs)
 {
-        regs->tlbflush = 0;
+        sbus_writel(0, &regs->tlbflush);
 }

-static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs __iomem *regs, unsigned long ba)
 {
-        regs->pageflush = (ba & PAGE_MASK);
+        sbus_writel(ba & PAGE_MASK, &regs->pageflush);
 }

 #endif /* !(_SPARC_IOMMU_H) */
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index caf798b56191..2b9321ab064d 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -58,8 +58,8 @@ struct strbuf {
         volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
 };

-extern int iommu_table_init(struct iommu *iommu, int tsbsize,
+int iommu_table_init(struct iommu *iommu, int tsbsize,
                      u32 dma_offset, u32 dma_addr_mask,
                      int numa_node);

 #endif /* !(_SPARC64_IOMMU_H) */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index 2ae3acaeb1b3..eecd3d8442c9 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -16,7 +16,8 @@

 #define irq_canonicalize(irq)   (irq)

-extern void __init init_IRQ(void);
+void __init init_IRQ(void);
+void __init sun4d_init_sbi_irq(void);

 #define NO_IRQ          0xffffffff

diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index abf6afe82ca8..375cffcf7dbd 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -39,32 +39,32 @@
  */
 #define NR_IRQS    255

-extern void irq_install_pre_handler(int irq,
+void irq_install_pre_handler(int irq,
                              void (*func)(unsigned int, void *, void *),
                              void *arg1, void *arg2);
 #define irq_canonicalize(irq)   (irq)
-extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
-extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
                              unsigned int msi_devino_start,
                              unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+void sun4v_destroy_msi(unsigned int irq);
+unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
                              unsigned int msi_devino_start,
                              unsigned int msi_devino_end,
                              unsigned long imap_base,
                              unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int irq);
+void sun4u_destroy_msi(unsigned int irq);

-extern unsigned char irq_alloc(unsigned int dev_handle,
+unsigned char irq_alloc(unsigned int dev_handle,
                         unsigned int dev_ino);
 #ifdef CONFIG_PCI_MSI
-extern void irq_free(unsigned int irq);
+void irq_free(unsigned int irq);
 #endif

-extern void __init init_IRQ(void);
-extern void fixup_irqs(void);
+void __init init_IRQ(void);
+void fixup_irqs(void);

 static inline void set_softint(unsigned long bits)
 {
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index e414c06615c1..71cc284f55c5 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -15,9 +15,9 @@
 #include <linux/types.h>
 #include <asm/psr.h>

-extern void arch_local_irq_restore(unsigned long);
-extern unsigned long arch_local_irq_save(void);
-extern void arch_local_irq_enable(void);
+void arch_local_irq_restore(unsigned long);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_enable(void);

 static inline notrace unsigned long arch_local_save_flags(void)
 {
diff --git a/arch/sparc/include/asm/kdebug_64.h b/arch/sparc/include/asm/kdebug_64.h
index feb3578e12c4..04465de8f3b5 100644
--- a/arch/sparc/include/asm/kdebug_64.h
+++ b/arch/sparc/include/asm/kdebug_64.h
@@ -3,7 +3,7 @@

 struct pt_regs;

-extern void bad_trap(struct pt_regs *, long);
+void bad_trap(struct pt_regs *, long);

 /* Grossly misnamed. */
 enum die_val {
diff --git a/arch/sparc/include/asm/kgdb.h b/arch/sparc/include/asm/kgdb.h
index b6ef301d05bf..47366af7a589 100644
--- a/arch/sparc/include/asm/kgdb.h
+++ b/arch/sparc/include/asm/kgdb.h
@@ -28,9 +28,12 @@ enum regnames {
 #define NUMREGBYTES             ((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES             ((GDB_Y + 1) * 8)
+
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
 #endif

-extern void arch_kgdb_breakpoint(void);
+void arch_kgdb_breakpoint(void);

 #define BREAK_INSTR_SIZE        4
 #define CACHE_FLUSH_IS_SAFE     1
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 5879d71afdaa..a145d798e112 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -43,7 +43,9 @@ struct kprobe_ctlblk {
         struct prev_kprobe prev_kprobe;
 };

-extern int kprobe_exceptions_notify(struct notifier_block *self,
+int kprobe_exceptions_notify(struct notifier_block *self,
                              unsigned long val, void *data);
-extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
+                                      struct pt_regs *regs);
 #endif /* _SPARC64_KPROBES_H */
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index bdb524a7b814..c8c67f621f4f 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -4,9 +4,9 @@
 #include <asm/hypervisor.h>

 extern int ldom_domaining_enabled;
-extern void ldom_set_var(const char *var, const char *value);
-extern void ldom_reboot(const char *boot_command);
-extern void ldom_power_off(void);
+void ldom_set_var(const char *var, const char *value);
+void ldom_reboot(const char *boot_command);
+void ldom_power_off(void);

 /* The event handler will be evoked when link state changes
  * or data becomes available on the receive side.
@@ -51,30 +51,30 @@ struct ldc_channel_config {
 struct ldc_channel;

 /* Allocate state for a channel. */
-extern struct ldc_channel *ldc_alloc(unsigned long id,
+struct ldc_channel *ldc_alloc(unsigned long id,
                               const struct ldc_channel_config *cfgp,
                               void *event_arg);

 /* Shut down and free state for a channel. */
-extern void ldc_free(struct ldc_channel *lp);
+void ldc_free(struct ldc_channel *lp);

 /* Register TX and RX queues of the link with the hypervisor. */
-extern int ldc_bind(struct ldc_channel *lp, const char *name);
+int ldc_bind(struct ldc_channel *lp, const char *name);

 /* For non-RAW protocols we need to complete a handshake before
  * communication can proceed.  ldc_connect() does that, if the
  * handshake completes successfully, an LDC_EVENT_UP event will
  * be sent up to the driver.
  */
-extern int ldc_connect(struct ldc_channel *lp);
-extern int ldc_disconnect(struct ldc_channel *lp);
+int ldc_connect(struct ldc_channel *lp);
+int ldc_disconnect(struct ldc_channel *lp);

-extern int ldc_state(struct ldc_channel *lp);
+int ldc_state(struct ldc_channel *lp);

 /* Read and write operations.  Only valid when the link is up. */
-extern int ldc_write(struct ldc_channel *lp, const void *buf,
+int ldc_write(struct ldc_channel *lp, const void *buf,
               unsigned int size);
-extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
+int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);

 #define LDC_MAP_SHADOW  0x01
 #define LDC_MAP_DIRECT  0x02
@@ -92,22 +92,22 @@ struct ldc_trans_cookie {
 };

 struct scatterlist;
-extern int ldc_map_sg(struct ldc_channel *lp,
+int ldc_map_sg(struct ldc_channel *lp,
                struct scatterlist *sg, int num_sg,
                struct ldc_trans_cookie *cookies, int ncookies,
                unsigned int map_perm);

-extern int ldc_map_single(struct ldc_channel *lp,
+int ldc_map_single(struct ldc_channel *lp,
                    void *buf, unsigned int len,
                    struct ldc_trans_cookie *cookies, int ncookies,
                    unsigned int map_perm);

-extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
+void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
                int ncookies);

-extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
+int ldc_copy(struct ldc_channel *lp, int copy_dir,
              void *buf, unsigned int len, unsigned long offset,
              struct ldc_trans_cookie *cookies, int ncookies);

 static inline int ldc_get_dring_entry(struct ldc_channel *lp,
                                       void *buf, unsigned int len,
@@ -127,12 +127,12 @@ static inline int ldc_put_dring_entry(struct ldc_channel *lp,
         return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
 }

-extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
                           struct ldc_trans_cookie *cookies,
                           int *ncookies, unsigned int map_perm);

-extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
+void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
                         unsigned int len,
                         struct ldc_trans_cookie *cookies, int ncookies);

 #endif /* _SPARC64_LDC_H */
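The comment above ldc_connect() describes the non-RAW handshake; the sketch below shows the usual bring-up order for a channel. It is only a sketch: the ldc_channel_config members (.event, .mtu, .mode) and the LDC_MODE_UNRELIABLE value are assumed from the struct definition elided in this hunk, and the callback body is a placeholder.

    static void example_event(void *arg, int event)
    {
            /* LDC_EVENT_UP / LDC_EVENT_DATA_READY handling would go here. */
    }

    static struct ldc_channel *example_ldc_up(unsigned long id)
    {
            struct ldc_channel_config cfg = {
                    .event  = example_event,
                    .mtu    = 64,                   /* illustrative */
                    .mode   = LDC_MODE_UNRELIABLE,  /* illustrative */
            };
            struct ldc_channel *lp = ldc_alloc(id, &cfg, NULL);

            if (IS_ERR(lp))
                    return lp;

            if (ldc_bind(lp, "example") || ldc_connect(lp)) {
                    ldc_free(lp);
                    return NULL;
            }
            return lp;      /* LDC_EVENT_UP arrives via the callback when the link is up */
    }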
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index c2f6ff6d7a35..204771cd74a5 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -82,8 +82,8 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
 #define LEON_BYPASS_LOAD_PA(x)          leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)      leon_store_reg((unsigned long)(x), (unsigned long)(v))

-extern void leon_switch_mm(void);
-extern void leon_init_IRQ(void);
+void leon_switch_mm(void);
+void leon_init_IRQ(void);

 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
@@ -196,14 +196,14 @@ static inline int sparc_leon3_cpuid(void)
 #ifndef __ASSEMBLY__
 struct vm_area_struct;

-extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
-extern void leon_flush_icache_all(void);
-extern void leon_flush_dcache_all(void);
-extern void leon_flush_cache_all(void);
-extern void leon_flush_tlb_all(void);
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
+void leon_flush_icache_all(void);
+void leon_flush_dcache_all(void);
+void leon_flush_cache_all(void);
+void leon_flush_tlb_all(void);
 extern int leon_flush_during_switch;
-extern int leon_flush_needed(void);
-extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+int leon_flush_needed(void);
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);

 /* struct that hold LEON3 cache configuration registers */
 struct leon3_cacheregs {
@@ -217,29 +217,29 @@ struct leon3_cacheregs {

 struct device_node;
 struct task_struct;
-extern unsigned int leon_build_device_irq(unsigned int real_irq,
+unsigned int leon_build_device_irq(unsigned int real_irq,
                                    irq_flow_handler_t flow_handler,
                                    const char *name, int do_ack);
-extern void leon_update_virq_handling(unsigned int virq,
+void leon_update_virq_handling(unsigned int virq,
                                irq_flow_handler_t flow_handler,
                                const char *name, int do_ack);
-extern void leon_init_timers(void);
-extern void leon_trans_init(struct device_node *dp);
-extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
-extern void init_leon(void);
-extern void poke_leonsparc(void);
-extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
+void leon_init_timers(void);
+void leon_trans_init(struct device_node *dp);
+void leon_node_init(struct device_node *dp, struct device_node ***nextp);
+void init_leon(void);
+void poke_leonsparc(void);
+void leon3_getCacheRegs(struct leon3_cacheregs *regs);
 extern int leon3_ticker_irq;

 #ifdef CONFIG_SMP
-extern int leon_smp_nrcpus(void);
-extern void leon_clear_profile_irq(int cpu);
-extern void leon_smp_done(void);
-extern void leon_boot_cpus(void);
-extern int leon_boot_one_cpu(int i, struct task_struct *);
+int leon_smp_nrcpus(void);
+void leon_clear_profile_irq(int cpu);
+void leon_smp_done(void);
+void leon_boot_cpus(void);
+int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
-extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);

 extern unsigned int smpleon_ipi[];
 extern unsigned int linux_trap_ipi15_leon[];
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
index bfd3ab3092b5..049d067ed8be 100644
--- a/arch/sparc/include/asm/leon_pci.h
+++ b/arch/sparc/include/asm/leon_pci.h
@@ -16,7 +16,7 @@ struct leon_pci_info {
         int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
 };

-extern void leon_pci_init(struct platform_device *ofdev,
+void leon_pci_init(struct platform_device *ofdev,
                    struct leon_pci_info *info);

 #endif /* _ASM_LEON_PCI_H_ */
diff --git a/arch/sparc/include/asm/mc146818rtc.h b/arch/sparc/include/asm/mc146818rtc.h
index 67ed9e3a0235..d8e72f37dc4b 100644
--- a/arch/sparc/include/asm/mc146818rtc.h
+++ b/arch/sparc/include/asm/mc146818rtc.h
@@ -1,5 +1,10 @@
 #ifndef ___ASM_SPARC_MC146818RTC_H
 #define ___ASM_SPARC_MC146818RTC_H
+
+#include <linux/spinlock.h>
+
+extern spinlock_t rtc_lock;
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/mc146818rtc_64.h>
 #else
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 139097f3a67b..aebeb88f70db 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -12,13 +12,13 @@ struct mdesc_handle;
  * the first argument to all of the operational calls that work
  * on mdescs.
  */
-extern struct mdesc_handle *mdesc_grab(void);
-extern void mdesc_release(struct mdesc_handle *);
+struct mdesc_handle *mdesc_grab(void);
+void mdesc_release(struct mdesc_handle *);

 #define MDESC_NODE_NULL         (~(u64)0)

-extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
+u64 mdesc_node_by_name(struct mdesc_handle *handle,
                        u64 from_node, const char *name);
 #define mdesc_for_each_node_by_name(__hdl, __node, __name) \
         for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
              (__node) != MDESC_NODE_NULL; \
@@ -34,9 +34,9 @@ extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
  *
  * These same rules apply to mdesc_node_name().
  */
-extern const void *mdesc_get_property(struct mdesc_handle *handle,
+const void *mdesc_get_property(struct mdesc_handle *handle,
                                u64 node, const char *name, int *lenp);
-extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);

 /* MD arc iteration, the standard sequence is:
  *
@@ -50,16 +50,16 @@ extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
 #define MDESC_ARC_TYPE_FWD      "fwd"
 #define MDESC_ARC_TYPE_BACK     "back"

-extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
+u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
                    const char *arc_type);
 #define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
         for (__arc = mdesc_next_arc(__hdl, __node, __type); \
              (__arc) != MDESC_NODE_NULL; \
              __arc = mdesc_next_arc(__hdl, __arc, __type))

-extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);

-extern void mdesc_update(void);
+void mdesc_update(void);

 struct mdesc_notifier_client {
         void (*add)(struct mdesc_handle *handle, u64 node);
@@ -69,12 +69,12 @@ struct mdesc_notifier_client {
         struct mdesc_notifier_client *next;
 };

-extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+void mdesc_register_notifier(struct mdesc_notifier_client *client);

-extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
-extern void mdesc_populate_present_mask(cpumask_t *mask);
-extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
+void mdesc_fill_in_cpu_data(cpumask_t *mask);
+void mdesc_populate_present_mask(cpumask_t *mask);
+void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);

-extern void sun4v_mdesc_init(void);
+void sun4v_mdesc_init(void);

 #endif
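The grab/release pair and the node iterator declared above are used together; the sketch below shows the standard walk described in the header comments. The "cpu" node name and "id" property are illustrative choices, not mandated by this header.

    /* Minimal sketch: walk machine-description nodes and read a property. */
    static void example_walk_cpus(void)
    {
            struct mdesc_handle *hp = mdesc_grab();
            u64 node;

            if (!hp)
                    return;

            mdesc_for_each_node_by_name(hp, node, "cpu") {
                    const u64 *id = mdesc_get_property(hp, node, "id", NULL);

                    if (id)
                            pr_info("mdesc cpu node: id %llu\n", *id);
            }

            mdesc_release(hp);
    }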
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f668797ae234..70067ce184b1 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -67,9 +67,9 @@ struct tsb {
         unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

-extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
-extern void tsb_flush(unsigned long ent, unsigned long tag);
-extern void tsb_init(struct tsb *tsb, unsigned long size);
+void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+void tsb_flush(unsigned long ent, unsigned long tag);
+void tsb_init(struct tsb *tsb, unsigned long size);

 struct tsb_config {
         struct tsb              *tsb;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 3d528f06e4b0..b84be675e507 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,20 +17,20 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];

-extern void get_new_mmu_context(struct mm_struct *mm);
+void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
-extern void smp_new_mmu_context_version(void);
+void smp_new_mmu_context_version(void);
 #else
 #define smp_new_mmu_context_version() do { } while (0)
 #endif

-extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-extern void destroy_context(struct mm_struct *mm);
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);

-extern void __tsb_context_switch(unsigned long pgd_pa,
+void __tsb_context_switch(unsigned long pgd_pa,
                           struct tsb_config *tsb_base,
                           struct tsb_config *tsb_huge,
                           unsigned long tsb_descr_pa);

 static inline void tsb_context_switch(struct mm_struct *mm)
 {
@@ -46,9 +46,11 @@ static inline void tsb_context_switch(struct mm_struct *mm)
                              , __pa(&mm->context.tsb_descr[0]));
 }

-extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
+void tsb_grow(struct mm_struct *mm,
+              unsigned long tsb_index,
+              unsigned long mm_rss);
 #ifdef CONFIG_SMP
-extern void smp_tsb_sync(struct mm_struct *mm);
+void smp_tsb_sync(struct mm_struct *mm);
 #else
 #define smp_tsb_sync(__mm) do { } while (0)
 #endif
@@ -66,7 +68,7 @@ extern void smp_tsb_sync(struct mm_struct *mm);
66 : "r" (CTX_HWBITS((__mm)->context)), \ 68 : "r" (CTX_HWBITS((__mm)->context)), \
67 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU)) 69 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
68 70
69extern void __flush_tlb_mm(unsigned long, unsigned long); 71void __flush_tlb_mm(unsigned long, unsigned long);
70 72
71/* Switch the current MM context. */ 73/* Switch the current MM context. */
72static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 74static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index 72e6500e7ab0..26ad2b2607c6 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -1,13 +1,13 @@
 #ifndef __NMI_H
 #define __NMI_H

-extern int __init nmi_init(void);
-extern void perfctr_irq(int irq, struct pt_regs *regs);
-extern void nmi_adjust_hz(unsigned int new_hz);
+int __init nmi_init(void);
+void perfctr_irq(int irq, struct pt_regs *regs);
+void nmi_adjust_hz(unsigned int new_hz);

 extern atomic_t nmi_active;

-extern void start_nmi_watchdog(void *unused);
-extern void stop_nmi_watchdog(void *unused);
+void start_nmi_watchdog(void *unused);
+void stop_nmi_watchdog(void *unused);

 #endif /* __NMI_H */
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index c72f3045820c..56a09b9d7b1b 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -43,28 +43,28 @@ extern struct linux_nodeops *prom_nodeops;
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(struct linux_romvec *rom_ptr);
+void prom_init(struct linux_romvec *rom_ptr);

 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);

 /* Miscellaneous routines, don't really fit in any category per se. */

 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(char *boot_command);
+void prom_reboot(char *boot_command);

 /* Evaluate the forth string passed. */
-extern void prom_feval(char *forth_string);
+void prom_feval(char *forth_string);

 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);

 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void __noreturn prom_halt(void);
+void __noreturn prom_halt(void);

 /* Set the PROM 'sync' callback function to the passed function pointer.
  * When the user gives the 'sync' command at the prom prompt while the
@@ -73,37 +73,37 @@ extern void __noreturn prom_halt(void);
  * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
  */
 typedef void (*sync_func_t)(void);
-extern void prom_setsync(sync_func_t func_ptr);
+void prom_setsync(sync_func_t func_ptr);

 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);

 /* Get the prom major version. */
-extern int prom_version(void);
+int prom_version(void);

 /* Get the prom plugin revision. */
-extern int prom_getrev(void);
+int prom_getrev(void);

 /* Get the prom firmware revision. */
-extern int prom_getprev(void);
+int prom_getprev(void);

 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);

 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);

 /* Multiprocessor operations... */

 /* Start the CPU with the given device tree node, context table, and context
  * at the passed program counter.
  */
-extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
+int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
                   int context, char *program_counter);

 /* Initialize the memory lists based upon the prom version. */
 void prom_meminit(void);
@@ -111,65 +111,65 @@ void prom_meminit(void);
 /* PROM device tree traversal functions... */

 /* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);

 /* Get the next sibling node of the given node, or zero if no further
  * siblings exist.
  */
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);

 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (ie. no such property at this node).
  */
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);

 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int __must_check prom_getproperty(phandle thisnode, const char *property,
+int __must_check prom_getproperty(phandle thisnode, const char *property,
                                   char *prop_buffer, int propbuf_size);

 /* Acquire an integer property. */
-extern int prom_getint(phandle node, char *property);
+int prom_getint(phandle node, char *property);

 /* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, char *property, int defval);
+int prom_getintdefault(phandle node, char *property, int defval);

 /* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, char *prop);
+int prom_getbool(phandle node, char *prop);

 /* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
+void prom_getstring(phandle node, char *prop, char *buf, int bufsize);

 /* Search all siblings starting at the passed node for "name" matching
  * the given string.  Returns the node on success, zero on failure.
  */
-extern phandle prom_searchsiblings(phandle node_start, char *name);
+phandle prom_searchsiblings(phandle node_start, char *name);

 /* Returns the next property after the passed property for the given
  * node.  Returns null string on failure.
  */
-extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
+char *prom_nextprop(phandle node, char *prev_property, char *buffer);

 /* Returns phandle of the path specified */
-extern phandle prom_finddevice(char *name);
+phandle prom_finddevice(char *name);

 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
                  int value_size);

-extern phandle prom_inst2pkg(int);
+phandle prom_inst2pkg(int);

 /* Dorking with Bus ranges... */

 /* Apply promlib probes OBIO ranges to registers. */
-extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
+void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);

 /* Apply ranges of any prom node (and optionally parent node as well) to registers. */
-extern void prom_apply_generic_ranges(phandle node, phandle parent,
+void prom_apply_generic_ranges(phandle node, phandle parent,
                                struct linux_prom_registers *sbusregs, int nregs);

 void prom_ranges_init(void);

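The property accessors above follow the comments' contract (byte count on success, -1 on error). A minimal sketch of a lookup follows; the "compatible" property name is illustrative, and prom_root_node is assumed to be the usual extern phandle this header also declares (elided from the hunks shown):

    /* Minimal sketch: read a property from the root node's first child. */
    static void example_prom_probe(void)
    {
            char buf[64];
            phandle node = prom_getchild(prom_root_node);

            if (prom_getproperty(node, "compatible", buf, sizeof(buf)) > 0)
                    prom_printf("compatible: %s\n", buf);
    }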
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index a12dbe3b7762..f34682430fcf 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -62,100 +62,100 @@ struct linux_mem_p1275 {
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(void *cif_handler, void *cif_stack);
+void prom_init(void *cif_handler, void *cif_stack);

 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);

 /* Miscellaneous routines, don't really fit in any category per se. */

 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(const char *boot_command);
+void prom_reboot(const char *boot_command);

 /* Evaluate the forth string passed. */
-extern void prom_feval(const char *forth_string);
+void prom_feval(const char *forth_string);

 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);

 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void prom_halt(void) __attribute__ ((noreturn));
+void prom_halt(void) __attribute__ ((noreturn));

 /* Halt and power-off the machine. */
-extern void prom_halt_power_off(void) __attribute__ ((noreturn));
+void prom_halt_power_off(void) __attribute__ ((noreturn));

 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);

 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);

 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);

 /* Multiprocessor operations... */
 #ifdef CONFIG_SMP
 /* Start the CPU with the given device tree node at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);

 /* Start the CPU with the given cpu ID at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
+void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);

 /* Stop the CPU with the given cpu ID. */
-extern void prom_stopcpu_cpuid(int cpuid);
+void prom_stopcpu_cpuid(int cpuid);

 /* Stop the current CPU. */
-extern void prom_stopself(void);
+void prom_stopself(void);

 /* Idle the current CPU. */
-extern void prom_idleself(void);
+void prom_idleself(void);

 /* Resume the CPU with the passed device tree node. */
-extern void prom_resumecpu(int cpunode);
+void prom_resumecpu(int cpunode);
 #endif

 /* Power management interfaces. */

 /* Put the current CPU to sleep. */
-extern void prom_sleepself(void);
+void prom_sleepself(void);

 /* Put the entire system to sleep. */
-extern int prom_sleepsystem(void);
+int prom_sleepsystem(void);

 /* Initiate a wakeup event. */
-extern int prom_wakeupsystem(void);
+int prom_wakeupsystem(void);

 /* MMU and memory related OBP interfaces. */

 /* Get unique string identifying SIMM at given physical address. */
-extern int prom_getunumber(int syndrome_code,
+int prom_getunumber(int syndrome_code,
                     unsigned long phys_addr,
                     char *buf, int buflen);

 /* Retain physical memory to the caller across soft resets. */
148extern int prom_retain(const char *name, unsigned long size, 148int prom_retain(const char *name, unsigned long size,
149 unsigned long align, unsigned long *paddr); 149 unsigned long align, unsigned long *paddr);
150 150
151/* Load explicit I/D TLB entries into the calling processor. */ 151/* Load explicit I/D TLB entries into the calling processor. */
152extern long prom_itlb_load(unsigned long index, 152long prom_itlb_load(unsigned long index,
153 unsigned long tte_data, 153 unsigned long tte_data,
154 unsigned long vaddr); 154 unsigned long vaddr);
155 155
156extern long prom_dtlb_load(unsigned long index, 156long prom_dtlb_load(unsigned long index,
157 unsigned long tte_data, 157 unsigned long tte_data,
158 unsigned long vaddr); 158 unsigned long vaddr);
159 159
160/* Map/Unmap client program address ranges. First the format of 160/* Map/Unmap client program address ranges. First the format of
161 * the mapping mode argument. 161 * the mapping mode argument.
@@ -170,81 +170,81 @@ extern long prom_dtlb_load(unsigned long index,
170#define PROM_MAP_IE 0x0100 /* Invert-Endianness */ 170#define PROM_MAP_IE 0x0100 /* Invert-Endianness */
171#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED) 171#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
172 172
173extern int prom_map(int mode, unsigned long size, 173int prom_map(int mode, unsigned long size,
174 unsigned long vaddr, unsigned long paddr); 174 unsigned long vaddr, unsigned long paddr);
175extern void prom_unmap(unsigned long size, unsigned long vaddr); 175void prom_unmap(unsigned long size, unsigned long vaddr);
176 176
177 177
178/* PROM device tree traversal functions... */ 178/* PROM device tree traversal functions... */
179 179
180/* Get the child node of the given node, or zero if no child exists. */ 180/* Get the child node of the given node, or zero if no child exists. */
181extern phandle prom_getchild(phandle parent_node); 181phandle prom_getchild(phandle parent_node);
182 182
183/* Get the next sibling node of the given node, or zero if no further 183/* Get the next sibling node of the given node, or zero if no further
184 * siblings exist. 184 * siblings exist.
185 */ 185 */
186extern phandle prom_getsibling(phandle node); 186phandle prom_getsibling(phandle node);
187 187
188/* Get the length, at the passed node, of the given property type. 188/* Get the length, at the passed node, of the given property type.
189 * Returns -1 on error (ie. no such property at this node). 189 * Returns -1 on error (ie. no such property at this node).
190 */ 190 */
191extern int prom_getproplen(phandle thisnode, const char *property); 191int prom_getproplen(phandle thisnode, const char *property);
192 192
193/* Fetch the requested property using the given buffer. Returns 193/* Fetch the requested property using the given buffer. Returns
194 * the number of bytes the prom put into your buffer or -1 on error. 194 * the number of bytes the prom put into your buffer or -1 on error.
195 */ 195 */
196extern int prom_getproperty(phandle thisnode, const char *property, 196int prom_getproperty(phandle thisnode, const char *property,
197 char *prop_buffer, int propbuf_size); 197 char *prop_buffer, int propbuf_size);
198 198
199/* Acquire an integer property. */ 199/* Acquire an integer property. */
200extern int prom_getint(phandle node, const char *property); 200int prom_getint(phandle node, const char *property);
201 201
202/* Acquire an integer property, with a default value. */ 202/* Acquire an integer property, with a default value. */
203extern int prom_getintdefault(phandle node, const char *property, int defval); 203int prom_getintdefault(phandle node, const char *property, int defval);
204 204
205/* Acquire a boolean property, 0=FALSE 1=TRUE. */ 205/* Acquire a boolean property, 0=FALSE 1=TRUE. */
206extern int prom_getbool(phandle node, const char *prop); 206int prom_getbool(phandle node, const char *prop);
207 207
208/* Acquire a string property, null string on error. */ 208/* Acquire a string property, null string on error. */
209extern void prom_getstring(phandle node, const char *prop, char *buf, 209void prom_getstring(phandle node, const char *prop, char *buf,
210 int bufsize); 210 int bufsize);
211 211
212/* Does the passed node have the given "name"? YES=1 NO=0 */ 212/* Does the passed node have the given "name"? YES=1 NO=0 */
213extern int prom_nodematch(phandle thisnode, const char *name); 213int prom_nodematch(phandle thisnode, const char *name);
214 214
215/* Search all siblings starting at the passed node for "name" matching 215/* Search all siblings starting at the passed node for "name" matching
216 * the given string. Returns the node on success, zero on failure. 216 * the given string. Returns the node on success, zero on failure.
217 */ 217 */
218extern phandle prom_searchsiblings(phandle node_start, const char *name); 218phandle prom_searchsiblings(phandle node_start, const char *name);
219 219
220/* Return the first property type, as a string, for the given node. 220/* Return the first property type, as a string, for the given node.
221 * Returns a null string on error. Buffer should be at least 32B long. 221 * Returns a null string on error. Buffer should be at least 32B long.
222 */ 222 */
223extern char *prom_firstprop(phandle node, char *buffer); 223char *prom_firstprop(phandle node, char *buffer);
224 224
225/* Returns the next property after the passed property for the given 225/* Returns the next property after the passed property for the given
226 * node. Returns null string on failure. Buffer should be at least 32B long. 226 * node. Returns null string on failure. Buffer should be at least 32B long.
227 */ 227 */
228extern char *prom_nextprop(phandle node, const char *prev_property, char *buf); 228char *prom_nextprop(phandle node, const char *prev_property, char *buf);
229 229
230/* Returns 1 if the specified node has given property. */ 230/* Returns 1 if the specified node has given property. */
231extern int prom_node_has_property(phandle node, const char *property); 231int prom_node_has_property(phandle node, const char *property);
232 232
233/* Returns phandle of the path specified */ 233/* Returns phandle of the path specified */
234extern phandle prom_finddevice(const char *name); 234phandle prom_finddevice(const char *name);
235 235
236/* Set the indicated property at the given node with the passed value. 236/* Set the indicated property at the given node with the passed value.
237 * Returns the number of bytes of your value that the prom took. 237 * Returns the number of bytes of your value that the prom took.
238 */ 238 */
239extern int prom_setprop(phandle node, const char *prop_name, char *prop_value, 239int prom_setprop(phandle node, const char *prop_name, char *prop_value,
240 int value_size); 240 int value_size);
241 241
242extern phandle prom_inst2pkg(int); 242phandle prom_inst2pkg(int);
243extern void prom_sun4v_guest_soft_state(void); 243void prom_sun4v_guest_soft_state(void);
244 244
245extern int prom_ihandle2path(int handle, char *buffer, int bufsize); 245int prom_ihandle2path(int handle, char *buffer, int bufsize);
246 246
247/* Client interface level routines. */ 247/* Client interface level routines. */
248extern void p1275_cmd_direct(unsigned long *); 248void p1275_cmd_direct(unsigned long *);
249 249
250#endif /* !(__SPARC64_OPLIB_H) */ 250#endif /* !(__SPARC64_OPLIB_H) */
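The declarations above form the OBP device-tree access API (prom_finddevice(), prom_getproplen(), prom_getstring(), prom_printf(), ...). A minimal usage sketch, not taken from this patch; the "/openprom" path and the "version" property are illustrative assumptions:

    static void example_print_obp_version(void)
    {
            char buf[64];
            phandle node = prom_finddevice("/openprom");

            /* prom_getproplen() returns -1 when the property is absent. */
            if (prom_getproplen(node, "version") > 0) {
                    prom_getstring(node, "version", buf, sizeof(buf));
                    prom_printf("OBP version: %s\n", buf);
            }
    }
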
diff --git a/arch/sparc/include/asm/page.h b/arch/sparc/include/asm/page.h
index f21de0349025..1be2fdec6268 100644
--- a/arch/sparc/include/asm/page.h
+++ b/arch/sparc/include/asm/page.h
@@ -1,5 +1,8 @@
1#ifndef ___ASM_SPARC_PAGE_H 1#ifndef ___ASM_SPARC_PAGE_H
2#define ___ASM_SPARC_PAGE_H 2#define ___ASM_SPARC_PAGE_H
3
4#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
5
3#if defined(__sparc__) && defined(__arch64__) 6#if defined(__sparc__) && defined(__arch64__)
4#include <asm/page_64.h> 7#include <asm/page_64.h>
5#else 8#else
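The only functional change in this hunk is the new shared page_to_phys() helper, which turns a struct page pointer into its physical address via the page frame number. A tiny sketch; the pfn value is made up and the 4 KiB page size is an assumption for the example (sparc64 actually uses 8 KiB pages):

    static phys_addr_t example_page_to_phys(void)
    {
            struct page *pg = pfn_to_page(0x1234);  /* hypothetical pfn */

            /* Expands to page_to_pfn(pg) << PAGE_SHIFT,
             * i.e. 0x1234000 with 4 KiB pages.       */
            return page_to_phys(pg);
    }
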
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index aac53fcea807..bf109984a032 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -31,17 +31,17 @@
31 31
32#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 32#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
33struct pt_regs; 33struct pt_regs;
34extern void hugetlb_setup(struct pt_regs *regs); 34void hugetlb_setup(struct pt_regs *regs);
35#endif 35#endif
36 36
37#define WANT_PAGE_VIRTUAL 37#define WANT_PAGE_VIRTUAL
38 38
39extern void _clear_page(void *page); 39void _clear_page(void *page);
40#define clear_page(X) _clear_page((void *)(X)) 40#define clear_page(X) _clear_page((void *)(X))
41struct page; 41struct page;
42extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page); 42void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
43#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE) 43#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
44extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage); 44void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
45 45
46/* Unlike sparc32, sparc64's parameter passing API is more 46/* Unlike sparc32, sparc64's parameter passing API is more
47 * sane in that structures which as small enough are passed 47 * sane in that structures which as small enough are passed
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index c6c7396e7627..bd00a6226169 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -52,7 +52,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
52 52
53/* Return the index of the PCI controller for device PDEV. */ 53/* Return the index of the PCI controller for device PDEV. */
54 54
55extern int pci_domain_nr(struct pci_bus *bus); 55int pci_domain_nr(struct pci_bus *bus);
56static inline int pci_proc_domain(struct pci_bus *bus) 56static inline int pci_proc_domain(struct pci_bus *bus)
57{ 57{
58 return 1; 58 return 1;
@@ -64,9 +64,9 @@ static inline int pci_proc_domain(struct pci_bus *bus)
64#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA 64#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
65#define get_pci_unmapped_area get_fb_unmapped_area 65#define get_pci_unmapped_area get_fb_unmapped_area
66 66
67extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 67int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
68 enum pci_mmap_state mmap_state, 68 enum pci_mmap_state mmap_state,
69 int write_combine); 69 int write_combine);
70 70
71static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 71static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
72{ 72{
@@ -74,9 +74,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
74} 74}
75 75
76#define HAVE_ARCH_PCI_RESOURCE_TO_USER 76#define HAVE_ARCH_PCI_RESOURCE_TO_USER
77extern void pci_resource_to_user(const struct pci_dev *dev, int bar, 77void pci_resource_to_user(const struct pci_dev *dev, int bar,
78 const struct resource *rsrc, 78 const struct resource *rsrc,
79 resource_size_t *start, resource_size_t *end); 79 resource_size_t *start, resource_size_t *end);
80#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
81 81
82#endif /* __SPARC64_PCI_H */ 82#endif /* __SPARC64_PCI_H */
diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h
index 6676cbcc8b6a..f41706792592 100644
--- a/arch/sparc/include/asm/pcic.h
+++ b/arch/sparc/include/asm/pcic.h
@@ -30,10 +30,10 @@ struct linux_pcic {
30}; 30};
31 31
32#ifdef CONFIG_PCIC_PCI 32#ifdef CONFIG_PCIC_PCI
33extern int pcic_present(void); 33int pcic_present(void);
34extern int pcic_probe(void); 34int pcic_probe(void);
35extern void pci_time_init(void); 35void pci_time_init(void);
36extern void sun4m_pci_init_IRQ(void); 36void sun4m_pci_init_IRQ(void);
37#else 37#else
38static inline int pcic_present(void) { return 0; } 38static inline int pcic_present(void) { return 0; }
39static inline int pcic_probe(void) { return 0; } 39static inline int pcic_probe(void) { return 0; }
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index 942bb17f60cd..cdf800c3326c 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -12,8 +12,8 @@ struct pcr_ops {
12}; 12};
13extern const struct pcr_ops *pcr_ops; 13extern const struct pcr_ops *pcr_ops;
14 14
15extern void deferred_pcr_work_irq(int irq, struct pt_regs *regs); 15void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
16extern void schedule_deferred_pcr_work(void); 16void schedule_deferred_pcr_work(void);
17 17
18#define PCR_PIC_PRIV 0x00000001 /* PIC access is privileged */ 18#define PCR_PIC_PRIV 0x00000001 /* PIC access is privileged */
19#define PCR_STRACE 0x00000002 /* Trace supervisor events */ 19#define PCR_STRACE 0x00000002 /* Trace supervisor events */
@@ -45,6 +45,6 @@ extern void schedule_deferred_pcr_work(void);
45#define PCR_N4_PICNHT 0x00020000 /* PIC non-hypervisor trap */ 45#define PCR_N4_PICNHT 0x00020000 /* PIC non-hypervisor trap */
46#define PCR_N4_NTC 0x00040000 /* Next-To-Commit wrap */ 46#define PCR_N4_NTC 0x00040000 /* Next-To-Commit wrap */
47 47
48extern int pcr_arch_init(void); 48int pcr_arch_init(void);
49 49
50#endif /* __PCR_H */ 50#endif /* __PCR_H */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 9b1c36de0f18..a3890da94428 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -14,6 +14,8 @@ struct page;
14void *srmmu_get_nocache(int size, int align); 14void *srmmu_get_nocache(int size, int align);
15void srmmu_free_nocache(void *addr, int size); 15void srmmu_free_nocache(void *addr, int size);
16 16
17extern struct resource sparc_iomap;
18
17#define check_pgt_cache() do { } while (0) 19#define check_pgt_cache() do { } while (0)
18 20
19pgd_t *get_pgd_fast(void); 21pgd_t *get_pgd_fast(void);
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index bcfe063bce23..39a7ac49b00c 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -38,12 +38,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
38 kmem_cache_free(pgtable_cache, pmd); 38 kmem_cache_free(pgtable_cache, pmd);
39} 39}
40 40
41extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 41pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
42 unsigned long address); 42 unsigned long address);
43extern pgtable_t pte_alloc_one(struct mm_struct *mm, 43pgtable_t pte_alloc_one(struct mm_struct *mm,
44 unsigned long address); 44 unsigned long address);
45extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte); 45void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
46extern void pte_free(struct mm_struct *mm, pgtable_t ptepage); 46void pte_free(struct mm_struct *mm, pgtable_t ptepage);
47 47
48#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE) 48#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
49#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE) 49#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
@@ -51,12 +51,12 @@ extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
51 51
52#define check_pgt_cache() do { } while (0) 52#define check_pgt_cache() do { } while (0)
53 53
54extern void pgtable_free(void *table, bool is_page); 54void pgtable_free(void *table, bool is_page);
55 55
56#ifdef CONFIG_SMP 56#ifdef CONFIG_SMP
57 57
58struct mmu_gather; 58struct mmu_gather;
59extern void tlb_remove_table(struct mmu_gather *, void *); 59void tlb_remove_table(struct mmu_gather *, void *);
60 60
61static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page) 61static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
62{ 62{
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 502f632f6cc7..b9b91ae19fe1 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -25,8 +25,9 @@
25struct vm_area_struct; 25struct vm_area_struct;
26struct page; 26struct page;
27 27
28extern void load_mmu(void); 28void load_mmu(void);
29extern unsigned long calc_highpages(void); 29unsigned long calc_highpages(void);
30unsigned long __init bootmem_init(unsigned long *pages_avail);
30 31
31#define pte_ERROR(e) __builtin_trap() 32#define pte_ERROR(e) __builtin_trap()
32#define pmd_ERROR(e) __builtin_trap() 33#define pmd_ERROR(e) __builtin_trap()
@@ -56,7 +57,7 @@ extern unsigned long calc_highpages(void);
56 * srmmu.c will assign the real one (which is dynamically sized) */ 57 * srmmu.c will assign the real one (which is dynamically sized) */
57#define swapper_pg_dir NULL 58#define swapper_pg_dir NULL
58 59
59extern void paging_init(void); 60void paging_init(void);
60 61
61extern unsigned long ptr_in_current_pgd; 62extern unsigned long ptr_in_current_pgd;
62 63
@@ -428,8 +429,8 @@ extern unsigned long *sparc_valid_addr_bitmap;
428#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 429#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
429#define GET_PFN(pfn) (pfn & 0x0fffffffUL) 430#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
430 431
431extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long, 432int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
432 unsigned long, pgprot_t); 433 unsigned long, pgprot_t);
433 434
434static inline int io_remap_pfn_range(struct vm_area_struct *vma, 435static inline int io_remap_pfn_range(struct vm_area_struct *vma,
435 unsigned long from, unsigned long pfn, 436 unsigned long from, unsigned long pfn,
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1a49ffdf9da9..3770bf5c6e1b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -210,9 +210,9 @@ static inline bool kern_addr_valid(unsigned long addr)
210 210
211#ifndef __ASSEMBLY__ 211#ifndef __ASSEMBLY__
212 212
213extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); 213pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
214 214
215extern unsigned long pte_sz_bits(unsigned long size); 215unsigned long pte_sz_bits(unsigned long size);
216 216
217extern pgprot_t PAGE_KERNEL; 217extern pgprot_t PAGE_KERNEL;
218extern pgprot_t PAGE_KERNEL_LOCKED; 218extern pgprot_t PAGE_KERNEL_LOCKED;
@@ -780,8 +780,8 @@ static inline int pmd_present(pmd_t pmd)
780 !__kern_addr_valid(pud_val(pud))) 780 !__kern_addr_valid(pud_val(pud)))
781 781
782#ifdef CONFIG_TRANSPARENT_HUGEPAGE 782#ifdef CONFIG_TRANSPARENT_HUGEPAGE
783extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, 783void set_pmd_at(struct mm_struct *mm, unsigned long addr,
784 pmd_t *pmdp, pmd_t pmd); 784 pmd_t *pmdp, pmd_t pmd);
785#else 785#else
786static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 786static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
787 pmd_t *pmdp, pmd_t pmd) 787 pmd_t *pmdp, pmd_t pmd)
@@ -840,8 +840,8 @@ static inline unsigned long __pmd_page(pmd_t pmd)
840#define pte_unmap(pte) do { } while (0) 840#define pte_unmap(pte) do { } while (0)
841 841
842/* Actual page table PTE updates. */ 842/* Actual page table PTE updates. */
843extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, 843void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
844 pte_t *ptep, pte_t orig, int fullmm); 844 pte_t *ptep, pte_t orig, int fullmm);
845 845
846#define __HAVE_ARCH_PMDP_GET_AND_CLEAR 846#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
847static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, 847static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
@@ -900,28 +900,28 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
900extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 900extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
901extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD]; 901extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
902 902
903extern void paging_init(void); 903void paging_init(void);
904extern unsigned long find_ecache_flush_span(unsigned long size); 904unsigned long find_ecache_flush_span(unsigned long size);
905 905
906struct seq_file; 906struct seq_file;
907extern void mmu_info(struct seq_file *); 907void mmu_info(struct seq_file *);
908 908
909struct vm_area_struct; 909struct vm_area_struct;
910extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); 910void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
911#ifdef CONFIG_TRANSPARENT_HUGEPAGE 911#ifdef CONFIG_TRANSPARENT_HUGEPAGE
912extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 912void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
913 pmd_t *pmd); 913 pmd_t *pmd);
914 914
915#define __HAVE_ARCH_PMDP_INVALIDATE 915#define __HAVE_ARCH_PMDP_INVALIDATE
916extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 916extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
917 pmd_t *pmdp); 917 pmd_t *pmdp);
918 918
919#define __HAVE_ARCH_PGTABLE_DEPOSIT 919#define __HAVE_ARCH_PGTABLE_DEPOSIT
920extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 920void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
921 pgtable_t pgtable); 921 pgtable_t pgtable);
922 922
923#define __HAVE_ARCH_PGTABLE_WITHDRAW 923#define __HAVE_ARCH_PGTABLE_WITHDRAW
924extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 924pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
925#endif 925#endif
926 926
927/* Encode and de-code a swap entry */ 927/* Encode and de-code a swap entry */
@@ -937,12 +937,12 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
937#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 937#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
938 938
939/* File offset in PTE support. */ 939/* File offset in PTE support. */
940extern unsigned long pte_file(pte_t); 940unsigned long pte_file(pte_t);
941#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 941#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
942extern pte_t pgoff_to_pte(unsigned long); 942pte_t pgoff_to_pte(unsigned long);
943#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 943#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
944 944
945extern int page_in_phys_avail(unsigned long paddr); 945int page_in_phys_avail(unsigned long paddr);
946 946
947/* 947/*
948 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 948 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
@@ -952,8 +952,8 @@ extern int page_in_phys_avail(unsigned long paddr);
952#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 952#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
953#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL) 953#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
954 954
955extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long, 955int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
956 unsigned long, pgprot_t); 956 unsigned long, pgprot_t);
957 957
958static inline int io_remap_pfn_range(struct vm_area_struct *vma, 958static inline int io_remap_pfn_range(struct vm_area_struct *vma,
959 unsigned long from, unsigned long pfn, 959 unsigned long from, unsigned long pfn,
@@ -981,20 +981,20 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
981/* We provide a special get_unmapped_area for framebuffer mmaps to try and use 981/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
982 * the largest alignment possible such that larget PTEs can be used. 982 * the largest alignment possible such that larget PTEs can be used.
983 */ 983 */
984extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, 984unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
985 unsigned long, unsigned long, 985 unsigned long, unsigned long,
986 unsigned long); 986 unsigned long);
987#define HAVE_ARCH_FB_UNMAPPED_AREA 987#define HAVE_ARCH_FB_UNMAPPED_AREA
988 988
989extern void pgtable_cache_init(void); 989void pgtable_cache_init(void);
990extern void sun4v_register_fault_status(void); 990void sun4v_register_fault_status(void);
991extern void sun4v_ktsb_register(void); 991void sun4v_ktsb_register(void);
992extern void __init cheetah_ecache_flush_init(void); 992void __init cheetah_ecache_flush_init(void);
993extern void sun4v_patch_tlb_handlers(void); 993void sun4v_patch_tlb_handlers(void);
994 994
995extern unsigned long cmdline_memory_size; 995extern unsigned long cmdline_memory_size;
996 996
997extern asmlinkage void do_sparc64_fault(struct pt_regs *regs); 997asmlinkage void do_sparc64_fault(struct pt_regs *regs);
998 998
999#endif /* !(__ASSEMBLY__) */ 999#endif /* !(__ASSEMBLY__) */
1000 1000
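As the comment above notes, the pfn handed to io_remap_pfn_range() on sparc carries the I/O space number in its top four bits, and GET_IOSPACE()/GET_PFN() split it apart again. A sketch with made-up sparc64 values:

    static void example_iospace_encoding(void)
    {
            unsigned long iospace = 0x8;              /* hypothetical space id  */
            unsigned long paddr   = 0x1fe02000000UL;  /* hypothetical phys addr */
            unsigned long pfn     = (iospace << (BITS_PER_LONG - 4)) |
                                    (paddr >> PAGE_SHIFT);

            /* The macros defined above recover both halves. */
            BUG_ON(GET_IOSPACE(pfn) != iospace);
            BUG_ON(GET_PFN(pfn) != (paddr >> PAGE_SHIFT));
    }
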
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 2c7baa4c4505..a564817bbc2e 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -74,7 +74,7 @@ struct thread_struct {
74} 74}
75 75
76/* Return saved PC of a blocked thread. */ 76/* Return saved PC of a blocked thread. */
77extern unsigned long thread_saved_pc(struct task_struct *t); 77unsigned long thread_saved_pc(struct task_struct *t);
78 78
79/* Do necessary setup to start up a newly executed thread. */ 79/* Do necessary setup to start up a newly executed thread. */
80static inline void start_thread(struct pt_regs * regs, unsigned long pc, 80static inline void start_thread(struct pt_regs * regs, unsigned long pc,
@@ -107,7 +107,7 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
107/* Free all resources held by a thread. */ 107/* Free all resources held by a thread. */
108#define release_thread(tsk) do { } while(0) 108#define release_thread(tsk) do { } while(0)
109 109
110extern unsigned long get_wchan(struct task_struct *); 110unsigned long get_wchan(struct task_struct *);
111 111
112#define task_pt_regs(tsk) ((tsk)->thread.kregs) 112#define task_pt_regs(tsk) ((tsk)->thread.kregs)
113#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc) 113#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
@@ -116,6 +116,7 @@ extern unsigned long get_wchan(struct task_struct *);
116#ifdef __KERNEL__ 116#ifdef __KERNEL__
117 117
118extern struct task_struct *last_task_used_math; 118extern struct task_struct *last_task_used_math;
119int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
119 120
120#define cpu_relax() barrier() 121#define cpu_relax() barrier()
121extern void (*sparc_idle)(void); 122extern void (*sparc_idle)(void);
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 4c3f7f01c709..7028fe1a7c04 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -95,7 +95,7 @@ struct thread_struct {
95 95
96/* Return saved PC of a blocked thread. */ 96/* Return saved PC of a blocked thread. */
97struct task_struct; 97struct task_struct;
98extern unsigned long thread_saved_pc(struct task_struct *); 98unsigned long thread_saved_pc(struct task_struct *);
99 99
100/* On Uniprocessor, even in RMO processes see TSO semantics */ 100/* On Uniprocessor, even in RMO processes see TSO semantics */
101#ifdef CONFIG_SMP 101#ifdef CONFIG_SMP
@@ -194,7 +194,7 @@ do { \
194/* Free all resources held by a thread. */ 194/* Free all resources held by a thread. */
195#define release_thread(tsk) do { } while (0) 195#define release_thread(tsk) do { } while (0)
196 196
197extern unsigned long get_wchan(struct task_struct *task); 197unsigned long get_wchan(struct task_struct *task);
198 198
199#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs) 199#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
200#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) 200#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
@@ -253,6 +253,8 @@ static inline void prefetchw(const void *x)
253 253
254#define HAVE_ARCH_PICK_MMAP_LAYOUT 254#define HAVE_ARCH_PICK_MMAP_LAYOUT
255 255
256int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);
257
256#endif /* !(__ASSEMBLY__) */ 258#endif /* !(__ASSEMBLY__) */
257 259
258#endif /* !(__ASM_SPARC64_PROCESSOR_H) */ 260#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 11ebd659e7b6..d955c8df62d6 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -36,28 +36,28 @@ struct of_irq_controller {
36 void *data; 36 void *data;
37}; 37};
38 38
39extern struct device_node *of_find_node_by_cpuid(int cpuid); 39struct device_node *of_find_node_by_cpuid(int cpuid);
40extern int of_set_property(struct device_node *node, const char *name, void *val, int len); 40int of_set_property(struct device_node *node, const char *name, void *val, int len);
41extern struct mutex of_set_property_mutex; 41extern struct mutex of_set_property_mutex;
42extern int of_getintprop_default(struct device_node *np, 42int of_getintprop_default(struct device_node *np,
43 const char *name, 43 const char *name,
44 int def); 44 int def);
45extern int of_find_in_proplist(const char *list, const char *match, int len); 45int of_find_in_proplist(const char *list, const char *match, int len);
46 46
47extern void prom_build_devicetree(void); 47void prom_build_devicetree(void);
48extern void of_populate_present_mask(void); 48void of_populate_present_mask(void);
49extern void of_fill_in_cpu_data(void); 49void of_fill_in_cpu_data(void);
50 50
51struct resource; 51struct resource;
52extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name); 52void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
53extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size); 53void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
54 54
55extern struct device_node *of_console_device; 55extern struct device_node *of_console_device;
56extern char *of_console_path; 56extern char *of_console_path;
57extern char *of_console_options; 57extern char *of_console_options;
58 58
59extern void irq_trans_init(struct device_node *dp); 59void irq_trans_init(struct device_node *dp);
60extern char *build_path_component(struct device_node *dp); 60char *build_path_component(struct device_node *dp);
61 61
62#endif /* __KERNEL__ */ 62#endif /* __KERNEL__ */
63#endif /* _SPARC_PROM_H */ 63#endif /* _SPARC_PROM_H */
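of_getintprop_default() declared above is the sparc-specific helper for reading an integer property with a fallback value. A usage sketch; the cpu id and the "clock-frequency" property name are assumptions chosen for the example:

    static int example_cpu0_clock(void)
    {
            struct device_node *dp = of_find_node_by_cpuid(0);

            return dp ? of_getintprop_default(dp, "clock-frequency", 0) : 0;
    }
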
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index bdfafd7af46f..bac6a946ee00 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -73,7 +73,7 @@ static inline long regs_return_value(struct pt_regs *regs)
73 return regs->u_regs[UREG_I0]; 73 return regs->u_regs[UREG_I0];
74} 74}
75#ifdef CONFIG_SMP 75#ifdef CONFIG_SMP
76extern unsigned long profile_pc(struct pt_regs *); 76unsigned long profile_pc(struct pt_regs *);
77#else 77#else
78#define profile_pc(regs) instruction_pointer(regs) 78#define profile_pc(regs) instruction_pointer(regs)
79#endif 79#endif
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 5e35e0517318..f5fffd84d0dd 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -4,8 +4,9 @@
4#ifndef _SPARC_SETUP_H 4#ifndef _SPARC_SETUP_H
5#define _SPARC_SETUP_H 5#define _SPARC_SETUP_H
6 6
7#include <uapi/asm/setup.h> 7#include <linux/interrupt.h>
8 8
9#include <uapi/asm/setup.h>
9 10
10extern char reboot_command[]; 11extern char reboot_command[];
11 12
@@ -22,9 +23,43 @@ static inline int con_is_present(void)
22{ 23{
23 return serial_console ? 0 : 1; 24 return serial_console ? 0 : 1;
24} 25}
26
27/* from irq_32.c */
28extern volatile unsigned char *fdc_status;
29extern char *pdma_vaddr;
30extern unsigned long pdma_size;
31extern volatile int doing_pdma;
32
33/* This is software state */
34extern char *pdma_base;
35extern unsigned long pdma_areasize;
36
37int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
38
39/* setup_32.c */
40extern unsigned long cmdline_memory_size;
41
42/* devices.c */
43void __init device_scan(void);
44
45/* unaligned_32.c */
46unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
47
48#endif
49
50#ifdef CONFIG_SPARC64
51/* unaligned_64.c */
52int handle_ldf_stq(u32 insn, struct pt_regs *regs);
53void handle_ld_nf(u32 insn, struct pt_regs *regs);
54
55/* init_64.c */
56extern atomic_t dcpage_flushes;
57extern atomic_t dcpage_flushes_xcall;
58
59extern int sysctl_tsb_ratio;
25#endif 60#endif
26 61
27extern void sun_do_break(void); 62void sun_do_break(void);
28extern int stop_a_enabled; 63extern int stop_a_enabled;
29extern int scons_pwroff; 64extern int scons_pwroff;
30 65
diff --git a/arch/sparc/include/asm/sfp-machine_32.h b/arch/sparc/include/asm/sfp-machine_32.h
index 01d9c3b5a73b..838c9d58f3b4 100644
--- a/arch/sparc/include/asm/sfp-machine_32.h
+++ b/arch/sparc/include/asm/sfp-machine_32.h
@@ -79,9 +79,9 @@
79 __asm__ ("addcc %r7,%8,%2\n\t" \ 79 __asm__ ("addcc %r7,%8,%2\n\t" \
80 "addxcc %r5,%6,%1\n\t" \ 80 "addxcc %r5,%6,%1\n\t" \
81 "addx %r3,%4,%0\n" \ 81 "addx %r3,%4,%0\n" \
82 : "=r" ((USItype)(r2)), \ 82 : "=r" (r2), \
83 "=&r" ((USItype)(r1)), \ 83 "=&r" (r1), \
84 "=&r" ((USItype)(r0)) \ 84 "=&r" (r0) \
85 : "%rJ" ((USItype)(x2)), \ 85 : "%rJ" ((USItype)(x2)), \
86 "rI" ((USItype)(y2)), \ 86 "rI" ((USItype)(y2)), \
87 "%rJ" ((USItype)(x1)), \ 87 "%rJ" ((USItype)(x1)), \
@@ -94,9 +94,9 @@
94 __asm__ ("subcc %r7,%8,%2\n\t" \ 94 __asm__ ("subcc %r7,%8,%2\n\t" \
95 "subxcc %r5,%6,%1\n\t" \ 95 "subxcc %r5,%6,%1\n\t" \
96 "subx %r3,%4,%0\n" \ 96 "subx %r3,%4,%0\n" \
97 : "=r" ((USItype)(r2)), \ 97 : "=r" (r2), \
98 "=&r" ((USItype)(r1)), \ 98 "=&r" (r1), \
99 "=&r" ((USItype)(r0)) \ 99 "=&r" (r0) \
100 : "%rJ" ((USItype)(x2)), \ 100 : "%rJ" ((USItype)(x2)), \
101 "rI" ((USItype)(y2)), \ 101 "rI" ((USItype)(y2)), \
102 "%rJ" ((USItype)(x1)), \ 102 "%rJ" ((USItype)(x1)), \
@@ -115,8 +115,8 @@
115 "addxcc %r6,%7,%0\n\t" \ 115 "addxcc %r6,%7,%0\n\t" \
116 "addxcc %r4,%5,%%g2\n\t" \ 116 "addxcc %r4,%5,%%g2\n\t" \
117 "addx %r2,%3,%%g1\n\t" \ 117 "addx %r2,%3,%%g1\n\t" \
118 : "=&r" ((USItype)(r1)), \ 118 : "=&r" (r1), \
119 "=&r" ((USItype)(r0)) \ 119 "=&r" (r0) \
120 : "%rJ" ((USItype)(x3)), \ 120 : "%rJ" ((USItype)(x3)), \
121 "rI" ((USItype)(y3)), \ 121 "rI" ((USItype)(y3)), \
122 "%rJ" ((USItype)(x2)), \ 122 "%rJ" ((USItype)(x2)), \
@@ -140,8 +140,8 @@
140 "subxcc %r6,%7,%0\n\t" \ 140 "subxcc %r6,%7,%0\n\t" \
141 "subxcc %r4,%5,%%g2\n\t" \ 141 "subxcc %r4,%5,%%g2\n\t" \
142 "subx %r2,%3,%%g1\n\t" \ 142 "subx %r2,%3,%%g1\n\t" \
143 : "=&r" ((USItype)(r1)), \ 143 : "=&r" (r1), \
144 "=&r" ((USItype)(r0)) \ 144 "=&r" (r0) \
145 : "%rJ" ((USItype)(x3)), \ 145 : "%rJ" ((USItype)(x3)), \
146 "rI" ((USItype)(y3)), \ 146 "rI" ((USItype)(y3)), \
147 "%rJ" ((USItype)(x2)), \ 147 "%rJ" ((USItype)(x2)), \
@@ -164,10 +164,10 @@
164 "addxcc %2,%%g0,%2\n\t" \ 164 "addxcc %2,%%g0,%2\n\t" \
165 "addxcc %1,%%g0,%1\n\t" \ 165 "addxcc %1,%%g0,%1\n\t" \
166 "addx %0,%%g0,%0\n\t" \ 166 "addx %0,%%g0,%0\n\t" \
167 : "=&r" ((USItype)(x3)), \ 167 : "=&r" (x3), \
168 "=&r" ((USItype)(x2)), \ 168 "=&r" (x2), \
169 "=&r" ((USItype)(x1)), \ 169 "=&r" (x1), \
170 "=&r" ((USItype)(x0)) \ 170 "=&r" (x0) \
171 : "rI" ((USItype)(i)), \ 171 : "rI" ((USItype)(i)), \
172 "0" ((USItype)(x3)), \ 172 "0" ((USItype)(x3)), \
173 "1" ((USItype)(x2)), \ 173 "1" ((USItype)(x2)), \
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 3c8917f054de..7c24e08a88d2 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -93,15 +93,15 @@ static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
93 arg1, arg2, arg3, arg4); 93 arg1, arg2, arg3, arg4);
94} 94}
95 95
96extern void arch_send_call_function_single_ipi(int cpu); 96void arch_send_call_function_single_ipi(int cpu);
97extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 97void arch_send_call_function_ipi_mask(const struct cpumask *mask);
98 98
99static inline int cpu_logical_map(int cpu) 99static inline int cpu_logical_map(int cpu)
100{ 100{
101 return cpu; 101 return cpu;
102} 102}
103 103
104extern int hard_smp_processor_id(void); 104int hard_smp_processor_id(void);
105 105
106#define raw_smp_processor_id() (current_thread_info()->cpu) 106#define raw_smp_processor_id() (current_thread_info()->cpu)
107 107
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 05710393959f..26d9e7726867 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -33,29 +33,35 @@
33DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 33DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
34extern cpumask_t cpu_core_map[NR_CPUS]; 34extern cpumask_t cpu_core_map[NR_CPUS];
35 35
36extern void arch_send_call_function_single_ipi(int cpu); 36void arch_send_call_function_single_ipi(int cpu);
37extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 37void arch_send_call_function_ipi_mask(const struct cpumask *mask);
38 38
39/* 39/*
40 * General functions that each host system must provide. 40 * General functions that each host system must provide.
41 */ 41 */
42 42
43extern int hard_smp_processor_id(void); 43int hard_smp_processor_id(void);
44#define raw_smp_processor_id() (current_thread_info()->cpu) 44#define raw_smp_processor_id() (current_thread_info()->cpu)
45 45
46extern void smp_fill_in_sib_core_maps(void); 46void smp_fill_in_sib_core_maps(void);
47extern void cpu_play_dead(void); 47void cpu_play_dead(void);
48 48
49extern void smp_fetch_global_regs(void); 49void smp_fetch_global_regs(void);
50extern void smp_fetch_global_pmu(void); 50void smp_fetch_global_pmu(void);
51 51
52struct seq_file; 52struct seq_file;
53void smp_bogo(struct seq_file *); 53void smp_bogo(struct seq_file *);
54void smp_info(struct seq_file *); 54void smp_info(struct seq_file *);
55 55
56void smp_callin(void);
57void cpu_panic(void);
58void smp_synchronize_tick_client(void);
59void smp_capture(void);
60void smp_release(void);
61
56#ifdef CONFIG_HOTPLUG_CPU 62#ifdef CONFIG_HOTPLUG_CPU
57extern int __cpu_disable(void); 63int __cpu_disable(void);
58extern void __cpu_die(unsigned int cpu); 64void __cpu_die(unsigned int cpu);
59#endif 65#endif
60 66
61#endif /* !(__ASSEMBLY__) */ 67#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 6b67e50fb9b4..3fc58691dbd0 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -62,7 +62,7 @@ extern enum ultra_tlb_layout tlb_type;
62extern int sun4v_chip_type; 62extern int sun4v_chip_type;
63 63
64extern int cheetah_pcache_forced_on; 64extern int cheetah_pcache_forced_on;
65extern void cheetah_enable_pcache(void); 65void cheetah_enable_pcache(void);
66 66
67#define sparc64_highest_locked_tlbent() \ 67#define sparc64_highest_locked_tlbent() \
68 (tlb_type == spitfire ? \ 68 (tlb_type == spitfire ? \
diff --git a/arch/sparc/include/asm/stacktrace.h b/arch/sparc/include/asm/stacktrace.h
index 6cee39adf6d6..c30d066f3048 100644
--- a/arch/sparc/include/asm/stacktrace.h
+++ b/arch/sparc/include/asm/stacktrace.h
@@ -1,6 +1,6 @@
1#ifndef _SPARC64_STACKTRACE_H 1#ifndef _SPARC64_STACKTRACE_H
2#define _SPARC64_STACKTRACE_H 2#define _SPARC64_STACKTRACE_H
3 3
4extern void stack_trace_flush(void); 4void stack_trace_flush(void);
5 5
6#endif /* _SPARC64_STACKTRACE_H */ 6#endif /* _SPARC64_STACKTRACE_H */
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index d56ce60a5992..c100dc27a0a9 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -11,10 +11,10 @@
11 11
12extern int this_is_starfire; 12extern int this_is_starfire;
13 13
14extern void check_if_starfire(void); 14void check_if_starfire(void);
15extern int starfire_hard_smp_processor_id(void); 15int starfire_hard_smp_processor_id(void);
16extern void starfire_hookup(int); 16void starfire_hookup(int);
17extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid); 17unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
18 18
19#endif 19#endif
20#endif 20#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 12f67857152e..69974e924611 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -15,7 +15,7 @@
15 15
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18extern void __memmove(void *,const void *,__kernel_size_t); 18void __memmove(void *,const void *,__kernel_size_t);
19 19
20#ifndef EXPORT_SYMTAB_STROPS 20#ifndef EXPORT_SYMTAB_STROPS
21 21
@@ -40,8 +40,8 @@ extern void __memmove(void *,const void *,__kernel_size_t);
40#undef memscan 40#undef memscan
41#define memscan(__arg0, __char, __arg2) \ 41#define memscan(__arg0, __char, __arg2) \
42({ \ 42({ \
43 extern void *__memscan_zero(void *, size_t); \ 43 void *__memscan_zero(void *, size_t); \
44 extern void *__memscan_generic(void *, int, size_t); \ 44 void *__memscan_generic(void *, int, size_t); \
45 void *__retval, *__addr = (__arg0); \ 45 void *__retval, *__addr = (__arg0); \
46 size_t __size = (__arg2); \ 46 size_t __size = (__arg2); \
47 \ 47 \
@@ -54,14 +54,14 @@ extern void __memmove(void *,const void *,__kernel_size_t);
54}) 54})
55 55
56#define __HAVE_ARCH_MEMCMP 56#define __HAVE_ARCH_MEMCMP
57extern int memcmp(const void *,const void *,__kernel_size_t); 57int memcmp(const void *,const void *,__kernel_size_t);
58 58
59/* Now the str*() stuff... */ 59/* Now the str*() stuff... */
60#define __HAVE_ARCH_STRLEN 60#define __HAVE_ARCH_STRLEN
61extern __kernel_size_t strlen(const char *); 61__kernel_size_t strlen(const char *);
62 62
63#define __HAVE_ARCH_STRNCMP 63#define __HAVE_ARCH_STRNCMP
64extern int strncmp(const char *, const char *, __kernel_size_t); 64int strncmp(const char *, const char *, __kernel_size_t);
65 65
66#endif /* !EXPORT_SYMTAB_STROPS */ 66#endif /* !EXPORT_SYMTAB_STROPS */
67 67
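The memscan() macro above dispatches to the hand-coded helpers whose prototypes lose their extern here; judging by the names, __memscan_zero() covers the search-for-zero case and __memscan_generic() the rest (the selection logic itself is in the lines elided from this hunk). The result follows the usual kernel memscan() contract: a pointer to the first matching byte, or one past the end of the area when there is no match.

    static void example_memscan(void)
    {
            char buf[] = "abc\0def";    /* 8 bytes including both NULs */

            /* First zero byte: &buf[3]. */
            BUG_ON(memscan(buf, 0, sizeof(buf)) != &buf[3]);

            /* No 'x' within the first four bytes: buf + 4. */
            BUG_ON(memscan(buf, 'x', 4) != buf + 4);
    }
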
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 9623bc213158..5936b8ff3c05 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -19,7 +19,7 @@
19 19
20/* First the mem*() things. */ 20/* First the mem*() things. */
21#define __HAVE_ARCH_MEMMOVE 21#define __HAVE_ARCH_MEMMOVE
22extern void *memmove(void *, const void *, __kernel_size_t); 22void *memmove(void *, const void *, __kernel_size_t);
23 23
24#define __HAVE_ARCH_MEMCPY 24#define __HAVE_ARCH_MEMCPY
25#define memcpy(t, f, n) __builtin_memcpy(t, f, n) 25#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
@@ -32,8 +32,8 @@ extern void *memmove(void *, const void *, __kernel_size_t);
32#undef memscan 32#undef memscan
33#define memscan(__arg0, __char, __arg2) \ 33#define memscan(__arg0, __char, __arg2) \
34({ \ 34({ \
35 extern void *__memscan_zero(void *, size_t); \ 35 void *__memscan_zero(void *, size_t); \
36 extern void *__memscan_generic(void *, int, size_t); \ 36 void *__memscan_generic(void *, int, size_t); \
37 void *__retval, *__addr = (__arg0); \ 37 void *__retval, *__addr = (__arg0); \
38 size_t __size = (__arg2); \ 38 size_t __size = (__arg2); \
39 \ 39 \
@@ -46,14 +46,14 @@ extern void *memmove(void *, const void *, __kernel_size_t);
46}) 46})
47 47
48#define __HAVE_ARCH_MEMCMP 48#define __HAVE_ARCH_MEMCMP
49extern int memcmp(const void *,const void *,__kernel_size_t); 49int memcmp(const void *,const void *,__kernel_size_t);
50 50
51/* Now the str*() stuff... */ 51/* Now the str*() stuff... */
52#define __HAVE_ARCH_STRLEN 52#define __HAVE_ARCH_STRLEN
53extern __kernel_size_t strlen(const char *); 53__kernel_size_t strlen(const char *);
54 54
55#define __HAVE_ARCH_STRNCMP 55#define __HAVE_ARCH_STRNCMP
56extern int strncmp(const char *, const char *, __kernel_size_t); 56int strncmp(const char *, const char *, __kernel_size_t);
57 57
58#endif /* !EXPORT_SYMTAB_STROPS */ 58#endif /* !EXPORT_SYMTAB_STROPS */
59 59
diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h
index e32e82b76eed..16f10374feb3 100644
--- a/arch/sparc/include/asm/switch_to_32.h
+++ b/arch/sparc/include/asm/switch_to_32.h
@@ -99,8 +99,8 @@ extern struct thread_info *current_set[NR_CPUS];
99 "o0", "o1", "o2", "o3", "o7"); \ 99 "o0", "o1", "o2", "o3", "o7"); \
100 } while(0) 100 } while(0)
101 101
102extern void fpsave(unsigned long *fpregs, unsigned long *fsr, 102void fpsave(unsigned long *fpregs, unsigned long *fsr,
103 void *fpqueue, unsigned long *fpqdepth); 103 void *fpqueue, unsigned long *fpqdepth);
104extern void synchronize_user_stack(void); 104void synchronize_user_stack(void);
105 105
106#endif /* __SPARC_SWITCH_TO_H */ 106#endif /* __SPARC_SWITCH_TO_H */
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 8d284801f232..10e76332dc99 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -65,7 +65,7 @@ do { save_and_clear_fpu(); \
65 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ 65 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
66} while(0) 66} while(0)
67 67
68extern void synchronize_user_stack(void); 68void synchronize_user_stack(void);
69extern void fault_in_user_windows(void); 69void fault_in_user_windows(void);
70 70
71#endif /* __SPARC64_SWITCH_TO_64_H */ 71#endif /* __SPARC64_SWITCH_TO_64_H */
diff --git a/arch/sparc/include/asm/syscalls.h b/arch/sparc/include/asm/syscalls.h
index bf8972adea17..b0a0db8ea61a 100644
--- a/arch/sparc/include/asm/syscalls.h
+++ b/arch/sparc/include/asm/syscalls.h
@@ -3,9 +3,9 @@
3 3
4struct pt_regs; 4struct pt_regs;
5 5
6extern asmlinkage long sparc_do_fork(unsigned long clone_flags, 6asmlinkage long sparc_do_fork(unsigned long clone_flags,
7 unsigned long stack_start, 7 unsigned long stack_start,
8 struct pt_regs *regs, 8 struct pt_regs *regs,
9 unsigned long stack_size); 9 unsigned long stack_size);
10 10
11#endif /* _SPARC64_SYSCALLS_H */ 11#endif /* _SPARC64_SYSCALLS_H */
diff --git a/arch/sparc/include/asm/timer_32.h b/arch/sparc/include/asm/timer_32.h
index 72f40a546de3..f8e708a0aa58 100644
--- a/arch/sparc/include/asm/timer_32.h
+++ b/arch/sparc/include/asm/timer_32.h
@@ -32,13 +32,13 @@ static inline unsigned int timer_value(unsigned int value)
32 return (value + 1) << TIMER_VALUE_SHIFT; 32 return (value + 1) << TIMER_VALUE_SHIFT;
33} 33}
34 34
35extern __volatile__ unsigned int *master_l10_counter; 35extern volatile u32 __iomem *master_l10_counter;
36 36
37extern irqreturn_t notrace timer_interrupt(int dummy, void *dev_id); 37irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
38 38
39#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
40DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent); 40DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent);
41extern void register_percpu_ce(int cpu); 41void register_percpu_ce(int cpu);
42#endif 42#endif
43 43
44#endif /* !(_SPARC_TIMER_H) */ 44#endif /* !(_SPARC_TIMER_H) */
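Besides the extern cleanup, this hunk retypes master_l10_counter as volatile u32 __iomem *: it points at a memory-mapped counter register, and the __iomem annotation lets sparse flag direct dereferences. Reads are then expected to go through an MMIO accessor; a sketch, assuming the sparc32 sbus_readl() helper is the appropriate one:

    static u32 example_read_l10(void)
    {
            /* A plain *master_l10_counter would now trigger sparse's
             * "dereference of noderef expression" warning.            */
            return sbus_readl(master_l10_counter);
    }
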
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index 01197d8215c4..fce415034000 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -23,8 +23,8 @@ struct sparc64_tick_ops {
23 23
24extern struct sparc64_tick_ops *tick_ops; 24extern struct sparc64_tick_ops *tick_ops;
25 25
26extern unsigned long sparc64_get_clock_tick(unsigned int cpu); 26unsigned long sparc64_get_clock_tick(unsigned int cpu);
27extern void setup_sparc64_timer(void); 27void setup_sparc64_timer(void);
28extern void __init time_init(void); 28void __init time_init(void);
29 29
30#endif /* _SPARC64_TIMER_H */ 30#endif /* _SPARC64_TIMER_H */
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index 190e18913cc6..4cb392f75d2b 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -8,19 +8,19 @@
8#include <asm/mmu_context.h> 8#include <asm/mmu_context.h>
9 9
10#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
11extern void smp_flush_tlb_pending(struct mm_struct *, 11void smp_flush_tlb_pending(struct mm_struct *,
12 unsigned long, unsigned long *); 12 unsigned long, unsigned long *);
13#endif 13#endif
14 14
15#ifdef CONFIG_SMP 15#ifdef CONFIG_SMP
16extern void smp_flush_tlb_mm(struct mm_struct *mm); 16void smp_flush_tlb_mm(struct mm_struct *mm);
17#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) 17#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
18#else 18#else
19#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT) 19#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
20#endif 20#endif
21 21
22extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *); 22void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
23extern void flush_tlb_pending(void); 23void flush_tlb_pending(void);
24 24
25#define tlb_start_vma(tlb, vma) do { } while (0) 25#define tlb_start_vma(tlb, vma) do { } while (0)
26#define tlb_end_vma(tlb, vma) do { } while (0) 26#define tlb_end_vma(tlb, vma) do { } while (0)
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 3c3c89f52643..816d8202fa0a 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -14,9 +14,9 @@ struct tlb_batch {
14 unsigned long vaddrs[TLB_BATCH_NR]; 14 unsigned long vaddrs[TLB_BATCH_NR];
15}; 15};
16 16
17extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); 17void flush_tsb_kernel_range(unsigned long start, unsigned long end);
18extern void flush_tsb_user(struct tlb_batch *tb); 18void flush_tsb_user(struct tlb_batch *tb);
19extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); 19void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
20 20
21/* TLB flush operations. */ 21/* TLB flush operations. */
22 22
@@ -36,15 +36,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
36 36
37#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 37#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
38 38
39extern void flush_tlb_pending(void); 39void flush_tlb_pending(void);
40extern void arch_enter_lazy_mmu_mode(void); 40void arch_enter_lazy_mmu_mode(void);
41extern void arch_leave_lazy_mmu_mode(void); 41void arch_leave_lazy_mmu_mode(void);
42#define arch_flush_lazy_mmu_mode() do {} while (0) 42#define arch_flush_lazy_mmu_mode() do {} while (0)
43 43
44/* Local cpu only. */ 44/* Local cpu only. */
45extern void __flush_tlb_all(void); 45void __flush_tlb_all(void);
46extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); 46void __flush_tlb_page(unsigned long context, unsigned long vaddr);
47extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); 47void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
48 48
49#ifndef CONFIG_SMP 49#ifndef CONFIG_SMP
50 50
@@ -60,8 +60,8 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
60 60
61#else /* CONFIG_SMP */ 61#else /* CONFIG_SMP */
62 62
63extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 63void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
64extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); 64void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
65 65
66#define flush_tlb_kernel_range(start, end) \ 66#define flush_tlb_kernel_range(start, end) \
67do { flush_tsb_kernel_range(start,end); \ 67do { flush_tsb_kernel_range(start,end); \
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index a2d10fc64faf..ed8f071132e4 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -18,7 +18,7 @@ static inline int cpu_to_node(int cpu)
18 18
19struct pci_bus; 19struct pci_bus;
20#ifdef CONFIG_PCI 20#ifdef CONFIG_PCI
21extern int pcibus_to_node(struct pci_bus *pbus); 21int pcibus_to_node(struct pci_bus *pbus);
22#else 22#else
23static inline int pcibus_to_node(struct pci_bus *pbus) 23static inline int pcibus_to_node(struct pci_bus *pbus)
24{ 24{
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 7e26b2db6211..6fd4436d32f0 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -51,11 +51,11 @@ struct trap_per_cpu {
51 unsigned long __per_cpu_base; 51 unsigned long __per_cpu_base;
52} __attribute__((aligned(64))); 52} __attribute__((aligned(64)));
53extern struct trap_per_cpu trap_block[NR_CPUS]; 53extern struct trap_per_cpu trap_block[NR_CPUS];
54extern void init_cur_cpu_trap(struct thread_info *); 54void init_cur_cpu_trap(struct thread_info *);
55extern void setup_tba(void); 55void setup_tba(void);
56extern int ncpus_probed; 56extern int ncpus_probed;
57 57
58extern unsigned long real_hard_smp_processor_id(void); 58unsigned long real_hard_smp_processor_id(void);
59 59
60struct cpuid_patch_entry { 60struct cpuid_patch_entry {
61 unsigned int addr; 61 unsigned int addr;
diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
index 0167d26d0d1d..bd56c28fff9f 100644
--- a/arch/sparc/include/asm/uaccess.h
+++ b/arch/sparc/include/asm/uaccess.h
@@ -9,6 +9,6 @@
9#define user_addr_max() \ 9#define user_addr_max() \
10 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 10 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
11 11
12extern long strncpy_from_user(char *dest, const char __user *src, long count); 12long strncpy_from_user(char *dest, const char __user *src, long count);
13 13
14#endif 14#endif
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 53a28dd59f59..9634d086fc56 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -78,9 +78,9 @@ struct exception_table_entry
78}; 78};
79 79
80/* Returns 0 if exception not found and fixup otherwise. */ 80/* Returns 0 if exception not found and fixup otherwise. */
81extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2); 81unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
82 82
83extern void __ret_efault(void); 83void __ret_efault(void);
84 84
85/* Uh, these should become the main single-value transfer routines.. 85/* Uh, these should become the main single-value transfer routines..
86 * They automatically use the right size if we just have the right 86 * They automatically use the right size if we just have the right
@@ -152,7 +152,7 @@ __asm__ __volatile__( \
152 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ 152 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
153 "i" (-EFAULT)) 153 "i" (-EFAULT))
154 154
155extern int __put_user_bad(void); 155int __put_user_bad(void);
156 156
157#define __get_user_check(x,addr,size,type) ({ \ 157#define __get_user_check(x,addr,size,type) ({ \
158register int __gu_ret; \ 158register int __gu_ret; \
@@ -244,9 +244,9 @@ __asm__ __volatile__( \
244 ".previous\n\t" \ 244 ".previous\n\t" \
245 : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) 245 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
246 246
247extern int __get_user_bad(void); 247int __get_user_bad(void);
248 248
249extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size); 249unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
250 250
251static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) 251static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
252{ 252{
@@ -306,8 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
306 return n; 306 return n;
307} 307}
308 308
309extern __must_check long strlen_user(const char __user *str); 309__must_check long strlen_user(const char __user *str);
310extern __must_check long strnlen_user(const char __user *str, long n); 310__must_check long strnlen_user(const char __user *str, long n);
311 311
312#endif /* __ASSEMBLY__ */ 312#endif /* __ASSEMBLY__ */
313 313
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index ad7e178337f1..c990a5e577f0 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -76,8 +76,8 @@ struct exception_table_entry {
76 unsigned int insn, fixup; 76 unsigned int insn, fixup;
77}; 77};
78 78
79extern void __ret_efault(void); 79void __ret_efault(void);
80extern void __retl_efault(void); 80void __retl_efault(void);
81 81
82/* Uh, these should become the main single-value transfer routines.. 82/* Uh, these should become the main single-value transfer routines..
83 * They automatically use the right size if we just have the right 83 * They automatically use the right size if we just have the right
@@ -134,7 +134,7 @@ __asm__ __volatile__( \
134 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 134 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
135 "i" (-EFAULT)) 135 "i" (-EFAULT))
136 136
137extern int __put_user_bad(void); 137int __put_user_bad(void);
138 138
139#define __get_user_nocheck(data,addr,size,type) ({ \ 139#define __get_user_nocheck(data,addr,size,type) ({ \
140register int __gu_ret; \ 140register int __gu_ret; \
@@ -204,13 +204,13 @@ __asm__ __volatile__( \
204 ".previous\n\t" \ 204 ".previous\n\t" \
205 : "=r" (x) : "r" (__m(addr)), "i" (retval)) 205 : "=r" (x) : "r" (__m(addr)), "i" (retval))
206 206
207extern int __get_user_bad(void); 207int __get_user_bad(void);
208 208
209extern unsigned long __must_check ___copy_from_user(void *to, 209unsigned long __must_check ___copy_from_user(void *to,
210 const void __user *from, 210 const void __user *from,
211 unsigned long size); 211 unsigned long size);
212extern unsigned long copy_from_user_fixup(void *to, const void __user *from, 212unsigned long copy_from_user_fixup(void *to, const void __user *from,
213 unsigned long size); 213 unsigned long size);
214static inline unsigned long __must_check 214static inline unsigned long __must_check
215copy_from_user(void *to, const void __user *from, unsigned long size) 215copy_from_user(void *to, const void __user *from, unsigned long size)
216{ 216{
@@ -223,11 +223,11 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
223} 223}
224#define __copy_from_user copy_from_user 224#define __copy_from_user copy_from_user
225 225
226extern unsigned long __must_check ___copy_to_user(void __user *to, 226unsigned long __must_check ___copy_to_user(void __user *to,
227 const void *from, 227 const void *from,
228 unsigned long size); 228 unsigned long size);
229extern unsigned long copy_to_user_fixup(void __user *to, const void *from, 229unsigned long copy_to_user_fixup(void __user *to, const void *from,
230 unsigned long size); 230 unsigned long size);
231static inline unsigned long __must_check 231static inline unsigned long __must_check
232copy_to_user(void __user *to, const void *from, unsigned long size) 232copy_to_user(void __user *to, const void *from, unsigned long size)
233{ 233{
@@ -239,11 +239,11 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
239} 239}
240#define __copy_to_user copy_to_user 240#define __copy_to_user copy_to_user
241 241
242extern unsigned long __must_check ___copy_in_user(void __user *to, 242unsigned long __must_check ___copy_in_user(void __user *to,
243 const void __user *from, 243 const void __user *from,
244 unsigned long size); 244 unsigned long size);
245extern unsigned long copy_in_user_fixup(void __user *to, void __user *from, 245unsigned long copy_in_user_fixup(void __user *to, void __user *from,
246 unsigned long size); 246 unsigned long size);
247static inline unsigned long __must_check 247static inline unsigned long __must_check
248copy_in_user(void __user *to, void __user *from, unsigned long size) 248copy_in_user(void __user *to, void __user *from, unsigned long size)
249{ 249{
@@ -255,20 +255,20 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
255} 255}
256#define __copy_in_user copy_in_user 256#define __copy_in_user copy_in_user
257 257
258extern unsigned long __must_check __clear_user(void __user *, unsigned long); 258unsigned long __must_check __clear_user(void __user *, unsigned long);
259 259
260#define clear_user __clear_user 260#define clear_user __clear_user
261 261
262extern __must_check long strlen_user(const char __user *str); 262__must_check long strlen_user(const char __user *str);
263extern __must_check long strnlen_user(const char __user *str, long n); 263__must_check long strnlen_user(const char __user *str, long n);
264 264
265#define __copy_to_user_inatomic __copy_to_user 265#define __copy_to_user_inatomic __copy_to_user
266#define __copy_from_user_inatomic __copy_from_user 266#define __copy_from_user_inatomic __copy_from_user
267 267
268struct pt_regs; 268struct pt_regs;
269extern unsigned long compute_effective_address(struct pt_regs *, 269unsigned long compute_effective_address(struct pt_regs *,
270 unsigned int insn, 270 unsigned int insn,
271 unsigned int rd); 271 unsigned int rd);
272 272
273#endif /* __ASSEMBLY__ */ 273#endif /* __ASSEMBLY__ */
274 274
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 432afa838861..e0f6c399f1d0 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -372,14 +372,14 @@ do { if (vio->debug & VIO_DEBUG_##TYPE) \
372 vio->vdev->channel_id, ## a); \ 372 vio->vdev->channel_id, ## a); \
373} while (0) 373} while (0)
374 374
375extern int __vio_register_driver(struct vio_driver *drv, struct module *owner, 375int __vio_register_driver(struct vio_driver *drv, struct module *owner,
376 const char *mod_name); 376 const char *mod_name);
377/* 377/*
378 * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded 378 * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
379 */ 379 */
380#define vio_register_driver(driver) \ 380#define vio_register_driver(driver) \
381 __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 381 __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
382extern void vio_unregister_driver(struct vio_driver *drv); 382void vio_unregister_driver(struct vio_driver *drv);
383 383
384static inline struct vio_driver *to_vio_driver(struct device_driver *drv) 384static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
385{ 385{
@@ -391,21 +391,21 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
391 return container_of(dev, struct vio_dev, dev); 391 return container_of(dev, struct vio_dev, dev);
392} 392}
393 393
394extern int vio_ldc_send(struct vio_driver_state *vio, void *data, int len); 394int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
395extern void vio_link_state_change(struct vio_driver_state *vio, int event); 395void vio_link_state_change(struct vio_driver_state *vio, int event);
396extern void vio_conn_reset(struct vio_driver_state *vio); 396void vio_conn_reset(struct vio_driver_state *vio);
397extern int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt); 397int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
398extern int vio_validate_sid(struct vio_driver_state *vio, 398int vio_validate_sid(struct vio_driver_state *vio,
399 struct vio_msg_tag *tp); 399 struct vio_msg_tag *tp);
400extern u32 vio_send_sid(struct vio_driver_state *vio); 400u32 vio_send_sid(struct vio_driver_state *vio);
401extern int vio_ldc_alloc(struct vio_driver_state *vio, 401int vio_ldc_alloc(struct vio_driver_state *vio,
402 struct ldc_channel_config *base_cfg, void *event_arg); 402 struct ldc_channel_config *base_cfg, void *event_arg);
403extern void vio_ldc_free(struct vio_driver_state *vio); 403void vio_ldc_free(struct vio_driver_state *vio);
404extern int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, 404int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
405 u8 dev_class, struct vio_version *ver_table, 405 u8 dev_class, struct vio_version *ver_table,
406 int ver_table_size, struct vio_driver_ops *ops, 406 int ver_table_size, struct vio_driver_ops *ops,
407 char *name); 407 char *name);
408 408
409extern void vio_port_up(struct vio_driver_state *vio); 409void vio_port_up(struct vio_driver_state *vio);
410 410
411#endif /* _SPARC64_VIO_H */ 411#endif /* _SPARC64_VIO_H */
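The comment kept in the vio.h hunk explains why vio_register_driver stays a macro: KBUILD_MODNAME (and THIS_MODULE) must expand in each driver's own compilation unit, so the wrapper cannot be an ordinary function. A hedged userspace sketch of the same idea, with DEMO_MODNAME standing in for the kernel's per-object KBUILD_MODNAME define (all names are invented):

/*
 * modname_demo.c - why the registration wrapper is a macro, not a function.
 * Build with: cc -Wall -DDEMO_MODNAME='"mydriver"' -o modname_demo modname_demo.c
 */
#include <stdio.h>

#ifndef DEMO_MODNAME
#define DEMO_MODNAME "demo"
#endif

struct demo_driver {
	const char *name;
};

static int __demo_register_driver(struct demo_driver *drv, const char *mod_name)
{
	drv->name = mod_name;
	printf("registered driver for module %s\n", drv->name);
	return 0;
}

/*
 * A macro expands in the caller's translation unit, so DEMO_MODNAME (like
 * KBUILD_MODNAME) takes the value seen where the *driver* is compiled.  An
 * ordinary function could only see the value defined where the function
 * itself is compiled.
 */
#define demo_register_driver(drv) \
	__demo_register_driver(drv, DEMO_MODNAME)

int main(void)
{
	struct demo_driver drv;

	return demo_register_driver(&drv);
}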
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 39ca301920db..b26673759283 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -57,7 +57,8 @@ static inline void save_and_clear_fpu(void) {
57" " : : "i" (FPRS_FEF|FPRS_DU) : 57" " : : "i" (FPRS_FEF|FPRS_DU) :
58 "o5", "g1", "g2", "g3", "g7", "cc"); 58 "o5", "g1", "g2", "g3", "g7", "cc");
59} 59}
60extern int vis_emul(struct pt_regs *, unsigned int); 60
61int vis_emul(struct pt_regs *, unsigned int);
61#endif 62#endif
62 63
63#endif /* _SPARC64_ASI_H */ 64#endif /* _SPARC64_ASI_H */
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h
index ee8edc68423e..50c882856031 100644
--- a/arch/sparc/include/asm/xor_64.h
+++ b/arch/sparc/include/asm/xor_64.h
@@ -20,13 +20,13 @@
20 20
21#include <asm/spitfire.h> 21#include <asm/spitfire.h>
22 22
23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 23void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 24void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
25 unsigned long *); 25 unsigned long *);
26extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, 26void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
27 unsigned long *, unsigned long *); 27 unsigned long *, unsigned long *);
28extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, 28void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
29 unsigned long *, unsigned long *, unsigned long *); 29 unsigned long *, unsigned long *, unsigned long *);
30 30
31/* XXX Ugh, write cheetah versions... -DaveM */ 31/* XXX Ugh, write cheetah versions... -DaveM */
32 32
@@ -38,13 +38,13 @@ static struct xor_block_template xor_block_VIS = {
38 .do_5 = xor_vis_5, 38 .do_5 = xor_vis_5,
39}; 39};
40 40
41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); 41void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, 42void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *); 43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, 44void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *); 45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, 46void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *); 47 unsigned long *, unsigned long *, unsigned long *);
48 48
49static struct xor_block_template xor_block_niagara = { 49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara", 50 .name = "Niagara",
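xor_block_VIS and xor_block_niagara above are instances of the usual ops-table pattern: each implementation fills in one struct of function pointers and the rest of the code calls through whichever template was selected. A small self-contained sketch of that pattern (all names below are invented; only the shape mirrors xor_block_template):

/*
 * opstable_demo.c - function-pointer template pattern.
 * Build with: cc -Wall -o opstable_demo opstable_demo.c
 */
#include <stdio.h>

struct xor_demo_template {
	const char *name;
	void (*do_2)(unsigned long bytes, unsigned long *p1, unsigned long *p2);
};

static void xor_generic_2(unsigned long bytes, unsigned long *p1,
			  unsigned long *p2)
{
	unsigned long i;

	for (i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i];
}

/* One template per implementation; callers only see the table. */
static struct xor_demo_template xor_demo_generic = {
	.name = "generic",
	.do_2 = xor_generic_2,
};

int main(void)
{
	unsigned long a[4] = { 1, 2, 3, 4 };
	unsigned long b[4] = { 4, 3, 2, 1 };

	xor_demo_generic.do_2(sizeof(a), a, b);
	printf("%s: %lu %lu %lu %lu\n", xor_demo_generic.name,
	       a[0], a[1], a[2], a[3]);
	return 0;
}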
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index d15cc1794b0e..7cf9c6ea3f1f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,7 +42,6 @@ obj-y += time_$(BITS).o
42obj-$(CONFIG_SPARC32) += windows.o 42obj-$(CONFIG_SPARC32) += windows.o
43obj-y += cpu.o 43obj-y += cpu.o
44obj-$(CONFIG_SPARC32) += devices.o 44obj-$(CONFIG_SPARC32) += devices.o
45obj-$(CONFIG_SPARC32) += tadpole.o
46obj-y += ptrace_$(BITS).o 45obj-y += ptrace_$(BITS).o
47obj-y += unaligned_$(BITS).o 46obj-y += unaligned_$(BITS).o
48obj-y += una_asm_$(BITS).o 47obj-y += una_asm_$(BITS).o
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c
index 8fff0ac63d56..24361b494a93 100644
--- a/arch/sparc/kernel/audit.c
+++ b/arch/sparc/kernel/audit.c
@@ -3,6 +3,8 @@
3#include <linux/audit.h> 3#include <linux/audit.h>
4#include <asm/unistd.h> 4#include <asm/unistd.h>
5 5
6#include "kernel.h"
7
6static unsigned dir_class[] = { 8static unsigned dir_class[] = {
7#include <asm-generic/audit_dir_write.h> 9#include <asm-generic/audit_dir_write.h>
8~0U 10~0U
@@ -40,7 +42,6 @@ int audit_classify_arch(int arch)
40int audit_classify_syscall(int abi, unsigned syscall) 42int audit_classify_syscall(int abi, unsigned syscall)
41{ 43{
42#ifdef CONFIG_COMPAT 44#ifdef CONFIG_COMPAT
43 extern int sparc32_classify_syscall(unsigned);
44 if (abi == AUDIT_ARCH_SPARC) 45 if (abi == AUDIT_ARCH_SPARC)
45 return sparc32_classify_syscall(syscall); 46 return sparc32_classify_syscall(syscall);
46#endif 47#endif
@@ -61,11 +62,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
61static int __init audit_classes_init(void) 62static int __init audit_classes_init(void)
62{ 63{
63#ifdef CONFIG_COMPAT 64#ifdef CONFIG_COMPAT
64 extern __u32 sparc32_dir_class[];
65 extern __u32 sparc32_write_class[];
66 extern __u32 sparc32_read_class[];
67 extern __u32 sparc32_chattr_class[];
68 extern __u32 sparc32_signal_class[];
69 audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class); 65 audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
70 audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class); 66 audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
71 audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class); 67 audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index e20cc55fb768..ae88c223e4d3 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -9,12 +9,15 @@
9#include <linux/of.h> 9#include <linux/of.h>
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11#include <linux/export.h> 11#include <linux/export.h>
12
12#include <asm/oplib.h> 13#include <asm/oplib.h>
13#include <asm/io.h> 14#include <asm/io.h>
14#include <asm/auxio.h> 15#include <asm/auxio.h>
15#include <asm/string.h> /* memset(), Linux has no bzero() */ 16#include <asm/string.h> /* memset(), Linux has no bzero() */
16#include <asm/cpu_type.h> 17#include <asm/cpu_type.h>
17 18
19#include "kernel.h"
20
18/* Probe and map in the Auxiliary I/O register */ 21/* Probe and map in the Auxiliary I/O register */
19 22
20/* auxio_register is not static because it is referenced 23/* auxio_register is not static because it is referenced
@@ -103,7 +106,7 @@ EXPORT_SYMBOL(set_auxio);
103 106
104/* sun4m power control register (AUXIO2) */ 107/* sun4m power control register (AUXIO2) */
105 108
106volatile unsigned char * auxio_power_register = NULL; 109volatile u8 __iomem *auxio_power_register = NULL;
107 110
108void __init auxio_power_probe(void) 111void __init auxio_power_probe(void)
109{ 112{
@@ -127,8 +130,8 @@ void __init auxio_power_probe(void)
127 r.flags = regs.which_io & 0xF; 130 r.flags = regs.which_io & 0xF;
128 r.start = regs.phys_addr; 131 r.start = regs.phys_addr;
129 r.end = regs.phys_addr + regs.reg_size - 1; 132 r.end = regs.phys_addr + regs.reg_size - 1;
130 auxio_power_register = (unsigned char *) of_ioremap(&r, 0, 133 auxio_power_register =
131 regs.reg_size, "auxpower"); 134 (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
132 135
133 /* Display a quick message on the console. */ 136 /* Display a quick message on the console. */
134 if (auxio_power_register) 137 if (auxio_power_register)
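The auxio_32.c hunk retypes auxio_power_register as u8 __iomem * and keeps the of_ioremap() cast in sync. __iomem is a sparse annotation: pointers obtained from an ioremap-style mapping must only be touched through the MMIO accessors, never dereferenced directly. A minimal kernel-module-style sketch of the pattern, assuming a made-up register block (DEMO_REG_PHYS/DEMO_REG_SIZE are purely illustrative, not sparc addresses):

/*
 * iomem_demo.c - sketch of the __iomem annotation pattern; the physical
 * address below is invented, so this is illustrative only.
 * Build as an out-of-tree module with a one-line Makefile: obj-m += iomem_demo.o
 */
#include <linux/module.h>
#include <linux/io.h>

#define DEMO_REG_PHYS	0x10000000UL	/* hypothetical MMIO base */
#define DEMO_REG_SIZE	0x10

/*
 * __iomem marks the cookie returned by ioremap(); sparse warns when such a
 * pointer is dereferenced directly instead of going through readb()/writeb().
 */
static u8 __iomem *demo_reg;

static int __init iomem_demo_init(void)
{
	demo_reg = ioremap(DEMO_REG_PHYS, DEMO_REG_SIZE);
	if (!demo_reg)
		return -ENOMEM;

	writeb(0x01, demo_reg);			/* accessor, not "*demo_reg = 1" */
	pr_info("iomem_demo: status %#x\n", readb(demo_reg));
	return 0;
}

static void __exit iomem_demo_exit(void)
{
	iounmap(demo_reg);
}

module_init(iomem_demo_init);
module_exit(iomem_demo_exit);
MODULE_LICENSE("GPL");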
diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c
index 57073e56ba9e..987f7ec497cc 100644
--- a/arch/sparc/kernel/btext.c
+++ b/arch/sparc/kernel/btext.c
@@ -137,7 +137,7 @@ static void scrollscreen(void)
137} 137}
138#endif /* ndef NO_SCROLL */ 138#endif /* ndef NO_SCROLL */
139 139
140void btext_drawchar(char c) 140static void btext_drawchar(char c)
141{ 141{
142 int cline = 0; 142 int cline = 0;
143#ifdef NO_SCROLL 143#ifdef NO_SCROLL
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c
index d865575b25bf..7062263d09c1 100644
--- a/arch/sparc/kernel/compat_audit.c
+++ b/arch/sparc/kernel/compat_audit.c
@@ -1,5 +1,6 @@
1#define __32bit_syscall_numbers__ 1#define __32bit_syscall_numbers__
2#include <asm/unistd.h> 2#include <asm/unistd.h>
3#include "kernel.h"
3 4
4unsigned sparc32_dir_class[] = { 5unsigned sparc32_dir_class[] = {
5#include <asm-generic/audit_dir_write.h> 6#include <asm-generic/audit_dir_write.h>
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 5c5125895db8..82a3a71c451e 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/cpudata.h> 22#include <asm/cpudata.h>
23 23
24#include "kernel.h" 24#include "kernel.h"
25#include "entry.h"
25 26
26DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; 27DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
27EXPORT_PER_CPU_SYMBOL(__cpu_data); 28EXPORT_PER_CPU_SYMBOL(__cpu_data);
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
index e639880ab864..9dac398c434a 100644
--- a/arch/sparc/kernel/cpumap.h
+++ b/arch/sparc/kernel/cpumap.h
@@ -2,8 +2,8 @@
2#define _CPUMAP_H 2#define _CPUMAP_H
3 3
4#ifdef CONFIG_SMP 4#ifdef CONFIG_SMP
5extern void cpu_map_rebuild(void); 5void cpu_map_rebuild(void);
6extern int map_to_cpu(unsigned int index); 6int map_to_cpu(unsigned int index);
7#define cpu_map_init() cpu_map_rebuild() 7#define cpu_map_init() cpu_map_rebuild()
8#else 8#else
9#define cpu_map_init() do {} while (0) 9#define cpu_map_init() do {} while (0)
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index 3d465e87f7e2..8d5d09f09caf 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -19,8 +19,9 @@
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/cpudata.h> 20#include <asm/cpudata.h>
21#include <asm/cpu_type.h> 21#include <asm/cpu_type.h>
22#include <asm/setup.h>
22 23
23extern void clock_stop_probe(void); /* tadpole.c */ 24#include "kernel.h"
24 25
25static char *cpu_mid_prop(void) 26static char *cpu_mid_prop(void)
26{ 27{
@@ -131,11 +132,6 @@ void __init device_scan(void)
131 } 132 }
132#endif /* !CONFIG_SMP */ 133#endif /* !CONFIG_SMP */
133 134
134 { 135 auxio_probe();
135 extern void auxio_probe(void); 136 auxio_power_probe();
136 extern void auxio_power_probe(void);
137 auxio_probe();
138 auxio_power_probe();
139 }
140 clock_stop_probe();
141} 137}
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 140966fbd303..ebaba6167dd4 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -6,40 +6,39 @@
6#include <linux/init.h> 6#include <linux/init.h>
7 7
8/* irq */ 8/* irq */
9extern void handler_irq(int irq, struct pt_regs *regs); 9void handler_irq(int irq, struct pt_regs *regs);
10 10
11#ifdef CONFIG_SPARC32 11#ifdef CONFIG_SPARC32
12/* traps */ 12/* traps */
13extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type); 13void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
14extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, 14void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
15 unsigned long npc, unsigned long psr); 15 unsigned long npc, unsigned long psr);
16 16
17extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc, 17void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
18 unsigned long npc, unsigned long psr); 18 unsigned long npc, unsigned long psr);
19extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, 19void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
20 unsigned long npc, 20 unsigned long npc, unsigned long psr);
21 unsigned long psr); 21void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
22extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc, 22 unsigned long npc, unsigned long psr);
23void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
24 unsigned long npc, unsigned long psr);
25void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
26 unsigned long npc, unsigned long psr);
27void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
28 unsigned long npc, unsigned long psr);
29void handle_reg_access(struct pt_regs *regs, unsigned long pc,
30 unsigned long npc, unsigned long psr);
31void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
23 unsigned long npc, unsigned long psr); 32 unsigned long npc, unsigned long psr);
24extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc, 33void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
25 unsigned long npc, unsigned long psr); 34 unsigned long npc, unsigned long psr);
26extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
27 unsigned long npc, unsigned long psr);
28extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
29 unsigned long npc, unsigned long psr);
30extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
31 unsigned long npc, unsigned long psr);
32extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
33 unsigned long npc, unsigned long psr);
34extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
35 unsigned long npc, unsigned long psr);
36 35
37 36
38 37
39/* entry.S */ 38/* entry.S */
40extern void fpsave(unsigned long *fpregs, unsigned long *fsr, 39void fpsave(unsigned long *fpregs, unsigned long *fsr,
41 void *fpqueue, unsigned long *fpqdepth); 40 void *fpqueue, unsigned long *fpqdepth);
42extern void fpload(unsigned long *fpregs, unsigned long *fsr); 41void fpload(unsigned long *fpregs, unsigned long *fsr);
43 42
44#else /* CONFIG_SPARC32 */ 43#else /* CONFIG_SPARC32 */
45 44
@@ -66,123 +65,123 @@ struct pause_patch_entry {
66extern struct pause_patch_entry __pause_3insn_patch, 65extern struct pause_patch_entry __pause_3insn_patch,
67 __pause_3insn_patch_end; 66 __pause_3insn_patch_end;
68 67
69extern void __init per_cpu_patch(void); 68void __init per_cpu_patch(void);
70extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, 69void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
71 struct sun4v_1insn_patch_entry *); 70 struct sun4v_1insn_patch_entry *);
72extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, 71void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
73 struct sun4v_2insn_patch_entry *); 72 struct sun4v_2insn_patch_entry *);
74extern void __init sun4v_patch(void); 73void __init sun4v_patch(void);
75extern void __init boot_cpu_id_too_large(int cpu); 74void __init boot_cpu_id_too_large(int cpu);
76extern unsigned int dcache_parity_tl1_occurred; 75extern unsigned int dcache_parity_tl1_occurred;
77extern unsigned int icache_parity_tl1_occurred; 76extern unsigned int icache_parity_tl1_occurred;
78 77
79extern asmlinkage void sparc_breakpoint(struct pt_regs *regs); 78asmlinkage void sparc_breakpoint(struct pt_regs *regs);
80extern void timer_interrupt(int irq, struct pt_regs *regs); 79void timer_interrupt(int irq, struct pt_regs *regs);
81 80
82extern void do_notify_resume(struct pt_regs *regs, 81void do_notify_resume(struct pt_regs *regs,
83 unsigned long orig_i0, 82 unsigned long orig_i0,
84 unsigned long thread_info_flags); 83 unsigned long thread_info_flags);
85 84
86extern asmlinkage int syscall_trace_enter(struct pt_regs *regs); 85asmlinkage int syscall_trace_enter(struct pt_regs *regs);
87extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); 86asmlinkage void syscall_trace_leave(struct pt_regs *regs);
88 87
89extern void bad_trap_tl1(struct pt_regs *regs, long lvl); 88void bad_trap_tl1(struct pt_regs *regs, long lvl);
90 89
91extern void do_fpieee(struct pt_regs *regs); 90void do_fpieee(struct pt_regs *regs);
92extern void do_fpother(struct pt_regs *regs); 91void do_fpother(struct pt_regs *regs);
93extern void do_tof(struct pt_regs *regs); 92void do_tof(struct pt_regs *regs);
94extern void do_div0(struct pt_regs *regs); 93void do_div0(struct pt_regs *regs);
95extern void do_illegal_instruction(struct pt_regs *regs); 94void do_illegal_instruction(struct pt_regs *regs);
96extern void mem_address_unaligned(struct pt_regs *regs, 95void mem_address_unaligned(struct pt_regs *regs,
97 unsigned long sfar, 96 unsigned long sfar,
98 unsigned long sfsr); 97 unsigned long sfsr);
99extern void sun4v_do_mna(struct pt_regs *regs, 98void sun4v_do_mna(struct pt_regs *regs,
100 unsigned long addr, 99 unsigned long addr,
101 unsigned long type_ctx); 100 unsigned long type_ctx);
102extern void do_privop(struct pt_regs *regs); 101void do_privop(struct pt_regs *regs);
103extern void do_privact(struct pt_regs *regs); 102void do_privact(struct pt_regs *regs);
104extern void do_cee(struct pt_regs *regs); 103void do_cee(struct pt_regs *regs);
105extern void do_cee_tl1(struct pt_regs *regs); 104void do_cee_tl1(struct pt_regs *regs);
106extern void do_dae_tl1(struct pt_regs *regs); 105void do_dae_tl1(struct pt_regs *regs);
107extern void do_iae_tl1(struct pt_regs *regs); 106void do_iae_tl1(struct pt_regs *regs);
108extern void do_div0_tl1(struct pt_regs *regs); 107void do_div0_tl1(struct pt_regs *regs);
109extern void do_fpdis_tl1(struct pt_regs *regs); 108void do_fpdis_tl1(struct pt_regs *regs);
110extern void do_fpieee_tl1(struct pt_regs *regs); 109void do_fpieee_tl1(struct pt_regs *regs);
111extern void do_fpother_tl1(struct pt_regs *regs); 110void do_fpother_tl1(struct pt_regs *regs);
112extern void do_ill_tl1(struct pt_regs *regs); 111void do_ill_tl1(struct pt_regs *regs);
113extern void do_irq_tl1(struct pt_regs *regs); 112void do_irq_tl1(struct pt_regs *regs);
114extern void do_lddfmna_tl1(struct pt_regs *regs); 113void do_lddfmna_tl1(struct pt_regs *regs);
115extern void do_stdfmna_tl1(struct pt_regs *regs); 114void do_stdfmna_tl1(struct pt_regs *regs);
116extern void do_paw(struct pt_regs *regs); 115void do_paw(struct pt_regs *regs);
117extern void do_paw_tl1(struct pt_regs *regs); 116void do_paw_tl1(struct pt_regs *regs);
118extern void do_vaw(struct pt_regs *regs); 117void do_vaw(struct pt_regs *regs);
119extern void do_vaw_tl1(struct pt_regs *regs); 118void do_vaw_tl1(struct pt_regs *regs);
120extern void do_tof_tl1(struct pt_regs *regs); 119void do_tof_tl1(struct pt_regs *regs);
121extern void do_getpsr(struct pt_regs *regs); 120void do_getpsr(struct pt_regs *regs);
122 121
123extern void spitfire_insn_access_exception(struct pt_regs *regs, 122void spitfire_insn_access_exception(struct pt_regs *regs,
124 unsigned long sfsr, 123 unsigned long sfsr,
125 unsigned long sfar); 124 unsigned long sfar);
126extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs, 125void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
127 unsigned long sfsr, 126 unsigned long sfsr,
128 unsigned long sfar); 127 unsigned long sfar);
129extern void spitfire_data_access_exception(struct pt_regs *regs, 128void spitfire_data_access_exception(struct pt_regs *regs,
130 unsigned long sfsr, 129 unsigned long sfsr,
131 unsigned long sfar); 130 unsigned long sfar);
132extern void spitfire_data_access_exception_tl1(struct pt_regs *regs, 131void spitfire_data_access_exception_tl1(struct pt_regs *regs,
133 unsigned long sfsr, 132 unsigned long sfsr,
134 unsigned long sfar); 133 unsigned long sfar);
135extern void spitfire_access_error(struct pt_regs *regs, 134void spitfire_access_error(struct pt_regs *regs,
136 unsigned long status_encoded, 135 unsigned long status_encoded,
137 unsigned long afar); 136 unsigned long afar);
138 137
139extern void cheetah_fecc_handler(struct pt_regs *regs, 138void cheetah_fecc_handler(struct pt_regs *regs,
140 unsigned long afsr, 139 unsigned long afsr,
141 unsigned long afar); 140 unsigned long afar);
142extern void cheetah_cee_handler(struct pt_regs *regs, 141void cheetah_cee_handler(struct pt_regs *regs,
143 unsigned long afsr, 142 unsigned long afsr,
144 unsigned long afar); 143 unsigned long afar);
145extern void cheetah_deferred_handler(struct pt_regs *regs, 144void cheetah_deferred_handler(struct pt_regs *regs,
146 unsigned long afsr, 145 unsigned long afsr,
147 unsigned long afar); 146 unsigned long afar);
148extern void cheetah_plus_parity_error(int type, struct pt_regs *regs); 147void cheetah_plus_parity_error(int type, struct pt_regs *regs);
149 148
150extern void sun4v_insn_access_exception(struct pt_regs *regs, 149void sun4v_insn_access_exception(struct pt_regs *regs,
151 unsigned long addr, 150 unsigned long addr,
152 unsigned long type_ctx); 151 unsigned long type_ctx);
153extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs, 152void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
154 unsigned long addr, 153 unsigned long addr,
155 unsigned long type_ctx); 154 unsigned long type_ctx);
156extern void sun4v_data_access_exception(struct pt_regs *regs, 155void sun4v_data_access_exception(struct pt_regs *regs,
157 unsigned long addr, 156 unsigned long addr,
158 unsigned long type_ctx); 157 unsigned long type_ctx);
159extern void sun4v_data_access_exception_tl1(struct pt_regs *regs, 158void sun4v_data_access_exception_tl1(struct pt_regs *regs,
160 unsigned long addr, 159 unsigned long addr,
161 unsigned long type_ctx); 160 unsigned long type_ctx);
162extern void sun4v_resum_error(struct pt_regs *regs, 161void sun4v_resum_error(struct pt_regs *regs,
163 unsigned long offset); 162 unsigned long offset);
164extern void sun4v_resum_overflow(struct pt_regs *regs); 163void sun4v_resum_overflow(struct pt_regs *regs);
165extern void sun4v_nonresum_error(struct pt_regs *regs, 164void sun4v_nonresum_error(struct pt_regs *regs,
166 unsigned long offset); 165 unsigned long offset);
167extern void sun4v_nonresum_overflow(struct pt_regs *regs); 166void sun4v_nonresum_overflow(struct pt_regs *regs);
168 167
169extern unsigned long sun4v_err_itlb_vaddr; 168extern unsigned long sun4v_err_itlb_vaddr;
170extern unsigned long sun4v_err_itlb_ctx; 169extern unsigned long sun4v_err_itlb_ctx;
171extern unsigned long sun4v_err_itlb_pte; 170extern unsigned long sun4v_err_itlb_pte;
172extern unsigned long sun4v_err_itlb_error; 171extern unsigned long sun4v_err_itlb_error;
173 172
174extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl); 173void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
175 174
176extern unsigned long sun4v_err_dtlb_vaddr; 175extern unsigned long sun4v_err_dtlb_vaddr;
177extern unsigned long sun4v_err_dtlb_ctx; 176extern unsigned long sun4v_err_dtlb_ctx;
178extern unsigned long sun4v_err_dtlb_pte; 177extern unsigned long sun4v_err_dtlb_pte;
179extern unsigned long sun4v_err_dtlb_error; 178extern unsigned long sun4v_err_dtlb_error;
180 179
181extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl); 180void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
182extern void hypervisor_tlbop_error(unsigned long err, 181void hypervisor_tlbop_error(unsigned long err,
183 unsigned long op); 182 unsigned long op);
184extern void hypervisor_tlbop_error_xcall(unsigned long err, 183void hypervisor_tlbop_error_xcall(unsigned long err,
185 unsigned long op); 184 unsigned long op);
186 185
187/* WARNING: The error trap handlers in assembly know the precise 186/* WARNING: The error trap handlers in assembly know the precise
188 * layout of the following structure. 187 * layout of the following structure.
@@ -248,8 +247,8 @@ struct ino_bucket {
248extern struct ino_bucket *ivector_table; 247extern struct ino_bucket *ivector_table;
249extern unsigned long ivector_table_pa; 248extern unsigned long ivector_table_pa;
250 249
251extern void init_irqwork_curcpu(void); 250void init_irqwork_curcpu(void);
252extern void sun4v_register_mondo_queues(int this_cpu); 251void sun4v_register_mondo_queues(int this_cpu);
253 252
254#endif /* CONFIG_SPARC32 */ 253#endif /* CONFIG_SPARC32 */
255#endif /* _ENTRY_H */ 254#endif /* _ENTRY_H */
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 76663b019eb5..bfa4d0c2df42 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -21,6 +21,7 @@
21#include <asm/iommu.h> 21#include <asm/iommu.h>
22 22
23#include "iommu_common.h" 23#include "iommu_common.h"
24#include "kernel.h"
24 25
25#define STC_CTXMATCH_ADDR(STC, CTX) \ 26#define STC_CTXMATCH_ADDR(STC, CTX) \
26 ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) 27 ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -840,8 +841,6 @@ static struct dma_map_ops sun4u_dma_ops = {
840struct dma_map_ops *dma_ops = &sun4u_dma_ops; 841struct dma_map_ops *dma_ops = &sun4u_dma_ops;
841EXPORT_SYMBOL(dma_ops); 842EXPORT_SYMBOL(dma_ops);
842 843
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
845int dma_supported(struct device *dev, u64 device_mask) 844int dma_supported(struct device *dev, u64 device_mask)
846{ 845{
847 struct iommu *iommu = dev->archdata.iommu; 846 struct iommu *iommu = dev->archdata.iommu;
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 591f5879039c..1ec0de4156e7 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,12 +48,12 @@ static inline int is_span_boundary(unsigned long entry,
48 return iommu_is_span_boundary(entry, nr, shift, boundary_size); 48 return iommu_is_span_boundary(entry, nr, shift, boundary_size);
49} 49}
50 50
51extern unsigned long iommu_range_alloc(struct device *dev, 51unsigned long iommu_range_alloc(struct device *dev,
52 struct iommu *iommu, 52 struct iommu *iommu,
53 unsigned long npages, 53 unsigned long npages,
54 unsigned long *handle); 54 unsigned long *handle);
55extern void iommu_range_free(struct iommu *iommu, 55void iommu_range_free(struct iommu *iommu,
56 dma_addr_t dma_addr, 56 dma_addr_t dma_addr,
57 unsigned long npages); 57 unsigned long npages);
58 58
59#endif /* _IOMMU_COMMON_H */ 59#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index e7e215dfa866..7f08ec8a7c68 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -186,7 +186,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
186 186
187 if (name == NULL) name = "???"; 187 if (name == NULL) name = "???";
188 188
189 if ((xres = xres_alloc()) != 0) { 189 if ((xres = xres_alloc()) != NULL) {
190 tack = xres->xname; 190 tack = xres->xname;
191 res = &xres->xres; 191 res = &xres->xres;
192 } else { 192 } else {
@@ -400,7 +400,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
400 BUG(); 400 BUG();
401} 401}
402 402
403struct dma_map_ops sbus_dma_ops = { 403static struct dma_map_ops sbus_dma_ops = {
404 .alloc = sbus_alloc_coherent, 404 .alloc = sbus_alloc_coherent,
405 .free = sbus_free_coherent, 405 .free = sbus_free_coherent,
406 .map_page = sbus_map_page, 406 .map_page = sbus_map_page,
@@ -681,7 +681,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
681 const char *nm; 681 const char *nm;
682 682
683 for (r = root->child; r != NULL; r = r->sibling) { 683 for (r = root->child; r != NULL; r = r->sibling) {
684 if ((nm = r->name) == 0) nm = "???"; 684 if ((nm = r->name) == NULL) nm = "???";
685 seq_printf(m, "%016llx-%016llx: %s\n", 685 seq_printf(m, "%016llx-%016llx: %s\n",
686 (unsigned long long)r->start, 686 (unsigned long long)r->start,
687 (unsigned long long)r->end, nm); 687 (unsigned long long)r->end, nm);
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index b66b6aad1d6d..70a0b8ddd0ba 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -82,11 +82,20 @@ void handler_irq(unsigned int pil, struct pt_regs *regs);
82 82
83unsigned long leon_get_irqmask(unsigned int irq); 83unsigned long leon_get_irqmask(unsigned int irq);
84 84
85/* irq_32.c */
86void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs);
87
88/* sun4m_irq.c */
89void sun4m_nmi(struct pt_regs *regs);
90
91/* sun4d_irq.c */
92void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs);
93
85#ifdef CONFIG_SMP 94#ifdef CONFIG_SMP
86 95
87/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */ 96/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
88#define SUN4D_IPI_IRQ 13 97#define SUN4D_IPI_IRQ 13
89 98
90extern void sun4d_ipi_interrupt(void); 99void sun4d_ipi_interrupt(void);
91 100
92#endif 101#endif
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index c145f6fd123b..a979e99f8751 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/cpudata.h> 19#include <asm/cpudata.h>
20#include <asm/setup.h>
20#include <asm/pcic.h> 21#include <asm/pcic.h>
21#include <asm/leon.h> 22#include <asm/leon.h>
22 23
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a702d9ab019c..e7f652be9e61 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -2,6 +2,7 @@
2#define __SPARC_KERNEL_H 2#define __SPARC_KERNEL_H
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5#include <linux/ftrace.h>
5 6
6#include <asm/traps.h> 7#include <asm/traps.h>
7#include <asm/head.h> 8#include <asm/head.h>
@@ -15,62 +16,111 @@ extern int ncpus_probed;
15#ifdef CONFIG_SPARC64 16#ifdef CONFIG_SPARC64
16/* setup_64.c */ 17/* setup_64.c */
17struct seq_file; 18struct seq_file;
18extern void cpucap_info(struct seq_file *); 19void cpucap_info(struct seq_file *);
19 20
20static inline unsigned long kimage_addr_to_ra(const char *p) 21static inline unsigned long kimage_addr_to_ra(const void *p)
21{ 22{
22 unsigned long val = (unsigned long) p; 23 unsigned long val = (unsigned long) p;
23 24
24 return kern_base + (val - KERNBASE); 25 return kern_base + (val - KERNBASE);
25} 26}
27
28/* sys_sparc_64.c */
29asmlinkage long sys_kern_features(void);
30
31/* unaligned_64.c */
32asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
33int handle_popc(u32 insn, struct pt_regs *regs);
34void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
35void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
36
37/* smp_64.c */
38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
40void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
41void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
42void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
43
44/* kgdb_64.c */
45void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs);
46
47/* pci.c */
48int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
49
50/* signal32.c */
51void do_sigreturn32(struct pt_regs *regs);
52asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
53void do_signal32(struct pt_regs * regs);
54asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
55
56/* compat_audit.c */
57extern unsigned sparc32_dir_class[];
58extern unsigned sparc32_chattr_class[];
59extern unsigned sparc32_write_class[];
60extern unsigned sparc32_read_class[];
61extern unsigned sparc32_signal_class[];
62int sparc32_classify_syscall(unsigned syscall);
26#endif 63#endif
27 64
28#ifdef CONFIG_SPARC32 65#ifdef CONFIG_SPARC32
29/* setup_32.c */ 66/* setup_32.c */
67struct linux_romvec;
30void sparc32_start_kernel(struct linux_romvec *rp); 68void sparc32_start_kernel(struct linux_romvec *rp);
31 69
32/* cpu.c */ 70/* cpu.c */
33extern void cpu_probe(void); 71void cpu_probe(void);
34 72
35/* traps_32.c */ 73/* traps_32.c */
36extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, 74void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
37 unsigned long npc, unsigned long psr); 75 unsigned long npc, unsigned long psr);
38/* irq_32.c */ 76/* irq_32.c */
39extern struct irqaction static_irqaction[]; 77extern struct irqaction static_irqaction[];
40extern int static_irq_count; 78extern int static_irq_count;
41extern spinlock_t irq_action_lock; 79extern spinlock_t irq_action_lock;
42 80
43extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs); 81void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
44extern void init_IRQ(void); 82void init_IRQ(void);
45 83
46/* sun4m_irq.c */ 84/* sun4m_irq.c */
47extern void sun4m_init_IRQ(void); 85void sun4m_init_IRQ(void);
48extern void sun4m_unmask_profile_irq(void); 86void sun4m_unmask_profile_irq(void);
49extern void sun4m_clear_profile_irq(int cpu); 87void sun4m_clear_profile_irq(int cpu);
50 88
51/* sun4m_smp.c */ 89/* sun4m_smp.c */
52void sun4m_cpu_pre_starting(void *arg); 90void sun4m_cpu_pre_starting(void *arg);
53void sun4m_cpu_pre_online(void *arg); 91void sun4m_cpu_pre_online(void *arg);
92void __init smp4m_boot_cpus(void);
93int smp4m_boot_one_cpu(int i, struct task_struct *idle);
94void __init smp4m_smp_done(void);
95void smp4m_cross_call_irq(void);
96void smp4m_percpu_timer_interrupt(struct pt_regs *regs);
54 97
55/* sun4d_irq.c */ 98/* sun4d_irq.c */
56extern spinlock_t sun4d_imsk_lock; 99extern spinlock_t sun4d_imsk_lock;
57 100
58extern void sun4d_init_IRQ(void); 101void sun4d_init_IRQ(void);
59extern int sun4d_request_irq(unsigned int irq, 102int sun4d_request_irq(unsigned int irq,
60 irq_handler_t handler, 103 irq_handler_t handler,
61 unsigned long irqflags, 104 unsigned long irqflags,
62 const char *devname, void *dev_id); 105 const char *devname, void *dev_id);
63extern int show_sun4d_interrupts(struct seq_file *, void *); 106int show_sun4d_interrupts(struct seq_file *, void *);
64extern void sun4d_distribute_irqs(void); 107void sun4d_distribute_irqs(void);
65extern void sun4d_free_irq(unsigned int irq, void *dev_id); 108void sun4d_free_irq(unsigned int irq, void *dev_id);
66 109
67/* sun4d_smp.c */ 110/* sun4d_smp.c */
68void sun4d_cpu_pre_starting(void *arg); 111void sun4d_cpu_pre_starting(void *arg);
69void sun4d_cpu_pre_online(void *arg); 112void sun4d_cpu_pre_online(void *arg);
113void __init smp4d_boot_cpus(void);
114int smp4d_boot_one_cpu(int i, struct task_struct *idle);
115void __init smp4d_smp_done(void);
116void smp4d_cross_call_irq(void);
117void smp4d_percpu_timer_interrupt(struct pt_regs *regs);
70 118
71/* leon_smp.c */ 119/* leon_smp.c */
72void leon_cpu_pre_starting(void *arg); 120void leon_cpu_pre_starting(void *arg);
73void leon_cpu_pre_online(void *arg); 121void leon_cpu_pre_online(void *arg);
122void leonsmp_ipi_interrupt(void);
123void leon_cross_call_irq(void);
74 124
75/* head_32.S */ 125/* head_32.S */
76extern unsigned int t_nmi[]; 126extern unsigned int t_nmi[];
@@ -89,12 +139,48 @@ extern unsigned int real_irq_entry[];
89extern unsigned int smp4d_ticker[]; 139extern unsigned int smp4d_ticker[];
90extern unsigned int patchme_maybe_smp_msg[]; 140extern unsigned int patchme_maybe_smp_msg[];
91 141
92extern void floppy_hardint(void); 142void floppy_hardint(void);
93 143
94/* trampoline_32.S */ 144/* trampoline_32.S */
95extern unsigned long sun4m_cpu_startup; 145extern unsigned long sun4m_cpu_startup;
96extern unsigned long sun4d_cpu_startup; 146extern unsigned long sun4d_cpu_startup;
97 147
148/* process_32.c */
149asmlinkage int sparc_do_fork(unsigned long clone_flags,
150 unsigned long stack_start,
151 struct pt_regs *regs,
152 unsigned long stack_size);
153
154/* signal_32.c */
155asmlinkage void do_sigreturn(struct pt_regs *regs);
156asmlinkage void do_rt_sigreturn(struct pt_regs *regs);
157void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
158 unsigned long thread_info_flags);
159asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
160 struct sigstack __user *ossptr,
161 unsigned long sp);
162
163/* ptrace_32.c */
164asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p);
165
166/* unaligned_32.c */
167asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
168asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn);
169
170/* windows.c */
171void try_to_clear_window_buffer(struct pt_regs *regs, int who);
172
173/* auxio_32.c */
174void __init auxio_probe(void);
175void __init auxio_power_probe(void);
176
177/* pcic.c */
178extern void __iomem *pcic_regs;
179void pcic_nmi(unsigned int pend, struct pt_regs *regs);
180
181/* time_32.c */
182void __init time_init(void);
183
98#else /* CONFIG_SPARC32 */ 184#else /* CONFIG_SPARC32 */
99#endif /* CONFIG_SPARC32 */ 185#endif /* CONFIG_SPARC32 */
100#endif /* !(__SPARC_KERNEL_H) */ 186#endif /* !(__SPARC_KERNEL_H) */
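The kernel.h hunk is the centre of this series: prototypes that used to be declared ad hoc with extern inside individual .c files (see the audit.c, devices.c and iommu.c hunks above) are collected in this one local header. With a single declaration point, the definition and every caller are checked against the same prototype, and diagnostics such as -Wmissing-prototypes or sparse's missing-declaration warnings go away. A compact single-file illustration of the idea, with invented names:

/*
 * proto_demo.c - why prototypes move into a shared header instead of
 * ad-hoc "extern" lines inside callers.
 * Build with: cc -Wall -Wmissing-prototypes -o proto_demo proto_demo.c
 */
#include <stdio.h>

/* --- what would live in a shared demo_kernel.h ------------------------ */
void demo_probe(void);		/* the definition and all users see this */

/* --- definition (think: one .c file) ---------------------------------- */
void demo_probe(void)
{
	printf("probing\n");
}

/*
 * --- caller (think: another .c file) ----------------------------------
 * Before the cleanup the caller carried its own local declaration:
 *
 *	extern void demo_probe(void);	// duplicated and easy to let rot
 *
 * With the prototype in the shared header, a signature change is caught by
 * the compiler in every file that includes it.
 */
int main(void)
{
	demo_probe();
	return 0;
}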
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index b45fe3fb4d2c..cbf21d0870e0 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -13,6 +13,8 @@
13#include <asm/ptrace.h> 13#include <asm/ptrace.h>
14#include <asm/irq.h> 14#include <asm/irq.h>
15 15
16#include "kernel.h"
17
16void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 18void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
17{ 19{
18 struct reg_window *win; 20 struct reg_window *win;
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 1b0973503197..98d712843413 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -512,7 +512,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
512/* 512/*
513 * Called when the probe at kretprobe trampoline is hit 513 * Called when the probe at kretprobe trampoline is hit
514 */ 514 */
515int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 515static int __kprobes trampoline_probe_handler(struct kprobe *p,
516 struct pt_regs *regs)
516{ 517{
517 struct kretprobe_instance *ri = NULL; 518 struct kretprobe_instance *ri = NULL;
518 struct hlist_head *head, empty_rp; 519 struct hlist_head *head, empty_rp;
@@ -576,7 +577,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
576 return 1; 577 return 1;
577} 578}
578 579
579void kretprobe_trampoline_holder(void) 580static void __used kretprobe_trampoline_holder(void)
580{ 581{
581 asm volatile(".global kretprobe_trampoline\n" 582 asm volatile(".global kretprobe_trampoline\n"
582 "kretprobe_trampoline:\n" 583 "kretprobe_trampoline:\n"
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index b7c68976cbc7..683c4af999de 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -32,12 +32,12 @@ struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base addr
32 32
33int leondebug_irq_disable; 33int leondebug_irq_disable;
34int leon_debug_irqout; 34int leon_debug_irqout;
35static int dummy_master_l10_counter; 35static volatile u32 dummy_master_l10_counter;
36unsigned long amba_system_id; 36unsigned long amba_system_id;
37static DEFINE_SPINLOCK(leon_irq_lock); 37static DEFINE_SPINLOCK(leon_irq_lock);
38 38
39static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
39unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ 40unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
40unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
41unsigned int sparc_leon_eirq; 41unsigned int sparc_leon_eirq;
42#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) 42#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
43#define LEON_IACK (&leon3_irqctrl_regs->iclear) 43#define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -65,7 +65,7 @@ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
65} 65}
66 66
67/* The extended IRQ controller has been found, this function registers it */ 67/* The extended IRQ controller has been found, this function registers it */
68void leon_eirq_setup(unsigned int eirq) 68static void leon_eirq_setup(unsigned int eirq)
69{ 69{
70 unsigned long mask, oldmask; 70 unsigned long mask, oldmask;
71 unsigned int veirq; 71 unsigned int veirq;
@@ -270,7 +270,7 @@ static u32 leon_cycles_offset(void)
270#ifdef CONFIG_SMP 270#ifdef CONFIG_SMP
271 271
272/* smp clockevent irq */ 272/* smp clockevent irq */
273irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) 273static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
274{ 274{
275 struct clock_event_device *ce; 275 struct clock_event_device *ce;
276 int cpu = smp_processor_id(); 276 int cpu = smp_processor_id();
@@ -313,7 +313,7 @@ void __init leon_init_timers(void)
313 313
314 leondebug_irq_disable = 0; 314 leondebug_irq_disable = 0;
315 leon_debug_irqout = 0; 315 leon_debug_irqout = 0;
316 master_l10_counter = (unsigned int *)&dummy_master_l10_counter; 316 master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
317 dummy_master_l10_counter = 0; 317 dummy_master_l10_counter = 0;
318 318
319 rootnp = of_find_node_by_path("/ambapp0"); 319 rootnp = of_find_node_by_path("/ambapp0");
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index e16c4157e1ae..899b7203a4e4 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -98,82 +98,3 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
98{ 98{
99 return res->start; 99 return res->start;
100} 100}
101
102/* in/out routines taken from pcic.c
103 *
104 * This probably belongs here rather than ioport.c because
105 * we do not want this crud linked into SBus kernels.
106 * Also, think for a moment about likes of floppy.c that
107 * include architecture specific parts. They may want to redefine ins/outs.
108 *
109 * We do not use horrible macros here because we want to
110 * advance pointer by sizeof(size).
111 */
112void outsb(unsigned long addr, const void *src, unsigned long count)
113{
114 while (count) {
115 count -= 1;
116 outb(*(const char *)src, addr);
117 src += 1;
118 /* addr += 1; */
119 }
120}
121EXPORT_SYMBOL(outsb);
122
123void outsw(unsigned long addr, const void *src, unsigned long count)
124{
125 while (count) {
126 count -= 2;
127 outw(*(const short *)src, addr);
128 src += 2;
129 /* addr += 2; */
130 }
131}
132EXPORT_SYMBOL(outsw);
133
134void outsl(unsigned long addr, const void *src, unsigned long count)
135{
136 while (count) {
137 count -= 4;
138 outl(*(const long *)src, addr);
139 src += 4;
140 /* addr += 4; */
141 }
142}
143EXPORT_SYMBOL(outsl);
144
145void insb(unsigned long addr, void *dst, unsigned long count)
146{
147 while (count) {
148 count -= 1;
149 *(unsigned char *)dst = inb(addr);
150 dst += 1;
151 /* addr += 1; */
152 }
153}
154EXPORT_SYMBOL(insb);
155
156void insw(unsigned long addr, void *dst, unsigned long count)
157{
158 while (count) {
159 count -= 2;
160 *(unsigned short *)dst = inw(addr);
161 dst += 2;
162 /* addr += 2; */
163 }
164}
165EXPORT_SYMBOL(insw);
166
167void insl(unsigned long addr, void *dst, unsigned long count)
168{
169 while (count) {
170 count -= 4;
171 /*
172 * XXX I am sure we are in for an unaligned trap here.
173 */
174 *(unsigned long *)dst = inl(addr);
175 dst += 4;
176 /* addr += 4; */
177 }
178}
179EXPORT_SYMBOL(insl);
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 6df26e37f879..c8bf26edfa7c 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -80,7 +80,7 @@ struct grpci1_regs {
80 80
81struct grpci1_priv { 81struct grpci1_priv {
82 struct leon_pci_info info; /* must be on top of this structure */ 82 struct leon_pci_info info; /* must be on top of this structure */
83 struct grpci1_regs *regs; /* GRPCI register map */ 83 struct grpci1_regs __iomem *regs; /* GRPCI register map */
84 struct device *dev; 84 struct device *dev;
85 int pci_err_mask; /* STATUS register error mask */ 85 int pci_err_mask; /* STATUS register error mask */
86 int irq; /* LEON irqctrl GRPCI IRQ */ 86 int irq; /* LEON irqctrl GRPCI IRQ */
@@ -101,7 +101,7 @@ static struct grpci1_priv *grpci1priv;
101static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus, 101static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
102 unsigned int devfn, int where, u32 val); 102 unsigned int devfn, int where, u32 val);
103 103
104int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 104static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
105{ 105{
106 struct grpci1_priv *priv = dev->bus->sysdata; 106 struct grpci1_priv *priv = dev->bus->sysdata;
107 int irq_group; 107 int irq_group;
@@ -144,7 +144,7 @@ static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
144 grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp); 144 grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
145 } else { 145 } else {
146 /* Bus always little endian (unaffected by byte-swapping) */ 146 /* Bus always little endian (unaffected by byte-swapping) */
147 *val = flip_dword(tmp); 147 *val = swab32(tmp);
148 } 148 }
149 149
150 return 0; 150 return 0;
@@ -197,7 +197,7 @@ static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
197 197
198 pci_conf = (unsigned int *) (priv->pci_conf | 198 pci_conf = (unsigned int *) (priv->pci_conf |
199 (devfn << 8) | (where & 0xfc)); 199 (devfn << 8) | (where & 0xfc));
200 LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val)); 200 LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
201 201
202 return 0; 202 return 0;
203} 203}
@@ -417,10 +417,10 @@ out:
417 * BAR1: peripheral DMA to host's memory (size at least 256MByte) 417 * BAR1: peripheral DMA to host's memory (size at least 256MByte)
418 * BAR2..BAR5: not implemented in hardware 418 * BAR2..BAR5: not implemented in hardware
419 */ 419 */
420void grpci1_hw_init(struct grpci1_priv *priv) 420static void grpci1_hw_init(struct grpci1_priv *priv)
421{ 421{
422 u32 ahbadr, bar_sz, data, pciadr; 422 u32 ahbadr, bar_sz, data, pciadr;
423 struct grpci1_regs *regs = priv->regs; 423 struct grpci1_regs __iomem *regs = priv->regs;
424 424
425 /* set 1:1 mapping between AHB -> PCI memory space */ 425 /* set 1:1 mapping between AHB -> PCI memory space */
426 REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000); 426 REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
@@ -509,7 +509,7 @@ static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
509 509
510static int grpci1_of_probe(struct platform_device *ofdev) 510static int grpci1_of_probe(struct platform_device *ofdev)
511{ 511{
512 struct grpci1_regs *regs; 512 struct grpci1_regs __iomem *regs;
513 struct grpci1_priv *priv; 513 struct grpci1_priv *priv;
514 int err, len; 514 int err, len;
515 const int *tmp; 515 const int *tmp;
@@ -690,7 +690,7 @@ err3:
690err2: 690err2:
691 release_resource(&priv->info.mem_space); 691 release_resource(&priv->info.mem_space);
692err1: 692err1:
693 iounmap((void *)priv->pci_io_va); 693 iounmap((void __iomem *)priv->pci_io_va);
694 grpci1priv = NULL; 694 grpci1priv = NULL;
695 return err; 695 return err;
696} 696}
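The grpci1 hunks above mostly add the sparse __iomem address-space annotation to the register pointers and to the iounmap() argument, so the checker can verify that MMIO mappings are only touched through the I/O accessors. A minimal sketch of that pattern, using hypothetical demo_* names rather than the GRPCI1 driver itself:

#include <linux/errno.h>
#include <linux/io.h>

struct demo_priv {
        void __iomem *regs;     /* MMIO mapping; only accessed via readl()/writel() */
};

static int demo_map_regs(struct demo_priv *p, unsigned long phys, unsigned long size)
{
        p->regs = ioremap(phys, size);
        if (p->regs == NULL)
                return -ENOMEM;
        writel(0x1, p->regs);   /* the accessors take __iomem pointers */
        return 0;
}

static void demo_unmap_regs(struct demo_priv *p)
{
        iounmap(p->regs);       /* iounmap() also expects a __iomem pointer */
}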
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 24d6a4446349..e433a4d69fe0 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -191,7 +191,7 @@ struct grpci2_cap_first {
191 191
192struct grpci2_priv { 192struct grpci2_priv {
193 struct leon_pci_info info; /* must be on top of this structure */ 193 struct leon_pci_info info; /* must be on top of this structure */
194 struct grpci2_regs *regs; 194 struct grpci2_regs __iomem *regs;
195 char irq; 195 char irq;
196 char irq_mode; /* IRQ Mode from CAPSTS REG */ 196 char irq_mode; /* IRQ Mode from CAPSTS REG */
197 char bt_enabled; 197 char bt_enabled;
@@ -215,10 +215,10 @@ struct grpci2_priv {
215 struct grpci2_barcfg tgtbars[6]; 215 struct grpci2_barcfg tgtbars[6];
216}; 216};
217 217
218DEFINE_SPINLOCK(grpci2_dev_lock); 218static DEFINE_SPINLOCK(grpci2_dev_lock);
219struct grpci2_priv *grpci2priv; 219static struct grpci2_priv *grpci2priv;
220 220
221int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 221static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
222{ 222{
223 struct grpci2_priv *priv = dev->bus->sysdata; 223 struct grpci2_priv *priv = dev->bus->sysdata;
224 int irq_group; 224 int irq_group;
@@ -270,7 +270,7 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
270 *val = 0xffffffff; 270 *val = 0xffffffff;
271 } else { 271 } else {
272 /* Bus always little endian (unaffected by byte-swapping) */ 272 /* Bus always little endian (unaffected by byte-swapping) */
273 *val = flip_dword(tmp); 273 *val = swab32(tmp);
274 } 274 }
275 275
276 return 0; 276 return 0;
@@ -328,7 +328,7 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
328 328
329 pci_conf = (unsigned int *) (priv->pci_conf | 329 pci_conf = (unsigned int *) (priv->pci_conf |
330 (devfn << 8) | (where & 0xfc)); 330 (devfn << 8) | (where & 0xfc));
331 LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val)); 331 LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
332 332
333 /* Wait until GRPCI2 signals that CFG access is done, it should be 333 /* Wait until GRPCI2 signals that CFG access is done, it should be
334 * done instantaneously unless a DMA operation is ongoing... 334 * done instantaneously unless a DMA operation is ongoing...
@@ -561,10 +561,10 @@ out:
561 return virq; 561 return virq;
562} 562}
563 563
564void grpci2_hw_init(struct grpci2_priv *priv) 564static void grpci2_hw_init(struct grpci2_priv *priv)
565{ 565{
566 u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; 566 u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
567 struct grpci2_regs *regs = priv->regs; 567 struct grpci2_regs __iomem *regs = priv->regs;
568 int i; 568 int i;
569 struct grpci2_barcfg *barcfg = priv->tgtbars; 569 struct grpci2_barcfg *barcfg = priv->tgtbars;
570 570
@@ -655,7 +655,7 @@ static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
655static irqreturn_t grpci2_err_interrupt(int irq, void *arg) 655static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
656{ 656{
657 struct grpci2_priv *priv = arg; 657 struct grpci2_priv *priv = arg;
658 struct grpci2_regs *regs = priv->regs; 658 struct grpci2_regs __iomem *regs = priv->regs;
659 unsigned int status; 659 unsigned int status;
660 660
661 status = REGLOAD(regs->sts_cap); 661 status = REGLOAD(regs->sts_cap);
@@ -682,7 +682,7 @@ static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
682 682
683static int grpci2_of_probe(struct platform_device *ofdev) 683static int grpci2_of_probe(struct platform_device *ofdev)
684{ 684{
685 struct grpci2_regs *regs; 685 struct grpci2_regs __iomem *regs;
686 struct grpci2_priv *priv; 686 struct grpci2_priv *priv;
687 int err, i, len; 687 int err, i, len;
688 const int *tmp; 688 const int *tmp;
@@ -878,7 +878,7 @@ err4:
878 release_resource(&priv->info.mem_space); 878 release_resource(&priv->info.mem_space);
879err3: 879err3:
880 err = -ENOMEM; 880 err = -ENOMEM;
881 iounmap((void *)priv->pci_io_va); 881 iounmap((void __iomem *)priv->pci_io_va);
882err2: 882err2:
883 kfree(priv); 883 kfree(priv);
884err1: 884err1:
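Besides the same __iomem and static cleanups, both GRPCI hunks replace the driver-local flip_dword() with the generic swab32(), which unconditionally reverses the four bytes of a 32-bit value — what a big-endian SPARC host needs for the little-endian configuration dwords read through the bypass loads. A stand-alone illustration of that helper (the demo function name is hypothetical):

#include <linux/swab.h>
#include <linux/types.h>

/* swab32() reverses byte order: 0x12345678 becomes 0x78563412 (and back again). */
static u32 demo_cfg_to_cpu(u32 raw_le)
{
        return swab32(raw_le);
}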
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index b0b3967a2dd2..ddcf950282ed 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -12,14 +12,14 @@
12#include <asm/processor.h> 12#include <asm/processor.h>
13 13
14/* List of Systems that need fixup instructions around power-down instruction */ 14/* List of Systems that need fixup instructions around power-down instruction */
15unsigned int pmc_leon_fixup_ids[] = { 15static unsigned int pmc_leon_fixup_ids[] = {
16 AEROFLEX_UT699, 16 AEROFLEX_UT699,
17 GAISLER_GR712RC, 17 GAISLER_GR712RC,
18 LEON4_NEXTREME1, 18 LEON4_NEXTREME1,
19 0 19 0
20}; 20};
21 21
22int pmc_leon_need_fixup(void) 22static int pmc_leon_need_fixup(void)
23{ 23{
24 unsigned int systemid = amba_system_id >> 16; 24 unsigned int systemid = amba_system_id >> 16;
25 unsigned int *id; 25 unsigned int *id;
@@ -38,7 +38,7 @@ int pmc_leon_need_fixup(void)
38 * CPU idle callback function for systems that need some extra handling 38 * CPU idle callback function for systems that need some extra handling
39 * See .../arch/sparc/kernel/process.c 39 * See .../arch/sparc/kernel/process.c
40 */ 40 */
41void pmc_leon_idle_fixup(void) 41static void pmc_leon_idle_fixup(void)
42{ 42{
43 /* Prepare an address to a non-cachable region. APB is always 43 /* Prepare an address to a non-cachable region. APB is always
44 * none-cachable. One instruction is executed after the Sleep 44 * none-cachable. One instruction is executed after the Sleep
@@ -62,7 +62,7 @@ void pmc_leon_idle_fixup(void)
62 * CPU idle callback function 62 * CPU idle callback function
63 * See .../arch/sparc/kernel/process.c 63 * See .../arch/sparc/kernel/process.c
64 */ 64 */
65void pmc_leon_idle(void) 65static void pmc_leon_idle(void)
66{ 66{
67 /* Interrupts need to be enabled to not hang the CPU */ 67 /* Interrupts need to be enabled to not hang the CPU */
68 local_irq_enable(); 68 local_irq_enable();
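pmc_leon_idle() and pmc_leon_idle_fixup() can become static because, as the CPU-idle-callback comments above indicate, they are only published by address from within this file; no other compilation unit needs their symbol names. A generic sketch of that pattern, with hypothetical names:

#include <linux/init.h>

static void (*demo_idle_hook)(void);

static void demo_idle(void)
{
        /* platform-specific wait-for-interrupt would go here */
}

static int __init demo_pm_init(void)
{
        demo_idle_hook = demo_idle;     /* the address escapes, the symbol name does not */
        return 0;
}
device_initcall(demo_pm_init);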
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 6edf955f987c..018ef11f57df 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -130,7 +130,7 @@ void leon_configure_cache_smp(void)
130 local_ops->tlb_all(); 130 local_ops->tlb_all();
131} 131}
132 132
133void leon_smp_setbroadcast(unsigned int mask) 133static void leon_smp_setbroadcast(unsigned int mask)
134{ 134{
135 int broadcast = 135 int broadcast =
136 ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> 136 ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
@@ -148,13 +148,6 @@ void leon_smp_setbroadcast(unsigned int mask)
148 LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask); 148 LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
149} 149}
150 150
151unsigned int leon_smp_getbroadcast(void)
152{
153 unsigned int mask;
154 mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
155 return mask;
156}
157
158int leon_smp_nrcpus(void) 151int leon_smp_nrcpus(void)
159{ 152{
160 int nrcpu = 153 int nrcpu =
@@ -266,10 +259,6 @@ void __init leon_smp_done(void)
266 259
267} 260}
268 261
269void leon_irq_rotate(int cpu)
270{
271}
272
273struct leon_ipi_work { 262struct leon_ipi_work {
274 int single; 263 int single;
275 int msk; 264 int msk;
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index 3241f56331c2..de0ee3971f00 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -5,8 +5,10 @@
5#include <linux/mod_devicetable.h> 5#include <linux/mod_devicetable.h>
6#include <linux/errno.h> 6#include <linux/errno.h>
7#include <linux/irq.h> 7#include <linux/irq.h>
8#include <linux/of_device.h>
9#include <linux/of_platform.h> 8#include <linux/of_platform.h>
9#include <linux/of_address.h>
10#include <linux/of_device.h>
11#include <linux/of_irq.h>
10 12
11#include "of_device_common.h" 13#include "of_device_common.h"
12 14
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 857ad77df9c0..539babf00bb2 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -28,6 +28,7 @@
28#include <asm/apb.h> 28#include <asm/apb.h>
29 29
30#include "pci_impl.h" 30#include "pci_impl.h"
31#include "kernel.h"
31 32
32/* List of all PCI controllers found in the system. */ 33/* List of all PCI controllers found in the system. */
33struct pci_pbm_info *pci_pbm_root = NULL; 34struct pci_pbm_info *pci_pbm_root = NULL;
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index 5f688531f48c..75803c780af3 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -48,8 +48,8 @@ struct sparc64_msiq_ops {
48 unsigned long devino); 48 unsigned long devino);
49}; 49};
50 50
51extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, 51void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
52 const struct sparc64_msiq_ops *ops); 52 const struct sparc64_msiq_ops *ops);
53 53
54struct sparc64_msiq_cookie { 54struct sparc64_msiq_cookie {
55 struct pci_pbm_info *pbm; 55 struct pci_pbm_info *pbm;
@@ -158,23 +158,23 @@ extern struct pci_pbm_info *pci_pbm_root;
158extern int pci_num_pbms; 158extern int pci_num_pbms;
159 159
160/* PCI bus scanning and fixup support. */ 160/* PCI bus scanning and fixup support. */
161extern void pci_get_pbm_props(struct pci_pbm_info *pbm); 161void pci_get_pbm_props(struct pci_pbm_info *pbm);
162extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, 162struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
163 struct device *parent); 163 struct device *parent);
164extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm); 164void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
165 165
166/* Error reporting support. */ 166/* Error reporting support. */
167extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *); 167void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
168extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *); 168void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
169extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *); 169void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
170 170
171/* Configuration space access. */ 171/* Configuration space access. */
172extern void pci_config_read8(u8 *addr, u8 *ret); 172void pci_config_read8(u8 *addr, u8 *ret);
173extern void pci_config_read16(u16 *addr, u16 *ret); 173void pci_config_read16(u16 *addr, u16 *ret);
174extern void pci_config_read32(u32 *addr, u32 *ret); 174void pci_config_read32(u32 *addr, u32 *ret);
175extern void pci_config_write8(u8 *addr, u8 val); 175void pci_config_write8(u8 *addr, u8 val);
176extern void pci_config_write16(u16 *addr, u16 val); 176void pci_config_write16(u16 *addr, u16 val);
177extern void pci_config_write32(u32 *addr, u32 val); 177void pci_config_write32(u32 *addr, u32 val);
178 178
179extern struct pci_ops sun4u_pci_ops; 179extern struct pci_ops sun4u_pci_ops;
180extern struct pci_ops sun4v_pci_ops; 180extern struct pci_ops sun4v_pci_ops;
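The pci_impl.h hunk drops the redundant extern specifier from the function prototypes: a file-scope function declaration has external linkage by default, so both spellings below declare exactly the same thing, and the kernel's header cleanups favour the shorter one (object declarations such as the pci_ops entries above keep their extern). A two-line illustration:

struct pci_pbm_info;                                            /* forward declaration for the sketch */

extern void pci_get_pbm_props(struct pci_pbm_info *pbm);        /* old style    */
void pci_get_pbm_props(struct pci_pbm_info *pbm);               /* same meaning */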
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 8e9fc3a5b4f5..5642212390b2 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -6,87 +6,87 @@
6#ifndef _PCI_SUN4V_H 6#ifndef _PCI_SUN4V_H
7#define _PCI_SUN4V_H 7#define _PCI_SUN4V_H
8 8
9extern long pci_sun4v_iommu_map(unsigned long devhandle, 9long pci_sun4v_iommu_map(unsigned long devhandle,
10 unsigned long tsbid, 10 unsigned long tsbid,
11 unsigned long num_ttes, 11 unsigned long num_ttes,
12 unsigned long io_attributes, 12 unsigned long io_attributes,
13 unsigned long io_page_list_pa); 13 unsigned long io_page_list_pa);
14extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, 14unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
15 unsigned long tsbid, 15 unsigned long tsbid,
16 unsigned long num_ttes); 16 unsigned long num_ttes);
17extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, 17unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
18 unsigned long tsbid, 18 unsigned long tsbid,
19 unsigned long *io_attributes, 19 unsigned long *io_attributes,
20 unsigned long *real_address); 20 unsigned long *real_address);
21extern unsigned long pci_sun4v_config_get(unsigned long devhandle, 21unsigned long pci_sun4v_config_get(unsigned long devhandle,
22 unsigned long pci_device, 22 unsigned long pci_device,
23 unsigned long config_offset, 23 unsigned long config_offset,
24 unsigned long size); 24 unsigned long size);
25extern int pci_sun4v_config_put(unsigned long devhandle, 25int pci_sun4v_config_put(unsigned long devhandle,
26 unsigned long pci_device, 26 unsigned long pci_device,
27 unsigned long config_offset, 27 unsigned long config_offset,
28 unsigned long size, 28 unsigned long size,
29 unsigned long data); 29 unsigned long data);
30 30
31extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle, 31unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
32 unsigned long msiqid, 32 unsigned long msiqid,
33 unsigned long msiq_paddr, 33 unsigned long msiq_paddr,
34 unsigned long num_entries); 34 unsigned long num_entries);
35extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle, 35unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
36 unsigned long msiqid, 36 unsigned long msiqid,
37 unsigned long *msiq_paddr, 37 unsigned long *msiq_paddr,
38 unsigned long *num_entries); 38 unsigned long *num_entries);
39extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle, 39unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
40 unsigned long msiqid, 40 unsigned long msiqid,
41 unsigned long *valid); 41 unsigned long *valid);
42extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle, 42unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
43 unsigned long msiqid, 43 unsigned long msiqid,
44 unsigned long valid); 44 unsigned long valid);
45extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle, 45unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
46 unsigned long msiqid, 46 unsigned long msiqid,
47 unsigned long *state); 47 unsigned long *state);
48extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle, 48unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
49 unsigned long msiqid, 49 unsigned long msiqid,
50 unsigned long state); 50 unsigned long state);
51extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle, 51unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
52 unsigned long msiqid, 52 unsigned long msiqid,
53 unsigned long *head); 53 unsigned long *head);
54extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle, 54unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
55 unsigned long msiqid, 55 unsigned long msiqid,
56 unsigned long head); 56 unsigned long head);
57extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle, 57unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
58 unsigned long msiqid, 58 unsigned long msiqid,
59 unsigned long *head); 59 unsigned long *head);
60extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle, 60unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
61 unsigned long msinum, 61 unsigned long msinum,
62 unsigned long *valid); 62 unsigned long *valid);
63extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle, 63unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
64 unsigned long msinum, 64 unsigned long msinum,
65 unsigned long valid); 65 unsigned long valid);
66extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle, 66unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
67 unsigned long msinum, 67 unsigned long msinum,
68 unsigned long *msiq); 68 unsigned long *msiq);
69extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle, 69unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
70 unsigned long msinum, 70 unsigned long msinum,
71 unsigned long msiq, 71 unsigned long msiq,
72 unsigned long msitype); 72 unsigned long msitype);
73extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle, 73unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
74 unsigned long msinum, 74 unsigned long msinum,
75 unsigned long *state); 75 unsigned long *state);
76extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle, 76unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
77 unsigned long msinum, 77 unsigned long msinum,
78 unsigned long state); 78 unsigned long state);
79extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle, 79unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
80 unsigned long msinum, 80 unsigned long msinum,
81 unsigned long *msiq); 81 unsigned long *msiq);
82extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle, 82unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
83 unsigned long msinum, 83 unsigned long msinum,
84 unsigned long msiq); 84 unsigned long msiq);
85extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle, 85unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
86 unsigned long msinum, 86 unsigned long msinum,
87 unsigned long *valid); 87 unsigned long *valid);
88extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, 88unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
89 unsigned long msinum, 89 unsigned long msinum,
90 unsigned long valid); 90 unsigned long valid);
91 91
92#endif /* !(_PCI_SUN4V_H) */ 92#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 09f4fdd8d808..6cc78c213c01 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -36,6 +36,7 @@
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/irq_regs.h> 37#include <asm/irq_regs.h>
38 38
39#include "kernel.h"
39#include "irq.h" 40#include "irq.h"
40 41
41/* 42/*
@@ -162,8 +163,8 @@ static int pcic0_up;
162static struct linux_pcic pcic0; 163static struct linux_pcic pcic0;
163 164
164void __iomem *pcic_regs; 165void __iomem *pcic_regs;
165volatile int pcic_speculative; 166static volatile int pcic_speculative;
166volatile int pcic_trapped; 167static volatile int pcic_trapped;
167 168
168/* forward */ 169/* forward */
169unsigned int pcic_build_device_irq(struct platform_device *op, 170unsigned int pcic_build_device_irq(struct platform_device *op,
@@ -329,7 +330,7 @@ int __init pcic_probe(void)
329 330
330 pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; 331 pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
331 if ((pcic->pcic_config_space_addr = 332 if ((pcic->pcic_config_space_addr =
332 ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) { 333 ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) {
333 prom_printf("PCIC: Error, cannot map " 334 prom_printf("PCIC: Error, cannot map "
334 "PCI Configuration Space Address.\n"); 335 "PCI Configuration Space Address.\n");
335 prom_halt(); 336 prom_halt();
@@ -341,7 +342,7 @@ int __init pcic_probe(void)
341 */ 342 */
342 pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; 343 pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
343 if ((pcic->pcic_config_space_data = 344 if ((pcic->pcic_config_space_data =
344 ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) { 345 ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) {
345 prom_printf("PCIC: Error, cannot map " 346 prom_printf("PCIC: Error, cannot map "
346 "PCI Configuration Space Data.\n"); 347 "PCI Configuration Space Data.\n");
347 prom_halt(); 348 prom_halt();
@@ -353,7 +354,6 @@ int __init pcic_probe(void)
353 strcpy(pbm->prom_name, namebuf); 354 strcpy(pbm->prom_name, namebuf);
354 355
355 { 356 {
356 extern volatile int t_nmi[4];
357 extern int pcic_nmi_trap_patch[4]; 357 extern int pcic_nmi_trap_patch[4];
358 358
359 t_nmi[0] = pcic_nmi_trap_patch[0]; 359 t_nmi[0] = pcic_nmi_trap_patch[0];
@@ -536,7 +536,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
536 prom_getstring(node, "name", namebuf, sizeof(namebuf)); 536 prom_getstring(node, "name", namebuf, sizeof(namebuf));
537 } 537 }
538 538
539 if ((p = pcic->pcic_imap) == 0) { 539 if ((p = pcic->pcic_imap) == NULL) {
540 dev->irq = 0; 540 dev->irq = 0;
541 return; 541 return;
542 } 542 }
@@ -670,30 +670,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
670 } 670 }
671} 671}
672 672
673/*
674 * pcic_pin_to_irq() is exported to bus probing code
675 */
676unsigned int
677pcic_pin_to_irq(unsigned int pin, const char *name)
678{
679 struct linux_pcic *pcic = &pcic0;
680 unsigned int irq;
681 unsigned int ivec;
682
683 if (pin < 4) {
684 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
685 irq = ivec >> (pin << 2) & 0xF;
686 } else if (pin < 8) {
687 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
688 irq = ivec >> ((pin-4) << 2) & 0xF;
689 } else { /* Corrupted map */
690 printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
691 for (;;) {} /* XXX Cannot panic properly in case of PROLL */
692 }
693/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
694 return irq;
695}
696
697/* Makes compiler happy */ 673/* Makes compiler happy */
698static volatile int pcic_timer_dummy; 674static volatile int pcic_timer_dummy;
699 675
@@ -783,7 +759,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
783void pcic_nmi(unsigned int pend, struct pt_regs *regs) 759void pcic_nmi(unsigned int pend, struct pt_regs *regs)
784{ 760{
785 761
786 pend = flip_dword(pend); 762 pend = swab32(pend);
787 763
788 if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { 764 if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
789 /* 765 /*
@@ -875,82 +851,4 @@ void __init sun4m_pci_init_IRQ(void)
875 sparc_config.load_profile_irq = pcic_load_profile_irq; 851 sparc_config.load_profile_irq = pcic_load_profile_irq;
876} 852}
877 853
878/*
879 * This probably belongs here rather than ioport.c because
880 * we do not want this crud linked into SBus kernels.
881 * Also, think for a moment about likes of floppy.c that
882 * include architecture specific parts. They may want to redefine ins/outs.
883 *
884 * We do not use horrible macros here because we want to
885 * advance pointer by sizeof(size).
886 */
887void outsb(unsigned long addr, const void *src, unsigned long count)
888{
889 while (count) {
890 count -= 1;
891 outb(*(const char *)src, addr);
892 src += 1;
893 /* addr += 1; */
894 }
895}
896EXPORT_SYMBOL(outsb);
897
898void outsw(unsigned long addr, const void *src, unsigned long count)
899{
900 while (count) {
901 count -= 2;
902 outw(*(const short *)src, addr);
903 src += 2;
904 /* addr += 2; */
905 }
906}
907EXPORT_SYMBOL(outsw);
908
909void outsl(unsigned long addr, const void *src, unsigned long count)
910{
911 while (count) {
912 count -= 4;
913 outl(*(const long *)src, addr);
914 src += 4;
915 /* addr += 4; */
916 }
917}
918EXPORT_SYMBOL(outsl);
919
920void insb(unsigned long addr, void *dst, unsigned long count)
921{
922 while (count) {
923 count -= 1;
924 *(unsigned char *)dst = inb(addr);
925 dst += 1;
926 /* addr += 1; */
927 }
928}
929EXPORT_SYMBOL(insb);
930
931void insw(unsigned long addr, void *dst, unsigned long count)
932{
933 while (count) {
934 count -= 2;
935 *(unsigned short *)dst = inw(addr);
936 dst += 2;
937 /* addr += 2; */
938 }
939}
940EXPORT_SYMBOL(insw);
941
942void insl(unsigned long addr, void *dst, unsigned long count)
943{
944 while (count) {
945 count -= 4;
946 /*
947 * XXX I am sure we are in for an unaligned trap here.
948 */
949 *(unsigned long *)dst = inl(addr);
950 dst += 4;
951 /* addr += 4; */
952 }
953}
954EXPORT_SYMBOL(insl);
955
956subsys_initcall(pcic_init); 854subsys_initcall(pcic_init);
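The pcic.c changes above are largely mechanical: the outs*/ins* block removed at the end duplicates the helpers added, with identical bodies, at the top of this section (apparently relocated to ioport.c, the destination the removed comment itself mentions), flip_dword() again becomes swab32(), and failed ioremap() results are compared against NULL instead of 0, which keeps sparse from warning about a plain integer used as a NULL pointer. A minimal sketch of the NULL-check idiom, with hypothetical names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>

static void __iomem *demo_regs;

static int demo_map_cfg(unsigned long phys, unsigned long size)
{
        demo_regs = ioremap(phys, size);
        if (demo_regs == NULL) {        /* ioremap() returns a pointer, so compare with NULL */
                pr_err("demo: cannot map configuration space\n");
                return -ENOMEM;
        }
        return 0;
}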
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b5c38faa4ead..8efd33753ad3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -110,7 +110,7 @@ struct cpu_hw_events {
110 110
111 unsigned int group_flag; 111 unsigned int group_flag;
112}; 112};
113DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 113static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
114 114
115/* An event map describes the characteristics of a performance 115/* An event map describes the characteristics of a performance
116 * counter event. In particular it gives the encoding as well as 116 * counter event. In particular it gives the encoding as well as
@@ -1153,7 +1153,7 @@ static void perf_stop_nmi_watchdog(void *unused)
1153 cpuc->pcr[i] = pcr_ops->read_pcr(i); 1153 cpuc->pcr[i] = pcr_ops->read_pcr(i);
1154} 1154}
1155 1155
1156void perf_event_grab_pmc(void) 1156static void perf_event_grab_pmc(void)
1157{ 1157{
1158 if (atomic_inc_not_zero(&active_events)) 1158 if (atomic_inc_not_zero(&active_events))
1159 return; 1159 return;
@@ -1169,7 +1169,7 @@ void perf_event_grab_pmc(void)
1169 mutex_unlock(&pmc_grab_mutex); 1169 mutex_unlock(&pmc_grab_mutex);
1170} 1170}
1171 1171
1172void perf_event_release_pmc(void) 1172static void perf_event_release_pmc(void)
1173{ 1173{
1174 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { 1174 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
1175 if (atomic_read(&nmi_active) == 0) 1175 if (atomic_read(&nmi_active) == 0)
@@ -1669,7 +1669,7 @@ static bool __init supported_pmu(void)
1669 return false; 1669 return false;
1670} 1670}
1671 1671
1672int __init init_hw_perf_events(void) 1672static int __init init_hw_perf_events(void)
1673{ 1673{
1674 pr_info("Performance events: "); 1674 pr_info("Performance events: ");
1675 1675
@@ -1742,10 +1742,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1742 1742
1743 ufp = regs->u_regs[UREG_I6] + STACK_BIAS; 1743 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1744 do { 1744 do {
1745 struct sparc_stackf *usf, sf; 1745 struct sparc_stackf __user *usf;
1746 struct sparc_stackf sf;
1746 unsigned long pc; 1747 unsigned long pc;
1747 1748
1748 usf = (struct sparc_stackf *) ufp; 1749 usf = (struct sparc_stackf __user *)ufp;
1749 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1750 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1750 break; 1751 break;
1751 1752
@@ -1765,17 +1766,19 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1765 unsigned long pc; 1766 unsigned long pc;
1766 1767
1767 if (thread32_stack_is_64bit(ufp)) { 1768 if (thread32_stack_is_64bit(ufp)) {
1768 struct sparc_stackf *usf, sf; 1769 struct sparc_stackf __user *usf;
1770 struct sparc_stackf sf;
1769 1771
1770 ufp += STACK_BIAS; 1772 ufp += STACK_BIAS;
1771 usf = (struct sparc_stackf *) ufp; 1773 usf = (struct sparc_stackf __user *)ufp;
1772 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1774 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1773 break; 1775 break;
1774 pc = sf.callers_pc & 0xffffffff; 1776 pc = sf.callers_pc & 0xffffffff;
1775 ufp = ((unsigned long) sf.fp) & 0xffffffff; 1777 ufp = ((unsigned long) sf.fp) & 0xffffffff;
1776 } else { 1778 } else {
1777 struct sparc_stackf32 *usf, sf; 1779 struct sparc_stackf32 __user *usf;
1778 usf = (struct sparc_stackf32 *) ufp; 1780 struct sparc_stackf32 sf;
1781 usf = (struct sparc_stackf32 __user *)ufp;
1779 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1782 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1780 break; 1783 break;
1781 pc = sf.callers_pc; 1784 pc = sf.callers_pc;
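In the perf callchain hunks the combined declaration `struct sparc_stackf *usf, sf;` is split so that the pointer carries the __user annotation while the local copy stays a plain kernel object, and the user stack pointer is cast with that annotation before __copy_from_user_inatomic(). A self-contained sketch of the same shape; struct demo_frame stands in for sparc_stackf:

#include <linux/uaccess.h>

struct demo_frame {
        unsigned long fp;
        unsigned long callers_pc;
};

static unsigned long demo_read_user_frame(unsigned long ufp)
{
        struct demo_frame __user *usf;  /* pointer into the user stack */
        struct demo_frame sf;           /* kernel-side copy */

        usf = (struct demo_frame __user *)ufp;
        /* variant usable where sleeping is not allowed; returns the number of bytes not copied */
        if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                return 0;
        return sf.callers_pc;
}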
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 510baec1b69b..50e7b626afe8 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -10,6 +10,7 @@
10 10
11#include <stdarg.h> 11#include <stdarg.h>
12 12
13#include <linux/elfcore.h>
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
@@ -23,6 +24,7 @@
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/pm.h> 25#include <linux/pm.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/cpu.h>
26 28
27#include <asm/auxio.h> 29#include <asm/auxio.h>
28#include <asm/oplib.h> 30#include <asm/oplib.h>
@@ -38,6 +40,8 @@
38#include <asm/unistd.h> 40#include <asm/unistd.h>
39#include <asm/setup.h> 41#include <asm/setup.h>
40 42
43#include "kernel.h"
44
41/* 45/*
42 * Power management idle function 46 * Power management idle function
43 * Set in pm platform drivers (apc.c and pmc.c) 47 * Set in pm platform drivers (apc.c and pmc.c)
@@ -102,8 +106,12 @@ void machine_restart(char * cmd)
102void machine_power_off(void) 106void machine_power_off(void)
103{ 107{
104 if (auxio_power_register && 108 if (auxio_power_register &&
105 (strcmp(of_console_device->type, "serial") || scons_pwroff)) 109 (strcmp(of_console_device->type, "serial") || scons_pwroff)) {
106 *auxio_power_register |= AUXIO_POWER_OFF; 110 u8 power_register = sbus_readb(auxio_power_register);
111 power_register |= AUXIO_POWER_OFF;
112 sbus_writeb(power_register, auxio_power_register);
113 }
114
107 machine_halt(); 115 machine_halt();
108} 116}
109 117
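machine_power_off() now goes through sbus_readb()/sbus_writeb() instead of dereferencing the auxio register pointer directly, matching the __iomem discipline applied elsewhere in this series. A sketch of that read-modify-write, with a hypothetical register argument:

#include <linux/types.h>
#include <asm/io.h>

/* Set mask bits in a byte-wide SBus register; reg is assumed to be an ioremapped register. */
static void demo_set_bits(u8 __iomem *reg, u8 mask)
{
        u8 val = sbus_readb(reg);

        val |= mask;
        sbus_writeb(val, reg);
}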
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index d7b4967f8fa6..b2988f25e230 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -88,7 +88,7 @@ void arch_cpu_idle(void)
88} 88}
89 89
90#ifdef CONFIG_HOTPLUG_CPU 90#ifdef CONFIG_HOTPLUG_CPU
91void arch_cpu_idle_dead() 91void arch_cpu_idle_dead(void)
92{ 92{
93 sched_preempt_enable_no_resched(); 93 sched_preempt_enable_no_resched();
94 cpu_play_dead(); 94 cpu_play_dead();
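The only change to process_64.c spells the empty parameter list as (void). In C, `void f()` leaves the parameters unspecified (an old-style declaration), while `void f(void)` is a true prototype stating that the function takes no arguments, so it matches the declaration in the header and lets the compiler check callers:

void demo_old_style();          /* parameters unspecified */
void demo_prototype(void);      /* takes no arguments     */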
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index cf5fe1c0b024..890281b12b28 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -4,7 +4,7 @@
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <asm/prom.h> 5#include <asm/prom.h>
6 6
7extern void of_console_init(void); 7void of_console_init(void);
8 8
9extern unsigned int prom_early_allocated; 9extern unsigned int prom_early_allocated;
10 10
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 9a690d39c01b..20cc5d80a471 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -15,11 +15,12 @@
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17 17
18#include <linux/memblock.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/types.h>
22#include <linux/cpu.h>
21#include <linux/mm.h> 23#include <linux/mm.h>
22#include <linux/memblock.h>
23#include <linux/of.h> 24#include <linux/of.h>
24 25
25#include <asm/prom.h> 26#include <asm/prom.h>
diff --git a/arch/sparc/kernel/psycho_common.h b/arch/sparc/kernel/psycho_common.h
index 590b4ed8ab5e..05a6e30a928e 100644
--- a/arch/sparc/kernel/psycho_common.h
+++ b/arch/sparc/kernel/psycho_common.h
@@ -30,19 +30,19 @@ enum psycho_error_type {
30 UE_ERR, CE_ERR, PCI_ERR 30 UE_ERR, CE_ERR, PCI_ERR
31}; 31};
32 32
33extern void psycho_check_iommu_error(struct pci_pbm_info *pbm, 33void psycho_check_iommu_error(struct pci_pbm_info *pbm,
34 unsigned long afsr, 34 unsigned long afsr,
35 unsigned long afar, 35 unsigned long afar,
36 enum psycho_error_type type); 36 enum psycho_error_type type);
37 37
38extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id); 38irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
39 39
40extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, 40int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
41 u32 dvma_offset, u32 dma_mask, 41 u32 dvma_offset, u32 dma_mask,
42 unsigned long write_complete_offset); 42 unsigned long write_complete_offset);
43 43
44extern void psycho_pbm_init_common(struct pci_pbm_info *pbm, 44void psycho_pbm_init_common(struct pci_pbm_info *pbm,
45 struct platform_device *op, 45 struct platform_device *op,
46 const char *chip_name, int chip_type); 46 const char *chip_name, int chip_type);
47 47
48#endif /* _PSYCHO_COMMON_H */ 48#endif /* _PSYCHO_COMMON_H */
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 896ba7c5cd8e..a331fdc11a2c 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -26,6 +26,8 @@
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28 28
29#include "kernel.h"
30
29/* #define ALLOW_INIT_TRACING */ 31/* #define ALLOW_INIT_TRACING */
30 32
31/* 33/*
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 1434526970a6..baef495c06bd 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -267,7 +267,7 @@ static __init void leon_patch(void)
267} 267}
268 268
269struct tt_entry *sparc_ttable; 269struct tt_entry *sparc_ttable;
270struct pt_regs fake_swapper_regs; 270static struct pt_regs fake_swapper_regs;
271 271
272/* Called from head_32.S - before we have setup anything 272/* Called from head_32.S - before we have setup anything
273 * in the kernel. Be very careful with what you do here. 273 * in the kernel. Be very careful with what you do here.
@@ -365,7 +365,7 @@ void __init setup_arch(char **cmdline_p)
365 365
366 prom_setsync(prom_sync_me); 366 prom_setsync(prom_sync_me);
367 367
368 if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) && 368 if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
369 ((*(short *)linux_dbvec) != -1)) { 369 ((*(short *)linux_dbvec) != -1)) {
370 printk("Booted under KADB. Syncing trap table.\n"); 370 printk("Booted under KADB. Syncing trap table.\n");
371 (*(linux_dbvec->teach_debugger))(); 371 (*(linux_dbvec->teach_debugger))();
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ee789d2ef05d..62deba7be1a9 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -31,6 +31,7 @@
31#include <asm/switch_to.h> 31#include <asm/switch_to.h>
32 32
33#include "sigutil.h" 33#include "sigutil.h"
34#include "kernel.h"
34 35
35/* This magic should be in g_upper[0] for all upper parts 36/* This magic should be in g_upper[0] for all upper parts
36 * to be valid. 37 * to be valid.
@@ -145,7 +146,7 @@ void do_sigreturn32(struct pt_regs *regs)
145 unsigned int psr; 146 unsigned int psr;
146 unsigned pc, npc; 147 unsigned pc, npc;
147 sigset_t set; 148 sigset_t set;
148 unsigned seta[_COMPAT_NSIG_WORDS]; 149 compat_sigset_t seta;
149 int err, i; 150 int err, i;
150 151
151 /* Always make any pending restarted system calls return -EINTR */ 152 /* Always make any pending restarted system calls return -EINTR */
@@ -209,17 +210,13 @@ void do_sigreturn32(struct pt_regs *regs)
209 if (restore_rwin_state(compat_ptr(rwin_save))) 210 if (restore_rwin_state(compat_ptr(rwin_save)))
210 goto segv; 211 goto segv;
211 } 212 }
212 err |= __get_user(seta[0], &sf->info.si_mask); 213 err |= __get_user(seta.sig[0], &sf->info.si_mask);
213 err |= copy_from_user(seta+1, &sf->extramask, 214 err |= copy_from_user(&seta.sig[1], &sf->extramask,
214 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); 215 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
215 if (err) 216 if (err)
216 goto segv; 217 goto segv;
217 switch (_NSIG_WORDS) { 218
218 case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32); 219 set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
219 case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
220 case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
221 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
222 }
223 set_current_blocked(&set); 220 set_current_blocked(&set);
224 return; 221 return;
225 222
@@ -303,12 +300,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
303 goto segv; 300 goto segv;
304 } 301 }
305 302
306 switch (_NSIG_WORDS) { 303 set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
307 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
308 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
309 case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
310 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
311 }
312 set_current_blocked(&set); 304 set_current_blocked(&set);
313 return; 305 return;
314segv: 306segv:
@@ -417,7 +409,7 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
417 void __user *tail; 409 void __user *tail;
418 int sigframe_size; 410 int sigframe_size;
419 u32 psr; 411 u32 psr;
420 unsigned int seta[_COMPAT_NSIG_WORDS]; 412 compat_sigset_t seta;
421 413
422 /* 1. Make sure everything is clean */ 414 /* 1. Make sure everything is clean */
423 synchronize_user_stack(); 415 synchronize_user_stack();
@@ -481,18 +473,14 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
481 err |= __put_user(0, &sf->rwin_save); 473 err |= __put_user(0, &sf->rwin_save);
482 } 474 }
483 475
484 switch (_NSIG_WORDS) { 476 /* If these change we need to know - assignments to seta relies on these sizes */
485 case 4: seta[7] = (oldset->sig[3] >> 32); 477 BUILD_BUG_ON(_NSIG_WORDS != 1);
486 seta[6] = oldset->sig[3]; 478 BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
487 case 3: seta[5] = (oldset->sig[2] >> 32); 479 seta.sig[1] = (oldset->sig[0] >> 32);
488 seta[4] = oldset->sig[2]; 480 seta.sig[0] = oldset->sig[0];
489 case 2: seta[3] = (oldset->sig[1] >> 32); 481
490 seta[2] = oldset->sig[1]; 482 err |= __put_user(seta.sig[0], &sf->info.si_mask);
491 case 1: seta[1] = (oldset->sig[0] >> 32); 483 err |= __copy_to_user(sf->extramask, &seta.sig[1],
492 seta[0] = oldset->sig[0];
493 }
494 err |= __put_user(seta[0], &sf->info.si_mask);
495 err |= __copy_to_user(sf->extramask, seta + 1,
496 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); 484 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
497 485
498 if (!wsaved) { 486 if (!wsaved) {
@@ -622,16 +610,8 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
622 /* Setup sigaltstack */ 610 /* Setup sigaltstack */
623 err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]); 611 err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
624 612
625 switch (_NSIG_WORDS) { 613 seta.sig[1] = (oldset->sig[0] >> 32);
626 case 4: seta.sig[7] = (oldset->sig[3] >> 32); 614 seta.sig[0] = oldset->sig[0];
627 seta.sig[6] = oldset->sig[3];
628 case 3: seta.sig[5] = (oldset->sig[2] >> 32);
629 seta.sig[4] = oldset->sig[2];
630 case 2: seta.sig[3] = (oldset->sig[1] >> 32);
631 seta.sig[2] = oldset->sig[1];
632 case 1: seta.sig[1] = (oldset->sig[0] >> 32);
633 seta.sig[0] = oldset->sig[0];
634 }
635 err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); 615 err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
636 616
637 if (!wsaved) { 617 if (!wsaved) {
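The sigreturn/sigframe hunks above replace the _NSIG_WORDS switch with the single case that can occur on sparc64 (_NSIG_WORDS == 1, _COMPAT_NSIG_WORDS == 2), guarded by BUILD_BUG_ON so the collapsed form breaks the build if those constants ever change. The underlying conversion just splits or joins one 64-bit signal word and two 32-bit compat words, sketched here with hypothetical helpers:

#include <linux/types.h>

/* one 64-bit sigset word -> two 32-bit compat words, and back */
static u64 demo_join_sigword(u32 lo, u32 hi)
{
        return (u64)lo | ((u64)hi << 32);
}

static void demo_split_sigword(u64 word, u32 *lo, u32 *hi)
{
        *lo = (u32)word;
        *hi = (u32)(word >> 32);
}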
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7d5d8e1f8415..9ee72fc8e0e4 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -28,6 +28,7 @@
28#include <asm/switch_to.h> 28#include <asm/switch_to.h>
29 29
30#include "sigutil.h" 30#include "sigutil.h"
31#include "kernel.h"
31 32
32extern void fpsave(unsigned long *fpregs, unsigned long *fsr, 33extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
33 void *fpqueue, unsigned long *fpqdepth); 34 void *fpqueue, unsigned long *fpqdepth);
@@ -341,7 +342,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
341 err |= __put_user(0, &sf->extra_size); 342 err |= __put_user(0, &sf->extra_size);
342 343
343 if (psr & PSR_EF) { 344 if (psr & PSR_EF) {
344 __siginfo_fpu_t *fp = tail; 345 __siginfo_fpu_t __user *fp = tail;
345 tail += sizeof(*fp); 346 tail += sizeof(*fp);
346 err |= save_fpu_state(regs, fp); 347 err |= save_fpu_state(regs, fp);
347 err |= __put_user(fp, &sf->fpu_save); 348 err |= __put_user(fp, &sf->fpu_save);
@@ -349,7 +350,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
349 err |= __put_user(0, &sf->fpu_save); 350 err |= __put_user(0, &sf->fpu_save);
350 } 351 }
351 if (wsaved) { 352 if (wsaved) {
352 __siginfo_rwin_t *rwp = tail; 353 __siginfo_rwin_t __user *rwp = tail;
353 tail += sizeof(*rwp); 354 tail += sizeof(*rwp);
354 err |= save_rwin_state(wsaved, rwp); 355 err |= save_rwin_state(wsaved, rwp);
355 err |= __put_user(rwp, &sf->rwin_save); 356 err |= __put_user(rwp, &sf->rwin_save);
@@ -517,9 +518,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
517 } 518 }
518} 519}
519 520
520asmlinkage int 521asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
521do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr, 522 struct sigstack __user *ossptr,
522 unsigned long sp) 523 unsigned long sp)
523{ 524{
524 int ret = -EFAULT; 525 int ret = -EFAULT;
525 526
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index cd91d010e6d3..1a6999868031 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -35,9 +35,10 @@
35#include <asm/switch_to.h> 35#include <asm/switch_to.h>
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37 37
38#include "entry.h"
39#include "systbls.h"
40#include "sigutil.h" 38#include "sigutil.h"
39#include "systbls.h"
40#include "kernel.h"
41#include "entry.h"
41 42
42/* {set, get}context() needed for 64-bit SparcLinux userland. */ 43/* {set, get}context() needed for 64-bit SparcLinux userland. */
43asmlinkage void sparc64_set_context(struct pt_regs *regs) 44asmlinkage void sparc64_set_context(struct pt_regs *regs)
@@ -492,7 +493,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
492 493
493#ifdef CONFIG_COMPAT 494#ifdef CONFIG_COMPAT
494 if (test_thread_flag(TIF_32BIT)) { 495 if (test_thread_flag(TIF_32BIT)) {
495 extern void do_signal32(struct pt_regs *);
496 do_signal32(regs); 496 do_signal32(regs);
497 return; 497 return;
498 } 498 }
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index a102bfba6ea8..7958242d63c5 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -20,6 +20,7 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/cache.h> 21#include <linux/cache.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/profile.h>
23#include <linux/cpu.h> 24#include <linux/cpu.h>
24 25
25#include <asm/ptrace.h> 26#include <asm/ptrace.h>
@@ -75,8 +76,6 @@ void smp_store_cpu_info(int id)
75 76
76void __init smp_cpus_done(unsigned int max_cpus) 77void __init smp_cpus_done(unsigned int max_cpus)
77{ 78{
78 extern void smp4m_smp_done(void);
79 extern void smp4d_smp_done(void);
80 unsigned long bogosum = 0; 79 unsigned long bogosum = 0;
81 int cpu, num = 0; 80 int cpu, num = 0;
82 81
@@ -183,8 +182,6 @@ int setup_profiling_timer(unsigned int multiplier)
183 182
184void __init smp_prepare_cpus(unsigned int max_cpus) 183void __init smp_prepare_cpus(unsigned int max_cpus)
185{ 184{
186 extern void __init smp4m_boot_cpus(void);
187 extern void __init smp4d_boot_cpus(void);
188 int i, cpuid, extra; 185 int i, cpuid, extra;
189 186
190 printk("Entering SMP Mode...\n"); 187 printk("Entering SMP Mode...\n");
@@ -261,8 +258,6 @@ void __init smp_prepare_boot_cpu(void)
261 258
262int __cpu_up(unsigned int cpu, struct task_struct *tidle) 259int __cpu_up(unsigned int cpu, struct task_struct *tidle)
263{ 260{
264 extern int smp4m_boot_one_cpu(int, struct task_struct *);
265 extern int smp4d_boot_one_cpu(int, struct task_struct *);
266 int ret=0; 261 int ret=0;
267 262
268 switch(sparc_cpu_model) { 263 switch(sparc_cpu_model) {
@@ -297,7 +292,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
297 return ret; 292 return ret;
298} 293}
299 294
300void arch_cpu_pre_starting(void *arg) 295static void arch_cpu_pre_starting(void *arg)
301{ 296{
302 local_ops->cache_all(); 297 local_ops->cache_all();
303 local_ops->tlb_all(); 298 local_ops->tlb_all();
@@ -317,7 +312,7 @@ void arch_cpu_pre_starting(void *arg)
317 } 312 }
318} 313}
319 314
320void arch_cpu_pre_online(void *arg) 315static void arch_cpu_pre_online(void *arg)
321{ 316{
322 unsigned int cpuid = hard_smp_processor_id(); 317 unsigned int cpuid = hard_smp_processor_id();
323 318
@@ -344,7 +339,7 @@ void arch_cpu_pre_online(void *arg)
344 } 339 }
345} 340}
346 341
347void sparc_start_secondary(void *arg) 342static void sparc_start_secondary(void *arg)
348{ 343{
349 unsigned int cpu; 344 unsigned int cpu;
350 345
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 745a3633ce14..41aa2478f3ca 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -25,6 +25,7 @@
25#include <linux/ftrace.h> 25#include <linux/ftrace.h>
26#include <linux/cpu.h> 26#include <linux/cpu.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/kgdb.h>
28 29
29#include <asm/head.h> 30#include <asm/head.h>
30#include <asm/ptrace.h> 31#include <asm/ptrace.h>
@@ -35,6 +36,7 @@
35#include <asm/hvtramp.h> 36#include <asm/hvtramp.h>
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/timer.h> 38#include <asm/timer.h>
39#include <asm/setup.h>
38 40
39#include <asm/irq.h> 41#include <asm/irq.h>
40#include <asm/irq_regs.h> 42#include <asm/irq_regs.h>
@@ -52,6 +54,7 @@
52#include <asm/pcr.h> 54#include <asm/pcr.h>
53 55
54#include "cpumap.h" 56#include "cpumap.h"
57#include "kernel.h"
55 58
56DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 59DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
57cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 60cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
@@ -272,14 +275,6 @@ static void smp_synchronize_one_tick(int cpu)
272} 275}
273 276
274#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) 277#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
275/* XXX Put this in some common place. XXX */
276static unsigned long kimage_addr_to_ra(void *p)
277{
278 unsigned long val = (unsigned long) p;
279
280 return kern_base + (val - KERNBASE);
281}
282
283static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, 278static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
284 void **descrp) 279 void **descrp)
285{ 280{
@@ -867,11 +862,6 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
867#endif 862#endif
868extern unsigned long xcall_flush_dcache_page_spitfire; 863extern unsigned long xcall_flush_dcache_page_spitfire;
869 864
870#ifdef CONFIG_DEBUG_DCFLUSH
871extern atomic_t dcpage_flushes;
872extern atomic_t dcpage_flushes_xcall;
873#endif
874
875static inline void __local_flush_dcache_page(struct page *page) 865static inline void __local_flush_dcache_page(struct page *page)
876{ 866{
877#ifdef DCACHE_ALIASING_POSSIBLE 867#ifdef DCACHE_ALIASING_POSSIBLE
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index f8933be3ca8b..a1bb2675b280 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -143,7 +143,7 @@ static void sun4d_sbus_handler_irq(int sbusl)
143 } 143 }
144} 144}
145 145
146void sun4d_handler_irq(int pil, struct pt_regs *regs) 146void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
147{ 147{
148 struct pt_regs *old_regs; 148 struct pt_regs *old_regs;
149 /* SBUS IRQ level (1 - 7) */ 149 /* SBUS IRQ level (1 - 7) */
@@ -236,7 +236,7 @@ static void sun4d_shutdown_irq(struct irq_data *data)
236 irq_unlink(data->irq); 236 irq_unlink(data->irq);
237} 237}
238 238
239struct irq_chip sun4d_irq = { 239static struct irq_chip sun4d_irq = {
240 .name = "sun4d", 240 .name = "sun4d",
241 .irq_startup = sun4d_startup_irq, 241 .irq_startup = sun4d_startup_irq,
242 .irq_shutdown = sun4d_shutdown_irq, 242 .irq_shutdown = sun4d_shutdown_irq,
@@ -285,9 +285,9 @@ static void __init sun4d_load_profile_irqs(void)
285 } 285 }
286} 286}
287 287
288unsigned int _sun4d_build_device_irq(unsigned int real_irq, 288static unsigned int _sun4d_build_device_irq(unsigned int real_irq,
289 unsigned int pil, 289 unsigned int pil,
290 unsigned int board) 290 unsigned int board)
291{ 291{
292 struct sun4d_handler_data *handler_data; 292 struct sun4d_handler_data *handler_data;
293 unsigned int irq; 293 unsigned int irq;
@@ -320,8 +320,8 @@ err_out:
320 320
321 321
322 322
323unsigned int sun4d_build_device_irq(struct platform_device *op, 323static unsigned int sun4d_build_device_irq(struct platform_device *op,
324 unsigned int real_irq) 324 unsigned int real_irq)
325{ 325{
326 struct device_node *dp = op->dev.of_node; 326 struct device_node *dp = op->dev.of_node;
327 struct device_node *board_parent, *bus = dp->parent; 327 struct device_node *board_parent, *bus = dp->parent;
@@ -383,7 +383,8 @@ err_out:
383 return irq; 383 return irq;
384} 384}
385 385
386unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq) 386static unsigned int sun4d_build_timer_irq(unsigned int board,
387 unsigned int real_irq)
387{ 388{
388 return _sun4d_build_device_irq(real_irq, real_irq, board); 389 return _sun4d_build_device_irq(real_irq, real_irq, board);
389} 390}
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 71368850dfc0..022c30c72ebd 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -49,6 +49,8 @@
49#include <asm/mmu_context.h> 49#include <asm/mmu_context.h>
50#include <asm/compat_signal.h> 50#include <asm/compat_signal.h>
51 51
52#include "systbls.h"
53
52asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) 54asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
53{ 55{
54 if ((int)high < 0) 56 if ((int)high < 0)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 3a8d1844402e..646988d4c1a3 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -24,6 +24,8 @@
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include <asm/unistd.h> 25#include <asm/unistd.h>
26 26
27#include "systbls.h"
28
27/* #define DEBUG_UNIMP_SYSCALL */ 29/* #define DEBUG_UNIMP_SYSCALL */
28 30
29/* XXX Make this per-binary type, this way we can detect the type of 31/* XXX Make this per-binary type, this way we can detect the type of
@@ -68,7 +70,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
68 * sys_pipe() is the normal C calling standard for creating 70 * sys_pipe() is the normal C calling standard for creating
69 * a pipe. It's not the way unix traditionally does this, though. 71 * a pipe. It's not the way unix traditionally does this, though.
70 */ 72 */
71asmlinkage int sparc_pipe(struct pt_regs *regs) 73asmlinkage long sparc_pipe(struct pt_regs *regs)
72{ 74{
73 int fd[2]; 75 int fd[2];
74 int error; 76 int error;
@@ -93,7 +95,7 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
93 95
94/* Linux version of mmap */ 96/* Linux version of mmap */
95 97
96asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, 98asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
97 unsigned long prot, unsigned long flags, unsigned long fd, 99 unsigned long prot, unsigned long flags, unsigned long fd,
98 unsigned long pgoff) 100 unsigned long pgoff)
99{ 101{
@@ -103,7 +105,7 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
103 pgoff >> (PAGE_SHIFT - 12)); 105 pgoff >> (PAGE_SHIFT - 12));
104} 106}
105 107
106asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, 108asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
107 unsigned long prot, unsigned long flags, unsigned long fd, 109 unsigned long prot, unsigned long flags, unsigned long fd,
108 unsigned long off) 110 unsigned long off)
109{ 111{
@@ -197,7 +199,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
197 return ret; 199 return ret;
198} 200}
199 201
200asmlinkage int sys_getdomainname(char __user *name, int len) 202asmlinkage long sys_getdomainname(char __user *name, int len)
201{ 203{
202 int nlen, err; 204 int nlen, err;
203 205
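sys_sparc_32.c now includes "systbls.h" and its syscall definitions return long, so each definition is checked against the prototype collected in that shared header (shown further below in this diff); a mismatched return type would now be caught by the compiler instead of diverging silently. A minimal sketch of the pairing:

#include <linux/compiler.h>
#include <linux/linkage.h>

/* declaration as it would appear in the shared header */
asmlinkage long sys_getdomainname(char __user *name, int len);

/* the definition in the .c file must agree with it */
asmlinkage long sys_getdomainname(char __user *name, int len)
{
        /* real body elided in this sketch */
        return 0;
}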
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index beb0b5a5f21f..c85403d0496c 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -31,6 +31,7 @@
31#include <asm/unistd.h> 31#include <asm/unistd.h>
32 32
33#include "entry.h" 33#include "entry.h"
34#include "kernel.h"
34#include "systbls.h" 35#include "systbls.h"
35 36
36/* #define DEBUG_UNIMP_SYSCALL */ 37/* #define DEBUG_UNIMP_SYSCALL */
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index 26e6dd72e92a..2dab8236d490 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -1,41 +1,103 @@
1#ifndef _SYSTBLS_H 1#ifndef _SYSTBLS_H
2#define _SYSTBLS_H 2#define _SYSTBLS_H
3 3
4#include <linux/signal.h>
4#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/compat.h>
5#include <linux/types.h> 7#include <linux/types.h>
6#include <linux/signal.h> 8
7#include <asm/utrap.h> 9#include <asm/utrap.h>
8 10
9extern asmlinkage unsigned long sys_getpagesize(void); 11asmlinkage unsigned long sys_getpagesize(void);
10extern asmlinkage long sparc_pipe(struct pt_regs *regs); 12asmlinkage long sparc_pipe(struct pt_regs *regs);
11extern asmlinkage long sys_sparc_ipc(unsigned int call, int first, 13asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
12 unsigned long second, 14asmlinkage long sys_getdomainname(char __user *name, int len);
13 unsigned long third, 15void do_rt_sigreturn(struct pt_regs *regs);
14 void __user *ptr, long fifth); 16asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
15extern asmlinkage long sparc64_personality(unsigned long personality); 17 unsigned long prot, unsigned long flags,
16extern asmlinkage long sys64_munmap(unsigned long addr, size_t len); 18 unsigned long fd, unsigned long off);
17extern asmlinkage unsigned long sys64_mremap(unsigned long addr, 19asmlinkage void sparc_breakpoint(struct pt_regs *regs);
18 unsigned long old_len, 20
19 unsigned long new_len, 21#ifdef CONFIG_SPARC32
20 unsigned long flags, 22asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
21 unsigned long new_addr); 23 unsigned long prot, unsigned long flags,
22extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs); 24 unsigned long fd, unsigned long pgoff);
23extern asmlinkage long sys_getdomainname(char __user *name, int len); 25long sparc_remap_file_pages(unsigned long start, unsigned long size,
24extern asmlinkage long sys_utrap_install(utrap_entry_t type, 26 unsigned long prot, unsigned long pgoff,
25 utrap_handler_t new_p, 27 unsigned long flags);
26 utrap_handler_t new_d,
27 utrap_handler_t __user *old_p,
28 utrap_handler_t __user *old_d);
29extern asmlinkage long sparc_memory_ordering(unsigned long model,
30 struct pt_regs *regs);
31extern asmlinkage long sys_rt_sigaction(int sig,
32 const struct sigaction __user *act,
33 struct sigaction __user *oact,
34 void __user *restorer,
35 size_t sigsetsize);
36 28
37extern asmlinkage void sparc64_set_context(struct pt_regs *regs); 29#endif /* CONFIG_SPARC32 */
38extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
39extern void do_rt_sigreturn(struct pt_regs *regs);
40 30
31#ifdef CONFIG_SPARC64
32asmlinkage long sys_sparc_ipc(unsigned int call, int first,
33 unsigned long second,
34 unsigned long third,
35 void __user *ptr, long fifth);
36asmlinkage long sparc64_personality(unsigned long personality);
37asmlinkage long sys64_munmap(unsigned long addr, size_t len);
38asmlinkage unsigned long sys64_mremap(unsigned long addr,
39 unsigned long old_len,
40 unsigned long new_len,
41 unsigned long flags,
42 unsigned long new_addr);
43asmlinkage long sys_utrap_install(utrap_entry_t type,
44 utrap_handler_t new_p,
45 utrap_handler_t new_d,
46 utrap_handler_t __user *old_p,
47 utrap_handler_t __user *old_d);
48asmlinkage long sparc_memory_ordering(unsigned long model,
49 struct pt_regs *regs);
50asmlinkage void sparc64_set_context(struct pt_regs *regs);
51asmlinkage void sparc64_get_context(struct pt_regs *regs);
52asmlinkage long sys32_truncate64(const char __user * path,
53 unsigned long high,
54 unsigned long low);
55asmlinkage long sys32_ftruncate64(unsigned int fd,
56 unsigned long high,
57 unsigned long low);
58struct compat_stat64;
59asmlinkage long compat_sys_stat64(const char __user * filename,
60 struct compat_stat64 __user *statbuf);
61asmlinkage long compat_sys_lstat64(const char __user * filename,
62 struct compat_stat64 __user *statbuf);
63asmlinkage long compat_sys_fstat64(unsigned int fd,
64 struct compat_stat64 __user * statbuf);
65asmlinkage long compat_sys_fstatat64(unsigned int dfd,
66 const char __user *filename,
67 struct compat_stat64 __user * statbuf, int flag);
68asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
69 char __user *ubuf,
70 compat_size_t count,
71 unsigned long poshi,
72 unsigned long poslo);
73asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
74 char __user *ubuf,
75 compat_size_t count,
76 unsigned long poshi,
77 unsigned long poslo);
78asmlinkage long compat_sys_readahead(int fd,
79 unsigned long offhi,
80 unsigned long offlo,
81 compat_size_t count);
82long compat_sys_fadvise64(int fd,
83 unsigned long offhi,
84 unsigned long offlo,
85 compat_size_t len, int advice);
86long compat_sys_fadvise64_64(int fd,
87 unsigned long offhi, unsigned long offlo,
88 unsigned long lenhi, unsigned long lenlo,
89 int advice);
90long sys32_sync_file_range(unsigned int fd,
91 unsigned long off_high, unsigned long off_low,
92 unsigned long nb_high, unsigned long nb_low,
93 unsigned int flags);
94asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
95 u32 lenhi, u32 lenlo);
96asmlinkage long compat_sys_fstat64(unsigned int fd,
97 struct compat_stat64 __user * statbuf);
98asmlinkage long compat_sys_fstatat64(unsigned int dfd,
99 const char __user *filename,
100 struct compat_stat64 __user * statbuf,
101 int flag);
102#endif /* CONFIG_SPARC64 */
41#endif /* _SYSTBLS_H */ 103#endif /* _SYSTBLS_H */
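The systbls.h rewrite above drops the redundant "extern" from the function prototypes and groups the declarations under CONFIG_SPARC32 / CONFIG_SPARC64 so each build only declares the handlers it actually provides. A minimal sketch of the same header layout, using hypothetical demo_* names rather than anything from the patch:

#ifndef _DEMO_SYSTBLS_H
#define _DEMO_SYSTBLS_H

#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* asmlinkage prototypes shared by C and assembly callers; no "extern" needed. */
asmlinkage long demo_sys_getdomain(char __user *name, int len);

#ifdef CONFIG_SPARC32
asmlinkage long demo_sys_mmap2(unsigned long addr, unsigned long len,
                               unsigned long prot, unsigned long flags,
                               unsigned long fd, unsigned long pgoff);
#endif /* CONFIG_SPARC32 */

#ifdef CONFIG_SPARC64
asmlinkage long demo_sys_personality(unsigned long personality);
#endif /* CONFIG_SPARC64 */

#endif /* _DEMO_SYSTBLS_H */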
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
deleted file mode 100644
index 9aba8bd5a78b..000000000000
--- a/arch/sparc/kernel/tadpole.c
+++ /dev/null
@@ -1,126 +0,0 @@
1/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
2 *
3 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
4 */
5
6#include <linux/string.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10
11#include <asm/asi.h>
12#include <asm/oplib.h>
13#include <asm/io.h>
14
15#define MACIO_SCSI_CSR_ADDR 0x78400000
16#define MACIO_EN_DMA 0x00000200
17#define CLOCK_INIT_DONE 1
18
19static int clk_state;
20static volatile unsigned char *clk_ctrl;
21void (*cpu_pwr_save)(void);
22
23static inline unsigned int ldphys(unsigned int addr)
24{
25 unsigned long data;
26
27 __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
28 "=r" (data) :
29 "r" (addr), "i" (ASI_M_BYPASS));
30 return data;
31}
32
33static void clk_init(void)
34{
35 __asm__ __volatile__("mov 0x6c, %%g1\n\t"
36 "mov 0x4c, %%g2\n\t"
37 "mov 0xdf, %%g3\n\t"
38 "stb %%g1, [%0+3]\n\t"
39 "stb %%g2, [%0+3]\n\t"
40 "stb %%g3, [%0+3]\n\t" : :
41 "r" (clk_ctrl) :
42 "g1", "g2", "g3");
43}
44
45static void clk_slow(void)
46{
47 __asm__ __volatile__("mov 0xcc, %%g2\n\t"
48 "mov 0x4c, %%g3\n\t"
49 "mov 0xcf, %%g4\n\t"
50 "mov 0xdf, %%g5\n\t"
51 "stb %%g2, [%0+3]\n\t"
52 "stb %%g3, [%0+3]\n\t"
53 "stb %%g4, [%0+3]\n\t"
54 "stb %%g5, [%0+3]\n\t" : :
55 "r" (clk_ctrl) :
56 "g2", "g3", "g4", "g5");
57}
58
59/*
60 * Tadpole is guaranteed to be UP, using local_irq_save.
61 */
62static void tsu_clockstop(void)
63{
64 unsigned int mcsr;
65 unsigned long flags;
66
67 if (!clk_ctrl)
68 return;
69 if (!(clk_state & CLOCK_INIT_DONE)) {
70 local_irq_save(flags);
71 clk_init();
72 clk_state |= CLOCK_INIT_DONE; /* all done */
73 local_irq_restore(flags);
74 return;
75 }
76 if (!(clk_ctrl[2] & 1))
77 return; /* no speed up yet */
78
79 local_irq_save(flags);
80
81 /* if SCSI DMA in progress, don't slow clock */
82 mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
83 if ((mcsr&MACIO_EN_DMA) != 0) {
84 local_irq_restore(flags);
85 return;
86 }
87 /* TODO... the minimum clock setting ought to increase the
88 * memory refresh interval..
89 */
90 clk_slow();
91 local_irq_restore(flags);
92}
93
94static void swift_clockstop(void)
95{
96 if (!clk_ctrl)
97 return;
98 clk_ctrl[0] = 0;
99}
100
101void __init clock_stop_probe(void)
102{
103 phandle node, clk_nd;
104 char name[20];
105
106 prom_getstring(prom_root_node, "name", name, sizeof(name));
107 if (strncmp(name, "Tadpole", 7))
108 return;
109 node = prom_getchild(prom_root_node);
110 node = prom_searchsiblings(node, "obio");
111 node = prom_getchild(node);
112 clk_nd = prom_searchsiblings(node, "clk-ctrl");
113 if (!clk_nd)
114 return;
115 printk("Clock Stopping h/w detected... ");
116 clk_ctrl = (char *) prom_getint(clk_nd, "address");
117 clk_state = 0;
118 if (name[10] == '\0') {
119 cpu_pwr_save = tsu_clockstop;
120 printk("enabled (S3)\n");
121 } else if ((name[10] == 'X') || (name[10] == 'G')) {
122 cpu_pwr_save = swift_clockstop;
123 printk("enabled (%s)\n",name+7);
124 } else
125 printk("disabled %s\n",name+7);
126}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index c4c27b0f9063..5923d1e4e7c9 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -36,6 +36,7 @@
36#include <linux/of_device.h> 36#include <linux/of_device.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38 38
39#include <asm/mc146818rtc.h>
39#include <asm/oplib.h> 40#include <asm/oplib.h>
40#include <asm/timex.h> 41#include <asm/timex.h>
41#include <asm/timer.h> 42#include <asm/timer.h>
@@ -47,6 +48,7 @@
47#include <asm/irq_regs.h> 48#include <asm/irq_regs.h>
48#include <asm/setup.h> 49#include <asm/setup.h>
49 50
51#include "kernel.h"
50#include "irq.h" 52#include "irq.h"
51 53
52static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock); 54static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
@@ -83,7 +85,7 @@ unsigned long profile_pc(struct pt_regs *regs)
83 85
84EXPORT_SYMBOL(profile_pc); 86EXPORT_SYMBOL(profile_pc);
85 87
86__volatile__ unsigned int *master_l10_counter; 88volatile u32 __iomem *master_l10_counter;
87 89
88int update_persistent_clock(struct timespec now) 90int update_persistent_clock(struct timespec now)
89{ 91{
@@ -143,9 +145,9 @@ static __init void setup_timer_ce(void)
143 145
144static unsigned int sbus_cycles_offset(void) 146static unsigned int sbus_cycles_offset(void)
145{ 147{
146 unsigned int val, offset; 148 u32 val, offset;
147 149
148 val = *master_l10_counter; 150 val = sbus_readl(master_l10_counter);
149 offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK; 151 offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
150 152
151 /* Limit hit? */ 153 /* Limit hit? */
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 662982946a89..6fd386c5232a 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -44,7 +44,7 @@ static void instruction_dump(unsigned long *pc)
44#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") 44#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
45#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") 45#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
46 46
47void die_if_kernel(char *str, struct pt_regs *regs) 47void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
48{ 48{
49 static int die_counter; 49 static int die_counter;
50 int count = 0; 50 int count = 0;
@@ -219,8 +219,6 @@ static unsigned long fake_fsr;
219static unsigned long fake_queue[32] __attribute__ ((aligned (8))); 219static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
220static unsigned long fake_depth; 220static unsigned long fake_depth;
221 221
222extern int do_mathemu(struct pt_regs *, struct task_struct *);
223
224void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, 222void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
225 unsigned long psr) 223 unsigned long psr)
226{ 224{
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4ced92f05358..fb6640ec8557 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -43,8 +43,10 @@
43#include <asm/prom.h> 43#include <asm/prom.h>
44#include <asm/memctrl.h> 44#include <asm/memctrl.h>
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46#include <asm/setup.h>
46 47
47#include "entry.h" 48#include "entry.h"
49#include "kernel.h"
48#include "kstack.h" 50#include "kstack.h"
49 51
50/* When an irrecoverable trap occurs at tl > 0, the trap entry 52/* When an irrecoverable trap occurs at tl > 0, the trap entry
@@ -2209,8 +2211,6 @@ out:
2209 exception_exit(prev_state); 2211 exception_exit(prev_state);
2210} 2212}
2211 2213
2212extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
2213
2214void do_fpother(struct pt_regs *regs) 2214void do_fpother(struct pt_regs *regs)
2215{ 2215{
2216 enum ctx_state prev_state = exception_enter(); 2216 enum ctx_state prev_state = exception_enter();
@@ -2383,7 +2383,7 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2383 return (struct reg_window *) (fp + STACK_BIAS); 2383 return (struct reg_window *) (fp + STACK_BIAS);
2384} 2384}
2385 2385
2386void die_if_kernel(char *str, struct pt_regs *regs) 2386void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
2387{ 2387{
2388 static int die_counter; 2388 static int die_counter;
2389 int count = 0; 2389 int count = 0;
@@ -2433,9 +2433,6 @@ EXPORT_SYMBOL(die_if_kernel);
2433#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) 2433#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2434#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) 2434#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2435 2435
2436extern int handle_popc(u32 insn, struct pt_regs *regs);
2437extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2438
2439void do_illegal_instruction(struct pt_regs *regs) 2436void do_illegal_instruction(struct pt_regs *regs)
2440{ 2437{
2441 enum ctx_state prev_state = exception_enter(); 2438 enum ctx_state prev_state = exception_enter();
@@ -2486,8 +2483,6 @@ out:
2486 exception_exit(prev_state); 2483 exception_exit(prev_state);
2487} 2484}
2488 2485
2489extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2490
2491void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) 2486void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2492{ 2487{
2493 enum ctx_state prev_state = exception_enter(); 2488 enum ctx_state prev_state = exception_enter();
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index c0ec89786193..c5c61b3c6b56 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,6 +16,10 @@
16#include <linux/smp.h> 16#include <linux/smp.h>
17#include <linux/perf_event.h> 17#include <linux/perf_event.h>
18 18
19#include <asm/setup.h>
20
21#include "kernel.h"
22
19enum direction { 23enum direction {
20 load, /* ld, ldd, ldh, ldsh */ 24 load, /* ld, ldd, ldh, ldsh */
21 store, /* st, std, sth, stsh */ 25 store, /* st, std, sth, stsh */
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 35ab8b60d256..62098a89bbbf 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -24,8 +24,10 @@
24#include <linux/context_tracking.h> 24#include <linux/context_tracking.h>
25#include <asm/fpumacro.h> 25#include <asm/fpumacro.h>
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/setup.h>
27 28
28#include "entry.h" 29#include "entry.h"
30#include "kernel.h"
29 31
30enum direction { 32enum direction {
31 load, /* ld, ldd, ldh, ldsh */ 33 load, /* ld, ldd, ldh, ldsh */
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index 3107381e576d..87bab0a3857a 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -10,8 +10,11 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12 12
13#include <asm/cacheflush.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14 15
16#include "kernel.h"
17
15/* Do save's until all user register windows are out of the cpu. */ 18/* Do save's until all user register windows are out of the cpu. */
16void flush_user_windows(void) 19void flush_user_windows(void)
17{ 20{
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index dbe119b63b48..3269b0234093 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -41,7 +41,7 @@ lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
41lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o 41lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
42lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o 42lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
43 43
44obj-y += iomap.o 44obj-$(CONFIG_SPARC64) += iomap.o
45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o 45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
46obj-y += ksyms.o 46obj-y += ksyms.o
47obj-$(CONFIG_SPARC64) += PeeCeeI.o 47obj-$(CONFIG_SPARC64) += PeeCeeI.o
diff --git a/arch/sparc/math-emu/sfp-util_32.h b/arch/sparc/math-emu/sfp-util_32.h
index d1b2aff3c259..bb587d5f3d9d 100644
--- a/arch/sparc/math-emu/sfp-util_32.h
+++ b/arch/sparc/math-emu/sfp-util_32.h
@@ -4,20 +4,20 @@
4#include <asm/byteorder.h> 4#include <asm/byteorder.h>
5 5
6#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 6#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
7 __asm__ ("addcc %r4,%5,%1\n\t" \ 7 __asm__ ("addcc %r4,%5,%1\n\t" \
8 "addx %r2,%3,%0\n" \ 8 "addx %r2,%3,%0\n" \
9 : "=r" ((USItype)(sh)), \ 9 : "=r" (sh), \
10 "=&r" ((USItype)(sl)) \ 10 "=&r" (sl) \
11 : "%rJ" ((USItype)(ah)), \ 11 : "%rJ" ((USItype)(ah)), \
12 "rI" ((USItype)(bh)), \ 12 "rI" ((USItype)(bh)), \
13 "%rJ" ((USItype)(al)), \ 13 "%rJ" ((USItype)(al)), \
14 "rI" ((USItype)(bl)) \ 14 "rI" ((USItype)(bl)) \
15 : "cc") 15 : "cc")
16#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 16#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
17 __asm__ ("subcc %r4,%5,%1\n\t" \ 17 __asm__ ("subcc %r4,%5,%1\n\t" \
18 "subx %r2,%3,%0\n" \ 18 "subx %r2,%3,%0\n" \
19 : "=r" ((USItype)(sh)), \ 19 : "=r" (sh), \
20 "=&r" ((USItype)(sl)) \ 20 "=&r" (sl) \
21 : "rJ" ((USItype)(ah)), \ 21 : "rJ" ((USItype)(ah)), \
22 "rI" ((USItype)(bh)), \ 22 "rI" ((USItype)(bh)), \
23 "rJ" ((USItype)(al)), \ 23 "rJ" ((USItype)(al)), \
@@ -65,8 +65,8 @@
65 "mulscc %%g1,0,%%g1\n\t" \ 65 "mulscc %%g1,0,%%g1\n\t" \
66 "add %%g1,%%g2,%0\n\t" \ 66 "add %%g1,%%g2,%0\n\t" \
67 "rd %%y,%1\n" \ 67 "rd %%y,%1\n" \
68 : "=r" ((USItype)(w1)), \ 68 : "=r" (w1), \
69 "=r" ((USItype)(w0)) \ 69 "=r" (w0) \
70 : "%rI" ((USItype)(u)), \ 70 : "%rI" ((USItype)(u)), \
71 "r" ((USItype)(v)) \ 71 "r" ((USItype)(v)) \
72 : "%g1", "%g2", "cc") 72 : "%g1", "%g2", "cc")
@@ -98,8 +98,8 @@
98 "sub %1,%2,%1\n\t" \ 98 "sub %1,%2,%1\n\t" \
99 "3: xnor %0,0,%0\n\t" \ 99 "3: xnor %0,0,%0\n\t" \
100 "! End of inline udiv_qrnnd\n" \ 100 "! End of inline udiv_qrnnd\n" \
101 : "=&r" ((USItype)(q)), \ 101 : "=&r" (q), \
102 "=&r" ((USItype)(r)) \ 102 "=&r" (r) \
103 : "r" ((USItype)(d)), \ 103 : "r" ((USItype)(d)), \
104 "1" ((USItype)(n1)), \ 104 "1" ((USItype)(n1)), \
105 "0" ((USItype)(n0)) : "%g1", "cc") 105 "0" ((USItype)(n0)) : "%g1", "cc")
diff --git a/arch/sparc/math-emu/sfp-util_64.h b/arch/sparc/math-emu/sfp-util_64.h
index 425d3cf01af4..51320a861cc2 100644
--- a/arch/sparc/math-emu/sfp-util_64.h
+++ b/arch/sparc/math-emu/sfp-util_64.h
@@ -17,8 +17,8 @@
17 "bcs,a,pn %%xcc, 1f\n\t" \ 17 "bcs,a,pn %%xcc, 1f\n\t" \
18 "add %0, 1, %0\n" \ 18 "add %0, 1, %0\n" \
19 "1:" \ 19 "1:" \
20 : "=r" ((UDItype)(sh)), \ 20 : "=r" (sh), \
21 "=&r" ((UDItype)(sl)) \ 21 "=&r" (sl) \
22 : "r" ((UDItype)(ah)), \ 22 : "r" ((UDItype)(ah)), \
23 "r" ((UDItype)(bh)), \ 23 "r" ((UDItype)(bh)), \
24 "r" ((UDItype)(al)), \ 24 "r" ((UDItype)(al)), \
@@ -31,8 +31,8 @@
31 "bcs,a,pn %%xcc, 1f\n\t" \ 31 "bcs,a,pn %%xcc, 1f\n\t" \
32 "sub %0, 1, %0\n" \ 32 "sub %0, 1, %0\n" \
33 "1:" \ 33 "1:" \
34 : "=r" ((UDItype)(sh)), \ 34 : "=r" (sh), \
35 "=&r" ((UDItype)(sl)) \ 35 "=&r" (sl) \
36 : "r" ((UDItype)(ah)), \ 36 : "r" ((UDItype)(ah)), \
37 "r" ((UDItype)(bh)), \ 37 "r" ((UDItype)(bh)), \
38 "r" ((UDItype)(al)), \ 38 "r" ((UDItype)(al)), \
@@ -64,8 +64,8 @@
64 "sllx %3,32,%3\n\t" \ 64 "sllx %3,32,%3\n\t" \
65 "add %1,%3,%1\n\t" \ 65 "add %1,%3,%1\n\t" \
66 "add %5,%2,%0" \ 66 "add %5,%2,%0" \
67 : "=r" ((UDItype)(wh)), \ 67 : "=r" (wh), \
68 "=&r" ((UDItype)(wl)), \ 68 "=&r" (wl), \
69 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \ 69 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
70 : "r" ((UDItype)(u)), \ 70 : "r" ((UDItype)(u)), \
71 "r" ((UDItype)(v)) \ 71 "r" ((UDItype)(v)) \
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 59dbd4645725..908e8c17c902 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -26,14 +26,14 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/openprom.h> 27#include <asm/openprom.h>
28#include <asm/oplib.h> 28#include <asm/oplib.h>
29#include <asm/setup.h>
29#include <asm/smp.h> 30#include <asm/smp.h>
30#include <asm/traps.h> 31#include <asm/traps.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32 33
33int show_unhandled_signals = 1; 34#include "mm_32.h"
34 35
35static void unhandled_fault(unsigned long, struct task_struct *, 36int show_unhandled_signals = 1;
36 struct pt_regs *) __attribute__ ((noreturn));
37 37
38static void __noreturn unhandled_fault(unsigned long address, 38static void __noreturn unhandled_fault(unsigned long address,
39 struct task_struct *tsk, 39 struct task_struct *tsk,
@@ -141,9 +141,6 @@ static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
141 force_sig_info (sig, &info, current); 141 force_sig_info (sig, &info, current);
142} 142}
143 143
144extern unsigned long safe_compute_effective_address(struct pt_regs *,
145 unsigned int);
146
147static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) 144static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
148{ 145{
149 unsigned int insn; 146 unsigned int insn;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ced3fc66130..587cd0565128 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -32,6 +32,7 @@
32#include <asm/lsu.h> 32#include <asm/lsu.h>
33#include <asm/sections.h> 33#include <asm/sections.h>
34#include <asm/mmu_context.h> 34#include <asm/mmu_context.h>
35#include <asm/setup.h>
35 36
36int show_unhandled_signals = 1; 37int show_unhandled_signals = 1;
37 38
@@ -196,9 +197,6 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
196 force_sig_info(sig, &info, current); 197 force_sig_info(sig, &info, current);
197} 198}
198 199
199extern int handle_ldf_stq(u32, struct pt_regs *);
200extern int handle_ld_nf(u32, struct pt_regs *);
201
202static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) 200static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
203{ 201{
204 if (!insn) { 202 if (!insn) {
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index db6987082805..eb8287155279 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -31,10 +31,13 @@
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32#include <asm/vaddrs.h> 32#include <asm/vaddrs.h>
33#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ 33#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
34#include <asm/setup.h>
34#include <asm/tlb.h> 35#include <asm/tlb.h>
35#include <asm/prom.h> 36#include <asm/prom.h>
36#include <asm/leon.h> 37#include <asm/leon.h>
37 38
39#include "mm_32.h"
40
38unsigned long *sparc_valid_addr_bitmap; 41unsigned long *sparc_valid_addr_bitmap;
39EXPORT_SYMBOL(sparc_valid_addr_bitmap); 42EXPORT_SYMBOL(sparc_valid_addr_bitmap);
40 43
@@ -63,7 +66,6 @@ void show_mem(unsigned int filter)
63} 66}
64 67
65 68
66extern unsigned long cmdline_memory_size;
67unsigned long last_valid_pfn; 69unsigned long last_valid_pfn;
68 70
69unsigned long calc_highpages(void) 71unsigned long calc_highpages(void)
@@ -246,9 +248,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
246 * init routine based upon the Sun model type on the Sparc. 248 * init routine based upon the Sun model type on the Sparc.
247 * 249 *
248 */ 250 */
249extern void srmmu_paging_init(void);
250extern void device_scan(void);
251
252void __init paging_init(void) 251void __init paging_init(void)
253{ 252{
254 srmmu_paging_init(); 253 srmmu_paging_init();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed3c969a5f4c..16b58ff11e65 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -47,6 +47,7 @@
47#include <asm/prom.h> 47#include <asm/prom.h>
48#include <asm/mdesc.h> 48#include <asm/mdesc.h>
49#include <asm/cpudata.h> 49#include <asm/cpudata.h>
50#include <asm/setup.h>
50#include <asm/irq.h> 51#include <asm/irq.h>
51 52
52#include "init_64.h" 53#include "init_64.h"
@@ -794,11 +795,11 @@ struct node_mem_mask {
794static struct node_mem_mask node_masks[MAX_NUMNODES]; 795static struct node_mem_mask node_masks[MAX_NUMNODES];
795static int num_node_masks; 796static int num_node_masks;
796 797
798#ifdef CONFIG_NEED_MULTIPLE_NODES
799
797int numa_cpu_lookup_table[NR_CPUS]; 800int numa_cpu_lookup_table[NR_CPUS];
798cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 801cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
799 802
800#ifdef CONFIG_NEED_MULTIPLE_NODES
801
802struct mdesc_mblock { 803struct mdesc_mblock {
803 u64 base; 804 u64 base;
804 u64 size; 805 u64 size;
@@ -887,17 +888,21 @@ static void __init allocate_node_data(int nid)
887 888
888static void init_node_masks_nonnuma(void) 889static void init_node_masks_nonnuma(void)
889{ 890{
891#ifdef CONFIG_NEED_MULTIPLE_NODES
890 int i; 892 int i;
893#endif
891 894
892 numadbg("Initializing tables for non-numa.\n"); 895 numadbg("Initializing tables for non-numa.\n");
893 896
894 node_masks[0].mask = node_masks[0].val = 0; 897 node_masks[0].mask = node_masks[0].val = 0;
895 num_node_masks = 1; 898 num_node_masks = 1;
896 899
900#ifdef CONFIG_NEED_MULTIPLE_NODES
897 for (i = 0; i < NR_CPUS; i++) 901 for (i = 0; i < NR_CPUS; i++)
898 numa_cpu_lookup_table[i] = 0; 902 numa_cpu_lookup_table[i] = 0;
899 903
900 cpumask_setall(&numa_cpumask_lookup_table[0]); 904 cpumask_setall(&numa_cpumask_lookup_table[0]);
905#endif
901} 906}
902 907
903#ifdef CONFIG_NEED_MULTIPLE_NODES 908#ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 5d3782deb403..0668b364f44d 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -21,7 +21,7 @@ extern unsigned int sparc64_highest_unlocked_tlb_ent;
21extern unsigned long sparc64_kern_pri_context; 21extern unsigned long sparc64_kern_pri_context;
22extern unsigned long sparc64_kern_pri_nuc_bits; 22extern unsigned long sparc64_kern_pri_nuc_bits;
23extern unsigned long sparc64_kern_sec_context; 23extern unsigned long sparc64_kern_sec_context;
24extern void mmu_info(struct seq_file *m); 24void mmu_info(struct seq_file *m);
25 25
26struct linux_prom_translation { 26struct linux_prom_translation {
27 unsigned long virt; 27 unsigned long virt;
@@ -36,7 +36,7 @@ extern unsigned int prom_trans_ents;
36/* Exported for SMP bootup purposes. */ 36/* Exported for SMP bootup purposes. */
37extern unsigned long kern_locked_tte_data; 37extern unsigned long kern_locked_tte_data;
38 38
39extern void prom_world(int enter); 39void prom_world(int enter);
40 40
41#ifdef CONFIG_SPARSEMEM_VMEMMAP 41#ifdef CONFIG_SPARSEMEM_VMEMMAP
42#define VMEMMAP_CHUNK_SHIFT 22 42#define VMEMMAP_CHUNK_SHIFT 22
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index eb99862e9654..f311bf219016 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -25,6 +25,8 @@
25#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/oplib.h> 26#include <asm/oplib.h>
27 27
28#include "mm_32.h"
29
28/* #define IOUNIT_DEBUG */ 30/* #define IOUNIT_DEBUG */
29#ifdef IOUNIT_DEBUG 31#ifdef IOUNIT_DEBUG
30#define IOD(x) printk(x) 32#define IOD(x) printk(x)
@@ -38,7 +40,8 @@
38static void __init iounit_iommu_init(struct platform_device *op) 40static void __init iounit_iommu_init(struct platform_device *op)
39{ 41{
40 struct iounit_struct *iounit; 42 struct iounit_struct *iounit;
41 iopte_t *xpt, *xptend; 43 iopte_t __iomem *xpt;
44 iopte_t __iomem *xptend;
42 45
43 iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC); 46 iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
44 if (!iounit) { 47 if (!iounit) {
@@ -62,10 +65,10 @@ static void __init iounit_iommu_init(struct platform_device *op)
62 op->dev.archdata.iommu = iounit; 65 op->dev.archdata.iommu = iounit;
63 iounit->page_table = xpt; 66 iounit->page_table = xpt;
64 spin_lock_init(&iounit->lock); 67 spin_lock_init(&iounit->lock);
65 68
66 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); 69 xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
67 xpt < xptend;) 70 for (; xpt < xptend; xpt++)
68 iopte_val(*xpt++) = 0; 71 sbus_writel(0, xpt);
69} 72}
70 73
71static int __init iounit_init(void) 74static int __init iounit_init(void)
@@ -130,7 +133,7 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
130 vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); 133 vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
131 for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { 134 for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
132 set_bit(scan, iounit->bmap); 135 set_bit(scan, iounit->bmap);
133 iounit->page_table[scan] = iopte; 136 sbus_writel(iopte, &iounit->page_table[scan]);
134 } 137 }
135 IOD(("%08lx\n", vaddr)); 138 IOD(("%08lx\n", vaddr));
136 return vaddr; 139 return vaddr;
@@ -202,7 +205,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
202 struct iounit_struct *iounit = dev->archdata.iommu; 205 struct iounit_struct *iounit = dev->archdata.iommu;
203 unsigned long page, end; 206 unsigned long page, end;
204 pgprot_t dvma_prot; 207 pgprot_t dvma_prot;
205 iopte_t *iopte; 208 iopte_t __iomem *iopte;
206 209
207 *pba = addr; 210 *pba = addr;
208 211
@@ -224,8 +227,8 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
224 227
225 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); 228 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
226 229
227 iopte = (iopte_t *)(iounit->page_table + i); 230 iopte = iounit->page_table + i;
228 *iopte = MKIOPTE(__pa(page)); 231 sbus_writel(MKIOPTE(__pa(page)), iopte);
229 } 232 }
230 addr += PAGE_SIZE; 233 addr += PAGE_SIZE;
231 va += PAGE_SIZE; 234 va += PAGE_SIZE;
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 28f96f27c768..491511d37e37 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -27,6 +27,8 @@
27#include <asm/iommu.h> 27#include <asm/iommu.h>
28#include <asm/dma.h> 28#include <asm/dma.h>
29 29
30#include "mm_32.h"
31
30/* 32/*
31 * This can be sized dynamically, but we will do this 33 * This can be sized dynamically, but we will do this
32 * only when we have a guidance about actual I/O pressures. 34 * only when we have a guidance about actual I/O pressures.
@@ -37,9 +39,6 @@
37#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ 39#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
38#define IOMMU_ORDER 6 /* 4096 * (1<<6) */ 40#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
39 41
40/* srmmu.c */
41extern int viking_mxcc_present;
42extern int flush_page_for_dma_global;
43static int viking_flush; 42static int viking_flush;
44/* viking.S */ 43/* viking.S */
45extern void viking_flush_page(unsigned long page); 44extern void viking_flush_page(unsigned long page);
@@ -59,6 +58,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
59 struct iommu_struct *iommu; 58 struct iommu_struct *iommu;
60 unsigned int impl, vers; 59 unsigned int impl, vers;
61 unsigned long *bitmap; 60 unsigned long *bitmap;
61 unsigned long control;
62 unsigned long base;
62 unsigned long tmp; 63 unsigned long tmp;
63 64
64 iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL); 65 iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
@@ -73,12 +74,14 @@ static void __init sbus_iommu_init(struct platform_device *op)
73 prom_printf("Cannot map IOMMU registers\n"); 74 prom_printf("Cannot map IOMMU registers\n");
74 prom_halt(); 75 prom_halt();
75 } 76 }
76 impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28; 77
77 vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24; 78 control = sbus_readl(&iommu->regs->control);
78 tmp = iommu->regs->control; 79 impl = (control & IOMMU_CTRL_IMPL) >> 28;
79 tmp &= ~(IOMMU_CTRL_RNGE); 80 vers = (control & IOMMU_CTRL_VERS) >> 24;
80 tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB); 81 control &= ~(IOMMU_CTRL_RNGE);
81 iommu->regs->control = tmp; 82 control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
83 sbus_writel(control, &iommu->regs->control);
84
82 iommu_invalidate(iommu->regs); 85 iommu_invalidate(iommu->regs);
83 iommu->start = IOMMU_START; 86 iommu->start = IOMMU_START;
84 iommu->end = 0xffffffff; 87 iommu->end = 0xffffffff;
@@ -100,7 +103,9 @@ static void __init sbus_iommu_init(struct platform_device *op)
100 memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t)); 103 memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
101 flush_cache_all(); 104 flush_cache_all();
102 flush_tlb_all(); 105 flush_tlb_all();
103 iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4; 106
107 base = __pa((unsigned long)iommu->page_table) >> 4;
108 sbus_writel(base, &iommu->regs->base);
104 iommu_invalidate(iommu->regs); 109 iommu_invalidate(iommu->regs);
105 110
106 bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL); 111 bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
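The iommu.c hunk above (like the io-unit.c one before it) stops dereferencing device registers through plain pointers and goes through sbus_readl()/sbus_writel() instead, so the pointers can carry the __iomem annotation that sparse checks. A minimal sketch of the same read-modify-write pattern using the generic MMIO accessors; the register layout and bit names below are illustrative, not taken from the patch:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative register block, not the real SPARC IOMMU layout. */
struct demo_regs {
	u32 control;
	u32 base;
};

#define DEMO_CTRL_RNGE	0x0000001c
#define DEMO_CTRL_ENAB	0x00000001

static void demo_iommu_enable(struct demo_regs __iomem *regs,
			      unsigned long pgtable_pa)
{
	u32 control;

	/* Read-modify-write through the accessors, never via *regs directly. */
	control = readl(&regs->control);
	control &= ~DEMO_CTRL_RNGE;
	control |= DEMO_CTRL_ENAB;
	writel(control, &regs->control);

	writel(pgtable_pa >> 4, &regs->base);
}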
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 5bed085a2c17..3b17b6f7895a 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -15,10 +15,10 @@
15#include <asm/leon.h> 15#include <asm/leon.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18#include "srmmu.h" 18#include "mm_32.h"
19 19
20int leon_flush_during_switch = 1; 20int leon_flush_during_switch = 1;
21int srmmu_swprobe_trace; 21static int srmmu_swprobe_trace;
22 22
23static inline unsigned long leon_get_ctable_ptr(void) 23static inline unsigned long leon_get_ctable_ptr(void)
24{ 24{
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
new file mode 100644
index 000000000000..a6c27ca9a721
--- /dev/null
+++ b/arch/sparc/mm/mm_32.h
@@ -0,0 +1,24 @@
1/* fault_32.c - visible as they are called from assembler */
2asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
3 unsigned long address);
4asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5 unsigned long address);
6
7void window_overflow_fault(void);
8void window_underflow_fault(unsigned long sp);
9void window_ret_fault(struct pt_regs *regs);
10
11/* srmmu.c */
12extern char *srmmu_name;
13extern int viking_mxcc_present;
14extern int flush_page_for_dma_global;
15
16extern void (*poke_srmmu)(void);
17
18void __init srmmu_paging_init(void);
19
20/* iommu.c */
21void ld_mmu_iommu(void);
22
23/* io-unit.c */
24void ld_mmu_iounit(void);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index cfbe53c17b0d..be65f035d18a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -49,7 +49,7 @@
49#include <asm/mxcc.h> 49#include <asm/mxcc.h>
50#include <asm/ross.h> 50#include <asm/ross.h>
51 51
52#include "srmmu.h" 52#include "mm_32.h"
53 53
54enum mbus_module srmmu_modtype; 54enum mbus_module srmmu_modtype;
55static unsigned int hwbug_bitmask; 55static unsigned int hwbug_bitmask;
@@ -100,7 +100,6 @@ static unsigned long srmmu_nocache_end;
100#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) 100#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
101 101
102void *srmmu_nocache_pool; 102void *srmmu_nocache_pool;
103void *srmmu_nocache_bitmap;
104static struct bit_map srmmu_nocache_map; 103static struct bit_map srmmu_nocache_map;
105 104
106static inline int srmmu_pmd_none(pmd_t pmd) 105static inline int srmmu_pmd_none(pmd_t pmd)
@@ -173,7 +172,7 @@ static void *__srmmu_get_nocache(int size, int align)
173 printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n", 172 printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
174 size, (int) srmmu_nocache_size, 173 size, (int) srmmu_nocache_size,
175 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); 174 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
176 return 0; 175 return NULL;
177 } 176 }
178 177
179 addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT); 178 addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
@@ -269,6 +268,7 @@ static void __init srmmu_nocache_calcsize(void)
269 268
270static void __init srmmu_nocache_init(void) 269static void __init srmmu_nocache_init(void)
271{ 270{
271 void *srmmu_nocache_bitmap;
272 unsigned int bitmap_bits; 272 unsigned int bitmap_bits;
273 pgd_t *pgd; 273 pgd_t *pgd;
274 pmd_t *pmd; 274 pmd_t *pmd;
@@ -728,7 +728,7 @@ static inline unsigned long srmmu_probe(unsigned long vaddr)
728 "=r" (retval) : 728 "=r" (retval) :
729 "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE)); 729 "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
730 } else { 730 } else {
731 retval = leon_swprobe(vaddr, 0); 731 retval = leon_swprobe(vaddr, NULL);
732 } 732 }
733 return retval; 733 return retval;
734} 734}
@@ -865,8 +865,6 @@ static void __init map_kernel(void)
865 865
866void (*poke_srmmu)(void) = NULL; 866void (*poke_srmmu)(void) = NULL;
867 867
868extern unsigned long bootmem_init(unsigned long *pages_avail);
869
870void __init srmmu_paging_init(void) 868void __init srmmu_paging_init(void)
871{ 869{
872 int i; 870 int i;
@@ -1771,9 +1769,6 @@ static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1771/* Load up routines and constants for sun4m and sun4d mmu */ 1769/* Load up routines and constants for sun4m and sun4d mmu */
1772void __init load_mmu(void) 1770void __init load_mmu(void)
1773{ 1771{
1774 extern void ld_mmu_iommu(void);
1775 extern void ld_mmu_iounit(void);
1776
1777 /* Functions */ 1772 /* Functions */
1778 get_srmmu_type(); 1773 get_srmmu_type();
1779 1774
diff --git a/arch/sparc/mm/srmmu.h b/arch/sparc/mm/srmmu.h
deleted file mode 100644
index 5703274ccf89..000000000000
--- a/arch/sparc/mm/srmmu.h
+++ /dev/null
@@ -1,4 +0,0 @@
1/* srmmu.c */
2extern char *srmmu_name;
3
4extern void (*poke_srmmu)(void);
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index fe19b81acc09..a06576683c38 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -9,6 +9,7 @@
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/mmu_context.h> 11#include <asm/mmu_context.h>
12#include <asm/setup.h>
12#include <asm/tsb.h> 13#include <asm/tsb.h>
13#include <asm/tlb.h> 14#include <asm/tlb.h>
14#include <asm/oplib.h> 15#include <asm/oplib.h>
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index f178b9dcc7b7..53a696d3eb3b 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -81,11 +81,6 @@ void prom_feval(const char *fstring)
81} 81}
82EXPORT_SYMBOL(prom_feval); 82EXPORT_SYMBOL(prom_feval);
83 83
84#ifdef CONFIG_SMP
85extern void smp_capture(void);
86extern void smp_release(void);
87#endif
88
89/* Drop into the prom, with the chance to continue with the 'go' 84/* Drop into the prom, with the chance to continue with the 'go'
90 * prom command. 85 * prom command.
91 */ 86 */
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index aafad6fa1667..928237a7b9ca 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -51,9 +51,6 @@ config ARCH_HAS_ILOG2_U32
51config ARCH_HAS_ILOG2_U64 51config ARCH_HAS_ILOG2_U64
52 bool 52 bool
53 53
54config ARCH_HAS_CPUFREQ
55 bool
56
57config GENERIC_HWEIGHT 54config GENERIC_HWEIGHT
58 def_bool y 55 def_bool y
59 56
@@ -87,7 +84,6 @@ config ARCH_PUV3
87 select GENERIC_CLOCKEVENTS 84 select GENERIC_CLOCKEVENTS
88 select HAVE_CLK 85 select HAVE_CLK
89 select ARCH_REQUIRE_GPIOLIB 86 select ARCH_REQUIRE_GPIOLIB
90 select ARCH_HAS_CPUFREQ
91 87
92# CONFIGs for ARCH_PUV3 88# CONFIGs for ARCH_PUV3
93 89
@@ -198,9 +194,7 @@ menu "Power management options"
198 194
199source "kernel/power/Kconfig" 195source "kernel/power/Kconfig"
200 196
201if ARCH_HAS_CPUFREQ
202source "drivers/cpufreq/Kconfig" 197source "drivers/cpufreq/Kconfig"
203endif
204 198
205config ARCH_SUSPEND_POSSIBLE 199config ARCH_SUSPEND_POSSIBLE
206 def_bool y if !ARCH_FPGA 200 def_bool y if !ARCH_FPGA
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
index 39decb6e6f57..cb1d8fd2b16b 100644
--- a/arch/unicore32/include/asm/io.h
+++ b/arch/unicore32/include/asm/io.h
@@ -39,10 +39,37 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
39#define ioremap_nocache(cookie, size) __uc32_ioremap(cookie, size) 39#define ioremap_nocache(cookie, size) __uc32_ioremap(cookie, size)
40#define iounmap(cookie) __uc32_iounmap(cookie) 40#define iounmap(cookie) __uc32_iounmap(cookie)
41 41
42#define readb_relaxed readb
43#define readw_relaxed readw
44#define readl_relaxed readl
45
42#define HAVE_ARCH_PIO_SIZE 46#define HAVE_ARCH_PIO_SIZE
43#define PIO_OFFSET (unsigned int)(PCI_IOBASE) 47#define PIO_OFFSET (unsigned int)(PCI_IOBASE)
44#define PIO_MASK (unsigned int)(IO_SPACE_LIMIT) 48#define PIO_MASK (unsigned int)(IO_SPACE_LIMIT)
45#define PIO_RESERVED (PIO_OFFSET + PIO_MASK + 1) 49#define PIO_RESERVED (PIO_OFFSET + PIO_MASK + 1)
46 50
51#ifdef CONFIG_STRICT_DEVMEM
52
53#include <linux/ioport.h>
54#include <linux/mm.h>
55
56/*
57 * devmem_is_allowed() checks to see if /dev/mem access to a certain
58 * address is valid. The argument is a physical page number.
59 * We mimic x86 here by disallowing access to system RAM as well as
60 * device-exclusive MMIO regions. This effectively disables read()/write()
61 * on /dev/mem.
62 */
63static inline int devmem_is_allowed(unsigned long pfn)
64{
65 if (iomem_is_exclusive(pfn << PAGE_SHIFT))
66 return 0;
67 if (!page_is_ram(pfn))
68 return 1;
69 return 0;
70}
71
72#endif /* CONFIG_STRICT_DEVMEM */
73
47#endif /* __KERNEL__ */ 74#endif /* __KERNEL__ */
48#endif /* __UNICORE_IO_H__ */ 75#endif /* __UNICORE_IO_H__ */
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 233c25880df4..ed6f7d000fba 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -87,16 +87,16 @@ extern pgprot_t pgprot_kernel;
87 87
88#define PAGE_NONE pgprot_user 88#define PAGE_NONE pgprot_user
89#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user | PTE_READ \ 89#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user | PTE_READ \
90 | PTE_WRITE) 90 | PTE_WRITE))
91#define PAGE_SHARED_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \ 91#define PAGE_SHARED_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
92 | PTE_WRITE \ 92 | PTE_WRITE \
93 | PTE_EXEC) 93 | PTE_EXEC))
94#define PAGE_COPY __pgprot(pgprot_val(pgprot_user | PTE_READ) 94#define PAGE_COPY __pgprot(pgprot_val(pgprot_user | PTE_READ)
95#define PAGE_COPY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \ 95#define PAGE_COPY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
96 | PTE_EXEC) 96 | PTE_EXEC))
97#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user | PTE_READ) 97#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user | PTE_READ))
98#define PAGE_READONLY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \ 98#define PAGE_READONLY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
99 | PTE_EXEC) 99 | PTE_EXEC))
100#define PAGE_KERNEL pgprot_kernel 100#define PAGE_KERNEL pgprot_kernel
101#define PAGE_KERNEL_EXEC __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC)) 101#define PAGE_KERNEL_EXEC __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
102 102
diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h
index 9df53d991c78..02bf5a415bf5 100644
--- a/arch/unicore32/include/asm/ptrace.h
+++ b/arch/unicore32/include/asm/ptrace.h
@@ -55,6 +55,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
55 55
56#define instruction_pointer(regs) ((regs)->UCreg_pc) 56#define instruction_pointer(regs) ((regs)->UCreg_pc)
57#define user_stack_pointer(regs) ((regs)->UCreg_sp) 57#define user_stack_pointer(regs) ((regs)->UCreg_sp)
58#define profile_pc(regs) instruction_pointer(regs)
58 59
59#endif /* __ASSEMBLY__ */ 60#endif /* __ASSEMBLY__ */
60#endif 61#endif
diff --git a/arch/unicore32/kernel/clock.c b/arch/unicore32/kernel/clock.c
index 18d4563e6fa5..b1ca775f6f6e 100644
--- a/arch/unicore32/kernel/clock.c
+++ b/arch/unicore32/kernel/clock.c
@@ -179,7 +179,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
179 } 179 }
180#ifdef CONFIG_CPU_FREQ 180#ifdef CONFIG_CPU_FREQ
181 if (clk == &clk_mclk_clk) { 181 if (clk == &clk_mclk_clk) {
182 u32 pll_rate, divstatus = PM_DIVSTATUS; 182 u32 pll_rate, divstatus = readl(PM_DIVSTATUS);
183 int ret, i; 183 int ret, i;
184 184
185 /* lookup mclk_clk_table */ 185 /* lookup mclk_clk_table */
@@ -201,10 +201,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
201 / (((divstatus & 0x0000f000) >> 12) + 1); 201 / (((divstatus & 0x0000f000) >> 12) + 1);
202 202
203 /* set pll sys cfg reg. */ 203 /* set pll sys cfg reg. */
204 PM_PLLSYSCFG = pll_rate; 204 writel(pll_rate, PM_PLLSYSCFG);
205 205
206 PM_PMCR = PM_PMCR_CFBSYS; 206 writel(PM_PMCR_CFBSYS, PM_PMCR);
207 while ((PM_PLLDFCDONE & PM_PLLDFCDONE_SYSDFC) 207 while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_SYSDFC)
208 != PM_PLLDFCDONE_SYSDFC) 208 != PM_PLLDFCDONE_SYSDFC)
209 udelay(100); 209 udelay(100);
210 /* about 1ms */ 210 /* about 1ms */
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
index d285d71cbe35..0323528a80fd 100644
--- a/arch/unicore32/kernel/ksyms.c
+++ b/arch/unicore32/kernel/ksyms.c
@@ -23,41 +23,15 @@
23 23
24#include "ksyms.h" 24#include "ksyms.h"
25 25
26EXPORT_SYMBOL(find_first_bit);
27EXPORT_SYMBOL(find_first_zero_bit);
26EXPORT_SYMBOL(find_next_zero_bit); 28EXPORT_SYMBOL(find_next_zero_bit);
27EXPORT_SYMBOL(find_next_bit); 29EXPORT_SYMBOL(find_next_bit);
28 30
29EXPORT_SYMBOL(__backtrace);
30
31 /* platform dependent support */ 31 /* platform dependent support */
32EXPORT_SYMBOL(__udelay); 32EXPORT_SYMBOL(__udelay);
33EXPORT_SYMBOL(__const_udelay); 33EXPORT_SYMBOL(__const_udelay);
34 34
35 /* networking */
36EXPORT_SYMBOL(csum_partial);
37EXPORT_SYMBOL(csum_partial_copy_from_user);
38EXPORT_SYMBOL(csum_partial_copy_nocheck);
39EXPORT_SYMBOL(__csum_ipv6_magic);
40
41 /* io */
42#ifndef __raw_readsb
43EXPORT_SYMBOL(__raw_readsb);
44#endif
45#ifndef __raw_readsw
46EXPORT_SYMBOL(__raw_readsw);
47#endif
48#ifndef __raw_readsl
49EXPORT_SYMBOL(__raw_readsl);
50#endif
51#ifndef __raw_writesb
52EXPORT_SYMBOL(__raw_writesb);
53#endif
54#ifndef __raw_writesw
55EXPORT_SYMBOL(__raw_writesw);
56#endif
57#ifndef __raw_writesl
58EXPORT_SYMBOL(__raw_writesl);
59#endif
60
61 /* string / mem functions */ 35 /* string / mem functions */
62EXPORT_SYMBOL(strchr); 36EXPORT_SYMBOL(strchr);
63EXPORT_SYMBOL(strrchr); 37EXPORT_SYMBOL(strrchr);
@@ -76,23 +50,12 @@ EXPORT_SYMBOL(__copy_from_user);
76EXPORT_SYMBOL(__copy_to_user); 50EXPORT_SYMBOL(__copy_to_user);
77EXPORT_SYMBOL(__clear_user); 51EXPORT_SYMBOL(__clear_user);
78 52
79EXPORT_SYMBOL(__get_user_1);
80EXPORT_SYMBOL(__get_user_2);
81EXPORT_SYMBOL(__get_user_4);
82
83EXPORT_SYMBOL(__put_user_1);
84EXPORT_SYMBOL(__put_user_2);
85EXPORT_SYMBOL(__put_user_4);
86EXPORT_SYMBOL(__put_user_8);
87
88EXPORT_SYMBOL(__ashldi3); 53EXPORT_SYMBOL(__ashldi3);
89EXPORT_SYMBOL(__ashrdi3); 54EXPORT_SYMBOL(__ashrdi3);
90EXPORT_SYMBOL(__divsi3); 55EXPORT_SYMBOL(__divsi3);
91EXPORT_SYMBOL(__lshrdi3); 56EXPORT_SYMBOL(__lshrdi3);
92EXPORT_SYMBOL(__modsi3); 57EXPORT_SYMBOL(__modsi3);
93EXPORT_SYMBOL(__muldi3);
94EXPORT_SYMBOL(__ucmpdi2); 58EXPORT_SYMBOL(__ucmpdi2);
95EXPORT_SYMBOL(__udivsi3); 59EXPORT_SYMBOL(__udivsi3);
96EXPORT_SYMBOL(__umodsi3); 60EXPORT_SYMBOL(__umodsi3);
97EXPORT_SYMBOL(__bswapsi2);
98 61
diff --git a/arch/unicore32/kernel/ksyms.h b/arch/unicore32/kernel/ksyms.h
index 185cdc712d03..31472ad9467a 100644
--- a/arch/unicore32/kernel/ksyms.h
+++ b/arch/unicore32/kernel/ksyms.h
@@ -8,8 +8,6 @@ extern void __ashrdi3(void);
8extern void __divsi3(void); 8extern void __divsi3(void);
9extern void __lshrdi3(void); 9extern void __lshrdi3(void);
10extern void __modsi3(void); 10extern void __modsi3(void);
11extern void __muldi3(void);
12extern void __ucmpdi2(void); 11extern void __ucmpdi2(void);
13extern void __udivsi3(void); 12extern void __udivsi3(void);
14extern void __umodsi3(void); 13extern void __umodsi3(void);
15extern void __bswapsi2(void);
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 16bd1495b934..dc41f6dfedb6 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -24,14 +24,9 @@
24 24
25void *module_alloc(unsigned long size) 25void *module_alloc(unsigned long size)
26{ 26{
27 struct vm_struct *area; 27 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
28 28 GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
29 size = PAGE_ALIGN(size); 29 __builtin_return_address(0));
30 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
31 if (!area)
32 return NULL;
33
34 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
35} 30}
36 31
37int 32int
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 778ebba80827..b008e9961465 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -60,6 +60,7 @@ void machine_halt(void)
60 * Function pointers to optional machine specific functions 60 * Function pointers to optional machine specific functions
61 */ 61 */
62void (*pm_power_off)(void) = NULL; 62void (*pm_power_off)(void) = NULL;
63EXPORT_SYMBOL(pm_power_off);
63 64
64void machine_power_off(void) 65void machine_power_off(void)
65{ 66{
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 87adbf5ebfe0..3fa317f96122 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -53,6 +53,10 @@ struct stack {
53 53
54static struct stack stacks[NR_CPUS]; 54static struct stack stacks[NR_CPUS];
55 55
56#ifdef CONFIG_VGA_CONSOLE
57struct screen_info screen_info;
58#endif
59
56char elf_platform[ELF_PLATFORM_SIZE]; 60char elf_platform[ELF_PLATFORM_SIZE];
57EXPORT_SYMBOL(elf_platform); 61EXPORT_SYMBOL(elf_platform);
58 62
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
index de7dc5fdd58b..24e836023e6c 100644
--- a/arch/unicore32/mm/alignment.c
+++ b/arch/unicore32/mm/alignment.c
@@ -21,6 +21,7 @@
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23 23
24#include <asm/pgtable.h>
24#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
25#include <asm/unaligned.h> 26#include <asm/unaligned.h>
26 27
diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c
index f30071e3665d..21c00fc85c99 100644
--- a/arch/unicore32/mm/proc-syms.c
+++ b/arch/unicore32/mm/proc-syms.c
@@ -19,5 +19,7 @@
19EXPORT_SYMBOL(cpu_dcache_clean_area); 19EXPORT_SYMBOL(cpu_dcache_clean_area);
20EXPORT_SYMBOL(cpu_set_pte); 20EXPORT_SYMBOL(cpu_set_pte);
21 21
22EXPORT_SYMBOL(__cpuc_coherent_kern_range);
23
22EXPORT_SYMBOL(__cpuc_dma_flush_range); 24EXPORT_SYMBOL(__cpuc_dma_flush_range);
23EXPORT_SYMBOL(__cpuc_dma_clean_range); 25EXPORT_SYMBOL(__cpuc_dma_clean_range);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fcefdda5136d..a8f749ef0fdc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1672,7 +1672,6 @@ config RELOCATABLE
1672config RANDOMIZE_BASE 1672config RANDOMIZE_BASE
1673 bool "Randomize the address of the kernel image" 1673 bool "Randomize the address of the kernel image"
1674 depends on RELOCATABLE 1674 depends on RELOCATABLE
1675 depends on !HIBERNATION
1676 default n 1675 default n
1677 ---help--- 1676 ---help---
1678 Randomizes the physical and virtual address at which the 1677 Randomizes the physical and virtual address at which the
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 4dbf967da50d..fc6091abedb7 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -289,10 +289,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
289 unsigned long choice = (unsigned long)output; 289 unsigned long choice = (unsigned long)output;
290 unsigned long random; 290 unsigned long random;
291 291
292#ifdef CONFIG_HIBERNATION
293 if (!cmdline_find_option_bool("kaslr")) {
294 debug_putstr("KASLR disabled by default...\n");
295 goto out;
296 }
297#else
292 if (cmdline_find_option_bool("nokaslr")) { 298 if (cmdline_find_option_bool("nokaslr")) {
293 debug_putstr("KASLR disabled...\n"); 299 debug_putstr("KASLR disabled by cmdline...\n");
294 goto out; 300 goto out;
295 } 301 }
302#endif
296 303
297 /* Record the various known unsafe memory ranges. */ 304 /* Record the various known unsafe memory ranges. */
298 mem_avoid_init((unsigned long)input, input_size, 305 mem_avoid_init((unsigned long)input, input_size,
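The aslr.c change above pairs with the Kconfig hunk that drops "depends on !HIBERNATION": when hibernation is configured, KASLR becomes opt-in via the "kaslr" parameter instead of opt-out via "nokaslr", since the two features did not yet work together. A policy-only sketch, with booleans standing in for the boot code's cmdline_find_option_bool() lookups:

#include <stdbool.h>

/*
 * With hibernation built in, KASLR must be requested explicitly ("kaslr");
 * otherwise it stays enabled unless "nokaslr" is passed.
 */
static bool kaslr_enabled(bool hibernation_configured,
			  bool kaslr_on_cmdline, bool nokaslr_on_cmdline)
{
	if (hibernation_configured)
		return kaslr_on_cmdline;	/* opt-in */

	return !nokaslr_on_cmdline;		/* opt-out */
}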
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 9769df094035..3c0809a0631f 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -9,18 +9,9 @@ VDSOX32-$(CONFIG_X86_X32_ABI) := y
9VDSO32-$(CONFIG_X86_32) := y 9VDSO32-$(CONFIG_X86_32) := y
10VDSO32-$(CONFIG_COMPAT) := y 10VDSO32-$(CONFIG_COMPAT) := y
11 11
12vdso-install-$(VDSO64-y) += vdso.so
13vdso-install-$(VDSOX32-y) += vdsox32.so
14vdso-install-$(VDSO32-y) += $(vdso32-images)
15
16
17# files to link into the vdso 12# files to link into the vdso
18vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o 13vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
19 14vobjs-nox32 := vdso-fakesections.o
20vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
21
22# Filter out x32 objects.
23vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
24 15
25# files to link into kernel 16# files to link into kernel
26obj-y += vma.o 17obj-y += vma.o
@@ -34,7 +25,7 @@ vdso_img-$(VDSO32-y) += 32-sysenter
34 25
35obj-$(VDSO32-y) += vdso32-setup.o 26obj-$(VDSO32-y) += vdso32-setup.o
36 27
37vobjs := $(foreach F,$(vobj64s),$(obj)/$F) 28vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
38 29
39$(obj)/vdso.o: $(obj)/vdso.so 30$(obj)/vdso.o: $(obj)/vdso.so
40 31
@@ -104,7 +95,13 @@ VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
104 -Wl,-z,max-page-size=4096 \ 95 -Wl,-z,max-page-size=4096 \
105 -Wl,-z,common-page-size=4096 96 -Wl,-z,common-page-size=4096
106 97
107vobjx32s-y := $(vobj64s:.o=-x32.o) 98# 64-bit objects to re-brand as x32
99vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
100
101# x32-rebranded versions
102vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o)
103
104# same thing, but in the output directory
108vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F) 105vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
109 106
110# Convert 64bit object file to x32 for x32 vDSO. 107# Convert 64bit object file to x32 for x32 vDSO.
@@ -176,15 +173,20 @@ VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
176GCOV_PROFILE := n 173GCOV_PROFILE := n
177 174
178# 175#
179# Install the unstripped copy of vdso*.so listed in $(vdso-install-y). 176# Install the unstripped copies of vdso*.so.
180# 177#
181quiet_cmd_vdso_install = INSTALL $@ 178quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
182 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ 179 cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
183$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE 180
181vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
182
183$(MODLIB)/vdso: FORCE
184 @mkdir -p $(MODLIB)/vdso 184 @mkdir -p $(MODLIB)/vdso
185
186$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
185 $(call cmd,vdso_install) 187 $(call cmd,vdso_install)
186 188
187PHONY += vdso_install $(vdso-install-y) 189PHONY += vdso_install $(vdso_img_insttargets)
188vdso_install: $(vdso-install-y) 190vdso_install: $(vdso_img_insttargets) FORCE
189 191
190clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* 192clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
new file mode 100644
index 000000000000..cb8a8d72c24b
--- /dev/null
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2014 Andy Lutomirski
3 * Subject to the GNU Public License, v.2
4 *
5 * Hack to keep broken Go programs working.
6 *
7 * The Go runtime had a couple of bugs: it would read the section table to try
8 * to figure out how many dynamic symbols there were (it shouldn't have looked
9 * at the section table at all) and, if there were no SHT_DYNSYM section table
10 * entry, it would use an uninitialized value for the number of symbols. As a
11 * workaround, we supply a minimal section table. vdso2c will adjust the
12 * in-memory image so that "vdso_fake_sections" becomes the section table.
13 *
14 * The bug was introduced by:
15 * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
16 * and is being addressed in the Go runtime in this issue:
17 * https://code.google.com/p/go/issues/detail?id=8197
18 */
19
20#ifndef __x86_64__
21#error This hack is specific to the 64-bit vDSO
22#endif
23
24#include <linux/elf.h>
25
26extern const __visible struct elf64_shdr vdso_fake_sections[];
27const __visible struct elf64_shdr vdso_fake_sections[] = {
28 {
29 .sh_type = SHT_DYNSYM,
30 .sh_entsize = sizeof(Elf64_Sym),
31 }
32};
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 450ac6eaf613..7a6bf50f9165 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -54,7 +54,7 @@ static void fail(const char *format, ...)
54} 54}
55 55
56/* 56/*
57 * Evil macros to do a little-endian read. 57 * Evil macros for little-endian reads and writes
58 */ 58 */
59#define GLE(x, bits, ifnot) \ 59#define GLE(x, bits, ifnot) \
60 __builtin_choose_expr( \ 60 __builtin_choose_expr( \
@@ -62,11 +62,24 @@ static void fail(const char *format, ...)
62 (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot) 62 (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
63 63
64extern void bad_get_le(void); 64extern void bad_get_le(void);
65#define LAST_LE(x) \ 65#define LAST_GLE(x) \
66 __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le()) 66 __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
67 67
68#define GET_LE(x) \ 68#define GET_LE(x) \
69 GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_LE(x)))) 69 GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
70
71#define PLE(x, val, bits, ifnot) \
72 __builtin_choose_expr( \
73 (sizeof(*(x)) == bits/8), \
74 put_unaligned_le##bits((val), (x)), ifnot)
75
76extern void bad_put_le(void);
77#define LAST_PLE(x, val) \
78 __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
79
80#define PUT_LE(x, val) \
81 PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
82
70 83
71#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0])) 84#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
72 85
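The vdso2c.c hunk adds a PUT_LE() counterpart to GET_LE(): both use __builtin_choose_expr() to pick a 16-, 32-, or 64-bit helper at compile time from the size of the field being accessed, falling back to an undefined function so that an unsupported size fails at link time. A standalone sketch of that dispatch idea, with the stores done by explicit byte writes instead of the kernel's put_unaligned_le##bits() helpers:

#include <stdint.h>

static inline void put_le16(uint16_t v, void *p)
{
	uint8_t *b = p;

	b[0] = v & 0xff;
	b[1] = v >> 8;
}

static inline void put_le32(uint32_t v, void *p)
{
	put_le16(v & 0xffff, p);
	put_le16(v >> 16, (uint8_t *)p + 2);
}

static inline void put_le64(uint64_t v, void *p)
{
	put_le32(v & 0xffffffff, p);
	put_le32(v >> 32, (uint8_t *)p + 4);
}

extern void bad_put_le(void);	/* never defined: bad sizes fail to link */

#define PLE(x, val, bits, ifnot) \
	__builtin_choose_expr(sizeof(*(x)) == (bits) / 8, \
			      put_le##bits((val), (x)), ifnot)

#define PUT_LE(x, val) \
	PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, bad_put_le())))

PUT_LE(&hdr->e_shnum, n) therefore expands to the 16-bit store, because e_shnum is a 16-bit ELF field; no size check happens at run time.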
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index 8a074637a576..c6eefaf389b9 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -18,6 +18,8 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
18 const char *secstrings; 18 const char *secstrings;
19 uint64_t syms[NSYMS] = {}; 19 uint64_t syms[NSYMS] = {};
20 20
21 uint64_t fake_sections_value = 0, fake_sections_size = 0;
22
21 Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff)); 23 Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
22 24
23 /* Walk the segment table. */ 25 /* Walk the segment table. */
@@ -84,6 +86,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
84 GET_LE(&symtab_hdr->sh_entsize) * i; 86 GET_LE(&symtab_hdr->sh_entsize) * i;
85 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) + 87 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
86 GET_LE(&sym->st_name); 88 GET_LE(&sym->st_name);
89
87 for (k = 0; k < NSYMS; k++) { 90 for (k = 0; k < NSYMS; k++) {
88 if (!strcmp(name, required_syms[k])) { 91 if (!strcmp(name, required_syms[k])) {
89 if (syms[k]) { 92 if (syms[k]) {
@@ -93,6 +96,13 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
93 syms[k] = GET_LE(&sym->st_value); 96 syms[k] = GET_LE(&sym->st_value);
94 } 97 }
95 } 98 }
99
100 if (!strcmp(name, "vdso_fake_sections")) {
101 if (fake_sections_value)
102 fail("duplicate vdso_fake_sections\n");
103 fake_sections_value = GET_LE(&sym->st_value);
104 fake_sections_size = GET_LE(&sym->st_size);
105 }
96 } 106 }
97 107
98 /* Validate mapping addresses. */ 108 /* Validate mapping addresses. */
@@ -112,11 +122,14 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
112 if (syms[sym_end_mapping] % 4096) 122 if (syms[sym_end_mapping] % 4096)
113 fail("end_mapping must be a multiple of 4096\n"); 123 fail("end_mapping must be a multiple of 4096\n");
114 124
115 /* Remove sections. */ 125 /* Remove sections or use fakes */
116 hdr->e_shoff = 0; 126 if (fake_sections_size % sizeof(Elf_Shdr))
117 hdr->e_shentsize = 0; 127 fail("vdso_fake_sections size is not a multiple of %ld\n",
118 hdr->e_shnum = 0; 128 (long)sizeof(Elf_Shdr));
119 hdr->e_shstrndx = htole16(SHN_UNDEF); 129 PUT_LE(&hdr->e_shoff, fake_sections_value);
130 PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
131 PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
132 PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
120 133
121 if (!name) { 134 if (!name) {
122 fwrite(addr, load_size, 1, outfile); 135 fwrite(addr, load_size, 1, outfile);
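Note: the net effect of the hunk above is that vdso2c now points the ELF header at the in-image fake table instead of erasing the section headers outright. A standalone sketch of that header patch, assuming <elf.h> types and a caller that has already located the fake table's offset and size (the real tool writes the fields through PUT_LE so the host build stays endian-clean):

/* Standalone sketch of the header patch: point the section-table fields
 * at a fake table inside the image, or leave them zeroed when there is
 * none.  fake_off/fake_size are assumed to come from a symbol walk like
 * the one above. */
#include <elf.h>
#include <stdint.h>
#include <stdio.h>

int patch_section_headers(Elf64_Ehdr *hdr, uint64_t fake_off,
			  uint64_t fake_size)
{
	if (fake_size % sizeof(Elf64_Shdr)) {
		fprintf(stderr, "fake section table size is not a multiple of %zu\n",
			sizeof(Elf64_Shdr));
		return -1;
	}

	hdr->e_shoff = fake_off;		/* 0 still means "no section table" */
	hdr->e_shentsize = fake_off ? sizeof(Elf64_Shdr) : 0;
	hdr->e_shnum = fake_size / sizeof(Elf64_Shdr);
	hdr->e_shstrndx = SHN_UNDEF;		/* no section name string table */
	return 0;
}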
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f17b29210ac4..ffb101e45731 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1537,7 +1537,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
1537 if (!xen_pvh_domain()) 1537 if (!xen_pvh_domain())
1538 pv_cpu_ops = xen_cpu_ops; 1538 pv_cpu_ops = xen_cpu_ops;
1539 1539
1540 x86_init.resources.memory_setup = xen_memory_setup; 1540 if (xen_feature(XENFEAT_auto_translated_physmap))
1541 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
1542 else
1543 x86_init.resources.memory_setup = xen_memory_setup;
1541 x86_init.oem.arch_setup = xen_arch_setup; 1544 x86_init.oem.arch_setup = xen_arch_setup;
1542 x86_init.oem.banner = xen_banner; 1545 x86_init.oem.banner = xen_banner;
1543 1546
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 821a11ada590..2e555163c2fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -27,7 +27,6 @@
27#include <xen/interface/memory.h> 27#include <xen/interface/memory.h>
28#include <xen/interface/physdev.h> 28#include <xen/interface/physdev.h>
29#include <xen/features.h> 29#include <xen/features.h>
30#include "mmu.h"
31#include "xen-ops.h" 30#include "xen-ops.h"
32#include "vdso.h" 31#include "vdso.h"
33 32
@@ -82,9 +81,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
82 81
83 memblock_reserve(start, size); 82 memblock_reserve(start, size);
84 83
85 if (xen_feature(XENFEAT_auto_translated_physmap))
86 return;
87
88 xen_max_p2m_pfn = PFN_DOWN(start + size); 84 xen_max_p2m_pfn = PFN_DOWN(start + size);
89 for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { 85 for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
90 unsigned long mfn = pfn_to_mfn(pfn); 86 unsigned long mfn = pfn_to_mfn(pfn);
@@ -107,7 +103,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
107 .domid = DOMID_SELF 103 .domid = DOMID_SELF
108 }; 104 };
109 unsigned long len = 0; 105 unsigned long len = 0;
110 int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
111 unsigned long pfn; 106 unsigned long pfn;
112 int ret; 107 int ret;
113 108
@@ -121,7 +116,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
121 continue; 116 continue;
122 frame = mfn; 117 frame = mfn;
123 } else { 118 } else {
124 if (!xlated_phys && mfn != INVALID_P2M_ENTRY) 119 if (mfn != INVALID_P2M_ENTRY)
125 continue; 120 continue;
126 frame = pfn; 121 frame = pfn;
127 } 122 }
@@ -159,13 +154,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
159static unsigned long __init xen_release_chunk(unsigned long start, 154static unsigned long __init xen_release_chunk(unsigned long start,
160 unsigned long end) 155 unsigned long end)
161{ 156{
162 /*
163 * Xen already ballooned out the E820 non RAM regions for us
164 * and set them up properly in EPT.
165 */
166 if (xen_feature(XENFEAT_auto_translated_physmap))
167 return end - start;
168
169 return xen_do_chunk(start, end, true); 157 return xen_do_chunk(start, end, true);
170} 158}
171 159
@@ -234,13 +222,7 @@ static void __init xen_set_identity_and_release_chunk(
234 * (except for the ISA region which must be 1:1 mapped) to 222 * (except for the ISA region which must be 1:1 mapped) to
235 * release the refcounts (in Xen) on the original frames. 223 * release the refcounts (in Xen) on the original frames.
236 */ 224 */
237 225 for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
238 /*
239 * PVH E820 matches the hypervisor's P2M which means we need to
240 * account for the proper values of *release and *identity.
241 */
242 for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
243 pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
244 pte_t pte = __pte_ma(0); 226 pte_t pte = __pte_ma(0);
245 227
246 if (pfn < PFN_UP(ISA_END_ADDRESS)) 228 if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -518,6 +500,35 @@ char * __init xen_memory_setup(void)
518} 500}
519 501
520/* 502/*
503 * Machine specific memory setup for auto-translated guests.
504 */
505char * __init xen_auto_xlated_memory_setup(void)
506{
507 static struct e820entry map[E820MAX] __initdata;
508
509 struct xen_memory_map memmap;
510 int i;
511 int rc;
512
513 memmap.nr_entries = E820MAX;
514 set_xen_guest_handle(memmap.buffer, map);
515
516 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
517 if (rc < 0)
518 panic("No memory map (%d)\n", rc);
519
520 sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
521
522 for (i = 0; i < memmap.nr_entries; i++)
523 e820_add_region(map[i].addr, map[i].size, map[i].type);
524
525 memblock_reserve(__pa(xen_start_info->mfn_list),
526 xen_start_info->pt_base - xen_start_info->mfn_list);
527
528 return "Xen";
529}
530
531/*
521 * Set the bit indicating "nosegneg" library variants should be used. 532 * Set the bit indicating "nosegneg" library variants should be used.
522 * We only need to bother in pure 32-bit mode; compat 32-bit processes 533 * We only need to bother in pure 32-bit mode; compat 32-bit processes
523 * can have un-truncated segments, so wrapping around is allowed. 534 * can have un-truncated segments, so wrapping around is allowed.
@@ -590,13 +601,7 @@ void xen_enable_syscall(void)
590 } 601 }
591#endif /* CONFIG_X86_64 */ 602#endif /* CONFIG_X86_64 */
592} 603}
593void xen_enable_nmi(void) 604
594{
595#ifdef CONFIG_X86_64
596 if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
597 BUG();
598#endif
599}
600void __init xen_pvmmu_arch_setup(void) 605void __init xen_pvmmu_arch_setup(void)
601{ 606{
602 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); 607 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -611,7 +616,6 @@ void __init xen_pvmmu_arch_setup(void)
611 616
612 xen_enable_sysenter(); 617 xen_enable_sysenter();
613 xen_enable_syscall(); 618 xen_enable_syscall();
614 xen_enable_nmi();
615} 619}
616 620
617/* This function is not called for HVM domains */ 621/* This function is not called for HVM domains */
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c834d4b231f0..97d87659f779 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -36,6 +36,7 @@ void xen_mm_unpin_all(void);
36void xen_set_pat(u64); 36void xen_set_pat(u64);
37 37
38char * __init xen_memory_setup(void); 38char * __init xen_memory_setup(void);
39char * xen_auto_xlated_memory_setup(void);
39void __init xen_arch_setup(void); 40void __init xen_arch_setup(void);
40void xen_enable_sysenter(void); 41void xen_enable_sysenter(void);
41void xen_enable_syscall(void); 42void xen_enable_syscall(void);
diff --git a/block/blk-core.c b/block/blk-core.c
index 9aca8c71e70b..6f8dba161bfe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -43,6 +43,7 @@
43EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 43EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
44EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 44EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
45EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 45EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
46EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
46EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); 47EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
47 48
48DEFINE_IDA(blk_queue_ida); 49DEFINE_IDA(blk_queue_ida);
@@ -3311,8 +3312,7 @@ int __init blk_dev_init(void)
3311 3312
3312 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3313 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3313 kblockd_workqueue = alloc_workqueue("kblockd", 3314 kblockd_workqueue = alloc_workqueue("kblockd",
3314 WQ_MEM_RECLAIM | WQ_HIGHPRI | 3315 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3315 WQ_POWER_EFFICIENT, 0);
3316 if (!kblockd_workqueue) 3316 if (!kblockd_workqueue)
3317 panic("Failed to create kblockd\n"); 3317 panic("Failed to create kblockd\n");
3318 3318
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8ffee4b5f93d..3cb5e9e7108a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -422,44 +422,6 @@ void blk_insert_flush(struct request *rq)
422} 422}
423 423
424/** 424/**
425 * blk_abort_flushes - @q is being aborted, abort flush requests
426 * @q: request_queue being aborted
427 *
428 * To be called from elv_abort_queue(). @q is being aborted. Prepare all
429 * FLUSH/FUA requests for abortion.
430 *
431 * CONTEXT:
432 * spin_lock_irq(q->queue_lock)
433 */
434void blk_abort_flushes(struct request_queue *q)
435{
436 struct request *rq, *n;
437 int i;
438
439 /*
440 * Requests in flight for data are already owned by the dispatch
441 * queue or the device driver. Just restore for normal completion.
442 */
443 list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
444 list_del_init(&rq->flush.list);
445 blk_flush_restore_request(rq);
446 }
447
448 /*
449 * We need to give away requests on flush queues. Restore for
450 * normal completion and put them on the dispatch queue.
451 */
452 for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
453 list_for_each_entry_safe(rq, n, &q->flush_queue[i],
454 flush.list) {
455 list_del_init(&rq->flush.list);
456 blk_flush_restore_request(rq);
457 list_add_tail(&rq->queuelist, &q->queue_head);
458 }
459 }
460}
461
462/**
463 * blkdev_issue_flush - queue a flush 425 * blkdev_issue_flush - queue a flush
464 * @bdev: blockdev to issue flush for 426 * @bdev: blockdev to issue flush for
465 * @gfp_mask: memory allocation flags (for bio_alloc) 427 * @gfp_mask: memory allocation flags (for bio_alloc)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 1aab39f71d95..c1b92426c95e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -43,9 +43,16 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
43 return bt_has_free_tags(&tags->bitmap_tags); 43 return bt_has_free_tags(&tags->bitmap_tags);
44} 44}
45 45
46static inline void bt_index_inc(unsigned int *index) 46static inline int bt_index_inc(int index)
47{ 47{
48 *index = (*index + 1) & (BT_WAIT_QUEUES - 1); 48 return (index + 1) & (BT_WAIT_QUEUES - 1);
49}
50
51static inline void bt_index_atomic_inc(atomic_t *index)
52{
53 int old = atomic_read(index);
54 int new = bt_index_inc(old);
55 atomic_cmpxchg(index, old, new);
49} 56}
50 57
51/* 58/*
@@ -69,14 +76,14 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
69 int i, wake_index; 76 int i, wake_index;
70 77
71 bt = &tags->bitmap_tags; 78 bt = &tags->bitmap_tags;
72 wake_index = bt->wake_index; 79 wake_index = atomic_read(&bt->wake_index);
73 for (i = 0; i < BT_WAIT_QUEUES; i++) { 80 for (i = 0; i < BT_WAIT_QUEUES; i++) {
74 struct bt_wait_state *bs = &bt->bs[wake_index]; 81 struct bt_wait_state *bs = &bt->bs[wake_index];
75 82
76 if (waitqueue_active(&bs->wait)) 83 if (waitqueue_active(&bs->wait))
77 wake_up(&bs->wait); 84 wake_up(&bs->wait);
78 85
79 bt_index_inc(&wake_index); 86 wake_index = bt_index_inc(wake_index);
80 } 87 }
81} 88}
82 89
@@ -212,12 +219,14 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
212 struct blk_mq_hw_ctx *hctx) 219 struct blk_mq_hw_ctx *hctx)
213{ 220{
214 struct bt_wait_state *bs; 221 struct bt_wait_state *bs;
222 int wait_index;
215 223
216 if (!hctx) 224 if (!hctx)
217 return &bt->bs[0]; 225 return &bt->bs[0];
218 226
219 bs = &bt->bs[hctx->wait_index]; 227 wait_index = atomic_read(&hctx->wait_index);
220 bt_index_inc(&hctx->wait_index); 228 bs = &bt->bs[wait_index];
229 bt_index_atomic_inc(&hctx->wait_index);
221 return bs; 230 return bs;
222} 231}
223 232
@@ -239,18 +248,12 @@ static int bt_get(struct blk_mq_alloc_data *data,
239 248
240 bs = bt_wait_ptr(bt, hctx); 249 bs = bt_wait_ptr(bt, hctx);
241 do { 250 do {
242 bool was_empty;
243
244 was_empty = list_empty(&wait.task_list);
245 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); 251 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
246 252
247 tag = __bt_get(hctx, bt, last_tag); 253 tag = __bt_get(hctx, bt, last_tag);
248 if (tag != -1) 254 if (tag != -1)
249 break; 255 break;
250 256
251 if (was_empty)
252 atomic_set(&bs->wait_cnt, bt->wake_cnt);
253
254 blk_mq_put_ctx(data->ctx); 257 blk_mq_put_ctx(data->ctx);
255 258
256 io_schedule(); 259 io_schedule();
@@ -313,18 +316,19 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
313{ 316{
314 int i, wake_index; 317 int i, wake_index;
315 318
316 wake_index = bt->wake_index; 319 wake_index = atomic_read(&bt->wake_index);
317 for (i = 0; i < BT_WAIT_QUEUES; i++) { 320 for (i = 0; i < BT_WAIT_QUEUES; i++) {
318 struct bt_wait_state *bs = &bt->bs[wake_index]; 321 struct bt_wait_state *bs = &bt->bs[wake_index];
319 322
320 if (waitqueue_active(&bs->wait)) { 323 if (waitqueue_active(&bs->wait)) {
321 if (wake_index != bt->wake_index) 324 int o = atomic_read(&bt->wake_index);
322 bt->wake_index = wake_index; 325 if (wake_index != o)
326 atomic_cmpxchg(&bt->wake_index, o, wake_index);
323 327
324 return bs; 328 return bs;
325 } 329 }
326 330
327 bt_index_inc(&wake_index); 331 wake_index = bt_index_inc(wake_index);
328 } 332 }
329 333
330 return NULL; 334 return NULL;
@@ -334,6 +338,7 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
334{ 338{
335 const int index = TAG_TO_INDEX(bt, tag); 339 const int index = TAG_TO_INDEX(bt, tag);
336 struct bt_wait_state *bs; 340 struct bt_wait_state *bs;
341 int wait_cnt;
337 342
338 /* 343 /*
339 * The unlock memory barrier need to order access to req in free 344 * The unlock memory barrier need to order access to req in free
@@ -342,10 +347,19 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
342 clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word); 347 clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
343 348
344 bs = bt_wake_ptr(bt); 349 bs = bt_wake_ptr(bt);
345 if (bs && atomic_dec_and_test(&bs->wait_cnt)) { 350 if (!bs)
346 atomic_set(&bs->wait_cnt, bt->wake_cnt); 351 return;
347 bt_index_inc(&bt->wake_index); 352
353 wait_cnt = atomic_dec_return(&bs->wait_cnt);
354 if (wait_cnt == 0) {
355wake:
356 atomic_add(bt->wake_cnt, &bs->wait_cnt);
357 bt_index_atomic_inc(&bt->wake_index);
348 wake_up(&bs->wait); 358 wake_up(&bs->wait);
359 } else if (wait_cnt < 0) {
360 wait_cnt = atomic_inc_return(&bs->wait_cnt);
361 if (!wait_cnt)
362 goto wake;
349 } 363 }
350} 364}
351 365
@@ -499,10 +513,13 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
499 return -ENOMEM; 513 return -ENOMEM;
500 } 514 }
501 515
502 for (i = 0; i < BT_WAIT_QUEUES; i++) 516 bt_update_count(bt, depth);
517
518 for (i = 0; i < BT_WAIT_QUEUES; i++) {
503 init_waitqueue_head(&bt->bs[i].wait); 519 init_waitqueue_head(&bt->bs[i].wait);
520 atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
521 }
504 522
505 bt_update_count(bt, depth);
506 return 0; 523 return 0;
507} 524}
508 525
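Note: wake_index becomes an atomic_t so concurrent wakers can advance it without taking a lock; a lost cmpxchg simply means another CPU already moved the index, which is all the code needs. A userspace analogue using C11 atomics, with WAIT_QUEUES standing in for BT_WAIT_QUEUES:

/* Userspace analogue of bt_index_atomic_inc(): advance a shared
 * wrap-around index with a single compare-and-swap. */
#include <stdatomic.h>
#include <stdio.h>

#define WAIT_QUEUES	8		/* stand-in for BT_WAIT_QUEUES */

static inline int index_inc(int index)
{
	return (index + 1) & (WAIT_QUEUES - 1);
}

static inline void index_atomic_inc(atomic_int *index)
{
	int old = atomic_load(index);
	int new = index_inc(old);

	/* If another thread moved the index first, don't retry: the goal
	 * is only "advanced at least once", as in blk-mq-tag. */
	atomic_compare_exchange_strong(index, &old, new);
}

int main(void)
{
	atomic_int wake_index = 0;

	for (int i = 0; i < 10; i++)
		index_atomic_inc(&wake_index);
	printf("wake_index = %d\n", atomic_load(&wake_index));	/* 10 & 7 == 2 */
	return 0;
}

The atomic_cmpxchg() in the kernel version serves the same purpose: when the exchange fails, the index has already been advanced by someone else and no retry is needed.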
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 98696a65d4d4..6206ed17ef76 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -24,7 +24,7 @@ struct blk_mq_bitmap_tags {
24 unsigned int map_nr; 24 unsigned int map_nr;
25 struct blk_align_bitmap *map; 25 struct blk_align_bitmap *map;
26 26
27 unsigned int wake_index; 27 atomic_t wake_index;
28 struct bt_wait_state *bs; 28 struct bt_wait_state *bs;
29}; 29};
30 30
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e11f5f8e0313..0ef2dc7f01bf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -109,7 +109,7 @@ static void blk_mq_queue_exit(struct request_queue *q)
109 __percpu_counter_add(&q->mq_usage_counter, -1, 1000000); 109 __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
110} 110}
111 111
112static void __blk_mq_drain_queue(struct request_queue *q) 112void blk_mq_drain_queue(struct request_queue *q)
113{ 113{
114 while (true) { 114 while (true) {
115 s64 count; 115 s64 count;
@@ -120,7 +120,7 @@ static void __blk_mq_drain_queue(struct request_queue *q)
120 120
121 if (count == 0) 121 if (count == 0)
122 break; 122 break;
123 blk_mq_run_queues(q, false); 123 blk_mq_start_hw_queues(q);
124 msleep(10); 124 msleep(10);
125 } 125 }
126} 126}
@@ -139,12 +139,7 @@ static void blk_mq_freeze_queue(struct request_queue *q)
139 spin_unlock_irq(q->queue_lock); 139 spin_unlock_irq(q->queue_lock);
140 140
141 if (drain) 141 if (drain)
142 __blk_mq_drain_queue(q); 142 blk_mq_drain_queue(q);
143}
144
145void blk_mq_drain_queue(struct request_queue *q)
146{
147 __blk_mq_drain_queue(q);
148} 143}
149 144
150static void blk_mq_unfreeze_queue(struct request_queue *q) 145static void blk_mq_unfreeze_queue(struct request_queue *q)
diff --git a/block/blk.h b/block/blk.h
index 45385e9abf6f..6748c4f8d7a1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -84,7 +84,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
84#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED) 84#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
85 85
86void blk_insert_flush(struct request *rq); 86void blk_insert_flush(struct request *rq);
87void blk_abort_flushes(struct request_queue *q);
88 87
89static inline struct request *__elv_next_request(struct request_queue *q) 88static inline struct request *__elv_next_request(struct request_queue *q)
90{ 89{
diff --git a/block/elevator.c b/block/elevator.c
index f35edddfe9b5..34bded18910e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -729,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
729 return ELV_MQUEUE_MAY; 729 return ELV_MQUEUE_MAY;
730} 730}
731 731
732void elv_abort_queue(struct request_queue *q)
733{
734 struct request *rq;
735
736 blk_abort_flushes(q);
737
738 while (!list_empty(&q->queue_head)) {
739 rq = list_entry_rq(q->queue_head.next);
740 rq->cmd_flags |= REQ_QUIET;
741 trace_block_rq_abort(q, rq);
742 /*
743 * Mark this request as started so we don't trigger
744 * any debug logic in the end I/O path.
745 */
746 blk_start_request(rq);
747 __blk_end_request_all(rq, -EIO);
748 }
749}
750EXPORT_SYMBOL(elv_abort_queue);
751
752void elv_completed_request(struct request_queue *q, struct request *rq) 732void elv_completed_request(struct request_queue *q, struct request *rq)
753{ 733{
754 struct elevator_queue *e = q->elevator; 734 struct elevator_queue *e = q->elevator;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 63407d264885..9cb65b0e7597 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -34,6 +34,9 @@ ACPI_MODULE_NAME("acpi_lpss");
34 34
35/* Offsets relative to LPSS_PRIVATE_OFFSET */ 35/* Offsets relative to LPSS_PRIVATE_OFFSET */
36#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) 36#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
37#define LPSS_RESETS 0x04
38#define LPSS_RESETS_RESET_FUNC BIT(0)
39#define LPSS_RESETS_RESET_APB BIT(1)
37#define LPSS_GENERAL 0x08 40#define LPSS_GENERAL 0x08
38#define LPSS_GENERAL_LTR_MODE_SW BIT(2) 41#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
39#define LPSS_GENERAL_UART_RTS_OVRD BIT(3) 42#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
@@ -99,6 +102,17 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
99 writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); 102 writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
100} 103}
101 104
105static void lpss_i2c_setup(struct lpss_private_data *pdata)
106{
107 unsigned int offset;
108 u32 val;
109
110 offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
111 val = readl(pdata->mmio_base + offset);
112 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
113 writel(val, pdata->mmio_base + offset);
114}
115
102static struct lpss_device_desc lpt_dev_desc = { 116static struct lpss_device_desc lpt_dev_desc = {
103 .clk_required = true, 117 .clk_required = true,
104 .prv_offset = 0x800, 118 .prv_offset = 0x800,
@@ -171,6 +185,7 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
171 .prv_offset = 0x800, 185 .prv_offset = 0x800,
172 .save_ctx = true, 186 .save_ctx = true,
173 .shared_clock = &i2c_clock, 187 .shared_clock = &i2c_clock,
188 .setup = lpss_i2c_setup,
174}; 189};
175 190
176#else 191#else
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index e48fc98e71c4..0d7116f34b95 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -32,6 +32,7 @@
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/async.h> 33#include <linux/async.h>
34#include <linux/dmi.h> 34#include <linux/dmi.h>
35#include <linux/delay.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/suspend.h> 37#include <linux/suspend.h>
37#include <asm/unaligned.h> 38#include <asm/unaligned.h>
@@ -70,6 +71,7 @@ MODULE_DESCRIPTION("ACPI Battery Driver");
70MODULE_LICENSE("GPL"); 71MODULE_LICENSE("GPL");
71 72
72static int battery_bix_broken_package; 73static int battery_bix_broken_package;
74static int battery_notification_delay_ms;
73static unsigned int cache_time = 1000; 75static unsigned int cache_time = 1000;
74module_param(cache_time, uint, 0644); 76module_param(cache_time, uint, 0644);
75MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 77MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -930,7 +932,10 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
930 goto end; 932 goto end;
931 } 933 }
932 alarm_string[count] = '\0'; 934 alarm_string[count] = '\0';
933 battery->alarm = simple_strtol(alarm_string, NULL, 0); 935 if (kstrtoint(alarm_string, 0, &battery->alarm)) {
936 result = -EINVAL;
937 goto end;
938 }
934 result = acpi_battery_set_alarm(battery); 939 result = acpi_battery_set_alarm(battery);
935 end: 940 end:
936 if (!result) 941 if (!result)
@@ -1062,6 +1067,14 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
1062 if (!battery) 1067 if (!battery)
1063 return; 1068 return;
1064 old = battery->bat.dev; 1069 old = battery->bat.dev;
1070 /*
1071 * On Acer Aspire V5-573G notifications are sometimes triggered too
 1072 * early. For example, when AC power is unplugged and a notification is
 1073 * triggered, the battery state is still reported as "Full", and changes to
 1074 * "Discharging" only after a short delay, without any further notification.
1075 */
1076 if (battery_notification_delay_ms > 0)
1077 msleep(battery_notification_delay_ms);
1065 if (event == ACPI_BATTERY_NOTIFY_INFO) 1078 if (event == ACPI_BATTERY_NOTIFY_INFO)
1066 acpi_battery_refresh(battery); 1079 acpi_battery_refresh(battery);
1067 acpi_battery_update(battery, false); 1080 acpi_battery_update(battery, false);
@@ -1106,14 +1119,35 @@ static int battery_notify(struct notifier_block *nb,
1106 return 0; 1119 return 0;
1107} 1120}
1108 1121
1122static int battery_bix_broken_package_quirk(const struct dmi_system_id *d)
1123{
1124 battery_bix_broken_package = 1;
1125 return 0;
1126}
1127
1128static int battery_notification_delay_quirk(const struct dmi_system_id *d)
1129{
1130 battery_notification_delay_ms = 1000;
1131 return 0;
1132}
1133
1109static struct dmi_system_id bat_dmi_table[] = { 1134static struct dmi_system_id bat_dmi_table[] = {
1110 { 1135 {
1136 .callback = battery_bix_broken_package_quirk,
1111 .ident = "NEC LZ750/LS", 1137 .ident = "NEC LZ750/LS",
1112 .matches = { 1138 .matches = {
1113 DMI_MATCH(DMI_SYS_VENDOR, "NEC"), 1139 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
1114 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"), 1140 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
1115 }, 1141 },
1116 }, 1142 },
1143 {
1144 .callback = battery_notification_delay_quirk,
1145 .ident = "Acer Aspire V5-573G",
1146 .matches = {
1147 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1148 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
1149 },
1150 },
1117 {}, 1151 {},
1118}; 1152};
1119 1153
@@ -1227,8 +1261,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
1227 if (acpi_disabled) 1261 if (acpi_disabled)
1228 return; 1262 return;
1229 1263
1230 if (dmi_check_system(bat_dmi_table)) 1264 dmi_check_system(bat_dmi_table);
1231 battery_bix_broken_package = 1;
1232 1265
1233#ifdef CONFIG_ACPI_PROCFS_POWER 1266#ifdef CONFIG_ACPI_PROCFS_POWER
1234 acpi_battery_dir = acpi_lock_battery_dir(); 1267 acpi_battery_dir = acpi_lock_battery_dir();
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3f2bdc812d23..bad25b070fe0 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -235,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
235static unsigned long acpi_rsdp; 235static unsigned long acpi_rsdp;
236static int __init setup_acpi_rsdp(char *arg) 236static int __init setup_acpi_rsdp(char *arg)
237{ 237{
238 acpi_rsdp = simple_strtoul(arg, NULL, 16); 238 if (kstrtoul(arg, 16, &acpi_rsdp))
239 return -EINVAL;
239 return 0; 240 return 0;
240} 241}
241early_param("acpi_rsdp", setup_acpi_rsdp); 242early_param("acpi_rsdp", setup_acpi_rsdp);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 05550ba44d32..6d5a6cda0734 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -360,7 +360,8 @@ static int __init acpi_parse_apic_instance(char *str)
360 if (!str) 360 if (!str)
361 return -EINVAL; 361 return -EINVAL;
362 362
363 acpi_apic_instance = simple_strtoul(str, NULL, 0); 363 if (kstrtoint(str, 0, &acpi_apic_instance))
364 return -EINVAL;
364 365
365 pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance); 366 pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
366 367
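Note: both ACPI parameter parsers above switch from simple_strtoul()/simple_strtol() to kstrto*(), which reject malformed input instead of silently returning whatever prefix happened to parse. A userspace sketch of the stricter behaviour, with strict_parse_ulong() as an illustrative helper rather than a kernel API:

/* Userspace sketch of the stricter parsing the kstrto*() calls give the
 * ACPI code: reject empty strings, trailing garbage and overflow instead
 * of silently returning a partial value. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int strict_parse_ulong(const char *s, unsigned int base,
			      unsigned long *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || *end != '\0')
		return -EINVAL;		/* nothing parsed, or trailing junk */

	*res = val;
	return 0;
}

int main(void)
{
	unsigned long rsdp;

	if (strict_parse_ulong("0xdfe8a000", 16, &rsdp))	/* illustrative address */
		return 1;
	printf("acpi_rsdp = %#lx\n", rsdp);

	/* "123abc" would have been accepted by simple_strtoul(); here it fails. */
	printf("junk -> %d\n", strict_parse_ulong("123abc", 0, &rsdp));
	return 0;
}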
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 77087a29b127..a3b042c4d448 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(home_node, "Home node for the device");
79 79
80static int queue_mode = NULL_Q_MQ; 80static int queue_mode = NULL_Q_MQ;
81module_param(queue_mode, int, S_IRUGO); 81module_param(queue_mode, int, S_IRUGO);
82MODULE_PARM_DESC(use_mq, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)"); 82MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
83 83
84static int gb = 250; 84static int gb = 250;
85module_param(gb, int, S_IRUGO); 85module_param(gb, int, S_IRUGO);
@@ -227,7 +227,10 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
227 227
228static void null_softirq_done_fn(struct request *rq) 228static void null_softirq_done_fn(struct request *rq)
229{ 229{
230 end_cmd(blk_mq_rq_to_pdu(rq)); 230 if (queue_mode == NULL_Q_MQ)
231 end_cmd(blk_mq_rq_to_pdu(rq));
232 else
233 end_cmd(rq->special);
231} 234}
232 235
233static inline void null_handle_cmd(struct nullb_cmd *cmd) 236static inline void null_handle_cmd(struct nullb_cmd *cmd)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index a842c71dcc21..02351e217165 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -10,10 +10,6 @@
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */ 13 */
18 14
19#include <linux/nvme.h> 15#include <linux/nvme.h>
@@ -46,16 +42,26 @@
46#include <scsi/sg.h> 42#include <scsi/sg.h>
47#include <asm-generic/io-64-nonatomic-lo-hi.h> 43#include <asm-generic/io-64-nonatomic-lo-hi.h>
48 44
49#define NVME_Q_DEPTH 1024 45#include <trace/events/block.h>
46
47#define NVME_Q_DEPTH 1024
50#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 48#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
51#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 49#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
52#define ADMIN_TIMEOUT (60 * HZ) 50#define ADMIN_TIMEOUT (admin_timeout * HZ)
53#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT) 51#define IOD_TIMEOUT (retry_time * HZ)
52
53static unsigned char admin_timeout = 60;
54module_param(admin_timeout, byte, 0644);
55MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
54 56
55unsigned char io_timeout = 30; 57unsigned char nvme_io_timeout = 30;
56module_param(io_timeout, byte, 0644); 58module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
57MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 59MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
58 60
61static unsigned char retry_time = 30;
62module_param(retry_time, byte, 0644);
63MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
64
59static int nvme_major; 65static int nvme_major;
60module_param(nvme_major, int, 0); 66module_param(nvme_major, int, 0);
61 67
@@ -67,6 +73,7 @@ static LIST_HEAD(dev_list);
67static struct task_struct *nvme_thread; 73static struct task_struct *nvme_thread;
68static struct workqueue_struct *nvme_workq; 74static struct workqueue_struct *nvme_workq;
69static wait_queue_head_t nvme_kthread_wait; 75static wait_queue_head_t nvme_kthread_wait;
76static struct notifier_block nvme_nb;
70 77
71static void nvme_reset_failed_dev(struct work_struct *ws); 78static void nvme_reset_failed_dev(struct work_struct *ws);
72 79
@@ -199,16 +206,13 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
199#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE) 206#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
200#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) 207#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
201#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) 208#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
202#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 209#define CMD_CTX_ABORT (0x318 + CMD_CTX_BASE)
203#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
204 210
205static void special_completion(struct nvme_queue *nvmeq, void *ctx, 211static void special_completion(struct nvme_queue *nvmeq, void *ctx,
206 struct nvme_completion *cqe) 212 struct nvme_completion *cqe)
207{ 213{
208 if (ctx == CMD_CTX_CANCELLED) 214 if (ctx == CMD_CTX_CANCELLED)
209 return; 215 return;
210 if (ctx == CMD_CTX_FLUSH)
211 return;
212 if (ctx == CMD_CTX_ABORT) { 216 if (ctx == CMD_CTX_ABORT) {
213 ++nvmeq->dev->abort_limit; 217 ++nvmeq->dev->abort_limit;
214 return; 218 return;
@@ -247,8 +251,9 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
247 void *ctx; 251 void *ctx;
248 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 252 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
249 253
250 if (cmdid >= nvmeq->q_depth) { 254 if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
251 *fn = special_completion; 255 if (fn)
256 *fn = special_completion;
252 return CMD_CTX_INVALID; 257 return CMD_CTX_INVALID;
253 } 258 }
254 if (fn) 259 if (fn)
@@ -281,9 +286,17 @@ static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
281 286
282static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU) 287static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
283{ 288{
289 struct nvme_queue *nvmeq;
284 unsigned queue_id = get_cpu_var(*dev->io_queue); 290 unsigned queue_id = get_cpu_var(*dev->io_queue);
291
285 rcu_read_lock(); 292 rcu_read_lock();
286 return rcu_dereference(dev->queues[queue_id]); 293 nvmeq = rcu_dereference(dev->queues[queue_id]);
294 if (nvmeq)
295 return nvmeq;
296
297 rcu_read_unlock();
298 put_cpu_var(*dev->io_queue);
299 return NULL;
287} 300}
288 301
289static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU) 302static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -295,8 +308,15 @@ static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
295static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx) 308static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
296 __acquires(RCU) 309 __acquires(RCU)
297{ 310{
311 struct nvme_queue *nvmeq;
312
298 rcu_read_lock(); 313 rcu_read_lock();
299 return rcu_dereference(dev->queues[q_idx]); 314 nvmeq = rcu_dereference(dev->queues[q_idx]);
315 if (nvmeq)
316 return nvmeq;
317
318 rcu_read_unlock();
319 return NULL;
300} 320}
301 321
302static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU) 322static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -387,25 +407,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
387static void nvme_start_io_acct(struct bio *bio) 407static void nvme_start_io_acct(struct bio *bio)
388{ 408{
389 struct gendisk *disk = bio->bi_bdev->bd_disk; 409 struct gendisk *disk = bio->bi_bdev->bd_disk;
390 const int rw = bio_data_dir(bio); 410 if (blk_queue_io_stat(disk->queue)) {
391 int cpu = part_stat_lock(); 411 const int rw = bio_data_dir(bio);
392 part_round_stats(cpu, &disk->part0); 412 int cpu = part_stat_lock();
393 part_stat_inc(cpu, &disk->part0, ios[rw]); 413 part_round_stats(cpu, &disk->part0);
394 part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio)); 414 part_stat_inc(cpu, &disk->part0, ios[rw]);
395 part_inc_in_flight(&disk->part0, rw); 415 part_stat_add(cpu, &disk->part0, sectors[rw],
396 part_stat_unlock(); 416 bio_sectors(bio));
417 part_inc_in_flight(&disk->part0, rw);
418 part_stat_unlock();
419 }
397} 420}
398 421
399static void nvme_end_io_acct(struct bio *bio, unsigned long start_time) 422static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
400{ 423{
401 struct gendisk *disk = bio->bi_bdev->bd_disk; 424 struct gendisk *disk = bio->bi_bdev->bd_disk;
402 const int rw = bio_data_dir(bio); 425 if (blk_queue_io_stat(disk->queue)) {
403 unsigned long duration = jiffies - start_time; 426 const int rw = bio_data_dir(bio);
404 int cpu = part_stat_lock(); 427 unsigned long duration = jiffies - start_time;
405 part_stat_add(cpu, &disk->part0, ticks[rw], duration); 428 int cpu = part_stat_lock();
406 part_round_stats(cpu, &disk->part0); 429 part_stat_add(cpu, &disk->part0, ticks[rw], duration);
407 part_dec_in_flight(&disk->part0, rw); 430 part_round_stats(cpu, &disk->part0);
408 part_stat_unlock(); 431 part_dec_in_flight(&disk->part0, rw);
432 part_stat_unlock();
433 }
409} 434}
410 435
411static void bio_completion(struct nvme_queue *nvmeq, void *ctx, 436static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -414,6 +439,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
414 struct nvme_iod *iod = ctx; 439 struct nvme_iod *iod = ctx;
415 struct bio *bio = iod->private; 440 struct bio *bio = iod->private;
416 u16 status = le16_to_cpup(&cqe->status) >> 1; 441 u16 status = le16_to_cpup(&cqe->status) >> 1;
442 int error = 0;
417 443
418 if (unlikely(status)) { 444 if (unlikely(status)) {
419 if (!(status & NVME_SC_DNR || 445 if (!(status & NVME_SC_DNR ||
@@ -426,6 +452,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
426 wake_up(&nvmeq->sq_full); 452 wake_up(&nvmeq->sq_full);
427 return; 453 return;
428 } 454 }
455 error = -EIO;
429 } 456 }
430 if (iod->nents) { 457 if (iod->nents) {
431 dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents, 458 dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
@@ -433,10 +460,9 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
433 nvme_end_io_acct(bio, iod->start_time); 460 nvme_end_io_acct(bio, iod->start_time);
434 } 461 }
435 nvme_free_iod(nvmeq->dev, iod); 462 nvme_free_iod(nvmeq->dev, iod);
436 if (status) 463
437 bio_endio(bio, -EIO); 464 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
438 else 465 bio_endio(bio, error);
439 bio_endio(bio, 0);
440} 466}
441 467
442/* length is in bytes. gfp flags indicates whether we may sleep. */ 468/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -525,6 +551,8 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
525 if (!split) 551 if (!split)
526 return -ENOMEM; 552 return -ENOMEM;
527 553
554 trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
555 split->bi_iter.bi_sector);
528 bio_chain(split, bio); 556 bio_chain(split, bio);
529 557
530 if (!waitqueue_active(&nvmeq->sq_full)) 558 if (!waitqueue_active(&nvmeq->sq_full))
@@ -627,16 +655,6 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
627 return 0; 655 return 0;
628} 656}
629 657
630int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
631{
632 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
633 special_completion, NVME_IO_TIMEOUT);
634 if (unlikely(cmdid < 0))
635 return cmdid;
636
637 return nvme_submit_flush(nvmeq, ns, cmdid);
638}
639
640static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod) 658static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
641{ 659{
642 struct bio *bio = iod->private; 660 struct bio *bio = iod->private;
@@ -652,7 +670,7 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
652 670
653 if (bio->bi_rw & REQ_DISCARD) 671 if (bio->bi_rw & REQ_DISCARD)
654 return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); 672 return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
655 if ((bio->bi_rw & REQ_FLUSH) && !iod->nents) 673 if (bio->bi_rw & REQ_FLUSH)
656 return nvme_submit_flush(nvmeq, ns, cmdid); 674 return nvme_submit_flush(nvmeq, ns, cmdid);
657 675
658 control = 0; 676 control = 0;
@@ -686,6 +704,26 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
686 return 0; 704 return 0;
687} 705}
688 706
707static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
708{
709 struct bio *split = bio_clone(bio, GFP_ATOMIC);
710 if (!split)
711 return -ENOMEM;
712
713 split->bi_iter.bi_size = 0;
714 split->bi_phys_segments = 0;
715 bio->bi_rw &= ~REQ_FLUSH;
716 bio_chain(split, bio);
717
718 if (!waitqueue_active(&nvmeq->sq_full))
719 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
720 bio_list_add(&nvmeq->sq_cong, split);
721 bio_list_add(&nvmeq->sq_cong, bio);
722 wake_up_process(nvme_thread);
723
724 return 0;
725}
726
689/* 727/*
690 * Called with local interrupts disabled and the q_lock held. May not sleep. 728 * Called with local interrupts disabled and the q_lock held. May not sleep.
691 */ 729 */
@@ -696,11 +734,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
696 int psegs = bio_phys_segments(ns->queue, bio); 734 int psegs = bio_phys_segments(ns->queue, bio);
697 int result; 735 int result;
698 736
699 if ((bio->bi_rw & REQ_FLUSH) && psegs) { 737 if ((bio->bi_rw & REQ_FLUSH) && psegs)
700 result = nvme_submit_flush_data(nvmeq, ns); 738 return nvme_split_flush_data(nvmeq, bio);
701 if (result)
702 return result;
703 }
704 739
705 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); 740 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
706 if (!iod) 741 if (!iod)
@@ -795,7 +830,6 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
795 int result = -EBUSY; 830 int result = -EBUSY;
796 831
797 if (!nvmeq) { 832 if (!nvmeq) {
798 put_nvmeq(NULL);
799 bio_endio(bio, -EIO); 833 bio_endio(bio, -EIO);
800 return; 834 return;
801 } 835 }
@@ -870,10 +904,8 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
870 struct nvme_queue *nvmeq; 904 struct nvme_queue *nvmeq;
871 905
872 nvmeq = lock_nvmeq(dev, q_idx); 906 nvmeq = lock_nvmeq(dev, q_idx);
873 if (!nvmeq) { 907 if (!nvmeq)
874 unlock_nvmeq(nvmeq);
875 return -ENODEV; 908 return -ENODEV;
876 }
877 909
878 cmdinfo.task = current; 910 cmdinfo.task = current;
879 cmdinfo.status = -EINTR; 911 cmdinfo.status = -EINTR;
@@ -898,9 +930,10 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
898 930
899 if (cmdinfo.status == -EINTR) { 931 if (cmdinfo.status == -EINTR) {
900 nvmeq = lock_nvmeq(dev, q_idx); 932 nvmeq = lock_nvmeq(dev, q_idx);
901 if (nvmeq) 933 if (nvmeq) {
902 nvme_abort_command(nvmeq, cmdid); 934 nvme_abort_command(nvmeq, cmdid);
903 unlock_nvmeq(nvmeq); 935 unlock_nvmeq(nvmeq);
936 }
904 return -EINTR; 937 return -EINTR;
905 } 938 }
906 939
@@ -1358,7 +1391,8 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
1358 return -EINTR; 1391 return -EINTR;
1359 if (time_after(jiffies, timeout)) { 1392 if (time_after(jiffies, timeout)) {
1360 dev_err(&dev->pci_dev->dev, 1393 dev_err(&dev->pci_dev->dev,
1361 "Device not ready; aborting initialisation\n"); 1394 "Device not ready; aborting %s\n", enabled ?
1395 "initialisation" : "reset");
1362 return -ENODEV; 1396 return -ENODEV;
1363 } 1397 }
1364 } 1398 }
@@ -1481,7 +1515,11 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1481 goto put_pages; 1515 goto put_pages;
1482 } 1516 }
1483 1517
1518 err = -ENOMEM;
1484 iod = nvme_alloc_iod(count, length, GFP_KERNEL); 1519 iod = nvme_alloc_iod(count, length, GFP_KERNEL);
1520 if (!iod)
1521 goto put_pages;
1522
1485 sg = iod->sg; 1523 sg = iod->sg;
1486 sg_init_table(sg, count); 1524 sg_init_table(sg, count);
1487 for (i = 0; i < count; i++) { 1525 for (i = 0; i < count; i++) {
@@ -1494,7 +1532,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1494 sg_mark_end(&sg[i - 1]); 1532 sg_mark_end(&sg[i - 1]);
1495 iod->nents = count; 1533 iod->nents = count;
1496 1534
1497 err = -ENOMEM;
1498 nents = dma_map_sg(&dev->pci_dev->dev, sg, count, 1535 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1499 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1536 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1500 if (!nents) 1537 if (!nents)
@@ -1894,6 +1931,8 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1894 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1931 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1895 if (dev->max_hw_sectors) 1932 if (dev->max_hw_sectors)
1896 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 1933 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
1934 if (dev->vwc & NVME_CTRL_VWC_PRESENT)
1935 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1897 1936
1898 disk->major = nvme_major; 1937 disk->major = nvme_major;
1899 disk->first_minor = 0; 1938 disk->first_minor = 0;
@@ -2062,8 +2101,13 @@ static int set_queue_count(struct nvme_dev *dev, int count)
2062 2101
2063 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, 2102 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
2064 &result); 2103 &result);
2065 if (status) 2104 if (status < 0)
2066 return status < 0 ? -EIO : -EBUSY; 2105 return status;
2106 if (status > 0) {
2107 dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
2108 status);
2109 return -EBUSY;
2110 }
2067 return min(result & 0xffff, result >> 16) + 1; 2111 return min(result & 0xffff, result >> 16) + 1;
2068} 2112}
2069 2113
@@ -2072,14 +2116,25 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
2072 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 2116 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
2073} 2117}
2074 2118
2119static void nvme_cpu_workfn(struct work_struct *work)
2120{
2121 struct nvme_dev *dev = container_of(work, struct nvme_dev, cpu_work);
2122 if (dev->initialized)
2123 nvme_assign_io_queues(dev);
2124}
2125
2075static int nvme_cpu_notify(struct notifier_block *self, 2126static int nvme_cpu_notify(struct notifier_block *self,
2076 unsigned long action, void *hcpu) 2127 unsigned long action, void *hcpu)
2077{ 2128{
2078 struct nvme_dev *dev = container_of(self, struct nvme_dev, nb); 2129 struct nvme_dev *dev;
2130
2079 switch (action) { 2131 switch (action) {
2080 case CPU_ONLINE: 2132 case CPU_ONLINE:
2081 case CPU_DEAD: 2133 case CPU_DEAD:
2082 nvme_assign_io_queues(dev); 2134 spin_lock(&dev_list_lock);
2135 list_for_each_entry(dev, &dev_list, node)
2136 schedule_work(&dev->cpu_work);
2137 spin_unlock(&dev_list_lock);
2083 break; 2138 break;
2084 } 2139 }
2085 return NOTIFY_OK; 2140 return NOTIFY_OK;
@@ -2148,11 +2203,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2148 nvme_free_queues(dev, nr_io_queues + 1); 2203 nvme_free_queues(dev, nr_io_queues + 1);
2149 nvme_assign_io_queues(dev); 2204 nvme_assign_io_queues(dev);
2150 2205
2151 dev->nb.notifier_call = &nvme_cpu_notify;
2152 result = register_hotcpu_notifier(&dev->nb);
2153 if (result)
2154 goto free_queues;
2155
2156 return 0; 2206 return 0;
2157 2207
2158 free_queues: 2208 free_queues:
@@ -2184,6 +2234,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2184 2234
2185 res = nvme_identify(dev, 0, 1, dma_addr); 2235 res = nvme_identify(dev, 0, 1, dma_addr);
2186 if (res) { 2236 if (res) {
2237 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
2187 res = -EIO; 2238 res = -EIO;
2188 goto out; 2239 goto out;
2189 } 2240 }
@@ -2192,6 +2243,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2192 nn = le32_to_cpup(&ctrl->nn); 2243 nn = le32_to_cpup(&ctrl->nn);
2193 dev->oncs = le16_to_cpup(&ctrl->oncs); 2244 dev->oncs = le16_to_cpup(&ctrl->oncs);
2194 dev->abort_limit = ctrl->acl + 1; 2245 dev->abort_limit = ctrl->acl + 1;
2246 dev->vwc = ctrl->vwc;
2195 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 2247 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
2196 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 2248 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
2197 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 2249 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2450,8 +2502,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2450 int i; 2502 int i;
2451 2503
2452 dev->initialized = 0; 2504 dev->initialized = 0;
2453 unregister_hotcpu_notifier(&dev->nb);
2454
2455 nvme_dev_list_remove(dev); 2505 nvme_dev_list_remove(dev);
2456 2506
2457 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { 2507 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
@@ -2722,6 +2772,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2722 INIT_LIST_HEAD(&dev->namespaces); 2772 INIT_LIST_HEAD(&dev->namespaces);
2723 dev->reset_workfn = nvme_reset_failed_dev; 2773 dev->reset_workfn = nvme_reset_failed_dev;
2724 INIT_WORK(&dev->reset_work, nvme_reset_workfn); 2774 INIT_WORK(&dev->reset_work, nvme_reset_workfn);
2775 INIT_WORK(&dev->cpu_work, nvme_cpu_workfn);
2725 dev->pci_dev = pdev; 2776 dev->pci_dev = pdev;
2726 pci_set_drvdata(pdev, dev); 2777 pci_set_drvdata(pdev, dev);
2727 result = nvme_set_instance(dev); 2778 result = nvme_set_instance(dev);
@@ -2801,6 +2852,7 @@ static void nvme_remove(struct pci_dev *pdev)
2801 2852
2802 pci_set_drvdata(pdev, NULL); 2853 pci_set_drvdata(pdev, NULL);
2803 flush_work(&dev->reset_work); 2854 flush_work(&dev->reset_work);
2855 flush_work(&dev->cpu_work);
2804 misc_deregister(&dev->miscdev); 2856 misc_deregister(&dev->miscdev);
2805 nvme_dev_remove(dev); 2857 nvme_dev_remove(dev);
2806 nvme_dev_shutdown(dev); 2858 nvme_dev_shutdown(dev);
@@ -2889,11 +2941,18 @@ static int __init nvme_init(void)
2889 else if (result > 0) 2941 else if (result > 0)
2890 nvme_major = result; 2942 nvme_major = result;
2891 2943
2892 result = pci_register_driver(&nvme_driver); 2944 nvme_nb.notifier_call = &nvme_cpu_notify;
2945 result = register_hotcpu_notifier(&nvme_nb);
2893 if (result) 2946 if (result)
2894 goto unregister_blkdev; 2947 goto unregister_blkdev;
2948
2949 result = pci_register_driver(&nvme_driver);
2950 if (result)
2951 goto unregister_hotcpu;
2895 return 0; 2952 return 0;
2896 2953
2954 unregister_hotcpu:
2955 unregister_hotcpu_notifier(&nvme_nb);
2897 unregister_blkdev: 2956 unregister_blkdev:
2898 unregister_blkdev(nvme_major, "nvme"); 2957 unregister_blkdev(nvme_major, "nvme");
2899 kill_workq: 2958 kill_workq:
@@ -2904,9 +2963,11 @@ static int __init nvme_init(void)
2904static void __exit nvme_exit(void) 2963static void __exit nvme_exit(void)
2905{ 2964{
2906 pci_unregister_driver(&nvme_driver); 2965 pci_unregister_driver(&nvme_driver);
2966 unregister_hotcpu_notifier(&nvme_nb);
2907 unregister_blkdev(nvme_major, "nvme"); 2967 unregister_blkdev(nvme_major, "nvme");
2908 destroy_workqueue(nvme_workq); 2968 destroy_workqueue(nvme_workq);
2909 BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); 2969 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
2970 _nvme_check_size();
2910} 2971}
2911 2972
2912MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 2973MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
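Note: nvme_init() now brings up the block major, the CPU hotplug notifier and the PCI driver in that order and unwinds them in reverse on failure. A compact sketch of that goto-based unwind shape, with hypothetical setup and teardown helpers standing in for register_blkdev(), register_hotcpu_notifier() and pci_register_driver():

/* Sketch of the reverse-order unwind in nvme_init(); the setup and
 * teardown helpers are hypothetical stand-ins for the real
 * registration calls. */
#include <stdio.h>

static int setup_blkdev(void)	{ puts("blkdev registered");   return 0; }
static int setup_notifier(void)	{ puts("notifier registered"); return 0; }
static int setup_driver(void)	{ puts("driver registered");   return -1; } /* simulate failure */

static void teardown_notifier(void)	{ puts("notifier unregistered"); }
static void teardown_blkdev(void)	{ puts("blkdev unregistered"); }

static int module_init_sketch(void)
{
	int err;

	err = setup_blkdev();
	if (err)
		return err;

	err = setup_notifier();
	if (err)
		goto unregister_blkdev;

	err = setup_driver();
	if (err)
		goto unregister_notifier;	/* undo only what already succeeded */

	return 0;

unregister_notifier:
	teardown_notifier();
unregister_blkdev:
	teardown_blkdev();
	return err;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}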
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 2c3f5be06da1..a4cd6d691c63 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * NVM Express device driver 2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation. 3 * Copyright (c) 2011-2014, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */ 13 */
18 14
19/* 15/*
@@ -243,8 +239,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
243#define READ_CAP_16_RESP_SIZE 32 239#define READ_CAP_16_RESP_SIZE 32
244 240
245/* NVMe Namespace and Command Defines */ 241/* NVMe Namespace and Command Defines */
246#define NVME_GET_SMART_LOG_PAGE 0x02
247#define NVME_GET_FEAT_TEMP_THRESH 0x04
248#define BYTES_TO_DWORDS 4 242#define BYTES_TO_DWORDS 4
249#define NVME_MAX_FIRMWARE_SLOT 7 243#define NVME_MAX_FIRMWARE_SLOT 7
250 244
@@ -686,6 +680,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
686 u8 resp_data_format = 0x02; 680 u8 resp_data_format = 0x02;
687 u8 protect; 681 u8 protect;
688 u8 cmdque = 0x01 << 1; 682 u8 cmdque = 0x01 << 1;
683 u8 fw_offset = sizeof(dev->firmware_rev);
689 684
690 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), 685 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
691 &dma_addr, GFP_KERNEL); 686 &dma_addr, GFP_KERNEL);
@@ -721,7 +716,11 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
721 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ 716 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
722 strncpy(&inq_response[8], "NVMe ", 8); 717 strncpy(&inq_response[8], "NVMe ", 8);
723 strncpy(&inq_response[16], dev->model, 16); 718 strncpy(&inq_response[16], dev->model, 16);
724 strncpy(&inq_response[32], dev->firmware_rev, 4); 719
720 while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
721 fw_offset--;
722 fw_offset -= 4;
723 strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
725 724
726 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 725 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
727 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 726 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -1018,8 +1017,8 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
1018 c.common.opcode = nvme_admin_get_log_page; 1017 c.common.opcode = nvme_admin_get_log_page;
1019 c.common.nsid = cpu_to_le32(0xFFFFFFFF); 1018 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1020 c.common.prp1 = cpu_to_le64(dma_addr); 1019 c.common.prp1 = cpu_to_le64(dma_addr);
1021 c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / 1020 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1022 BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); 1021 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1023 res = nvme_submit_admin_cmd(dev, &c, NULL); 1022 res = nvme_submit_admin_cmd(dev, &c, NULL);
1024 if (res != NVME_SC_SUCCESS) { 1023 if (res != NVME_SC_SUCCESS) {
1025 temp_c = LOG_TEMP_UNKNOWN; 1024 temp_c = LOG_TEMP_UNKNOWN;
@@ -1086,8 +1085,8 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1086 c.common.opcode = nvme_admin_get_log_page; 1085 c.common.opcode = nvme_admin_get_log_page;
1087 c.common.nsid = cpu_to_le32(0xFFFFFFFF); 1086 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1088 c.common.prp1 = cpu_to_le64(dma_addr); 1087 c.common.prp1 = cpu_to_le64(dma_addr);
1089 c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / 1088 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1090 BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); 1089 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1091 res = nvme_submit_admin_cmd(dev, &c, NULL); 1090 res = nvme_submit_admin_cmd(dev, &c, NULL);
1092 if (res != NVME_SC_SUCCESS) { 1091 if (res != NVME_SC_SUCCESS) {
1093 temp_c_cur = LOG_TEMP_UNKNOWN; 1092 temp_c_cur = LOG_TEMP_UNKNOWN;
@@ -1477,7 +1476,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1477 goto out_dma; 1476 goto out_dma;
1478 } 1477 }
1479 id_ctrl = mem; 1478 id_ctrl = mem;
1480 lowest_pow_st = id_ctrl->npss - 1; 1479 lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
1481 1480
1482 switch (pc) { 1481 switch (pc) {
1483 case NVME_POWER_STATE_START_VALID: 1482 case NVME_POWER_STATE_START_VALID:
@@ -1494,20 +1493,19 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1494 break; 1493 break;
1495 case NVME_POWER_STATE_IDLE: 1494 case NVME_POWER_STATE_IDLE:
1496 /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ 1495 /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
1497 /* min of desired state and (lps-1) because lps is STOP */
1498 if (pcmod == 0x0) 1496 if (pcmod == 0x0)
1499 ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1)); 1497 ps_desired = POWER_STATE_1;
1500 else if (pcmod == 0x1) 1498 else if (pcmod == 0x1)
1501 ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1)); 1499 ps_desired = POWER_STATE_2;
1502 else if (pcmod == 0x2) 1500 else if (pcmod == 0x2)
1503 ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1)); 1501 ps_desired = POWER_STATE_3;
1504 break; 1502 break;
1505 case NVME_POWER_STATE_STANDBY: 1503 case NVME_POWER_STATE_STANDBY:
1506 /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */ 1504 /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
1507 if (pcmod == 0x0) 1505 if (pcmod == 0x0)
1508 ps_desired = max(0, (lowest_pow_st - 2)); 1506 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
1509 else if (pcmod == 0x1) 1507 else if (pcmod == 0x1)
1510 ps_desired = max(0, (lowest_pow_st - 1)); 1508 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
1511 break; 1509 break;
1512 case NVME_POWER_STATE_LU_CONTROL: 1510 case NVME_POWER_STATE_LU_CONTROL:
1513 default: 1511 default:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4c95b503b09e..bbeb404b3a07 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -541,7 +541,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
541 return -ENOENT; 541 return -ENOENT;
542 542
543 (void) get_device(&rbd_dev->dev); 543 (void) get_device(&rbd_dev->dev);
544 set_device_ro(bdev, rbd_dev->mapping.read_only);
545 544
546 return 0; 545 return 0;
547} 546}
@@ -559,10 +558,76 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
559 put_device(&rbd_dev->dev); 558 put_device(&rbd_dev->dev);
560} 559}
561 560
561static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
562{
563 int ret = 0;
564 int val;
565 bool ro;
566 bool ro_changed = false;
567
568 /* get_user() may sleep, so call it before taking rbd_dev->lock */
569 if (get_user(val, (int __user *)(arg)))
570 return -EFAULT;
571
572 ro = val ? true : false;
 573	/* Snapshots don't allow writes */
574 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
575 return -EROFS;
576
577 spin_lock_irq(&rbd_dev->lock);
 578	/* prevent others from opening this device */
579 if (rbd_dev->open_count > 1) {
580 ret = -EBUSY;
581 goto out;
582 }
583
584 if (rbd_dev->mapping.read_only != ro) {
585 rbd_dev->mapping.read_only = ro;
586 ro_changed = true;
587 }
588
589out:
590 spin_unlock_irq(&rbd_dev->lock);
591 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
592 if (ret == 0 && ro_changed)
593 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
594
595 return ret;
596}
597
598static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
599 unsigned int cmd, unsigned long arg)
600{
601 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
602 int ret = 0;
603
604 switch (cmd) {
605 case BLKROSET:
606 ret = rbd_ioctl_set_ro(rbd_dev, arg);
607 break;
608 default:
609 ret = -ENOTTY;
610 }
611
612 return ret;
613}
614
615#ifdef CONFIG_COMPAT
616static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
617 unsigned int cmd, unsigned long arg)
618{
619 return rbd_ioctl(bdev, mode, cmd, arg);
620}
621#endif /* CONFIG_COMPAT */
622
562static const struct block_device_operations rbd_bd_ops = { 623static const struct block_device_operations rbd_bd_ops = {
563 .owner = THIS_MODULE, 624 .owner = THIS_MODULE,
564 .open = rbd_open, 625 .open = rbd_open,
565 .release = rbd_release, 626 .release = rbd_release,
627 .ioctl = rbd_ioctl,
628#ifdef CONFIG_COMPAT
629 .compat_ioctl = rbd_compat_ioctl,
630#endif
566}; 631};
567 632
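
With rbd_ioctl() wired into rbd_bd_ops above, the mapping's read-only flag can now be flipped from userspace through the standard BLKROSET block ioctl (the same call blockdev --setro issues). A minimal, self-contained sketch of that usage; the /dev/rbd0 path is only an example:

/* Minimal userspace sketch: toggle an rbd mapping read-only via BLKROSET. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKROSET, BLKROGET */

int main(void)
{
	int ro = 1;             /* 1 = read-only, 0 = read-write */
	int fd = open("/dev/rbd0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, BLKROSET, &ro) < 0)       /* lands in rbd_ioctl_set_ro() */
		perror("BLKROSET");

	if (ioctl(fd, BLKROGET, &ro) == 0)      /* read the flag back */
		printf("read-only: %d\n", ro);

	close(fd);
	return 0;
}
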
568/* 633/*
@@ -1382,6 +1447,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1382 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1447 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1383} 1448}
1384 1449
1450static void rbd_img_request_get(struct rbd_img_request *img_request)
1451{
1452 dout("%s: img %p (was %d)\n", __func__, img_request,
1453 atomic_read(&img_request->kref.refcount));
1454 kref_get(&img_request->kref);
1455}
1456
1385static bool img_request_child_test(struct rbd_img_request *img_request); 1457static bool img_request_child_test(struct rbd_img_request *img_request);
1386static void rbd_parent_request_destroy(struct kref *kref); 1458static void rbd_parent_request_destroy(struct kref *kref);
1387static void rbd_img_request_destroy(struct kref *kref); 1459static void rbd_img_request_destroy(struct kref *kref);
@@ -2142,6 +2214,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2142 img_request->next_completion = which; 2214 img_request->next_completion = which;
2143out: 2215out:
2144 spin_unlock_irq(&img_request->completion_lock); 2216 spin_unlock_irq(&img_request->completion_lock);
2217 rbd_img_request_put(img_request);
2145 2218
2146 if (!more) 2219 if (!more)
2147 rbd_img_request_complete(img_request); 2220 rbd_img_request_complete(img_request);
@@ -2242,6 +2315,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2242 goto out_unwind; 2315 goto out_unwind;
2243 obj_request->osd_req = osd_req; 2316 obj_request->osd_req = osd_req;
2244 obj_request->callback = rbd_img_obj_callback; 2317 obj_request->callback = rbd_img_obj_callback;
2318 rbd_img_request_get(img_request);
2245 2319
2246 if (write_request) { 2320 if (write_request) {
2247 osd_req_op_alloc_hint_init(osd_req, which, 2321 osd_req_op_alloc_hint_init(osd_req, which,
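
The two hunks above take a reference on the image request for every object request queued (rbd_img_request_get() in rbd_img_request_fill()) and drop it in rbd_img_obj_callback(), so the completion path can no longer race with the final put of the image request. The same pairing, reduced to a generic kref sketch; the names here are illustrative, not rbd's:

/* Generic "one reference per in-flight child" kref sketch. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct parent_req {
	struct kref kref;
	/* ... shared per-request state ... */
};

static void parent_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct parent_req, kref));
}

/* Take one reference per child handed to an asynchronous completion path. */
static void parent_req_get(struct parent_req *req)
{
	kref_get(&req->kref);           /* pairs with the put in child_done() */
}

/* Completion callback for one child; may run after the submitter returned. */
static void child_done(struct parent_req *req)
{
	/* ... mark this child complete under req's lock ... */
	kref_put(&req->kref, parent_req_release);
}
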
@@ -2872,56 +2946,55 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2872} 2946}
2873 2947
2874/* 2948/*
2875 * Request sync osd watch/unwatch. The value of "start" determines 2949 * Initiate a watch request, synchronously.
2876 * whether a watch request is being initiated or torn down.
2877 */ 2950 */
2878static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) 2951static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2879{ 2952{
2880 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2953 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2881 struct rbd_obj_request *obj_request; 2954 struct rbd_obj_request *obj_request;
2882 int ret; 2955 int ret;
2883 2956
2884 rbd_assert(start ^ !!rbd_dev->watch_event); 2957 rbd_assert(!rbd_dev->watch_event);
2885 rbd_assert(start ^ !!rbd_dev->watch_request); 2958 rbd_assert(!rbd_dev->watch_request);
2886 2959
2887 if (start) { 2960 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2888 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev, 2961 &rbd_dev->watch_event);
2889 &rbd_dev->watch_event); 2962 if (ret < 0)
2890 if (ret < 0) 2963 return ret;
2891 return ret; 2964
2892 rbd_assert(rbd_dev->watch_event != NULL); 2965 rbd_assert(rbd_dev->watch_event);
2893 }
2894 2966
2895 ret = -ENOMEM;
2896 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, 2967 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2897 OBJ_REQUEST_NODATA); 2968 OBJ_REQUEST_NODATA);
2898 if (!obj_request) 2969 if (!obj_request) {
2970 ret = -ENOMEM;
2899 goto out_cancel; 2971 goto out_cancel;
2972 }
2900 2973
2901 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1, 2974 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2902 obj_request); 2975 obj_request);
2903 if (!obj_request->osd_req) 2976 if (!obj_request->osd_req) {
2904 goto out_cancel; 2977 ret = -ENOMEM;
2978 goto out_put;
2979 }
2905 2980
2906 if (start) 2981 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2907 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2908 else
2909 ceph_osdc_unregister_linger_request(osdc,
2910 rbd_dev->watch_request->osd_req);
2911 2982
2912 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, 2983 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2913 rbd_dev->watch_event->cookie, 0, start ? 1 : 0); 2984 rbd_dev->watch_event->cookie, 0, 1);
2914 rbd_osd_req_format_write(obj_request); 2985 rbd_osd_req_format_write(obj_request);
2915 2986
2916 ret = rbd_obj_request_submit(osdc, obj_request); 2987 ret = rbd_obj_request_submit(osdc, obj_request);
2917 if (ret) 2988 if (ret)
2918 goto out_cancel; 2989 goto out_linger;
2990
2919 ret = rbd_obj_request_wait(obj_request); 2991 ret = rbd_obj_request_wait(obj_request);
2920 if (ret) 2992 if (ret)
2921 goto out_cancel; 2993 goto out_linger;
2994
2922 ret = obj_request->result; 2995 ret = obj_request->result;
2923 if (ret) 2996 if (ret)
2924 goto out_cancel; 2997 goto out_linger;
2925 2998
2926 /* 2999 /*
2927 * A watch request is set to linger, so the underlying osd 3000 * A watch request is set to linger, so the underlying osd
@@ -2931,36 +3004,84 @@ static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2931 * it. We'll drop that reference (below) after we've 3004 * it. We'll drop that reference (below) after we've
2932 * unregistered it. 3005 * unregistered it.
2933 */ 3006 */
2934 if (start) { 3007 rbd_dev->watch_request = obj_request;
2935 rbd_dev->watch_request = obj_request;
2936 3008
2937 return 0; 3009 return 0;
3010
3011out_linger:
3012 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
3013out_put:
3014 rbd_obj_request_put(obj_request);
3015out_cancel:
3016 ceph_osdc_cancel_event(rbd_dev->watch_event);
3017 rbd_dev->watch_event = NULL;
3018
3019 return ret;
3020}
3021
3022/*
3023 * Tear down a watch request, synchronously.
3024 */
3025static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3026{
3027 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3028 struct rbd_obj_request *obj_request;
3029 int ret;
3030
3031 rbd_assert(rbd_dev->watch_event);
3032 rbd_assert(rbd_dev->watch_request);
3033
3034 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3035 OBJ_REQUEST_NODATA);
3036 if (!obj_request) {
3037 ret = -ENOMEM;
3038 goto out_cancel;
3039 }
3040
3041 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3042 obj_request);
3043 if (!obj_request->osd_req) {
3044 ret = -ENOMEM;
3045 goto out_put;
2938 } 3046 }
2939 3047
3048 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3049 rbd_dev->watch_event->cookie, 0, 0);
3050 rbd_osd_req_format_write(obj_request);
3051
3052 ret = rbd_obj_request_submit(osdc, obj_request);
3053 if (ret)
3054 goto out_put;
3055
3056 ret = rbd_obj_request_wait(obj_request);
3057 if (ret)
3058 goto out_put;
3059
3060 ret = obj_request->result;
3061 if (ret)
3062 goto out_put;
3063
2940 /* We have successfully torn down the watch request */ 3064 /* We have successfully torn down the watch request */
2941 3065
3066 ceph_osdc_unregister_linger_request(osdc,
3067 rbd_dev->watch_request->osd_req);
2942 rbd_obj_request_put(rbd_dev->watch_request); 3068 rbd_obj_request_put(rbd_dev->watch_request);
2943 rbd_dev->watch_request = NULL; 3069 rbd_dev->watch_request = NULL;
3070
3071out_put:
3072 rbd_obj_request_put(obj_request);
2944out_cancel: 3073out_cancel:
2945 /* Cancel the event if we're tearing down, or on error */
2946 ceph_osdc_cancel_event(rbd_dev->watch_event); 3074 ceph_osdc_cancel_event(rbd_dev->watch_event);
2947 rbd_dev->watch_event = NULL; 3075 rbd_dev->watch_event = NULL;
2948 if (obj_request)
2949 rbd_obj_request_put(obj_request);
2950 3076
2951 return ret; 3077 return ret;
2952} 3078}
2953 3079
2954static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2955{
2956 return __rbd_dev_header_watch_sync(rbd_dev, true);
2957}
2958
2959static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) 3080static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
2960{ 3081{
2961 int ret; 3082 int ret;
2962 3083
2963 ret = __rbd_dev_header_watch_sync(rbd_dev, false); 3084 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
2964 if (ret) { 3085 if (ret) {
2965 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n", 3086 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
2966 ret); 3087 ret);
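
Splitting the combined start/tear-down helper into rbd_dev_header_watch_sync() and __rbd_dev_header_unwatch_sync() lets each path unwind exactly what it has set up, using the usual goto ladder ordered in reverse of acquisition (out_linger, out_put, out_cancel above). A stripped-down userspace illustration of that idiom, with malloc() standing in for the real resources:

/* Generic goto-unwind sketch; the "resources" are placeholders. */
#include <stdlib.h>

static int setup_watch_like_thing(void)
{
	void *event, *request, *linger;
	int ret = -1;

	event = malloc(16);              /* e.g. create event */
	if (!event)
		return ret;

	request = malloc(16);            /* e.g. create request */
	if (!request)
		goto out_cancel;

	linger = malloc(16);             /* e.g. register linger */
	if (!linger)
		goto out_put;

	/* success; real code keeps these live, freed here to avoid a leak */
	free(linger);
	free(request);
	free(event);
	return 0;

out_put:
	free(request);                   /* undo in reverse order */
out_cancel:
	free(event);
	return ret;
}

int main(void)
{
	return setup_watch_like_thing();
}
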
@@ -3058,7 +3179,6 @@ static void rbd_request_fn(struct request_queue *q)
3058 __releases(q->queue_lock) __acquires(q->queue_lock) 3179 __releases(q->queue_lock) __acquires(q->queue_lock)
3059{ 3180{
3060 struct rbd_device *rbd_dev = q->queuedata; 3181 struct rbd_device *rbd_dev = q->queuedata;
3061 bool read_only = rbd_dev->mapping.read_only;
3062 struct request *rq; 3182 struct request *rq;
3063 int result; 3183 int result;
3064 3184
@@ -3094,7 +3214,7 @@ static void rbd_request_fn(struct request_queue *q)
3094 3214
3095 if (write_request) { 3215 if (write_request) {
3096 result = -EROFS; 3216 result = -EROFS;
3097 if (read_only) 3217 if (rbd_dev->mapping.read_only)
3098 goto end_request; 3218 goto end_request;
3099 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP); 3219 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3100 } 3220 }
@@ -4683,6 +4803,38 @@ out_err:
4683} 4803}
4684 4804
4685/* 4805/*
4806 * Return pool id (>= 0) or a negative error code.
4807 */
4808static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4809{
4810 u64 newest_epoch;
4811 unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
4812 int tries = 0;
4813 int ret;
4814
4815again:
4816 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4817 if (ret == -ENOENT && tries++ < 1) {
4818 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4819 &newest_epoch);
4820 if (ret < 0)
4821 return ret;
4822
4823 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4824 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4825 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4826 newest_epoch, timeout);
4827 goto again;
4828 } else {
4829 /* the osdmap we have is new enough */
4830 return -ENOENT;
4831 }
4832 }
4833
4834 return ret;
4835}
4836
4837/*
4686 * An rbd format 2 image has a unique identifier, distinct from the 4838 * An rbd format 2 image has a unique identifier, distinct from the
4687 * name given to it by the user. Internally, that identifier is 4839 * name given to it by the user. Internally, that identifier is
4688 * what's used to specify the names of objects related to the image. 4840 * what's used to specify the names of objects related to the image.
@@ -4752,7 +4904,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4752 4904
4753 image_id = ceph_extract_encoded_string(&p, p + ret, 4905 image_id = ceph_extract_encoded_string(&p, p + ret,
4754 NULL, GFP_NOIO); 4906 NULL, GFP_NOIO);
4755 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0; 4907 ret = PTR_ERR_OR_ZERO(image_id);
4756 if (!ret) 4908 if (!ret)
4757 rbd_dev->image_format = 2; 4909 rbd_dev->image_format = 2;
4758 } else { 4910 } else {
@@ -4907,6 +5059,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4907 if (ret) 5059 if (ret)
4908 goto err_out_disk; 5060 goto err_out_disk;
4909 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 5061 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5062 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
4910 5063
4911 ret = rbd_bus_add_dev(rbd_dev); 5064 ret = rbd_bus_add_dev(rbd_dev);
4912 if (ret) 5065 if (ret)
@@ -5053,7 +5206,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5053 struct rbd_options *rbd_opts = NULL; 5206 struct rbd_options *rbd_opts = NULL;
5054 struct rbd_spec *spec = NULL; 5207 struct rbd_spec *spec = NULL;
5055 struct rbd_client *rbdc; 5208 struct rbd_client *rbdc;
5056 struct ceph_osd_client *osdc;
5057 bool read_only; 5209 bool read_only;
5058 int rc = -ENOMEM; 5210 int rc = -ENOMEM;
5059 5211
@@ -5075,8 +5227,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5075 } 5227 }
5076 5228
5077 /* pick the pool */ 5229 /* pick the pool */
5078 osdc = &rbdc->client->osdc; 5230 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5079 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5080 if (rc < 0) 5231 if (rc < 0)
5081 goto err_out_client; 5232 goto err_out_client;
5082 spec->pool_id = (u64)rc; 5233 spec->pool_id = (u64)rc;
@@ -5387,6 +5538,7 @@ err_out_slab:
5387 5538
5388static void __exit rbd_exit(void) 5539static void __exit rbd_exit(void)
5389{ 5540{
5541 ida_destroy(&rbd_dev_id_ida);
5390 rbd_sysfs_cleanup(); 5542 rbd_sysfs_cleanup();
5391 if (single_major) 5543 if (single_major)
5392 unregister_blkdev(rbd_major, RBD_DRV_NAME); 5544 unregister_blkdev(rbd_major, RBD_DRV_NAME);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a118ec1650fa..1f37d9870e7a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -45,7 +45,7 @@ config OMAP_INTERCONNECT
45 45
46config ARM_CCI 46config ARM_CCI
47 bool "ARM CCI driver support" 47 bool "ARM CCI driver support"
48 depends on ARM 48 depends on ARM && OF && CPU_V7
49 help 49 help
50 Driver supporting the CCI cache coherent interconnect for ARM 50 Driver supporting the CCI cache coherent interconnect for ARM
51 platforms. 51 platforms.
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ad71ef2cd59..0a7ac0a7b252 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -980,7 +980,6 @@ static void push_to_pool(struct work_struct *work)
980static size_t account(struct entropy_store *r, size_t nbytes, int min, 980static size_t account(struct entropy_store *r, size_t nbytes, int min,
981 int reserved) 981 int reserved)
982{ 982{
983 int have_bytes;
984 int entropy_count, orig; 983 int entropy_count, orig;
985 size_t ibytes; 984 size_t ibytes;
986 985
@@ -989,17 +988,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
989 /* Can we pull enough? */ 988 /* Can we pull enough? */
990retry: 989retry:
991 entropy_count = orig = ACCESS_ONCE(r->entropy_count); 990 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
992 have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
993 ibytes = nbytes; 991 ibytes = nbytes;
994 /* If limited, never pull more than available */ 992 /* If limited, never pull more than available */
995 if (r->limit) 993 if (r->limit) {
996 ibytes = min_t(size_t, ibytes, have_bytes - reserved); 994 int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
995
996 if ((have_bytes -= reserved) < 0)
997 have_bytes = 0;
998 ibytes = min_t(size_t, ibytes, have_bytes);
999 }
997 if (ibytes < min) 1000 if (ibytes < min)
998 ibytes = 0; 1001 ibytes = 0;
999 if (have_bytes >= ibytes + reserved) 1002 if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
1000 entropy_count -= ibytes << (ENTROPY_SHIFT + 3); 1003 entropy_count = 0;
1001 else
1002 entropy_count = reserved << (ENTROPY_SHIFT + 3);
1003 1004
1004 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1005 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1005 goto retry; 1006 goto retry;
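
account() keeps r->entropy_count in fractional bits (ENTROPY_SHIFT fractional bits per bit of entropy), so shifting right by ENTROPY_SHIFT + 3 converts the counter to whole bytes. The rewrite above clamps both the reserved subtraction and the final debit at zero instead of juggling a separate have_bytes value. A userspace model of the new arithmetic; ENTROPY_SHIFT == 3 is an assumption matching this version of drivers/char/random.c:

/* Userspace model of the rewritten account() arithmetic. */
#include <stddef.h>
#include <stdio.h>

#define ENTROPY_SHIFT 3         /* entropy is counted in 1/8th-bit units */

static size_t account_model(int *entropy_count, size_t nbytes,
			    size_t min, size_t reserved, int limited)
{
	size_t ibytes = nbytes;

	if (limited) {
		/* whole bytes in the pool, minus the reserved amount */
		int have_bytes = *entropy_count >> (ENTROPY_SHIFT + 3);

		if ((have_bytes -= (int)reserved) < 0)
			have_bytes = 0;
		if (ibytes > (size_t)have_bytes)
			ibytes = (size_t)have_bytes;
	}
	if (ibytes < min)
		ibytes = 0;

	/* debit what is handed out, never letting the counter go negative */
	if ((*entropy_count -= (int)(ibytes << (ENTROPY_SHIFT + 3))) < 0)
		*entropy_count = 0;

	return ibytes;
}

int main(void)
{
	int count = 48 << (ENTROPY_SHIFT + 3);  /* pool credited with 48 bytes */
	size_t got = account_model(&count, 64, 8, 16, 1);

	/* limited pool: 48 - 16 reserved = 32 bytes handed out, 16 left */
	printf("got %zu bytes, %d left\n", got, count >> (ENTROPY_SHIFT + 3));
	return 0;
}
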
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index b5bac917612c..762fd64dbd1f 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -3,3 +3,7 @@
3# 3#
4 4
5obj-y += clk-sunxi.o clk-factors.o 5obj-y += clk-sunxi.o clk-factors.o
6obj-y += clk-a10-hosc.o
7obj-y += clk-a20-gmac.o
8
9obj-$(CONFIG_MFD_SUN6I_PRCM) += clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o
diff --git a/drivers/clk/sunxi/clk-a10-hosc.c b/drivers/clk/sunxi/clk-a10-hosc.c
new file mode 100644
index 000000000000..0481d5d673d6
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a10-hosc.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2013 Emilio López
3 *
4 * Emilio López <emilio@elopez.com.ar>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
21
22#define SUNXI_OSC24M_GATE 0
23
24static DEFINE_SPINLOCK(hosc_lock);
25
26static void __init sun4i_osc_clk_setup(struct device_node *node)
27{
28 struct clk *clk;
29 struct clk_fixed_rate *fixed;
30 struct clk_gate *gate;
31 const char *clk_name = node->name;
32 u32 rate;
33
34 if (of_property_read_u32(node, "clock-frequency", &rate))
35 return;
36
37 /* allocate fixed-rate and gate clock structs */
38 fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
39 if (!fixed)
40 return;
41 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
42 if (!gate)
43 goto err_free_fixed;
44
45 of_property_read_string(node, "clock-output-names", &clk_name);
46
47 /* set up gate and fixed rate properties */
48 gate->reg = of_iomap(node, 0);
49 gate->bit_idx = SUNXI_OSC24M_GATE;
50 gate->lock = &hosc_lock;
51 fixed->fixed_rate = rate;
52
53 clk = clk_register_composite(NULL, clk_name,
54 NULL, 0,
55 NULL, NULL,
56 &fixed->hw, &clk_fixed_rate_ops,
57 &gate->hw, &clk_gate_ops,
58 CLK_IS_ROOT);
59
60 if (IS_ERR(clk))
61 goto err_free_gate;
62
63 of_clk_add_provider(node, of_clk_src_simple_get, clk);
64 clk_register_clkdev(clk, clk_name, NULL);
65
66 return;
67
68err_free_gate:
69 kfree(gate);
70err_free_fixed:
71 kfree(fixed);
72}
73CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
new file mode 100644
index 000000000000..633ddc4389ef
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -0,0 +1,119 @@
1/*
2 * Copyright 2013 Emilio López
3 * Emilio López <emilio@elopez.com.ar>
4 *
5 * Copyright 2013 Chen-Yu Tsai
6 * Chen-Yu Tsai <wens@csie.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/clk-provider.h>
20#include <linux/clkdev.h>
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/slab.h>
24
25static DEFINE_SPINLOCK(gmac_lock);
26
27/**
28 * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
29 *
30 * This clock looks something like this
31 * ________________________
32 * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
33 * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
34 * Ext. 125MHz RGMII TX clk >--|__divider__/ |
35 * |________________________|
36 *
37 * The external 125 MHz reference is optional, i.e. GMAC can use its
38 * internal TX clock just fine. The A31 GMAC clock module does not have
39 * the divider controls for the external reference.
40 *
 41 * To keep it simple, let the GMAC use the MII TX clock in MII mode, and
 42 * its internal TX clock in GMII and RGMII modes. The GMAC driver should
43 * select the appropriate source and gate/ungate the output to the PHY.
44 *
45 * Only the GMAC should use this clock. Altering the clock so that it doesn't
46 * match the GMAC's operation parameters will result in the GMAC not being
47 * able to send traffic out. The GMAC driver should set the clock rate and
48 * enable/disable this clock to configure the required state. The clock
49 * driver then responds by auto-reparenting the clock.
50 */
51
52#define SUN7I_A20_GMAC_GPIT 2
53#define SUN7I_A20_GMAC_MASK 0x3
54#define SUN7I_A20_GMAC_PARENTS 2
55
56static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
57{
58 struct clk *clk;
59 struct clk_mux *mux;
60 struct clk_gate *gate;
61 const char *clk_name = node->name;
62 const char *parents[SUN7I_A20_GMAC_PARENTS];
63 void *reg;
64
65 if (of_property_read_string(node, "clock-output-names", &clk_name))
66 return;
67
68 /* allocate mux and gate clock structs */
69 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
70 if (!mux)
71 return;
72
73 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
74 if (!gate)
75 goto free_mux;
76
77 /* gmac clock requires exactly 2 parents */
78 parents[0] = of_clk_get_parent_name(node, 0);
79 parents[1] = of_clk_get_parent_name(node, 1);
80 if (!parents[0] || !parents[1])
81 goto free_gate;
82
83 reg = of_iomap(node, 0);
84 if (!reg)
85 goto free_gate;
86
87 /* set up gate and fixed rate properties */
88 gate->reg = reg;
89 gate->bit_idx = SUN7I_A20_GMAC_GPIT;
90 gate->lock = &gmac_lock;
91 mux->reg = reg;
92 mux->mask = SUN7I_A20_GMAC_MASK;
93 mux->flags = CLK_MUX_INDEX_BIT;
94 mux->lock = &gmac_lock;
95
96 clk = clk_register_composite(NULL, clk_name,
97 parents, SUN7I_A20_GMAC_PARENTS,
98 &mux->hw, &clk_mux_ops,
99 NULL, NULL,
100 &gate->hw, &clk_gate_ops,
101 0);
102
103 if (IS_ERR(clk))
104 goto iounmap_reg;
105
106 of_clk_add_provider(node, of_clk_src_simple_get, clk);
107 clk_register_clkdev(clk, clk_name, NULL);
108
109 return;
110
111iounmap_reg:
112 iounmap(reg);
113free_gate:
114 kfree(gate);
115free_mux:
116 kfree(mux);
117}
118CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
119 sun7i_a20_gmac_clk_setup);
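
The block comment in clk-a20-gmac.c describes a contract where the GMAC driver selects the TX clock source purely by setting a rate and then gating the output towards the PHY. A hypothetical consumer-side sketch of that contract; the connection id "allwinner_gmac_tx" and the 25/125 MHz rates are illustrative assumptions, not taken from this patch:

/* Hypothetical GMAC-side sketch of the clock contract described above. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy.h>

static int gmac_tx_clk_setup(struct device *dev, phy_interface_t mode)
{
	struct clk *tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
	unsigned long rate;
	int ret;

	if (IS_ERR(tx_clk))
		return PTR_ERR(tx_clk);

	/* MII keeps the PHY's TX clock; (R)GMII uses the internal 125 MHz */
	rate = (mode == PHY_INTERFACE_MODE_MII) ? 25000000 : 125000000;

	ret = clk_set_rate(tx_clk, rate);       /* clock driver reparents to match */
	if (ret)
		return ret;

	return clk_prepare_enable(tx_clk);      /* ungate the output to the PHY */
}
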
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
new file mode 100644
index 000000000000..44cd27c5c401
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * Allwinner A31 APB0 clock gates driver
8 *
9 */
10
11#include <linux/clk-provider.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#define SUN6I_APB0_GATES_MAX_SIZE 32
17
18static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
19{
20 struct device_node *np = pdev->dev.of_node;
21 struct clk_onecell_data *clk_data;
22 const char *clk_parent;
23 const char *clk_name;
24 struct resource *r;
25 void __iomem *reg;
26 int gate_id;
27 int ngates;
28 int i;
29
30 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
31 reg = devm_ioremap_resource(&pdev->dev, r);
 32	if (IS_ERR(reg))
33 return PTR_ERR(reg);
34
35 clk_parent = of_clk_get_parent_name(np, 0);
36 if (!clk_parent)
37 return -EINVAL;
38
39 ngates = of_property_count_strings(np, "clock-output-names");
40 if (ngates < 0)
41 return ngates;
42
43 if (!ngates || ngates > SUN6I_APB0_GATES_MAX_SIZE)
44 return -EINVAL;
45
46 clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
47 GFP_KERNEL);
48 if (!clk_data)
49 return -ENOMEM;
50
51 clk_data->clks = devm_kzalloc(&pdev->dev,
52 SUN6I_APB0_GATES_MAX_SIZE *
53 sizeof(struct clk *),
54 GFP_KERNEL);
55 if (!clk_data->clks)
56 return -ENOMEM;
57
58 for (i = 0; i < ngates; i++) {
59 of_property_read_string_index(np, "clock-output-names",
60 i, &clk_name);
61
62 gate_id = i;
63 of_property_read_u32_index(np, "clock-indices", i, &gate_id);
64
65 WARN_ON(gate_id >= SUN6I_APB0_GATES_MAX_SIZE);
66 if (gate_id >= SUN6I_APB0_GATES_MAX_SIZE)
67 continue;
68
69 clk_data->clks[gate_id] = clk_register_gate(&pdev->dev,
70 clk_name,
71 clk_parent, 0,
72 reg, gate_id,
73 0, NULL);
74 WARN_ON(IS_ERR(clk_data->clks[gate_id]));
75 }
76
77 clk_data->clk_num = ngates;
78
79 return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
80}
81
82const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
83 { .compatible = "allwinner,sun6i-a31-apb0-gates-clk" },
84 { /* sentinel */ }
85};
86
87static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
88 .driver = {
89 .name = "sun6i-a31-apb0-gates-clk",
90 .owner = THIS_MODULE,
91 .of_match_table = sun6i_a31_apb0_gates_clk_dt_ids,
92 },
93 .probe = sun6i_a31_apb0_gates_clk_probe,
94};
95module_platform_driver(sun6i_a31_apb0_gates_clk_driver);
96
97MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
98MODULE_DESCRIPTION("Allwinner A31 APB0 gate clocks driver");
99MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
new file mode 100644
index 000000000000..11f17c34c2ae
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * Allwinner A31 APB0 clock driver
8 *
9 */
10
11#include <linux/clk-provider.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16/*
17 * The APB0 clk has a configurable divisor.
18 *
19 * We must use a clk_div_table and not a regular power of 2
20 * divisor here, because the first 2 values divide the clock
21 * by 2.
22 */
23static const struct clk_div_table sun6i_a31_apb0_divs[] = {
24 { .val = 0, .div = 2, },
25 { .val = 1, .div = 2, },
26 { .val = 2, .div = 4, },
27 { .val = 3, .div = 8, },
28 { /* sentinel */ },
29};
30
31static int sun6i_a31_apb0_clk_probe(struct platform_device *pdev)
32{
33 struct device_node *np = pdev->dev.of_node;
34 const char *clk_name = np->name;
35 const char *clk_parent;
36 struct resource *r;
37 void __iomem *reg;
38 struct clk *clk;
39
40 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
41 reg = devm_ioremap_resource(&pdev->dev, r);
42 if (IS_ERR(reg))
43 return PTR_ERR(reg);
44
45 clk_parent = of_clk_get_parent_name(np, 0);
46 if (!clk_parent)
47 return -EINVAL;
48
49 of_property_read_string(np, "clock-output-names", &clk_name);
50
51 clk = clk_register_divider_table(&pdev->dev, clk_name, clk_parent,
52 0, reg, 0, 2, 0, sun6i_a31_apb0_divs,
53 NULL);
54 if (IS_ERR(clk))
55 return PTR_ERR(clk);
56
57 return of_clk_add_provider(np, of_clk_src_simple_get, clk);
58}
59
60const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
61 { .compatible = "allwinner,sun6i-a31-apb0-clk" },
62 { /* sentinel */ }
63};
64
65static struct platform_driver sun6i_a31_apb0_clk_driver = {
66 .driver = {
67 .name = "sun6i-a31-apb0-clk",
68 .owner = THIS_MODULE,
69 .of_match_table = sun6i_a31_apb0_clk_dt_ids,
70 },
71 .probe = sun6i_a31_apb0_clk_probe,
72};
73module_platform_driver(sun6i_a31_apb0_clk_driver);
74
75MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
76MODULE_DESCRIPTION("Allwinner A31 APB0 clock Driver");
77MODULE_LICENSE("GPL v2");
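
The divider table above exists because the two-bit field is not a plain power-of-two divider: values 0 and 1 both divide by 2, where a power-of-two divider would map them to /1 and /2. With a 24 MHz parent, an assumption used only for illustration, the four encodings give:

/* Worked check of the divider table above, assuming a 24 MHz parent. */
#include <stdio.h>

int main(void)
{
	static const unsigned int divs[] = { 2, 2, 4, 8 };      /* val 0..3 */
	const unsigned long parent = 24000000;

	for (unsigned int val = 0; val < 4; val++)
		printf("val %u -> %lu Hz\n", val, parent / divs[val]);
	/* 12 MHz, 12 MHz, 6 MHz, 3 MHz; values 0 and 1 both mean "/2" */
	return 0;
}
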
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
new file mode 100644
index 000000000000..f73cc051f0dd
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * Allwinner A31 AR100 clock driver
8 *
9 */
10
11#include <linux/clk-provider.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#define SUN6I_AR100_MAX_PARENTS 4
17#define SUN6I_AR100_SHIFT_MASK 0x3
18#define SUN6I_AR100_SHIFT_MAX SUN6I_AR100_SHIFT_MASK
19#define SUN6I_AR100_SHIFT_SHIFT 4
20#define SUN6I_AR100_DIV_MASK 0x1f
21#define SUN6I_AR100_DIV_MAX (SUN6I_AR100_DIV_MASK + 1)
22#define SUN6I_AR100_DIV_SHIFT 8
23#define SUN6I_AR100_MUX_MASK 0x3
24#define SUN6I_AR100_MUX_SHIFT 16
25
26struct ar100_clk {
27 struct clk_hw hw;
28 void __iomem *reg;
29};
30
31static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
32{
33 return container_of(hw, struct ar100_clk, hw);
34}
35
36static unsigned long ar100_recalc_rate(struct clk_hw *hw,
37 unsigned long parent_rate)
38{
39 struct ar100_clk *clk = to_ar100_clk(hw);
40 u32 val = readl(clk->reg);
41 int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
42 int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
43
44 return (parent_rate >> shift) / (div + 1);
45}
46
47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
48 unsigned long *best_parent_rate,
49 struct clk **best_parent_clk)
50{
51 int nparents = __clk_get_num_parents(hw->clk);
52 long best_rate = -EINVAL;
53 int i;
54
55 *best_parent_clk = NULL;
56
57 for (i = 0; i < nparents; i++) {
58 unsigned long parent_rate;
59 unsigned long tmp_rate;
60 struct clk *parent;
61 unsigned long div;
62 int shift;
63
64 parent = clk_get_parent_by_index(hw->clk, i);
65 parent_rate = __clk_get_rate(parent);
66 div = DIV_ROUND_UP(parent_rate, rate);
67
68 /*
69 * The AR100 clk contains 2 divisors:
70 * - one power of 2 divisor
71 * - one regular divisor
72 *
73 * First check if we can safely shift (or divide by a power
74 * of 2) without losing precision on the requested rate.
75 */
76 shift = ffs(div) - 1;
77 if (shift > SUN6I_AR100_SHIFT_MAX)
78 shift = SUN6I_AR100_SHIFT_MAX;
79
80 div >>= shift;
81
82 /*
83 * Then if the divisor is still bigger than what the HW
84 * actually supports, use a bigger shift (or power of 2
85 * divider) value and accept to lose some precision.
86 */
87 while (div > SUN6I_AR100_DIV_MAX) {
88 shift++;
89 div >>= 1;
90 if (shift > SUN6I_AR100_SHIFT_MAX)
91 break;
92 }
93
94 /*
95 * If the shift value (or power of 2 divider) is bigger
 96		 * than what the HW actually supports, skip this parent.
97 */
98 if (shift > SUN6I_AR100_SHIFT_MAX)
99 continue;
100
101 tmp_rate = (parent_rate >> shift) / div;
102 if (!*best_parent_clk || tmp_rate > best_rate) {
103 *best_parent_clk = parent;
104 *best_parent_rate = parent_rate;
105 best_rate = tmp_rate;
106 }
107 }
108
109 return best_rate;
110}
111
112static int ar100_set_parent(struct clk_hw *hw, u8 index)
113{
114 struct ar100_clk *clk = to_ar100_clk(hw);
115 u32 val = readl(clk->reg);
116
117 if (index >= SUN6I_AR100_MAX_PARENTS)
118 return -EINVAL;
119
120 val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
121 val |= (index << SUN6I_AR100_MUX_SHIFT);
122 writel(val, clk->reg);
123
124 return 0;
125}
126
127static u8 ar100_get_parent(struct clk_hw *hw)
128{
129 struct ar100_clk *clk = to_ar100_clk(hw);
130 return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
131 SUN6I_AR100_MUX_MASK;
132}
133
134static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
135 unsigned long parent_rate)
136{
137 unsigned long div = parent_rate / rate;
138 struct ar100_clk *clk = to_ar100_clk(hw);
139 u32 val = readl(clk->reg);
140 int shift;
141
142 if (parent_rate % rate)
143 return -EINVAL;
144
145 shift = ffs(div) - 1;
146 if (shift > SUN6I_AR100_SHIFT_MAX)
147 shift = SUN6I_AR100_SHIFT_MAX;
148
149 div >>= shift;
150
151 if (div > SUN6I_AR100_DIV_MAX)
152 return -EINVAL;
153
154 val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
155 (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
156 val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
157 (div << SUN6I_AR100_DIV_SHIFT);
158 writel(val, clk->reg);
159
160 return 0;
161}
162
163struct clk_ops ar100_ops = {
164 .recalc_rate = ar100_recalc_rate,
165 .determine_rate = ar100_determine_rate,
166 .set_parent = ar100_set_parent,
167 .get_parent = ar100_get_parent,
168 .set_rate = ar100_set_rate,
169};
170
171static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
172{
173 const char *parents[SUN6I_AR100_MAX_PARENTS];
174 struct device_node *np = pdev->dev.of_node;
175 const char *clk_name = np->name;
176 struct clk_init_data init;
177 struct ar100_clk *ar100;
178 struct resource *r;
179 struct clk *clk;
180 int nparents;
181 int i;
182
183 ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
184 if (!ar100)
185 return -ENOMEM;
186
187 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
188 ar100->reg = devm_ioremap_resource(&pdev->dev, r);
189 if (IS_ERR(ar100->reg))
190 return PTR_ERR(ar100->reg);
191
192 nparents = of_clk_get_parent_count(np);
193 if (nparents > SUN6I_AR100_MAX_PARENTS)
194 nparents = SUN6I_AR100_MAX_PARENTS;
195
196 for (i = 0; i < nparents; i++)
197 parents[i] = of_clk_get_parent_name(np, i);
198
199 of_property_read_string(np, "clock-output-names", &clk_name);
200
201 init.name = clk_name;
202 init.ops = &ar100_ops;
203 init.parent_names = parents;
204 init.num_parents = nparents;
205 init.flags = 0;
206
207 ar100->hw.init = &init;
208
209 clk = clk_register(&pdev->dev, &ar100->hw);
210 if (IS_ERR(clk))
211 return PTR_ERR(clk);
212
213 return of_clk_add_provider(np, of_clk_src_simple_get, clk);
214}
215
216const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
217 { .compatible = "allwinner,sun6i-a31-ar100-clk" },
218 { /* sentinel */ }
219};
220
221static struct platform_driver sun6i_a31_ar100_clk_driver = {
222 .driver = {
223 .name = "sun6i-a31-ar100-clk",
224 .owner = THIS_MODULE,
225 .of_match_table = sun6i_a31_ar100_clk_dt_ids,
226 },
227 .probe = sun6i_a31_ar100_clk_probe,
228};
229module_platform_driver(sun6i_a31_ar100_clk_driver);
230
231MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
232MODULE_DESCRIPTION("Allwinner A31 AR100 clock Driver");
233MODULE_LICENSE("GPL v2");
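
ar100_determine_rate() above folds as much of the requested divisor as possible into the power-of-two pre-divider (at most >> 3) and leaves the remainder for the 5-bit linear divider, accepting precision loss only when the linear divider would otherwise overflow. A standalone model of that factor selection, with illustrative rates:

/* Standalone model of the AR100 factor selection; rates are illustrative. */
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define SHIFT_MAX 3             /* power-of-two pre-divider: >> 0 .. >> 3 */
#define DIV_MAX   32            /* 5-bit linear divider: /1 .. /32 */

static unsigned long ar100_pick(unsigned long parent, unsigned long target,
				int *shift_out, unsigned long *div_out)
{
	unsigned long div = (parent + target - 1) / target;  /* DIV_ROUND_UP */
	int shift = ffs((int)div) - 1;   /* largest exact power-of-two factor */

	if (shift > SHIFT_MAX)
		shift = SHIFT_MAX;
	div >>= shift;

	while (div > DIV_MAX) {          /* trade precision for range */
		shift++;
		div >>= 1;
		if (shift > SHIFT_MAX) {
			*shift_out = 0;
			*div_out = 0;
			return 0;        /* this parent cannot reach the rate */
		}
	}

	*shift_out = shift;
	*div_out = div;
	return (parent >> shift) / div;
}

int main(void)
{
	int shift;
	unsigned long div;
	unsigned long rate = ar100_pick(24000000, 1500000, &shift, &div);

	/* div 16 -> shift 3 (capped), div 2 -> (24 MHz >> 3) / 2 = 1.5 MHz */
	printf("shift %d, div %lu -> %lu Hz\n", shift, div, rate);
	return 0;
}
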
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 426483422d3d..fb2ce8440f0e 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -28,63 +28,6 @@ static DEFINE_SPINLOCK(clk_lock);
28#define SUNXI_MAX_PARENTS 5 28#define SUNXI_MAX_PARENTS 5
29 29
30/** 30/**
31 * sun4i_osc_clk_setup() - Setup function for gatable oscillator
32 */
33
34#define SUNXI_OSC24M_GATE 0
35
36static void __init sun4i_osc_clk_setup(struct device_node *node)
37{
38 struct clk *clk;
39 struct clk_fixed_rate *fixed;
40 struct clk_gate *gate;
41 const char *clk_name = node->name;
42 u32 rate;
43
44 if (of_property_read_u32(node, "clock-frequency", &rate))
45 return;
46
47 /* allocate fixed-rate and gate clock structs */
48 fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
49 if (!fixed)
50 return;
51 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
52 if (!gate)
53 goto err_free_fixed;
54
55 of_property_read_string(node, "clock-output-names", &clk_name);
56
57 /* set up gate and fixed rate properties */
58 gate->reg = of_iomap(node, 0);
59 gate->bit_idx = SUNXI_OSC24M_GATE;
60 gate->lock = &clk_lock;
61 fixed->fixed_rate = rate;
62
63 clk = clk_register_composite(NULL, clk_name,
64 NULL, 0,
65 NULL, NULL,
66 &fixed->hw, &clk_fixed_rate_ops,
67 &gate->hw, &clk_gate_ops,
68 CLK_IS_ROOT);
69
70 if (IS_ERR(clk))
71 goto err_free_gate;
72
73 of_clk_add_provider(node, of_clk_src_simple_get, clk);
74 clk_register_clkdev(clk, clk_name, NULL);
75
76 return;
77
78err_free_gate:
79 kfree(gate);
80err_free_fixed:
81 kfree(fixed);
82}
83CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
84
85
86
87/**
88 * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1 31 * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
89 * PLL1 rate is calculated as follows 32 * PLL1 rate is calculated as follows
90 * rate = (parent_rate * n * (k + 1) >> p) / (m + 1); 33 * rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
@@ -408,104 +351,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
408 *p = calcp; 351 *p = calcp;
409} 352}
410 353
411
412
413/**
414 * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
415 *
416 * This clock looks something like this
417 * ________________________
418 * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
419 * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
420 * Ext. 125MHz RGMII TX clk >--|__divider__/ |
421 * |________________________|
422 *
423 * The external 125 MHz reference is optional, i.e. GMAC can use its
424 * internal TX clock just fine. The A31 GMAC clock module does not have
425 * the divider controls for the external reference.
426 *
427 * To keep it simple, let the GMAC use either the MII TX clock for MII mode,
428 * and its internal TX clock for GMII and RGMII modes. The GMAC driver should
429 * select the appropriate source and gate/ungate the output to the PHY.
430 *
431 * Only the GMAC should use this clock. Altering the clock so that it doesn't
432 * match the GMAC's operation parameters will result in the GMAC not being
433 * able to send traffic out. The GMAC driver should set the clock rate and
434 * enable/disable this clock to configure the required state. The clock
435 * driver then responds by auto-reparenting the clock.
436 */
437
438#define SUN7I_A20_GMAC_GPIT 2
439#define SUN7I_A20_GMAC_MASK 0x3
440#define SUN7I_A20_GMAC_PARENTS 2
441
442static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
443{
444 struct clk *clk;
445 struct clk_mux *mux;
446 struct clk_gate *gate;
447 const char *clk_name = node->name;
448 const char *parents[SUN7I_A20_GMAC_PARENTS];
449 void *reg;
450
451 if (of_property_read_string(node, "clock-output-names", &clk_name))
452 return;
453
454 /* allocate mux and gate clock structs */
455 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
456 if (!mux)
457 return;
458
459 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
460 if (!gate)
461 goto free_mux;
462
463 /* gmac clock requires exactly 2 parents */
464 parents[0] = of_clk_get_parent_name(node, 0);
465 parents[1] = of_clk_get_parent_name(node, 1);
466 if (!parents[0] || !parents[1])
467 goto free_gate;
468
469 reg = of_iomap(node, 0);
470 if (!reg)
471 goto free_gate;
472
473 /* set up gate and fixed rate properties */
474 gate->reg = reg;
475 gate->bit_idx = SUN7I_A20_GMAC_GPIT;
476 gate->lock = &clk_lock;
477 mux->reg = reg;
478 mux->mask = SUN7I_A20_GMAC_MASK;
479 mux->flags = CLK_MUX_INDEX_BIT;
480 mux->lock = &clk_lock;
481
482 clk = clk_register_composite(NULL, clk_name,
483 parents, SUN7I_A20_GMAC_PARENTS,
484 &mux->hw, &clk_mux_ops,
485 NULL, NULL,
486 &gate->hw, &clk_gate_ops,
487 0);
488
489 if (IS_ERR(clk))
490 goto iounmap_reg;
491
492 of_clk_add_provider(node, of_clk_src_simple_get, clk);
493 clk_register_clkdev(clk, clk_name, NULL);
494
495 return;
496
497iounmap_reg:
498 iounmap(reg);
499free_gate:
500 kfree(gate);
501free_mux:
502 kfree(mux);
503}
504CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
505 sun7i_a20_gmac_clk_setup);
506
507
508
509/** 354/**
510 * clk_sunxi_mmc_phase_control() - configures MMC clock phase control 355 * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
511 */ 356 */
@@ -1009,6 +854,11 @@ static const struct gates_data sun5i_a13_usb_gates_data __initconst = {
1009 .reset_mask = 0x03, 854 .reset_mask = 0x03,
1010}; 855};
1011 856
857static const struct gates_data sun6i_a31_usb_gates_data __initconst = {
858 .mask = { BIT(18) | BIT(17) | BIT(16) | BIT(10) | BIT(9) | BIT(8) },
859 .reset_mask = BIT(2) | BIT(1) | BIT(0),
860};
861
1012static void __init sunxi_gates_clk_setup(struct device_node *node, 862static void __init sunxi_gates_clk_setup(struct device_node *node,
1013 struct gates_data *data) 863 struct gates_data *data)
1014{ 864{
@@ -1304,6 +1154,7 @@ static const struct of_device_id clk_gates_match[] __initconst = {
1304 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, 1154 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
1305 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,}, 1155 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
1306 {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,}, 1156 {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
1157 {.compatible = "allwinner,sun6i-a31-usb-clk", .data = &sun6i_a31_usb_gates_data,},
1307 {} 1158 {}
1308}; 1159};
1309 1160
@@ -1321,33 +1172,10 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
1321 } 1172 }
1322} 1173}
1323 1174
1324/** 1175static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
1325 * System clock protection
1326 *
1327 * By enabling these critical clocks, we prevent their accidental gating
1328 * by the framework
1329 */
1330static void __init sunxi_clock_protect(void)
1331{ 1176{
1332 struct clk *clk; 1177 unsigned int i;
1333
1334 /* memory bus clock - sun5i+ */
1335 clk = clk_get(NULL, "mbus");
1336 if (!IS_ERR(clk)) {
1337 clk_prepare_enable(clk);
1338 clk_put(clk);
1339 }
1340
1341 /* DDR clock - sun4i+ */
1342 clk = clk_get(NULL, "pll5_ddr");
1343 if (!IS_ERR(clk)) {
1344 clk_prepare_enable(clk);
1345 clk_put(clk);
1346 }
1347}
1348 1178
1349static void __init sunxi_init_clocks(struct device_node *np)
1350{
1351 /* Register factor clocks */ 1179 /* Register factor clocks */
1352 of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup); 1180 of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
1353 1181
@@ -1363,11 +1191,48 @@ static void __init sunxi_init_clocks(struct device_node *np)
1363 /* Register gate clocks */ 1191 /* Register gate clocks */
1364 of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup); 1192 of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
1365 1193
 1366	/* Enable core system clocks */ 1194	/* Protect the clocks that need to stay on */
1367 sunxi_clock_protect(); 1195 for (i = 0; i < nclocks; i++) {
1196 struct clk *clk = clk_get(NULL, clocks[i]);
1197
1198 if (!IS_ERR(clk))
1199 clk_prepare_enable(clk);
1200 }
1201}
1202
1203static const char *sun4i_a10_critical_clocks[] __initdata = {
1204 "pll5_ddr",
1205};
1206
1207static void __init sun4i_a10_init_clocks(struct device_node *node)
1208{
1209 sunxi_init_clocks(sun4i_a10_critical_clocks,
1210 ARRAY_SIZE(sun4i_a10_critical_clocks));
1211}
1212CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
1213
1214static const char *sun5i_critical_clocks[] __initdata = {
1215 "mbus",
1216 "pll5_ddr",
1217};
1218
1219static void __init sun5i_init_clocks(struct device_node *node)
1220{
1221 sunxi_init_clocks(sun5i_critical_clocks,
1222 ARRAY_SIZE(sun5i_critical_clocks));
1223}
1224CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
1225CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
1226CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
1227
1228static const char *sun6i_critical_clocks[] __initdata = {
1229 "cpu",
1230 "ahb1_sdram",
1231};
1232
1233static void __init sun6i_init_clocks(struct device_node *node)
1234{
1235 sunxi_init_clocks(sun6i_critical_clocks,
1236 ARRAY_SIZE(sun6i_critical_clocks));
1368} 1237}
1369CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks); 1238CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
1370CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
1371CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sunxi_init_clocks);
1372CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sunxi_init_clocks);
1373CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sunxi_init_clocks);
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 4319d4031aa3..ed4d0aaf8916 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -3,9 +3,11 @@ obj-y += clk.o autoidle.o clockdomain.o
3clk-common = dpll.o composite.o divider.o gate.o \ 3clk-common = dpll.o composite.o divider.o gate.o \
4 fixed-factor.o mux.o apll.o 4 fixed-factor.o mux.o apll.o
5obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o 5obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
6obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
6obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o 7obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o
7obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o 8obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
8obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o 9obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
9obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o 10obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
11 clk-dra7-atl.o
10obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o 12obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
11endif 13endif
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index b986f61f5a77..5428c9c547cd 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -221,3 +221,184 @@ cleanup:
221 kfree(init); 221 kfree(init);
222} 222}
223CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup); 223CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
224
225#define OMAP2_EN_APLL_LOCKED 0x3
226#define OMAP2_EN_APLL_STOPPED 0x0
227
228static int omap2_apll_is_enabled(struct clk_hw *hw)
229{
230 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
231 struct dpll_data *ad = clk->dpll_data;
232 u32 v;
233
234 v = ti_clk_ll_ops->clk_readl(ad->control_reg);
235 v &= ad->enable_mask;
236
237 v >>= __ffs(ad->enable_mask);
238
239 return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
240}
241
242static unsigned long omap2_apll_recalc(struct clk_hw *hw,
243 unsigned long parent_rate)
244{
245 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
246
247 if (omap2_apll_is_enabled(hw))
248 return clk->fixed_rate;
249
250 return 0;
251}
252
253static int omap2_apll_enable(struct clk_hw *hw)
254{
255 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
256 struct dpll_data *ad = clk->dpll_data;
257 u32 v;
258 int i = 0;
259
260 v = ti_clk_ll_ops->clk_readl(ad->control_reg);
261 v &= ~ad->enable_mask;
262 v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
263 ti_clk_ll_ops->clk_writel(v, ad->control_reg);
264
265 while (1) {
266 v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
267 if (v & ad->idlest_mask)
268 break;
269 if (i > MAX_APLL_WAIT_TRIES)
270 break;
271 i++;
272 udelay(1);
273 }
274
275 if (i == MAX_APLL_WAIT_TRIES) {
276 pr_warn("%s failed to transition to locked\n",
277 __clk_get_name(clk->hw.clk));
278 return -EBUSY;
279 }
280
281 return 0;
282}
283
284static void omap2_apll_disable(struct clk_hw *hw)
285{
286 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
287 struct dpll_data *ad = clk->dpll_data;
288 u32 v;
289
290 v = ti_clk_ll_ops->clk_readl(ad->control_reg);
291 v &= ~ad->enable_mask;
292 v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
293 ti_clk_ll_ops->clk_writel(v, ad->control_reg);
294}
295
296static struct clk_ops omap2_apll_ops = {
297 .enable = &omap2_apll_enable,
298 .disable = &omap2_apll_disable,
299 .is_enabled = &omap2_apll_is_enabled,
300 .recalc_rate = &omap2_apll_recalc,
301};
302
303static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
304{
305 struct dpll_data *ad = clk->dpll_data;
306 u32 v;
307
308 v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg);
309 v &= ~ad->autoidle_mask;
310 v |= val << __ffs(ad->autoidle_mask);
 311	ti_clk_ll_ops->clk_writel(v, ad->autoidle_reg);
312}
313
314#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
315#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0
316
317static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
318{
319 omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
320}
321
322static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
323{
324 omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
325}
326
327static struct clk_hw_omap_ops omap2_apll_hwops = {
328 .allow_idle = &omap2_apll_allow_idle,
329 .deny_idle = &omap2_apll_deny_idle,
330};
331
332static void __init of_omap2_apll_setup(struct device_node *node)
333{
334 struct dpll_data *ad = NULL;
335 struct clk_hw_omap *clk_hw = NULL;
336 struct clk_init_data *init = NULL;
337 struct clk *clk;
338 const char *parent_name;
339 u32 val;
340
 341	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
342 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
343 init = kzalloc(sizeof(*init), GFP_KERNEL);
344
345 if (!ad || !clk_hw || !init)
346 goto cleanup;
347
348 clk_hw->dpll_data = ad;
349 clk_hw->hw.init = init;
350 init->ops = &omap2_apll_ops;
351 init->name = node->name;
352 clk_hw->ops = &omap2_apll_hwops;
353
354 init->num_parents = of_clk_get_parent_count(node);
355 if (init->num_parents != 1) {
356 pr_err("%s must have one parent\n", node->name);
357 goto cleanup;
358 }
359
360 parent_name = of_clk_get_parent_name(node, 0);
361 init->parent_names = &parent_name;
362
363 if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
364 pr_err("%s missing clock-frequency\n", node->name);
365 goto cleanup;
366 }
367 clk_hw->fixed_rate = val;
368
369 if (of_property_read_u32(node, "ti,bit-shift", &val)) {
370 pr_err("%s missing bit-shift\n", node->name);
371 goto cleanup;
372 }
373
374 clk_hw->enable_bit = val;
375 ad->enable_mask = 0x3 << val;
376 ad->autoidle_mask = 0x3 << val;
377
378 if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
379 pr_err("%s missing idlest-shift\n", node->name);
380 goto cleanup;
381 }
382
383 ad->idlest_mask = 1 << val;
384
385 ad->control_reg = ti_clk_get_reg_addr(node, 0);
386 ad->autoidle_reg = ti_clk_get_reg_addr(node, 1);
387 ad->idlest_reg = ti_clk_get_reg_addr(node, 2);
388
389 if (!ad->control_reg || !ad->autoidle_reg || !ad->idlest_reg)
390 goto cleanup;
391
392 clk = clk_register(NULL, &clk_hw->hw);
393 if (!IS_ERR(clk)) {
394 of_clk_add_provider(node, of_clk_src_simple_get, clk);
395 kfree(init);
396 return;
397 }
398cleanup:
399 kfree(ad);
400 kfree(clk_hw);
401 kfree(init);
402}
403CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
404 of_omap2_apll_setup);
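
omap2_apll_enable() above writes the LOCKED request and then busy-polls the idlest bit for at most MAX_APLL_WAIT_TRIES iterations. In loops of that shape it is easy for the timeout test after the loop to drift out of sync with the loop bound; one compact way to keep the two together is a helper along these lines (a generic sketch, not the TI clk_ll_ops API):

/* Generic bounded-poll sketch; read_reg() and the delay are stand-ins. */
#include <stdbool.h>
#include <stdint.h>

#define MAX_WAIT_TRIES 1000000

static uint32_t read_reg(const volatile uint32_t *reg)
{
	return *reg;                    /* stand-in for the real accessor */
}

/* Returns true iff 'mask' became set in *reg within the retry budget. */
static bool poll_bit_set(const volatile uint32_t *reg, uint32_t mask)
{
	for (unsigned int i = 0; i < MAX_WAIT_TRIES; i++) {
		if (read_reg(reg) & mask)
			return true;
		/* a real caller would udelay()/cpu_relax() between reads */
	}
	return false;                   /* timed out: caller reports the error */
}

int main(void)
{
	volatile uint32_t fake_idlest = 0x1;    /* bit already set */

	return poll_bit_set(&fake_idlest, 0x1) ? 0 : 1;
}
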
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
new file mode 100644
index 000000000000..c808ab3d2bb2
--- /dev/null
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -0,0 +1,256 @@
1/*
2 * OMAP2 Clock init
3 *
4 * Copyright (C) 2013 Texas Instruments, Inc
5 * Tero Kristo (t-kristo@ti.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/list.h>
19#include <linux/clk-provider.h>
20#include <linux/clk/ti.h>
21
22static struct ti_dt_clk omap2xxx_clks[] = {
23 DT_CLK(NULL, "func_32k_ck", "func_32k_ck"),
24 DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"),
25 DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
26 DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
27 DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
28 DT_CLK(NULL, "virt_26m_ck", "virt_26m_ck"),
29 DT_CLK(NULL, "aplls_clkin_ck", "aplls_clkin_ck"),
30 DT_CLK(NULL, "aplls_clkin_x2_ck", "aplls_clkin_x2_ck"),
31 DT_CLK(NULL, "osc_ck", "osc_ck"),
32 DT_CLK(NULL, "sys_ck", "sys_ck"),
33 DT_CLK(NULL, "alt_ck", "alt_ck"),
34 DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
35 DT_CLK(NULL, "dpll_ck", "dpll_ck"),
36 DT_CLK(NULL, "apll96_ck", "apll96_ck"),
37 DT_CLK(NULL, "apll54_ck", "apll54_ck"),
38 DT_CLK(NULL, "func_54m_ck", "func_54m_ck"),
39 DT_CLK(NULL, "core_ck", "core_ck"),
40 DT_CLK(NULL, "func_96m_ck", "func_96m_ck"),
41 DT_CLK(NULL, "func_48m_ck", "func_48m_ck"),
42 DT_CLK(NULL, "func_12m_ck", "func_12m_ck"),
43 DT_CLK(NULL, "sys_clkout_src", "sys_clkout_src"),
44 DT_CLK(NULL, "sys_clkout", "sys_clkout"),
45 DT_CLK(NULL, "emul_ck", "emul_ck"),
46 DT_CLK(NULL, "mpu_ck", "mpu_ck"),
47 DT_CLK(NULL, "dsp_fck", "dsp_fck"),
48 DT_CLK(NULL, "gfx_3d_fck", "gfx_3d_fck"),
49 DT_CLK(NULL, "gfx_2d_fck", "gfx_2d_fck"),
50 DT_CLK(NULL, "gfx_ick", "gfx_ick"),
51 DT_CLK("omapdss_dss", "ick", "dss_ick"),
52 DT_CLK(NULL, "dss_ick", "dss_ick"),
53 DT_CLK(NULL, "dss1_fck", "dss1_fck"),
54 DT_CLK(NULL, "dss2_fck", "dss2_fck"),
55 DT_CLK(NULL, "dss_54m_fck", "dss_54m_fck"),
56 DT_CLK(NULL, "core_l3_ck", "core_l3_ck"),
57 DT_CLK(NULL, "ssi_fck", "ssi_ssr_sst_fck"),
58 DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
59 DT_CLK(NULL, "l4_ck", "l4_ck"),
60 DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
61 DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
62 DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
63 DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
64 DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
65 DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
66 DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
67 DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
68 DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
69 DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
70 DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
71 DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
72 DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
73 DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
74 DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
75 DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
76 DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
77 DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
78 DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
79 DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
80 DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
81 DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
82 DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
83 DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
84 DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
85 DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
86 DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
87 DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
88 DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
89 DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"),
90 DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
91 DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
92 DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
93 DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
94 DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
95 DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
96 DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
97 DT_CLK(NULL, "uart1_ick", "uart1_ick"),
98 DT_CLK(NULL, "uart1_fck", "uart1_fck"),
99 DT_CLK(NULL, "uart2_ick", "uart2_ick"),
100 DT_CLK(NULL, "uart2_fck", "uart2_fck"),
101 DT_CLK(NULL, "uart3_ick", "uart3_ick"),
102 DT_CLK(NULL, "uart3_fck", "uart3_fck"),
103 DT_CLK(NULL, "gpios_ick", "gpios_ick"),
104 DT_CLK(NULL, "gpios_fck", "gpios_fck"),
105 DT_CLK("omap_wdt", "ick", "mpu_wdt_ick"),
106 DT_CLK(NULL, "mpu_wdt_ick", "mpu_wdt_ick"),
107 DT_CLK(NULL, "mpu_wdt_fck", "mpu_wdt_fck"),
108 DT_CLK(NULL, "sync_32k_ick", "sync_32k_ick"),
109 DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
110 DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
111 DT_CLK("omap24xxcam", "fck", "cam_fck"),
112 DT_CLK(NULL, "cam_fck", "cam_fck"),
113 DT_CLK("omap24xxcam", "ick", "cam_ick"),
114 DT_CLK(NULL, "cam_ick", "cam_ick"),
115 DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
116 DT_CLK(NULL, "wdt4_ick", "wdt4_ick"),
117 DT_CLK(NULL, "wdt4_fck", "wdt4_fck"),
118 DT_CLK(NULL, "mspro_ick", "mspro_ick"),
119 DT_CLK(NULL, "mspro_fck", "mspro_fck"),
120 DT_CLK(NULL, "fac_ick", "fac_ick"),
121 DT_CLK(NULL, "fac_fck", "fac_fck"),
122 DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
123 DT_CLK(NULL, "hdq_ick", "hdq_ick"),
124 DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
125 DT_CLK(NULL, "hdq_fck", "hdq_fck"),
126 DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
127 DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
128 DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
129 DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
130 DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
131 DT_CLK(NULL, "sdma_fck", "sdma_fck"),
132 DT_CLK(NULL, "sdma_ick", "sdma_ick"),
133 DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
134 DT_CLK(NULL, "des_ick", "des_ick"),
135 DT_CLK("omap-sham", "ick", "sha_ick"),
136 DT_CLK(NULL, "sha_ick", "sha_ick"),
137 DT_CLK("omap_rng", "ick", "rng_ick"),
138 DT_CLK(NULL, "rng_ick", "rng_ick"),
139 DT_CLK("omap-aes", "ick", "aes_ick"),
140 DT_CLK(NULL, "aes_ick", "aes_ick"),
141 DT_CLK(NULL, "pka_ick", "pka_ick"),
142 DT_CLK(NULL, "usb_fck", "usb_fck"),
143 DT_CLK(NULL, "timer_32k_ck", "func_32k_ck"),
144 DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
145 DT_CLK(NULL, "timer_ext_ck", "alt_ck"),
146 { .node_name = NULL },
147};
148
149static struct ti_dt_clk omap2420_clks[] = {
150 DT_CLK(NULL, "sys_clkout2_src", "sys_clkout2_src"),
151 DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
152 DT_CLK(NULL, "dsp_ick", "dsp_ick"),
153 DT_CLK(NULL, "iva1_ifck", "iva1_ifck"),
154 DT_CLK(NULL, "iva1_mpu_int_ifck", "iva1_mpu_int_ifck"),
155 DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
156 DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
157 DT_CLK("mmci-omap.0", "ick", "mmc_ick"),
158 DT_CLK(NULL, "mmc_ick", "mmc_ick"),
159 DT_CLK("mmci-omap.0", "fck", "mmc_fck"),
160 DT_CLK(NULL, "mmc_fck", "mmc_fck"),
161 DT_CLK(NULL, "eac_ick", "eac_ick"),
162 DT_CLK(NULL, "eac_fck", "eac_fck"),
163 DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
164 DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
165 DT_CLK(NULL, "vlynq_ick", "vlynq_ick"),
166 DT_CLK(NULL, "vlynq_fck", "vlynq_fck"),
167 DT_CLK("musb-hdrc", "fck", "osc_ck"),
168 { .node_name = NULL },
169};
170
171static struct ti_dt_clk omap2430_clks[] = {
172 DT_CLK("twl", "fck", "osc_ck"),
173 DT_CLK(NULL, "iva2_1_ick", "iva2_1_ick"),
174 DT_CLK(NULL, "mdm_ick", "mdm_ick"),
175 DT_CLK(NULL, "mdm_osc_ck", "mdm_osc_ck"),
176 DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
177 DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
178 DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
179 DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
180 DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"),
181 DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
182 DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
183 DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
184 DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
185 DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
186 DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
187 DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
188 DT_CLK(NULL, "icr_ick", "icr_ick"),
189 DT_CLK(NULL, "i2chs1_fck", "i2chs1_fck"),
190 DT_CLK(NULL, "i2chs2_fck", "i2chs2_fck"),
191 DT_CLK("musb-omap2430", "ick", "usbhs_ick"),
192 DT_CLK(NULL, "usbhs_ick", "usbhs_ick"),
193 DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
194 DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
195 DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
196 DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
197 DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
198 DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
199 DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
200 DT_CLK(NULL, "gpio5_fck", "gpio5_fck"),
201 DT_CLK(NULL, "mdm_intc_ick", "mdm_intc_ick"),
202 DT_CLK("omap_hsmmc.0", "mmchsdb_fck", "mmchsdb1_fck"),
203 DT_CLK(NULL, "mmchsdb1_fck", "mmchsdb1_fck"),
204 DT_CLK("omap_hsmmc.1", "mmchsdb_fck", "mmchsdb2_fck"),
205 DT_CLK(NULL, "mmchsdb2_fck", "mmchsdb2_fck"),
206 { .node_name = NULL },
207};
208
209static const char *enable_init_clks[] = {
210 "apll96_ck",
211 "apll54_ck",
212 "sync_32k_ick",
213 "omapctrl_ick",
214 "gpmc_fck",
215 "sdrc_ick",
216};
217
218enum {
219 OMAP2_SOC_OMAP2420,
220 OMAP2_SOC_OMAP2430,
221};
222
223static int __init omap2xxx_dt_clk_init(int soc_type)
224{
225 ti_dt_clocks_register(omap2xxx_clks);
226
227 if (soc_type == OMAP2_SOC_OMAP2420)
228 ti_dt_clocks_register(omap2420_clks);
229 else
230 ti_dt_clocks_register(omap2430_clks);
231
232 omap2xxx_clkt_vps_init();
233
234 omap2_clk_disable_autoidle_all();
235
236 omap2_clk_enable_init_clocks(enable_init_clks,
237 ARRAY_SIZE(enable_init_clks));
238
239 pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
240 (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 1000000),
241 (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 100000) % 10,
242 (clk_get_rate(clk_get_sys(NULL, "dpll_ck")) / 1000000),
243 (clk_get_rate(clk_get_sys(NULL, "mpu_ck")) / 1000000));
244
245 return 0;
246}
247
248int __init omap2420_dt_clk_init(void)
249{
250 return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2420);
251}
252
253int __init omap2430_dt_clk_init(void)
254{
255 return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2430);
256}
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 08f3d1b915b3..5e183993e3ec 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -240,6 +240,12 @@ int __init omap5xxx_dt_clk_init(void)
240 if (rc) 240 if (rc)
241 pr_err("%s: failed to configure ABE DPLL!\n", __func__); 241 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
242 242
243 abe_dpll = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
244 if (!rc)
245 rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ * 2);
246 if (rc)
247 pr_err("%s: failed to configure ABE m2x2 DPLL!\n", __func__);
248
243 usb_dpll = clk_get_sys(NULL, "dpll_usb_ck"); 249 usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
244 rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ); 250 rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
245 if (rc) 251 if (rc)
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index f7e40734c819..e1581335937d 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -24,7 +24,7 @@ static struct ti_dt_clk dra7xx_clks[] = {
24 DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"), 24 DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
25 DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"), 25 DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
26 DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"), 26 DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"),
27 DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"), 27 DT_CLK(NULL, "atl_clkin3_ck", "atl_clkin3_ck"),
28 DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"), 28 DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"),
29 DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"), 29 DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"),
30 DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"), 30 DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"),
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
new file mode 100644
index 000000000000..4a65b410e4d5
--- /dev/null
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -0,0 +1,312 @@
1/*
2 * DRA7 ATL (Audio Tracking Logic) clock driver
3 *
4 * Copyright (C) 2013 Texas Instruments, Inc.
5 *
6 * Peter Ujfalusi <peter.ujfalusi@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
13 * kind, whether express or implied; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/clk-provider.h>
20#include <linux/slab.h>
21#include <linux/io.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/platform_device.h>
25#include <linux/pm_runtime.h>
26
27#define DRA7_ATL_INSTANCES 4
28
29#define DRA7_ATL_PPMR_REG(id) (0x200 + (id * 0x80))
30#define DRA7_ATL_BBSR_REG(id) (0x204 + (id * 0x80))
31#define DRA7_ATL_ATLCR_REG(id) (0x208 + (id * 0x80))
32#define DRA7_ATL_SWEN_REG(id) (0x210 + (id * 0x80))
33#define DRA7_ATL_BWSMUX_REG(id) (0x214 + (id * 0x80))
34#define DRA7_ATL_AWSMUX_REG(id) (0x218 + (id * 0x80))
35#define DRA7_ATL_PCLKMUX_REG(id) (0x21c + (id * 0x80))
36
37#define DRA7_ATL_SWEN BIT(0)
38#define DRA7_ATL_DIVIDER_MASK (0x1f)
39#define DRA7_ATL_PCLKMUX BIT(0)
40struct dra7_atl_clock_info;
41
42struct dra7_atl_desc {
43 struct clk *clk;
44 struct clk_hw hw;
45 struct dra7_atl_clock_info *cinfo;
46 int id;
47
48 bool probed; /* the driver for the IP has been loaded */
49 bool valid; /* configured */
50 bool enabled;
51 u32 bws; /* Baseband Word Select Mux */
52 u32 aws; /* Audio Word Select Mux */
53 u32 divider; /* Cached divider value */
54};
55
56struct dra7_atl_clock_info {
57 struct device *dev;
58 void __iomem *iobase;
59
60 struct dra7_atl_desc *cdesc;
61};
62
63#define to_atl_desc(_hw) container_of(_hw, struct dra7_atl_desc, hw)
64
65static inline void atl_write(struct dra7_atl_clock_info *cinfo, u32 reg,
66 u32 val)
67{
68 __raw_writel(val, cinfo->iobase + reg);
69}
70
71static inline int atl_read(struct dra7_atl_clock_info *cinfo, u32 reg)
72{
73 return __raw_readl(cinfo->iobase + reg);
74}
75
76static int atl_clk_enable(struct clk_hw *hw)
77{
78 struct dra7_atl_desc *cdesc = to_atl_desc(hw);
79
80 if (!cdesc->probed)
81 goto out;
82
83 if (unlikely(!cdesc->valid))
84 dev_warn(cdesc->cinfo->dev, "atl%d has not been configured\n",
85 cdesc->id);
86 pm_runtime_get_sync(cdesc->cinfo->dev);
87
88 atl_write(cdesc->cinfo, DRA7_ATL_ATLCR_REG(cdesc->id),
89 cdesc->divider - 1);
90 atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), DRA7_ATL_SWEN);
91
92out:
93 cdesc->enabled = true;
94
95 return 0;
96}
97
98static void atl_clk_disable(struct clk_hw *hw)
99{
100 struct dra7_atl_desc *cdesc = to_atl_desc(hw);
101
102 if (!cdesc->probed)
103 goto out;
104
105 atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), 0);
106 pm_runtime_put_sync(cdesc->cinfo->dev);
107
108out:
109 cdesc->enabled = false;
110}
111
112static int atl_clk_is_enabled(struct clk_hw *hw)
113{
114 struct dra7_atl_desc *cdesc = to_atl_desc(hw);
115
116 return cdesc->enabled;
117}
118
119static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
120 unsigned long parent_rate)
121{
122 struct dra7_atl_desc *cdesc = to_atl_desc(hw);
123
124 return parent_rate / cdesc->divider;
125}
126
127static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
128 unsigned long *parent_rate)
129{
130 unsigned divider;
131
132 divider = (*parent_rate + rate / 2) / rate;
133 if (divider > DRA7_ATL_DIVIDER_MASK + 1)
134 divider = DRA7_ATL_DIVIDER_MASK + 1;
135
136 return *parent_rate / divider;
137}
138
139static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
140 unsigned long parent_rate)
141{
142 struct dra7_atl_desc *cdesc = to_atl_desc(hw);
143 u32 divider;
144
145 divider = ((parent_rate + rate / 2) / rate) - 1;
146 if (divider > DRA7_ATL_DIVIDER_MASK)
147 divider = DRA7_ATL_DIVIDER_MASK;
148
149 cdesc->divider = divider + 1;
150
151 return 0;
152}
153
154const struct clk_ops atl_clk_ops = {
155 .enable = atl_clk_enable,
156 .disable = atl_clk_disable,
157 .is_enabled = atl_clk_is_enabled,
158 .recalc_rate = atl_clk_recalc_rate,
159 .round_rate = atl_clk_round_rate,
160 .set_rate = atl_clk_set_rate,
161};
162
163static void __init of_dra7_atl_clock_setup(struct device_node *node)
164{
165 struct dra7_atl_desc *clk_hw = NULL;
166 struct clk_init_data init = { 0 };
167 const char **parent_names = NULL;
168 struct clk *clk;
169
170 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
171 if (!clk_hw) {
172 pr_err("%s: could not allocate dra7_atl_desc\n", __func__);
173 return;
174 }
175
176 clk_hw->hw.init = &init;
177 clk_hw->divider = 1;
178 init.name = node->name;
179 init.ops = &atl_clk_ops;
180 init.flags = CLK_IGNORE_UNUSED;
181 init.num_parents = of_clk_get_parent_count(node);
182
183 if (init.num_parents != 1) {
184 pr_err("%s: atl clock %s must have 1 parent\n", __func__,
185 node->name);
186 goto cleanup;
187 }
188
189 parent_names = kzalloc(sizeof(char *), GFP_KERNEL);
190
191 if (!parent_names)
192 goto cleanup;
193
194 parent_names[0] = of_clk_get_parent_name(node, 0);
195
196 init.parent_names = parent_names;
197
198 clk = clk_register(NULL, &clk_hw->hw);
199
200 if (!IS_ERR(clk)) {
201 of_clk_add_provider(node, of_clk_src_simple_get, clk);
202 return;
203 }
204cleanup:
205 kfree(parent_names);
206 kfree(clk_hw);
207}
208CLK_OF_DECLARE(dra7_atl_clock, "ti,dra7-atl-clock", of_dra7_atl_clock_setup);
209
210static int of_dra7_atl_clk_probe(struct platform_device *pdev)
211{
212 struct device_node *node = pdev->dev.of_node;
213 struct dra7_atl_clock_info *cinfo;
214 int i;
215 int ret = 0;
216
217 if (!node)
218 return -ENODEV;
219
220 cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
221 if (!cinfo)
222 return -ENOMEM;
223
224 cinfo->iobase = of_iomap(node, 0);
225 cinfo->dev = &pdev->dev;
226 pm_runtime_enable(cinfo->dev);
227
228 pm_runtime_get_sync(cinfo->dev);
229 atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
230
231 for (i = 0; i < DRA7_ATL_INSTANCES; i++) {
232 struct device_node *cfg_node;
233 char prop[5];
234 struct dra7_atl_desc *cdesc;
235 struct of_phandle_args clkspec;
236 struct clk *clk;
237 int rc;
238
239 rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
240 NULL, i, &clkspec);
241
242 if (rc) {
243 pr_err("%s: failed to lookup atl clock %d\n", __func__,
244 i);
245 return -EINVAL;
246 }
247
248 clk = of_clk_get_from_provider(&clkspec);
249
250 cdesc = to_atl_desc(__clk_get_hw(clk));
251 cdesc->cinfo = cinfo;
252 cdesc->id = i;
253
254 /* Get configuration for the ATL instances */
255 snprintf(prop, sizeof(prop), "atl%u", i);
256 cfg_node = of_find_node_by_name(node, prop);
257 if (cfg_node) {
258 ret = of_property_read_u32(cfg_node, "bws",
259 &cdesc->bws);
260 ret |= of_property_read_u32(cfg_node, "aws",
261 &cdesc->aws);
262 if (!ret) {
263 cdesc->valid = true;
264 atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i),
265 cdesc->bws);
266 atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
267 cdesc->aws);
268 }
269 }
270
271 cdesc->probed = true;
272 /*
273 * Enable the clock if it has been asked prior to loading the
274 * hw driver
275 */
276 if (cdesc->enabled)
277 atl_clk_enable(__clk_get_hw(clk));
278 }
279 pm_runtime_put_sync(cinfo->dev);
280
281 return ret;
282}
283
284static int of_dra7_atl_clk_remove(struct platform_device *pdev)
285{
286 pm_runtime_disable(&pdev->dev);
287
288 return 0;
289}
290
291static struct of_device_id of_dra7_atl_clk_match_tbl[] = {
292 { .compatible = "ti,dra7-atl", },
293 {},
294};
295MODULE_DEVICE_TABLE(of, of_dra7_atl_clk_match_tbl);
296
297static struct platform_driver dra7_atl_clk_driver = {
298 .driver = {
299 .name = "dra7-atl",
300 .owner = THIS_MODULE,
301 .of_match_table = of_dra7_atl_clk_match_tbl,
302 },
303 .probe = of_dra7_atl_clk_probe,
304 .remove = of_dra7_atl_clk_remove,
305};
306
307module_platform_driver(dra7_atl_clk_driver);
308
309MODULE_DESCRIPTION("Clock driver for DRA7 Audio Tracking Logic");
310MODULE_ALIAS("platform:dra7-atl-clock");
311MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
312MODULE_LICENSE("GPL v2");
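
atl_clk_round_rate() and atl_clk_set_rate() above both choose the divider closest to parent_rate / rate and clamp it to the 5-bit field the hardware exposes. A stand-alone sketch of that arithmetic, with example rates invented purely for illustration:

#include <stdio.h>

#define ATL_DIVIDER_MASK 0x1f	/* 5-bit divider field, as in the driver */

/* Nearest divider for a requested rate, clamped like atl_clk_set_rate(). */
static unsigned int atl_pick_divider(unsigned long parent_rate, unsigned long rate)
{
	unsigned int divider = (parent_rate + rate / 2) / rate;

	if (divider < 1)
		divider = 1;
	if (divider > ATL_DIVIDER_MASK + 1)
		divider = ATL_DIVIDER_MASK + 1;
	return divider;
}

int main(void)
{
	/* Example numbers only; real rates come from the ATL parent clock. */
	unsigned long parent = 5644800;		/* 128 * 44.1 kHz */
	unsigned long wanted = 44100 * 32;

	unsigned int div = atl_pick_divider(parent, wanted);

	printf("divider=%u, achieved rate=%lu Hz\n", div, parent / div);
	return 0;
}
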
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 7e498a44f97d..abd956d5f838 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,8 +25,6 @@
25#undef pr_fmt 25#undef pr_fmt
26#define pr_fmt(fmt) "%s: " fmt, __func__ 26#define pr_fmt(fmt) "%s: " fmt, __func__
27 27
28#define DPLL_HAS_AUTOIDLE 0x1
29
30#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 28#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
31 defined(CONFIG_SOC_DRA7XX) 29 defined(CONFIG_SOC_DRA7XX)
32static const struct clk_ops dpll_m4xen_ck_ops = { 30static const struct clk_ops dpll_m4xen_ck_ops = {
@@ -37,21 +35,18 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
37 .set_rate = &omap3_noncore_dpll_set_rate, 35 .set_rate = &omap3_noncore_dpll_set_rate,
38 .get_parent = &omap2_init_dpll_parent, 36 .get_parent = &omap2_init_dpll_parent,
39}; 37};
38#else
39static const struct clk_ops dpll_m4xen_ck_ops = {};
40#endif 40#endif
41 41
42#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
43 defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
44 defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
42static const struct clk_ops dpll_core_ck_ops = { 45static const struct clk_ops dpll_core_ck_ops = {
43 .recalc_rate = &omap3_dpll_recalc, 46 .recalc_rate = &omap3_dpll_recalc,
44 .get_parent = &omap2_init_dpll_parent, 47 .get_parent = &omap2_init_dpll_parent,
45}; 48};
46 49
47#ifdef CONFIG_ARCH_OMAP3
48static const struct clk_ops omap3_dpll_core_ck_ops = {
49 .get_parent = &omap2_init_dpll_parent,
50 .recalc_rate = &omap3_dpll_recalc,
51 .round_rate = &omap2_dpll_round_rate,
52};
53#endif
54
55static const struct clk_ops dpll_ck_ops = { 50static const struct clk_ops dpll_ck_ops = {
56 .enable = &omap3_noncore_dpll_enable, 51 .enable = &omap3_noncore_dpll_enable,
57 .disable = &omap3_noncore_dpll_disable, 52 .disable = &omap3_noncore_dpll_disable,
@@ -67,6 +62,33 @@ static const struct clk_ops dpll_no_gate_ck_ops = {
67 .round_rate = &omap2_dpll_round_rate, 62 .round_rate = &omap2_dpll_round_rate,
68 .set_rate = &omap3_noncore_dpll_set_rate, 63 .set_rate = &omap3_noncore_dpll_set_rate,
69}; 64};
65#else
66static const struct clk_ops dpll_core_ck_ops = {};
67static const struct clk_ops dpll_ck_ops = {};
68static const struct clk_ops dpll_no_gate_ck_ops = {};
69const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
70#endif
71
72#ifdef CONFIG_ARCH_OMAP2
73static const struct clk_ops omap2_dpll_core_ck_ops = {
74 .get_parent = &omap2_init_dpll_parent,
75 .recalc_rate = &omap2_dpllcore_recalc,
76 .round_rate = &omap2_dpll_round_rate,
77 .set_rate = &omap2_reprogram_dpllcore,
78};
79#else
80static const struct clk_ops omap2_dpll_core_ck_ops = {};
81#endif
82
83#ifdef CONFIG_ARCH_OMAP3
84static const struct clk_ops omap3_dpll_core_ck_ops = {
85 .get_parent = &omap2_init_dpll_parent,
86 .recalc_rate = &omap3_dpll_recalc,
87 .round_rate = &omap2_dpll_round_rate,
88};
89#else
90static const struct clk_ops omap3_dpll_core_ck_ops = {};
91#endif
70 92
71#ifdef CONFIG_ARCH_OMAP3 93#ifdef CONFIG_ARCH_OMAP3
72static const struct clk_ops omap3_dpll_ck_ops = { 94static const struct clk_ops omap3_dpll_ck_ops = {
@@ -193,14 +215,12 @@ static void ti_clk_register_dpll_x2(struct device_node *node,
193 * @node: device node containing the DPLL info 215 * @node: device node containing the DPLL info
194 * @ops: ops for the DPLL 216 * @ops: ops for the DPLL
195 * @ddt: DPLL data template to use 217 * @ddt: DPLL data template to use
196 * @init_flags: flags for controlling init types
197 * 218 *
198 * Initializes a DPLL clock from device tree data. 219 * Initializes a DPLL clock from device tree data.
199 */ 220 */
200static void __init of_ti_dpll_setup(struct device_node *node, 221static void __init of_ti_dpll_setup(struct device_node *node,
201 const struct clk_ops *ops, 222 const struct clk_ops *ops,
202 const struct dpll_data *ddt, 223 const struct dpll_data *ddt)
203 u8 init_flags)
204{ 224{
205 struct clk_hw_omap *clk_hw = NULL; 225 struct clk_hw_omap *clk_hw = NULL;
206 struct clk_init_data *init = NULL; 226 struct clk_init_data *init = NULL;
@@ -241,13 +261,30 @@ static void __init of_ti_dpll_setup(struct device_node *node,
241 init->parent_names = parent_names; 261 init->parent_names = parent_names;
242 262
243 dd->control_reg = ti_clk_get_reg_addr(node, 0); 263 dd->control_reg = ti_clk_get_reg_addr(node, 0);
244 dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
245 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
246 264
247 if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg) 265 /*
266 * Special case for OMAP2 DPLL, register order is different due to
267 * missing idlest_reg, also clkhwops is different. Detected from
268 * missing idlest_mask.
269 */
270 if (!dd->idlest_mask) {
271 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
272#ifdef CONFIG_ARCH_OMAP2
273 clk_hw->ops = &clkhwops_omap2xxx_dpll;
274 omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
275#endif
276 } else {
277 dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
278 if (!dd->idlest_reg)
279 goto cleanup;
280
281 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
282 }
283
284 if (!dd->control_reg || !dd->mult_div1_reg)
248 goto cleanup; 285 goto cleanup;
249 286
250 if (init_flags & DPLL_HAS_AUTOIDLE) { 287 if (dd->autoidle_mask) {
251 dd->autoidle_reg = ti_clk_get_reg_addr(node, 3); 288 dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
252 if (!dd->autoidle_reg) 289 if (!dd->autoidle_reg)
253 goto cleanup; 290 goto cleanup;
@@ -310,7 +347,7 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
310 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 347 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
311 }; 348 };
312 349
313 of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 350 of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
314} 351}
315CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock", 352CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
316 of_ti_omap3_dpll_setup); 353 of_ti_omap3_dpll_setup);
@@ -329,7 +366,7 @@ static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
329 .freqsel_mask = 0xf0, 366 .freqsel_mask = 0xf0,
330 }; 367 };
331 368
332 of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 369 of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
333} 370}
334CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock", 371CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
335 of_ti_omap3_core_dpll_setup); 372 of_ti_omap3_core_dpll_setup);
@@ -349,7 +386,7 @@ static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
349 .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), 386 .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
350 }; 387 };
351 388
352 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 389 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
353} 390}
354CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock", 391CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
355 of_ti_omap3_per_dpll_setup); 392 of_ti_omap3_per_dpll_setup);
@@ -371,7 +408,7 @@ static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
371 .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), 408 .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
372 }; 409 };
373 410
374 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 411 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
375} 412}
376CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock", 413CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
377 of_ti_omap3_per_jtype_dpll_setup); 414 of_ti_omap3_per_jtype_dpll_setup);
@@ -391,11 +428,32 @@ static void __init of_ti_omap4_dpll_setup(struct device_node *node)
391 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 428 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
392 }; 429 };
393 430
394 of_ti_dpll_setup(node, &dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 431 of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
395} 432}
396CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock", 433CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
397 of_ti_omap4_dpll_setup); 434 of_ti_omap4_dpll_setup);
398 435
436static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
437{
438 const struct dpll_data dd = {
439 .idlest_mask = 0x1,
440 .enable_mask = 0x7,
441 .autoidle_mask = 0x7,
442 .mult_mask = 0x7ff << 8,
443 .div1_mask = 0x7f,
444 .max_multiplier = 2047,
445 .max_divider = 128,
446 .dcc_mask = BIT(22),
447 .dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
448 .min_divider = 1,
449 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
450 };
451
452 of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
453}
454CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
455 of_ti_omap5_mpu_dpll_setup);
456
399static void __init of_ti_omap4_core_dpll_setup(struct device_node *node) 457static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
400{ 458{
401 const struct dpll_data dd = { 459 const struct dpll_data dd = {
@@ -410,7 +468,7 @@ static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
410 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 468 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
411 }; 469 };
412 470
413 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 471 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
414} 472}
415CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock", 473CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
416 of_ti_omap4_core_dpll_setup); 474 of_ti_omap4_core_dpll_setup);
@@ -433,7 +491,7 @@ static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
433 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 491 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
434 }; 492 };
435 493
436 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 494 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
437} 495}
438CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock", 496CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
439 of_ti_omap4_m4xen_dpll_setup); 497 of_ti_omap4_m4xen_dpll_setup);
@@ -454,7 +512,7 @@ static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
454 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 512 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
455 }; 513 };
456 514
457 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE); 515 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
458} 516}
459CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock", 517CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
460 of_ti_omap4_jtype_dpll_setup); 518 of_ti_omap4_jtype_dpll_setup);
@@ -465,7 +523,6 @@ static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
465 const struct dpll_data dd = { 523 const struct dpll_data dd = {
466 .idlest_mask = 0x1, 524 .idlest_mask = 0x1,
467 .enable_mask = 0x7, 525 .enable_mask = 0x7,
468 .autoidle_mask = 0x7,
469 .mult_mask = 0x7ff << 8, 526 .mult_mask = 0x7ff << 8,
470 .div1_mask = 0x7f, 527 .div1_mask = 0x7f,
471 .max_multiplier = 2047, 528 .max_multiplier = 2047,
@@ -474,7 +531,7 @@ static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
474 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 531 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
475 }; 532 };
476 533
477 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0); 534 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
478} 535}
479CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock", 536CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
480 of_ti_am3_no_gate_dpll_setup); 537 of_ti_am3_no_gate_dpll_setup);
@@ -484,7 +541,6 @@ static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
484 const struct dpll_data dd = { 541 const struct dpll_data dd = {
485 .idlest_mask = 0x1, 542 .idlest_mask = 0x1,
486 .enable_mask = 0x7, 543 .enable_mask = 0x7,
487 .autoidle_mask = 0x7,
488 .mult_mask = 0x7ff << 8, 544 .mult_mask = 0x7ff << 8,
489 .div1_mask = 0x7f, 545 .div1_mask = 0x7f,
490 .max_multiplier = 4095, 546 .max_multiplier = 4095,
@@ -494,7 +550,7 @@ static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
494 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 550 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
495 }; 551 };
496 552
497 of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0); 553 of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
498} 554}
499CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock", 555CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
500 of_ti_am3_jtype_dpll_setup); 556 of_ti_am3_jtype_dpll_setup);
@@ -504,7 +560,6 @@ static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
504 const struct dpll_data dd = { 560 const struct dpll_data dd = {
505 .idlest_mask = 0x1, 561 .idlest_mask = 0x1,
506 .enable_mask = 0x7, 562 .enable_mask = 0x7,
507 .autoidle_mask = 0x7,
508 .mult_mask = 0x7ff << 8, 563 .mult_mask = 0x7ff << 8,
509 .div1_mask = 0x7f, 564 .div1_mask = 0x7f,
510 .max_multiplier = 2047, 565 .max_multiplier = 2047,
@@ -514,7 +569,7 @@ static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
514 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 569 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
515 }; 570 };
516 571
517 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0); 572 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
518} 573}
519CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock, 574CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
520 "ti,am3-dpll-no-gate-j-type-clock", 575 "ti,am3-dpll-no-gate-j-type-clock",
@@ -525,7 +580,6 @@ static void __init of_ti_am3_dpll_setup(struct device_node *node)
525 const struct dpll_data dd = { 580 const struct dpll_data dd = {
526 .idlest_mask = 0x1, 581 .idlest_mask = 0x1,
527 .enable_mask = 0x7, 582 .enable_mask = 0x7,
528 .autoidle_mask = 0x7,
529 .mult_mask = 0x7ff << 8, 583 .mult_mask = 0x7ff << 8,
530 .div1_mask = 0x7f, 584 .div1_mask = 0x7f,
531 .max_multiplier = 2047, 585 .max_multiplier = 2047,
@@ -534,7 +588,7 @@ static void __init of_ti_am3_dpll_setup(struct device_node *node)
534 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 588 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
535 }; 589 };
536 590
537 of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0); 591 of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
538} 592}
539CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup); 593CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
540 594
@@ -543,7 +597,6 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
543 const struct dpll_data dd = { 597 const struct dpll_data dd = {
544 .idlest_mask = 0x1, 598 .idlest_mask = 0x1,
545 .enable_mask = 0x7, 599 .enable_mask = 0x7,
546 .autoidle_mask = 0x7,
547 .mult_mask = 0x7ff << 8, 600 .mult_mask = 0x7ff << 8,
548 .div1_mask = 0x7f, 601 .div1_mask = 0x7f,
549 .max_multiplier = 2047, 602 .max_multiplier = 2047,
@@ -552,7 +605,22 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
552 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), 605 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
553 }; 606 };
554 607
555 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, 0); 608 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
556} 609}
557CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock", 610CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
558 of_ti_am3_core_dpll_setup); 611 of_ti_am3_core_dpll_setup);
612
613static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
614{
615 const struct dpll_data dd = {
616 .enable_mask = 0x3,
617 .mult_mask = 0x3ff << 12,
618 .div1_mask = 0xf << 8,
619 .max_divider = 16,
620 .min_divider = 1,
621 };
622
623 of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
624}
625CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
626 of_ti_omap2_core_dpll_setup);
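
Every of_ti_*_dpll_setup() variant above, and the ATL driver before it, relies on the same registration idiom: CLK_OF_DECLARE() ties a compatible string to an early setup function, which fills in a clk_init_data, registers the clk_hw and exposes it as a DT clock provider. A stripped-down skeleton of that idiom, with an invented compatible string and a deliberately empty clk_ops, is sketched here for orientation only:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Illustrative only: a do-nothing clock type registered from DT. */
static const struct clk_ops example_clk_ops = {
};

static void __init of_example_clk_setup(struct device_node *node)
{
	struct clk_hw *hw;
	struct clk_init_data init = { 0 };
	const char *parent;
	struct clk *clk;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	parent = of_clk_get_parent_name(node, 0);

	init.name = node->name;
	init.ops = &example_clk_ops;
	init.parent_names = &parent;
	init.num_parents = parent ? 1 : 0;
	hw->init = &init;

	clk = clk_register(NULL, hw);
	if (!IS_ERR(clk))
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
	else
		kfree(hw);
}
CLK_OF_DECLARE(example_clk, "example,example-clock", of_example_clk_setup);
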
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 58734817d502..b326d2797feb 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -185,7 +185,7 @@ of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
185CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock", 185CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
186 of_ti_composite_no_wait_gate_clk_setup); 186 of_ti_composite_no_wait_gate_clk_setup);
187 187
188#ifdef CONFIG_ARCH_OMAP3 188#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
189static void __init of_ti_composite_interface_clk_setup(struct device_node *node) 189static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
190{ 190{
191 _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait); 191 _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 320a2b168bb2..9c3e8c4aaa40 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -94,6 +94,7 @@ static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
94CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock", 94CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
95 of_ti_no_wait_interface_clk_setup); 95 of_ti_no_wait_interface_clk_setup);
96 96
97#ifdef CONFIG_ARCH_OMAP3
97static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node) 98static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
98{ 99{
99 _of_ti_interface_clk_setup(node, 100 _of_ti_interface_clk_setup(node,
@@ -123,3 +124,13 @@ static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
123} 124}
124CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock", 125CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
125 of_ti_am35xx_interface_clk_setup); 126 of_ti_am35xx_interface_clk_setup);
127#endif
128
129#ifdef CONFIG_SOC_OMAP2430
130static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node)
131{
132 _of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait);
133}
134CLK_OF_DECLARE(ti_omap2430_interface_clk, "ti,omap2430-interface-clock",
135 of_ti_omap2430_interface_clk_setup);
136#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e473d6555f96..ffe350f86bca 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -186,6 +186,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
186config GENERIC_CPUFREQ_CPU0 186config GENERIC_CPUFREQ_CPU0
187 tristate "Generic CPU0 cpufreq driver" 187 tristate "Generic CPU0 cpufreq driver"
188 depends on HAVE_CLK && OF 188 depends on HAVE_CLK && OF
189 # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
190 depends on !CPU_THERMAL || THERMAL
189 select PM_OPP 191 select PM_OPP
190 help 192 help
191 This adds a generic cpufreq driver for CPU0 frequency management. 193 This adds a generic cpufreq driver for CPU0 frequency management.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index aed2b0cb83dc..62259d27f03e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2242,10 +2242,8 @@ int cpufreq_update_policy(unsigned int cpu)
2242 struct cpufreq_policy new_policy; 2242 struct cpufreq_policy new_policy;
2243 int ret; 2243 int ret;
2244 2244
2245 if (!policy) { 2245 if (!policy)
2246 ret = -ENODEV; 2246 return -ENODEV;
2247 goto no_policy;
2248 }
2249 2247
2250 down_write(&policy->rwsem); 2248 down_write(&policy->rwsem);
2251 2249
@@ -2264,7 +2262,7 @@ int cpufreq_update_policy(unsigned int cpu)
2264 new_policy.cur = cpufreq_driver->get(cpu); 2262 new_policy.cur = cpufreq_driver->get(cpu);
2265 if (WARN_ON(!new_policy.cur)) { 2263 if (WARN_ON(!new_policy.cur)) {
2266 ret = -EIO; 2264 ret = -EIO;
2267 goto no_policy; 2265 goto unlock;
2268 } 2266 }
2269 2267
2270 if (!policy->cur) { 2268 if (!policy->cur) {
@@ -2279,10 +2277,10 @@ int cpufreq_update_policy(unsigned int cpu)
2279 2277
2280 ret = cpufreq_set_policy(policy, &new_policy); 2278 ret = cpufreq_set_policy(policy, &new_policy);
2281 2279
2280unlock:
2282 up_write(&policy->rwsem); 2281 up_write(&policy->rwsem);
2283 2282
2284 cpufreq_cpu_put(policy); 2283 cpufreq_cpu_put(policy);
2285no_policy:
2286 return ret; 2284 return ret;
2287} 2285}
2288EXPORT_SYMBOL(cpufreq_update_policy); 2286EXPORT_SYMBOL(cpufreq_update_policy);
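
The cpufreq_update_policy() cleanup above makes the locking structure explicit: the one failure that can happen before the rwsem is taken now returns directly, while every later failure jumps to an unlock label that still drops the lock. A minimal stand-alone sketch of that shape, using a pthread mutex in place of the policy rwsem:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resource_present = 1;	/* toggle to exercise the early return */

static int update_resource(int value)
{
	int ret = 0;

	if (!resource_present)
		return -ENODEV;		/* nothing locked yet: return directly */

	pthread_mutex_lock(&lock);

	if (value < 0) {
		ret = -EINVAL;
		goto unlock;		/* lock held: errors funnel through unlock */
	}

	printf("updated to %d\n", value);

unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", update_resource(5));
	printf("error path: %d\n", update_resource(-1));
	return 0;
}
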
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4e7f492ad583..924bb2d42b1c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -196,10 +196,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
196 pid->last_err = fp_error; 196 pid->last_err = fp_error;
197 197
198 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 198 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
199 if (result >= 0) 199 result = result + (1 << (FRAC_BITS-1));
200 result = result + (1 << (FRAC_BITS-1));
201 else
202 result = result - (1 << (FRAC_BITS-1));
203 return (signed int)fp_toint(result); 200 return (signed int)fp_toint(result);
204} 201}
205 202
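
The intel_pstate change above drops the sign-dependent rounding branch: adding half of one fixed-point unit and then truncating with an arithmetic (flooring) shift already rounds to the nearest integer for both positive and negative results. A stand-alone illustration, assuming the same power-of-two fixed-point layout; the FRAC_BITS value below is only an example:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* example value; the driver defines its own */

static int32_t int_tofp(int v)
{
	return v << FRAC_BITS;
}

static int fp_toint(int32_t v)
{
	/* Assumes arithmetic right shift (floor toward -inf), as the kernel does. */
	return v >> FRAC_BITS;
}

static int fp_round(int32_t v)
{
	/* Add half of one unit, then floor: nearest integer for either sign. */
	return fp_toint(v + (1 << (FRAC_BITS - 1)));
}

int main(void)
{
	int32_t a = int_tofp(3) + (1 << FRAC_BITS) / 4;		/* 3.25 */
	int32_t b = -int_tofp(3) - (1 << FRAC_BITS) / 4;	/* -3.25 */

	printf("3.25 -> %d, -3.25 -> %d\n", fp_round(a), fp_round(b));
	return 0;
}
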
diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c
index 28587d0f3947..a5fba0287bfb 100644
--- a/drivers/cpuidle/cpuidle-armada-370-xp.c
+++ b/drivers/cpuidle/cpuidle-armada-370-xp.c
@@ -55,7 +55,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
55 .power_usage = 50, 55 .power_usage = 50,
56 .target_residency = 100, 56 .target_residency = 100,
57 .flags = CPUIDLE_FLAG_TIME_VALID, 57 .flags = CPUIDLE_FLAG_TIME_VALID,
58 .name = "MV CPU IDLE", 58 .name = "Idle",
59 .desc = "CPU power down", 59 .desc = "CPU power down",
60 }, 60 },
61 .states[2] = { 61 .states[2] = {
@@ -65,7 +65,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
65 .target_residency = 1000, 65 .target_residency = 1000,
66 .flags = CPUIDLE_FLAG_TIME_VALID | 66 .flags = CPUIDLE_FLAG_TIME_VALID |
67 ARMADA_370_XP_FLAG_DEEP_IDLE, 67 ARMADA_370_XP_FLAG_DEEP_IDLE,
68 .name = "MV CPU DEEP IDLE", 68 .name = "Deep idle",
69 .desc = "CPU and L2 Fabric power down", 69 .desc = "CPU and L2 Fabric power down",
70 }, 70 },
71 .state_count = ARMADA_370_XP_MAX_STATES, 71 .state_count = ARMADA_370_XP_MAX_STATES,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d9c9cb4665db..2ebc9071e354 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2614,7 +2614,7 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2614 2614
2615 desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, 2615 desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
2616 &of_flags); 2616 &of_flags);
2617 if (!IS_ERR(desc)) 2617 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
2618 break; 2618 break;
2619 } 2619 }
2620 2620
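
The gpiolib hunk above stops the per-suffix lookup loop not only on success but also on -EPROBE_DEFER, so a deferral is propagated instead of being overwritten by a later -ENOENT from the next suffix. A hedged sketch of that loop shape; the lookup helper here is a stand-in, not the gpiolib internal:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>

struct gpio_desc;

/* Stand-in for the per-suffix lookup: pretend the property was not found. */
static struct gpio_desc *example_lookup(const char *prop_name)
{
	return ERR_PTR(-ENOENT);
}

static struct gpio_desc *example_find_gpio(void)
{
	static const char * const suffixes[] = { "gpios", "gpio" };
	struct gpio_desc *desc = ERR_PTR(-ENOENT);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
		desc = example_lookup(suffixes[i]);

		/*
		 * Stop on success, but also on probe deferral so it is not
		 * hidden by a later -ENOENT from another suffix.
		 */
		if (!IS_ERR(desc) || PTR_ERR(desc) == -EPROBE_DEFER)
			break;
	}

	return desc;
}
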
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7c2497dea1e9..0dc57d5ecd10 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -64,6 +64,7 @@
64void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, 64void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
65 uint32_t flags) 65 uint32_t flags)
66{ 66{
67 memset(ctx, 0, sizeof(*ctx));
67 ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class); 68 ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
68 INIT_LIST_HEAD(&ctx->locked); 69 INIT_LIST_HEAD(&ctx->locked);
69} 70}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4c22a5b7f4c5..6c656392d67d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,8 @@
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "i915_trace.h" 37#include "i915_trace.h"
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/console.h>
40#include <linux/vt.h>
39#include <linux/vgaarb.h> 41#include <linux/vgaarb.h>
40#include <linux/acpi.h> 42#include <linux/acpi.h>
41#include <linux/pnp.h> 43#include <linux/pnp.h>
@@ -1386,7 +1388,6 @@ cleanup_gem:
1386 i915_gem_context_fini(dev); 1388 i915_gem_context_fini(dev);
1387 mutex_unlock(&dev->struct_mutex); 1389 mutex_unlock(&dev->struct_mutex);
1388 WARN_ON(dev_priv->mm.aliasing_ppgtt); 1390 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1389 drm_mm_takedown(&dev_priv->gtt.base.mm);
1390cleanup_irq: 1391cleanup_irq:
1391 drm_irq_uninstall(dev); 1392 drm_irq_uninstall(dev);
1392cleanup_gem_stolen: 1393cleanup_gem_stolen:
@@ -1450,6 +1451,38 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1450} 1451}
1451#endif 1452#endif
1452 1453
1454#if !defined(CONFIG_VGA_CONSOLE)
1455static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1456{
1457 return 0;
1458}
1459#elif !defined(CONFIG_DUMMY_CONSOLE)
1460static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1461{
1462 return -ENODEV;
1463}
1464#else
1465static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1466{
1467 int ret;
1468
1469 DRM_INFO("Replacing VGA console driver\n");
1470
1471 console_lock();
1472 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
1473 if (ret == 0) {
1474 ret = do_unregister_con_driver(&vga_con);
1475
1476 /* Ignore "already unregistered". */
1477 if (ret == -ENODEV)
1478 ret = 0;
1479 }
1480 console_unlock();
1481
1482 return ret;
1483}
1484#endif
1485
1453static void i915_dump_device_info(struct drm_i915_private *dev_priv) 1486static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1454{ 1487{
1455 const struct intel_device_info *info = &dev_priv->info; 1488 const struct intel_device_info *info = &dev_priv->info;
@@ -1623,8 +1656,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1623 if (ret) 1656 if (ret)
1624 goto out_regs; 1657 goto out_regs;
1625 1658
1626 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1659 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1660 ret = i915_kick_out_vgacon(dev_priv);
1661 if (ret) {
1662 DRM_ERROR("failed to remove conflicting VGA console\n");
1663 goto out_gtt;
1664 }
1665
1627 i915_kick_out_firmware_fb(dev_priv); 1666 i915_kick_out_firmware_fb(dev_priv);
1667 }
1628 1668
1629 pci_set_master(dev->pdev); 1669 pci_set_master(dev->pdev);
1630 1670
@@ -1756,8 +1796,6 @@ out_mtrrfree:
1756 arch_phys_wc_del(dev_priv->gtt.mtrr); 1796 arch_phys_wc_del(dev_priv->gtt.mtrr);
1757 io_mapping_free(dev_priv->gtt.mappable); 1797 io_mapping_free(dev_priv->gtt.mappable);
1758out_gtt: 1798out_gtt:
1759 list_del(&dev_priv->gtt.base.global_link);
1760 drm_mm_takedown(&dev_priv->gtt.base.mm);
1761 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1799 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1762out_regs: 1800out_regs:
1763 intel_uncore_fini(dev); 1801 intel_uncore_fini(dev);
@@ -1846,7 +1884,6 @@ int i915_driver_unload(struct drm_device *dev)
1846 i915_free_hws(dev); 1884 i915_free_hws(dev);
1847 } 1885 }
1848 1886
1849 list_del(&dev_priv->gtt.base.global_link);
1850 WARN_ON(!list_empty(&dev_priv->vm_list)); 1887 WARN_ON(!list_empty(&dev_priv->vm_list));
1851 1888
1852 drm_vblank_cleanup(dev); 1889 drm_vblank_cleanup(dev);
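
i915_kick_out_vgacon() above is built in one of three forms depending on CONFIG_VGA_CONSOLE and CONFIG_DUMMY_CONSOLE: a no-op when there is no VGA console to replace, a hard -ENODEV when there is one but no dummy console to hand over to, and the real console takeover otherwise. A tiny stand-alone illustration of that compile-time stub selection, with feature macros invented for the example:

#include <stdio.h>

/* Invented feature switches; define or undefine to pick an implementation. */
#define HAVE_LEGACY_CONSOLE 1
#define HAVE_REPLACEMENT_CONSOLE 1

#if !defined(HAVE_LEGACY_CONSOLE)
static int kick_out_legacy_console(void)
{
	return 0;		/* nothing to replace */
}
#elif !defined(HAVE_REPLACEMENT_CONSOLE)
static int kick_out_legacy_console(void)
{
	return -1;		/* cannot replace it safely */
}
#else
static int kick_out_legacy_console(void)
{
	printf("replacing legacy console\n");
	return 0;
}
#endif

int main(void)
{
	return kick_out_legacy_console() ? 1 : 0;
}
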
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eec820aec022..8b3cde703364 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1992,7 +1992,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
1992 1992
1993 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); 1993 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
1994 1994
1995 drm_mm_takedown(&vm->mm); 1995 if (drm_mm_initialized(&vm->mm)) {
1996 drm_mm_takedown(&vm->mm);
1997 list_del(&vm->global_link);
1998 }
1996 iounmap(gtt->gsm); 1999 iounmap(gtt->gsm);
1997 teardown_scratch_page(vm->dev); 2000 teardown_scratch_page(vm->dev);
1998} 2001}
@@ -2025,6 +2028,10 @@ static int i915_gmch_probe(struct drm_device *dev,
2025 2028
2026static void i915_gmch_remove(struct i915_address_space *vm) 2029static void i915_gmch_remove(struct i915_address_space *vm)
2027{ 2030{
2031 if (drm_mm_initialized(&vm->mm)) {
2032 drm_mm_takedown(&vm->mm);
2033 list_del(&vm->global_link);
2034 }
2028 intel_gmch_remove(); 2035 intel_gmch_remove();
2029} 2036}
2030 2037
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 87ec60e181a7..66cf41765bf9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -888,6 +888,8 @@ static void i915_gem_record_rings(struct drm_device *dev,
888 for (i = 0; i < I915_NUM_RINGS; i++) { 888 for (i = 0; i < I915_NUM_RINGS; i++) {
889 struct intel_engine_cs *ring = &dev_priv->ring[i]; 889 struct intel_engine_cs *ring = &dev_priv->ring[i];
890 890
891 error->ring[i].pid = -1;
892
891 if (ring->dev == NULL) 893 if (ring->dev == NULL)
892 continue; 894 continue;
893 895
@@ -895,7 +897,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
895 897
896 i915_record_ring_state(dev, ring, &error->ring[i]); 898 i915_record_ring_state(dev, ring, &error->ring[i]);
897 899
898 error->ring[i].pid = -1;
899 request = i915_gem_find_active_request(ring); 900 request = i915_gem_find_active_request(ring);
900 if (request) { 901 if (request) {
901 /* We need to copy these to an anonymous buffer 902 /* We need to copy these to an anonymous buffer
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6f8017a7e937..267f069765ad 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2847,10 +2847,14 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2847 struct intel_engine_cs *signaller; 2847 struct intel_engine_cs *signaller;
2848 u32 seqno, ctl; 2848 u32 seqno, ctl;
2849 2849
2850 ring->hangcheck.deadlock = true; 2850 ring->hangcheck.deadlock++;
2851 2851
2852 signaller = semaphore_waits_for(ring, &seqno); 2852 signaller = semaphore_waits_for(ring, &seqno);
2853 if (signaller == NULL || signaller->hangcheck.deadlock) 2853 if (signaller == NULL)
2854 return -1;
2855
2856 /* Prevent pathological recursion due to driver bugs */
2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2854 return -1; 2858 return -1;
2855 2859
2856 /* cursory check for an unkickable deadlock */ 2860 /* cursory check for an unkickable deadlock */
@@ -2858,7 +2862,13 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2858 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2862 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2859 return -1; 2863 return -1;
2860 2864
2861 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2865 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2866 return 1;
2867
2868 if (signaller->hangcheck.deadlock)
2869 return -1;
2870
2871 return 0;
2862} 2872}
2863 2873
2864static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2874static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
@@ -2867,7 +2877,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2867 int i; 2877 int i;
2868 2878
2869 for_each_ring(ring, dev_priv, i) 2879 for_each_ring(ring, dev_priv, i)
2870 ring->hangcheck.deadlock = false; 2880 ring->hangcheck.deadlock = 0;
2871} 2881}
2872 2882
2873static enum intel_ring_hangcheck_action 2883static enum intel_ring_hangcheck_action
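
The hangcheck change above turns the per-ring deadlock flag into a counter: semaphore_passed() bumps it on each visit and gives up once a ring would be visited more times than there are rings, which bounds the recursion even if the recorded waits-for chain forms a cycle. A stand-alone sketch of that bounded walk over a fabricated waits-for graph:

#include <stdio.h>

#define NUM_RINGS 3

/* waits_for[i] is the ring that ring i waits on, or -1 for none. */
static const int waits_for[NUM_RINGS] = { 1, 2, 0 };	/* a deliberate cycle */
static int visits[NUM_RINGS];

/* Returns 1 if the chain terminates, -1 if it looks like an unbreakable cycle. */
static int chain_passed(int ring)
{
	int next;

	visits[ring]++;

	next = waits_for[ring];
	if (next < 0)
		return 1;

	/*
	 * Bail out once the next ring has already been visited as many times
	 * as rings exist: pathological recursion cannot run away.
	 */
	if (visits[next] >= NUM_RINGS)
		return -1;

	return chain_passed(next);
}

int main(void)
{
	printf("result: %d\n", chain_passed(0));
	return 0;
}
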
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5e6c888b4928..38a98570d10c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -798,9 +798,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
798 ctl = freq << 16; 798 ctl = freq << 16;
799 I915_WRITE(BLC_PWM_CTL, ctl); 799 I915_WRITE(BLC_PWM_CTL, ctl);
800 800
801 /* XXX: combine this into above write? */
802 intel_panel_actually_set_backlight(connector, panel->backlight.level);
803
804 ctl2 = BLM_PIPE(pipe); 801 ctl2 = BLM_PIPE(pipe);
805 if (panel->backlight.combination_mode) 802 if (panel->backlight.combination_mode)
806 ctl2 |= BLM_COMBINATION_MODE; 803 ctl2 |= BLM_COMBINATION_MODE;
@@ -809,6 +806,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
809 I915_WRITE(BLC_PWM_CTL2, ctl2); 806 I915_WRITE(BLC_PWM_CTL2, ctl2);
810 POSTING_READ(BLC_PWM_CTL2); 807 POSTING_READ(BLC_PWM_CTL2);
811 I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); 808 I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
809
810 intel_panel_actually_set_backlight(connector, panel->backlight.level);
812} 811}
813 812
814static void vlv_enable_backlight(struct intel_connector *connector) 813static void vlv_enable_backlight(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d1e53abec1b5..54242e4f6f4c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -511,8 +511,7 @@ void intel_update_fbc(struct drm_device *dev)
511 obj = intel_fb->obj; 511 obj = intel_fb->obj;
512 adjusted_mode = &intel_crtc->config.adjusted_mode; 512 adjusted_mode = &intel_crtc->config.adjusted_mode;
513 513
514 if (i915.enable_fbc < 0 && 514 if (i915.enable_fbc < 0) {
515 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
516 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) 515 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
517 DRM_DEBUG_KMS("disabled per chip default\n"); 516 DRM_DEBUG_KMS("disabled per chip default\n");
518 goto out_disable; 517 goto out_disable;
@@ -3506,15 +3505,11 @@ static void gen8_enable_rps(struct drm_device *dev)
3506 3505
3507 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3506 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3508 3507
3509 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
3510 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
3511 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
3512
3513 /* 5: Enable RPS */ 3508 /* 5: Enable RPS */
3514 I915_WRITE(GEN6_RP_CONTROL, 3509 I915_WRITE(GEN6_RP_CONTROL,
3515 GEN6_RP_MEDIA_TURBO | 3510 GEN6_RP_MEDIA_TURBO |
3516 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3511 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3517 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */ 3512 GEN6_RP_MEDIA_IS_GFX |
3518 GEN6_RP_ENABLE | 3513 GEN6_RP_ENABLE |
3519 GEN6_RP_UP_BUSY_AVG | 3514 GEN6_RP_UP_BUSY_AVG |
3520 GEN6_RP_DOWN_IDLE_AVG); 3515 GEN6_RP_DOWN_IDLE_AVG);
@@ -6024,30 +6019,32 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
6024static struct i915_power_domains *hsw_pwr; 6019static struct i915_power_domains *hsw_pwr;
6025 6020
6026/* Display audio driver power well request */ 6021/* Display audio driver power well request */
6027void i915_request_power_well(void) 6022int i915_request_power_well(void)
6028{ 6023{
6029 struct drm_i915_private *dev_priv; 6024 struct drm_i915_private *dev_priv;
6030 6025
6031 if (WARN_ON(!hsw_pwr)) 6026 if (!hsw_pwr)
6032 return; 6027 return -ENODEV;
6033 6028
6034 dev_priv = container_of(hsw_pwr, struct drm_i915_private, 6029 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6035 power_domains); 6030 power_domains);
6036 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); 6031 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6032 return 0;
6037} 6033}
6038EXPORT_SYMBOL_GPL(i915_request_power_well); 6034EXPORT_SYMBOL_GPL(i915_request_power_well);
6039 6035
6040/* Display audio driver power well release */ 6036/* Display audio driver power well release */
6041void i915_release_power_well(void) 6037int i915_release_power_well(void)
6042{ 6038{
6043 struct drm_i915_private *dev_priv; 6039 struct drm_i915_private *dev_priv;
6044 6040
6045 if (WARN_ON(!hsw_pwr)) 6041 if (!hsw_pwr)
6046 return; 6042 return -ENODEV;
6047 6043
6048 dev_priv = container_of(hsw_pwr, struct drm_i915_private, 6044 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6049 power_domains); 6045 power_domains);
6050 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); 6046 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6047 return 0;
6051} 6048}
6052EXPORT_SYMBOL_GPL(i915_release_power_well); 6049EXPORT_SYMBOL_GPL(i915_release_power_well);
6053 6050
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 910c83cf7d44..e72017bdcd7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,7 +55,7 @@ struct intel_ring_hangcheck {
55 u32 seqno; 55 u32 seqno;
56 int score; 56 int score;
57 enum intel_ring_hangcheck_action action; 57 enum intel_ring_hangcheck_action action;
58 bool deadlock; 58 int deadlock;
59}; 59};
60 60
61struct intel_ringbuffer { 61struct intel_ringbuffer {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6a4d5bc17697..20375cc7f82d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1385,7 +1385,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1385 >> SDVO_PORT_MULTIPLY_SHIFT) + 1; 1385 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
1386 } 1386 }
1387 1387
1388 dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier; 1388 dotclock = pipe_config->port_clock;
1389 if (pipe_config->pixel_multiplier)
1390 dotclock /= pipe_config->pixel_multiplier;
1389 1391
1390 if (HAS_PCH_SPLIT(dev)) 1392 if (HAS_PCH_SPLIT(dev))
1391 ironlake_check_encoder_dotclock(pipe_config, dotclock); 1393 ironlake_check_encoder_dotclock(pipe_config, dotclock);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 79cba593df0d..4f6fef7ac069 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -320,7 +320,8 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
320 struct drm_i915_private *dev_priv = dev->dev_private; 320 struct drm_i915_private *dev_priv = dev->dev_private;
321 unsigned long irqflags; 321 unsigned long irqflags;
322 322
323 del_timer_sync(&dev_priv->uncore.force_wake_timer); 323 if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
324 gen6_force_wake_timer((unsigned long)dev_priv);
324 325
325 /* Hold uncore.lock across reset to prevent any register access 326 /* Hold uncore.lock across reset to prevent any register access
326 * with forcewake not set correctly 327 * with forcewake not set correctly
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 2b6156d0e4b5..8b307e143632 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -140,6 +140,7 @@ nouveau-y += core/subdev/i2c/nv4e.o
140nouveau-y += core/subdev/i2c/nv50.o 140nouveau-y += core/subdev/i2c/nv50.o
141nouveau-y += core/subdev/i2c/nv94.o 141nouveau-y += core/subdev/i2c/nv94.o
142nouveau-y += core/subdev/i2c/nvd0.o 142nouveau-y += core/subdev/i2c/nvd0.o
143nouveau-y += core/subdev/i2c/gf117.o
143nouveau-y += core/subdev/i2c/nve0.o 144nouveau-y += core/subdev/i2c/nve0.o
144nouveau-y += core/subdev/ibus/nvc0.o 145nouveau-y += core/subdev/ibus/nvc0.o
145nouveau-y += core/subdev/ibus/nve0.o 146nouveau-y += core/subdev/ibus/nve0.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index f199957995fa..8d55ed633b19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -314,7 +314,7 @@ nvc0_identify(struct nouveau_device *device)
314 device->cname = "GF117"; 314 device->cname = "GF117";
315 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 315 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
316 device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass; 316 device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
317 device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass; 317 device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
318 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 318 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
319 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 319 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
320 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 320 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index c41f656abe64..9c38c5e40500 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -99,8 +99,10 @@ _nouveau_disp_dtor(struct nouveau_object *object)
99 99
100 nouveau_event_destroy(&disp->vblank); 100 nouveau_event_destroy(&disp->vblank);
101 101
102 list_for_each_entry_safe(outp, outt, &disp->outp, head) { 102 if (disp->outp.next) {
103 nouveau_object_ref(NULL, (struct nouveau_object **)&outp); 103 list_for_each_entry_safe(outp, outt, &disp->outp, head) {
104 nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
105 }
104 } 106 }
105 107
106 nouveau_engine_destroy(&disp->base); 108 nouveau_engine_destroy(&disp->base);
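The destructor above can run on a partially constructed display engine, before the output list head has been initialized, in which case disp->outp.next is still NULL and the unconditional list walk would dereference it; the added if (disp->outp.next) check simply skips the walk then. A minimal sketch of the idea with a hand-rolled list head rather than the kernel's list.h (types and names here are assumptions for illustration):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct disp {
	struct list_head outp;   /* zeroed on allocation, initialized later */
};

static void disp_dtor(struct disp *disp)
{
	/* Walk the output list only if it was ever initialized; a partially
	 * constructed object still has outp.next == NULL. */
	if (disp->outp.next) {
		struct list_head *p = disp->outp.next;
		while (p != &disp->outp) {
			struct list_head *n = p->next;   /* "safe" variant */
			printf("dropping output %p\n", (void *)p);
			p = n;
		}
	}
}

int main(void)
{
	struct disp d = { { 0, 0 } };   /* ctor bailed out before list init */
	disp_dtor(&d);                  /* list walk is skipped, no crash */
	return 0;
}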
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 39562d48101d..5a5b59b21130 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -241,7 +241,9 @@ dp_link_train_eq(struct dp_state *dp)
241 dp_set_training_pattern(dp, 2); 241 dp_set_training_pattern(dp, 2);
242 242
243 do { 243 do {
244 if (dp_link_train_update(dp, dp->pc2, 400)) 244 if ((tries &&
245 dp_link_train_commit(dp, dp->pc2)) ||
246 dp_link_train_update(dp, dp->pc2, 400))
245 break; 247 break;
246 248
247 eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE); 249 eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -253,9 +255,6 @@ dp_link_train_eq(struct dp_state *dp)
253 !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) 255 !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
254 eq_done = false; 256 eq_done = false;
255 } 257 }
256
257 if (dp_link_train_commit(dp, dp->pc2))
258 break;
259 } while (!eq_done && cr_done && ++tries <= 5); 258 } while (!eq_done && cr_done && ++tries <= 5);
260 259
261 return eq_done ? 0 : -1; 260 return eq_done ? 0 : -1;
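The dport.c hunk moves the drive-setting commit from the bottom of the channel-equalisation loop to the top of every retry after the first, so the status that dp_link_train_update() samples always follows the most recent commit. A simplified sketch of the reordered control flow with stub helpers (the stub names are assumptions, and the real loop's per-lane clock-recovery checks are left out):

#include <stdbool.h>

/* Hypothetical stubs for the driver's helpers. */
static int commit_drive_settings(void)      { return 0; }
static int read_link_status(int timeout_us) { (void)timeout_us; return 0; }
static bool lanes_aligned(void)             { return true; }

/* Commit new drive settings at the top of every pass except the first,
 * then read status; previously status was read before the commit. */
static int link_train_eq(void)
{
	bool eq_done = false;
	int tries = 0;

	do {
		if ((tries && commit_drive_settings()) ||
		    read_link_status(400))
			break;
		eq_done = lanes_aligned();
	} while (!eq_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

int main(void) { return link_train_eq(); }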
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 1e85f36c705f..26e962b7e702 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1270,7 +1270,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1270 i--; 1270 i--;
1271 1271
1272 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); 1272 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
1273 if (!data) 1273 if (!outp)
1274 return NULL; 1274 return NULL;
1275 1275
1276 if (outp->info.location == 0) { 1276 if (outp->info.location == 0) {
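The nv50.c hunk changes the failure test from the side value to the pointer that is dereferenced immediately afterwards: exec_lookup() can return NULL even when it has filled in data, so checking !outp is what actually prevents the NULL dereference. A tiny sketch of that pattern with hypothetical stand-ins for the lookup and its types:

#include <stddef.h>

struct outp_info { int location; };
struct outp      { struct outp_info info; };

static struct outp g_outp;

/* Hypothetical lookup: may fail (return NULL) even after writing *data. */
static struct outp *lookup(int id, unsigned int *data)
{
	*data = 0x1234;                 /* side data is set regardless */
	return id >= 0 ? &g_outp : NULL;
}

static struct outp *clkcmp(int id)
{
	unsigned int data = 0;
	struct outp *outp = lookup(id, &data);

	if (!outp)                      /* was: if (!data) - wrong object tested */
		return NULL;

	return outp->info.location == 0 ? outp : NULL;
}

int main(void)
{
	return clkcmp(-1) == NULL ? 0 : 1;
}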
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 2f7345f7fe07..7445f12b1d9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -54,7 +54,7 @@ mmio_list_base:
54#ifdef INCLUDE_CODE 54#ifdef INCLUDE_CODE
55// reports an exception to the host 55// reports an exception to the host
56// 56//
57// In: $r15 error code (see nvc0.fuc) 57// In: $r15 error code (see os.h)
58// 58//
59error: 59error:
60 push $r14 60 push $r14
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index c8ddb8d71b91..b4ad18bf5a26 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -49,7 +49,7 @@ hub_mmio_list_next:
49#ifdef INCLUDE_CODE 49#ifdef INCLUDE_CODE
50// reports an exception to the host 50// reports an exception to the host
51// 51//
52// In: $r15 error code (see nvc0.fuc) 52// In: $r15 error code (see os.h)
53// 53//
54error: 54error:
55 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15) 55 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
@@ -343,13 +343,25 @@ ih:
343 ih_no_ctxsw: 343 ih_no_ctxsw:
344 and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD 344 and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
345 bra e #ih_no_fwmthd 345 bra e #ih_no_fwmthd
346 // none we handle, ack, and fall-through to unhandled 346 // none we handle; report to host and ack
347 nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
348 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
349 nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
350 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
351 extr $r14 $r15 16:18
352 shl b32 $r14 $r14 2
353 imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
354 add b32 $r14 $r15
355 call(nv_rd32)
356 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
357 mov $r15 E_BAD_FWMTHD
358 call(error)
347 mov $r11 0x100 359 mov $r11 0x100
348 nv_wr32(0x400144, $r11) 360 nv_wr32(0x400144, $r11)
349 361
350 // anything we didn't handle, bring it to the host's attention 362 // anything we didn't handle, bring it to the host's attention
351 ih_no_fwmthd: 363 ih_no_fwmthd:
352 mov $r11 0x104 // FIFO | CHSW 364 mov $r11 0x504 // FIFO | CHSW | FWMTHD
353 not b32 $r11 365 not b32 $r11
354 and $r11 $r10 $r11 366 and $r11 $r10 $r11
355 bra e #ih_no_other 367 bra e #ih_no_other
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
index 214dd16ec566..5f953c5c20b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
478 0x01040080, 478 0x01040080,
479 0xbd0001f6, 479 0xbd0001f6,
480 0x01004104, 480 0x01004104,
481 0x627e020f, 481 0xa87e020f,
482 0x717e0006, 482 0xb77e0006,
483 0x100f0006, 483 0x100f0006,
484 0x0006b37e, 484 0x0006f97e,
485 0x98000e98, 485 0x98000e98,
486 0x207e010f, 486 0x207e010f,
487 0x14950001, 487 0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
523 0x800040b7, 523 0x800040b7,
524 0xf40132b6, 524 0xf40132b6,
525 0x000fb41b, 525 0x000fb41b,
526 0x0006b37e, 526 0x0006f97e,
527 0x627e000f, 527 0xa87e000f,
528 0x00800006, 528 0x00800006,
529 0x01f60201, 529 0x01f60201,
530 0xbd04bd00, 530 0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
554 0x0009f602, 554 0x0009f602,
555 0x32f404bd, 555 0x32f404bd,
556 0x0231f401, 556 0x0231f401,
557 0x0008367e, 557 0x00087c7e,
558 0x99f094bd, 558 0x99f094bd,
559 0x17008007, 559 0x17008007,
560 0x0009f602, 560 0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
563 0x37008006, 563 0x37008006,
564 0x0009f602, 564 0x0009f602,
565 0x31f404bd, 565 0x31f404bd,
566 0x08367e01, 566 0x087c7e01,
567 0xf094bd00, 567 0xf094bd00,
568 0x00800699, 568 0x00800699,
569 0x09f60217, 569 0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
572 0x20f92f0e, 572 0x20f92f0e,
573 0x32f412b2, 573 0x32f412b2,
574 0x0232f401, 574 0x0232f401,
575 0x0008367e, 575 0x00087c7e,
576 0x008020fc, 576 0x008020fc,
577 0x02f602c0, 577 0x02f602c0,
578 0xf404bd00, 578 0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
580 0x23c8130e, 580 0x23c8130e,
581 0x0d0bf41f, 581 0x0d0bf41f,
582 0xf40131f4, 582 0xf40131f4,
583 0x367e0232, 583 0x7c7e0232,
584/* 0x054e: chsw_done */ 584/* 0x054e: chsw_done */
585 0x01020008, 585 0x01020008,
586 0x02c30080, 586 0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
593 0xb0ff2a0e, 593 0xb0ff2a0e,
594 0x1bf401e4, 594 0x1bf401e4,
595 0x7ef2b20c, 595 0x7ef2b20c,
596 0xf40007d6, 596 0xf400081c,
597/* 0x057a: main_not_ctx_chan */ 597/* 0x057a: main_not_ctx_chan */
598 0xe4b0400e, 598 0xe4b0400e,
599 0x2c1bf402, 599 0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
602 0x0009f602, 602 0x0009f602,
603 0x32f404bd, 603 0x32f404bd,
604 0x0232f401, 604 0x0232f401,
605 0x0008367e, 605 0x00087c7e,
606 0x99f094bd, 606 0x99f094bd,
607 0x17008007, 607 0x17008007,
608 0x0009f602, 608 0x0009f602,
@@ -642,238 +642,238 @@ uint32_t gm107_grhub_code[] = {
642/* 0x061a: ih_no_ctxsw */ 642/* 0x061a: ih_no_ctxsw */
643 0xabe40000, 643 0xabe40000,
644 0x0bf40400, 644 0x0bf40400,
645 0x01004b10, 645 0x07088e56,
646 0x448ebfb2,
647 0x8f7e4001,
648/* 0x062e: ih_no_fwmthd */
649 0x044b0000,
650 0xffb0bd01,
651 0x0bf4b4ab,
652 0x0700800c,
653 0x000bf603,
654/* 0x0642: ih_no_other */
655 0x004004bd,
656 0x000af601,
657 0xf0fc04bd,
658 0xd0fce0fc,
659 0xa0fcb0fc,
660 0x80fc90fc,
661 0xfc0088fe,
662 0x0032f480,
663/* 0x0662: ctx_4170s */
664 0xf5f001f8,
665 0x8effb210,
666 0x7e404170,
667 0xf800008f,
668/* 0x0671: ctx_4170w */
669 0x41708e00,
670 0x00657e40, 646 0x00657e40,
671 0xf0ffb200, 647 0x80ffb200,
672 0x1bf410f4, 648 0xf6020400,
673/* 0x0683: ctx_redswitch */
674 0x4e00f8f3,
675 0xe5f00200,
676 0x20e5f040,
677 0x8010e5f0,
678 0xf6018500,
679 0x04bd000e,
680/* 0x069a: ctx_redswitch_delay */
681 0xf2b6080f,
682 0xfd1bf401,
683 0x0400e5f1,
684 0x0100e5f1,
685 0x01850080,
686 0xbd000ef6,
687/* 0x06b3: ctx_86c */
688 0x8000f804,
689 0xf6022300,
690 0x04bd000f, 649 0x04bd000f,
691 0x148effb2, 650 0x4007048e,
692 0x8f7e408a, 651 0x0000657e,
693 0xffb20000, 652 0x0080ffb2,
694 0x41a88c8e, 653 0x0ff60203,
654 0xc704bd00,
655 0xee9450fe,
656 0x07008f02,
657 0x00efbb40,
658 0x0000657e,
659 0x02020080,
660 0xbd000ff6,
661 0x7e030f04,
662 0x4b0002f8,
663 0xbfb20100,
664 0x4001448e,
695 0x00008f7e, 665 0x00008f7e,
696/* 0x06d2: ctx_mem */ 666/* 0x0674: ih_no_fwmthd */
697 0x008000f8, 667 0xbd05044b,
698 0x0ff60284, 668 0xb4abffb0,
699/* 0x06db: ctx_mem_wait */ 669 0x800c0bf4,
700 0x8f04bd00, 670 0xf6030700,
701 0xcf028400, 671 0x04bd000b,
702 0xfffd00ff, 672/* 0x0688: ih_no_other */
703 0xf61bf405, 673 0xf6010040,
704/* 0x06ea: ctx_load */ 674 0x04bd000a,
705 0x94bd00f8, 675 0xe0fcf0fc,
706 0x800599f0, 676 0xb0fcd0fc,
707 0xf6023700, 677 0x90fca0fc,
708 0x04bd0009, 678 0x88fe80fc,
709 0xb87e0c0a, 679 0xf480fc00,
710 0xf4bd0000, 680 0x01f80032,
711 0x02890080, 681/* 0x06a8: ctx_4170s */
682 0xb210f5f0,
683 0x41708eff,
684 0x008f7e40,
685/* 0x06b7: ctx_4170w */
686 0x8e00f800,
687 0x7e404170,
688 0xb2000065,
689 0x10f4f0ff,
690 0xf8f31bf4,
691/* 0x06c9: ctx_redswitch */
692 0x02004e00,
693 0xf040e5f0,
694 0xe5f020e5,
695 0x85008010,
696 0x000ef601,
697 0x080f04bd,
698/* 0x06e0: ctx_redswitch_delay */
699 0xf401f2b6,
700 0xe5f1fd1b,
701 0xe5f10400,
702 0x00800100,
703 0x0ef60185,
704 0xf804bd00,
705/* 0x06f9: ctx_86c */
706 0x23008000,
707 0x000ff602,
708 0xffb204bd,
709 0x408a148e,
710 0x00008f7e,
711 0x8c8effb2,
712 0x8f7e41a8,
713 0x00f80000,
714/* 0x0718: ctx_mem */
715 0x02840080,
712 0xbd000ff6, 716 0xbd000ff6,
713 0xc1008004, 717/* 0x0721: ctx_mem_wait */
714 0x0002f602, 718 0x84008f04,
715 0x008004bd, 719 0x00ffcf02,
716 0x02f60283, 720 0xf405fffd,
717 0x0f04bd00, 721 0x00f8f61b,
718 0x06d27e07, 722/* 0x0730: ctx_load */
719 0xc0008000, 723 0x99f094bd,
720 0x0002f602, 724 0x37008005,
721 0x0bfe04bd, 725 0x0009f602,
722 0x1f2af000, 726 0x0c0a04bd,
723 0xb60424b6, 727 0x0000b87e,
724 0x94bd0220, 728 0x0080f4bd,
725 0x800899f0, 729 0x0ff60289,
726 0xf6023700, 730 0x8004bd00,
727 0x04bd0009, 731 0xf602c100,
728 0x02810080, 732 0x04bd0002,
729 0xbd0002f6, 733 0x02830080,
730 0x0000d204,
731 0x25f08000,
732 0x88008002,
733 0x0002f602,
734 0x100104bd,
735 0xf0020042,
736 0x12fa0223,
737 0xbd03f805,
738 0x0899f094,
739 0x02170080,
740 0xbd0009f6,
741 0x81019804,
742 0x981814b6,
743 0x25b68002,
744 0x0512fd08,
745 0xbd1601b5,
746 0x0999f094,
747 0x02370080,
748 0xbd0009f6,
749 0x81008004,
750 0x0001f602,
751 0x010204bd,
752 0x02880080,
753 0xbd0002f6, 734 0xbd0002f6,
754 0x01004104, 735 0x7e070f04,
755 0xfa0613f0, 736 0x80000718,
756 0x03f80501, 737 0xf602c000,
738 0x04bd0002,
739 0xf0000bfe,
740 0x24b61f2a,
741 0x0220b604,
757 0x99f094bd, 742 0x99f094bd,
758 0x17008009, 743 0x37008008,
759 0x0009f602, 744 0x0009f602,
760 0x94bd04bd, 745 0x008004bd,
761 0x800599f0, 746 0x02f60281,
747 0xd204bd00,
748 0x80000000,
749 0x800225f0,
750 0xf6028800,
751 0x04bd0002,
752 0x00421001,
753 0x0223f002,
754 0xf80512fa,
755 0xf094bd03,
756 0x00800899,
757 0x09f60217,
758 0x9804bd00,
759 0x14b68101,
760 0x80029818,
761 0xfd0825b6,
762 0x01b50512,
763 0xf094bd16,
764 0x00800999,
765 0x09f60237,
766 0x8004bd00,
767 0xf6028100,
768 0x04bd0001,
769 0x00800102,
770 0x02f60288,
771 0x4104bd00,
772 0x13f00100,
773 0x0501fa06,
774 0x94bd03f8,
775 0x800999f0,
762 0xf6021700, 776 0xf6021700,
763 0x04bd0009, 777 0x04bd0009,
764/* 0x07d6: ctx_chan */ 778 0x99f094bd,
765 0xea7e00f8, 779 0x17008005,
766 0x0c0a0006, 780 0x0009f602,
767 0x0000b87e, 781 0x00f804bd,
768 0xd27e050f, 782/* 0x081c: ctx_chan */
769 0x00f80006, 783 0x0007307e,
770/* 0x07e8: ctx_mmio_exec */ 784 0xb87e0c0a,
771 0x80410398, 785 0x050f0000,
786 0x0007187e,
787/* 0x082e: ctx_mmio_exec */
788 0x039800f8,
789 0x81008041,
790 0x0003f602,
791 0x34bd04bd,
792/* 0x083c: ctx_mmio_loop */
793 0xf4ff34c4,
794 0x00450e1b,
795 0x0653f002,
796 0xf80535fa,
797/* 0x084d: ctx_mmio_pull */
798 0x804e9803,
799 0x7e814f98,
800 0xb600008f,
801 0x12b60830,
802 0xdf1bf401,
803/* 0x0860: ctx_mmio_done */
804 0x80160398,
772 0xf6028100, 805 0xf6028100,
773 0x04bd0003, 806 0x04bd0003,
774/* 0x07f6: ctx_mmio_loop */ 807 0x414000b5,
775 0x34c434bd, 808 0x13f00100,
776 0x0e1bf4ff, 809 0x0601fa06,
777 0xf0020045, 810 0x00f803f8,
778 0x35fa0653, 811/* 0x087c: ctx_xfer */
779/* 0x0807: ctx_mmio_pull */ 812 0x0080040e,
780 0x9803f805, 813 0x0ef60302,
781 0x4f98804e, 814/* 0x0887: ctx_xfer_idle */
782 0x008f7e81, 815 0x8e04bd00,
783 0x0830b600, 816 0xcf030000,
784 0xf40112b6, 817 0xe4f100ee,
785/* 0x081a: ctx_mmio_done */ 818 0x1bf42000,
786 0x0398df1b, 819 0x0611f4f5,
787 0x81008016, 820/* 0x089b: ctx_xfer_pre */
788 0x0003f602, 821 0x0f0c02f4,
789 0x00b504bd, 822 0x06f97e10,
790 0x01004140, 823 0x1b11f400,
791 0xfa0613f0, 824/* 0x08a4: ctx_xfer_pre_load */
792 0x03f80601, 825 0xa87e020f,
793/* 0x0836: ctx_xfer */ 826 0xb77e0006,
794 0x040e00f8, 827 0xc97e0006,
795 0x03020080, 828 0xf4bd0006,
796 0xbd000ef6, 829 0x0006a87e,
797/* 0x0841: ctx_xfer_idle */ 830 0x0007307e,
798 0x00008e04, 831/* 0x08bc: ctx_xfer_exec */
799 0x00eecf03, 832 0xbd160198,
800 0x2000e4f1, 833 0x05008024,
801 0xf4f51bf4, 834 0x0002f601,
802 0x02f40611, 835 0x1fb204bd,
803/* 0x0855: ctx_xfer_pre */ 836 0x41a5008e,
804 0x7e100f0c,
805 0xf40006b3,
806/* 0x085e: ctx_xfer_pre_load */
807 0x020f1b11,
808 0x0006627e,
809 0x0006717e,
810 0x0006837e,
811 0x627ef4bd,
812 0xea7e0006,
813/* 0x0876: ctx_xfer_exec */
814 0x01980006,
815 0x8024bd16,
816 0xf6010500,
817 0x04bd0002,
818 0x008e1fb2,
819 0x8f7e41a5,
820 0xfcf00000,
821 0x022cf001,
822 0xfd0124b6,
823 0xffb205f2,
824 0x41a5048e,
825 0x00008f7e, 837 0x00008f7e,
826 0x0002167e, 838 0xf001fcf0,
827 0xfc8024bd, 839 0x24b6022c,
828 0x02f60247, 840 0x05f2fd01,
829 0xf004bd00, 841 0x048effb2,
830 0x20b6012c, 842 0x8f7e41a5,
831 0x4afc8003, 843 0x167e0000,
832 0x0002f602, 844 0x24bd0002,
833 0xacf004bd, 845 0x0247fc80,
834 0x06a5f001, 846 0xbd0002f6,
835 0x0c98000b, 847 0x012cf004,
836 0x010d9800, 848 0x800320b6,
837 0x3d7e000e, 849 0xf6024afc,
838 0x080a0001, 850 0x04bd0002,
839 0x0000ec7e, 851 0xf001acf0,
840 0x00020a7e, 852 0x000b06a5,
841 0x0a1201f4, 853 0x98000c98,
842 0x00b87e0c, 854 0x000e010d,
843 0x7e050f00, 855 0x00013d7e,
844 0xf40006d2, 856 0xec7e080a,
845/* 0x08f2: ctx_xfer_post */ 857 0x0a7e0000,
846 0x020f2d02, 858 0x01f40002,
847 0x0006627e, 859 0x7e0c0a12,
848 0xb37ef4bd, 860 0x0f0000b8,
849 0x277e0006, 861 0x07187e05,
850 0x717e0002, 862 0x2d02f400,
863/* 0x0938: ctx_xfer_post */
864 0xa87e020f,
851 0xf4bd0006, 865 0xf4bd0006,
852 0x0006627e, 866 0x0006f97e,
853 0x981011f4, 867 0x0002277e,
854 0x11fd4001, 868 0x0006b77e,
855 0x070bf405, 869 0xa87ef4bd,
856 0x0007e87e, 870 0x11f40006,
857/* 0x091c: ctx_xfer_no_post_mmio */ 871 0x40019810,
858/* 0x091c: ctx_xfer_done */ 872 0xf40511fd,
859 0x000000f8, 873 0x2e7e070b,
860 0x00000000, 874/* 0x0962: ctx_xfer_no_post_mmio */
861 0x00000000, 875/* 0x0962: ctx_xfer_done */
862 0x00000000, 876 0x00f80008,
863 0x00000000,
864 0x00000000,
865 0x00000000,
866 0x00000000,
867 0x00000000,
868 0x00000000,
869 0x00000000,
870 0x00000000,
871 0x00000000,
872 0x00000000,
873 0x00000000,
874 0x00000000,
875 0x00000000,
876 0x00000000,
877 0x00000000, 877 0x00000000,
878 0x00000000, 878 0x00000000,
879 0x00000000, 879 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
index 64dfd75192bf..e49b5a877ae4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -478,10 +478,10 @@ uint32_t nv108_grhub_code[] = {
478 0x01040080, 478 0x01040080,
479 0xbd0001f6, 479 0xbd0001f6,
480 0x01004104, 480 0x01004104,
481 0x627e020f, 481 0xa87e020f,
482 0x717e0006, 482 0xb77e0006,
483 0x100f0006, 483 0x100f0006,
484 0x0006b37e, 484 0x0006f97e,
485 0x98000e98, 485 0x98000e98,
486 0x207e010f, 486 0x207e010f,
487 0x14950001, 487 0x14950001,
@@ -523,8 +523,8 @@ uint32_t nv108_grhub_code[] = {
523 0x800040b7, 523 0x800040b7,
524 0xf40132b6, 524 0xf40132b6,
525 0x000fb41b, 525 0x000fb41b,
526 0x0006b37e, 526 0x0006f97e,
527 0x627e000f, 527 0xa87e000f,
528 0x00800006, 528 0x00800006,
529 0x01f60201, 529 0x01f60201,
530 0xbd04bd00, 530 0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t nv108_grhub_code[] = {
554 0x0009f602, 554 0x0009f602,
555 0x32f404bd, 555 0x32f404bd,
556 0x0231f401, 556 0x0231f401,
557 0x0008367e, 557 0x00087c7e,
558 0x99f094bd, 558 0x99f094bd,
559 0x17008007, 559 0x17008007,
560 0x0009f602, 560 0x0009f602,
@@ -563,7 +563,7 @@ uint32_t nv108_grhub_code[] = {
563 0x37008006, 563 0x37008006,
564 0x0009f602, 564 0x0009f602,
565 0x31f404bd, 565 0x31f404bd,
566 0x08367e01, 566 0x087c7e01,
567 0xf094bd00, 567 0xf094bd00,
568 0x00800699, 568 0x00800699,
569 0x09f60217, 569 0x09f60217,
@@ -572,7 +572,7 @@ uint32_t nv108_grhub_code[] = {
572 0x20f92f0e, 572 0x20f92f0e,
573 0x32f412b2, 573 0x32f412b2,
574 0x0232f401, 574 0x0232f401,
575 0x0008367e, 575 0x00087c7e,
576 0x008020fc, 576 0x008020fc,
577 0x02f602c0, 577 0x02f602c0,
578 0xf404bd00, 578 0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t nv108_grhub_code[] = {
580 0x23c8130e, 580 0x23c8130e,
581 0x0d0bf41f, 581 0x0d0bf41f,
582 0xf40131f4, 582 0xf40131f4,
583 0x367e0232, 583 0x7c7e0232,
584/* 0x054e: chsw_done */ 584/* 0x054e: chsw_done */
585 0x01020008, 585 0x01020008,
586 0x02c30080, 586 0x02c30080,
@@ -593,7 +593,7 @@ uint32_t nv108_grhub_code[] = {
593 0xb0ff2a0e, 593 0xb0ff2a0e,
594 0x1bf401e4, 594 0x1bf401e4,
595 0x7ef2b20c, 595 0x7ef2b20c,
596 0xf40007d6, 596 0xf400081c,
597/* 0x057a: main_not_ctx_chan */ 597/* 0x057a: main_not_ctx_chan */
598 0xe4b0400e, 598 0xe4b0400e,
599 0x2c1bf402, 599 0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t nv108_grhub_code[] = {
602 0x0009f602, 602 0x0009f602,
603 0x32f404bd, 603 0x32f404bd,
604 0x0232f401, 604 0x0232f401,
605 0x0008367e, 605 0x00087c7e,
606 0x99f094bd, 606 0x99f094bd,
607 0x17008007, 607 0x17008007,
608 0x0009f602, 608 0x0009f602,
@@ -642,238 +642,238 @@ uint32_t nv108_grhub_code[] = {
642/* 0x061a: ih_no_ctxsw */ 642/* 0x061a: ih_no_ctxsw */
643 0xabe40000, 643 0xabe40000,
644 0x0bf40400, 644 0x0bf40400,
645 0x01004b10, 645 0x07088e56,
646 0x448ebfb2,
647 0x8f7e4001,
648/* 0x062e: ih_no_fwmthd */
649 0x044b0000,
650 0xffb0bd01,
651 0x0bf4b4ab,
652 0x0700800c,
653 0x000bf603,
654/* 0x0642: ih_no_other */
655 0x004004bd,
656 0x000af601,
657 0xf0fc04bd,
658 0xd0fce0fc,
659 0xa0fcb0fc,
660 0x80fc90fc,
661 0xfc0088fe,
662 0x0032f480,
663/* 0x0662: ctx_4170s */
664 0xf5f001f8,
665 0x8effb210,
666 0x7e404170,
667 0xf800008f,
668/* 0x0671: ctx_4170w */
669 0x41708e00,
670 0x00657e40, 646 0x00657e40,
671 0xf0ffb200, 647 0x80ffb200,
672 0x1bf410f4, 648 0xf6020400,
673/* 0x0683: ctx_redswitch */
674 0x4e00f8f3,
675 0xe5f00200,
676 0x20e5f040,
677 0x8010e5f0,
678 0xf6018500,
679 0x04bd000e,
680/* 0x069a: ctx_redswitch_delay */
681 0xf2b6080f,
682 0xfd1bf401,
683 0x0400e5f1,
684 0x0100e5f1,
685 0x01850080,
686 0xbd000ef6,
687/* 0x06b3: ctx_86c */
688 0x8000f804,
689 0xf6022300,
690 0x04bd000f, 649 0x04bd000f,
691 0x148effb2, 650 0x4007048e,
692 0x8f7e408a, 651 0x0000657e,
693 0xffb20000, 652 0x0080ffb2,
694 0x41a88c8e, 653 0x0ff60203,
654 0xc704bd00,
655 0xee9450fe,
656 0x07008f02,
657 0x00efbb40,
658 0x0000657e,
659 0x02020080,
660 0xbd000ff6,
661 0x7e030f04,
662 0x4b0002f8,
663 0xbfb20100,
664 0x4001448e,
695 0x00008f7e, 665 0x00008f7e,
696/* 0x06d2: ctx_mem */ 666/* 0x0674: ih_no_fwmthd */
697 0x008000f8, 667 0xbd05044b,
698 0x0ff60284, 668 0xb4abffb0,
699/* 0x06db: ctx_mem_wait */ 669 0x800c0bf4,
700 0x8f04bd00, 670 0xf6030700,
701 0xcf028400, 671 0x04bd000b,
702 0xfffd00ff, 672/* 0x0688: ih_no_other */
703 0xf61bf405, 673 0xf6010040,
704/* 0x06ea: ctx_load */ 674 0x04bd000a,
705 0x94bd00f8, 675 0xe0fcf0fc,
706 0x800599f0, 676 0xb0fcd0fc,
707 0xf6023700, 677 0x90fca0fc,
708 0x04bd0009, 678 0x88fe80fc,
709 0xb87e0c0a, 679 0xf480fc00,
710 0xf4bd0000, 680 0x01f80032,
711 0x02890080, 681/* 0x06a8: ctx_4170s */
682 0xb210f5f0,
683 0x41708eff,
684 0x008f7e40,
685/* 0x06b7: ctx_4170w */
686 0x8e00f800,
687 0x7e404170,
688 0xb2000065,
689 0x10f4f0ff,
690 0xf8f31bf4,
691/* 0x06c9: ctx_redswitch */
692 0x02004e00,
693 0xf040e5f0,
694 0xe5f020e5,
695 0x85008010,
696 0x000ef601,
697 0x080f04bd,
698/* 0x06e0: ctx_redswitch_delay */
699 0xf401f2b6,
700 0xe5f1fd1b,
701 0xe5f10400,
702 0x00800100,
703 0x0ef60185,
704 0xf804bd00,
705/* 0x06f9: ctx_86c */
706 0x23008000,
707 0x000ff602,
708 0xffb204bd,
709 0x408a148e,
710 0x00008f7e,
711 0x8c8effb2,
712 0x8f7e41a8,
713 0x00f80000,
714/* 0x0718: ctx_mem */
715 0x02840080,
712 0xbd000ff6, 716 0xbd000ff6,
713 0xc1008004, 717/* 0x0721: ctx_mem_wait */
714 0x0002f602, 718 0x84008f04,
715 0x008004bd, 719 0x00ffcf02,
716 0x02f60283, 720 0xf405fffd,
717 0x0f04bd00, 721 0x00f8f61b,
718 0x06d27e07, 722/* 0x0730: ctx_load */
719 0xc0008000, 723 0x99f094bd,
720 0x0002f602, 724 0x37008005,
721 0x0bfe04bd, 725 0x0009f602,
722 0x1f2af000, 726 0x0c0a04bd,
723 0xb60424b6, 727 0x0000b87e,
724 0x94bd0220, 728 0x0080f4bd,
725 0x800899f0, 729 0x0ff60289,
726 0xf6023700, 730 0x8004bd00,
727 0x04bd0009, 731 0xf602c100,
728 0x02810080, 732 0x04bd0002,
729 0xbd0002f6, 733 0x02830080,
730 0x0000d204,
731 0x25f08000,
732 0x88008002,
733 0x0002f602,
734 0x100104bd,
735 0xf0020042,
736 0x12fa0223,
737 0xbd03f805,
738 0x0899f094,
739 0x02170080,
740 0xbd0009f6,
741 0x81019804,
742 0x981814b6,
743 0x25b68002,
744 0x0512fd08,
745 0xbd1601b5,
746 0x0999f094,
747 0x02370080,
748 0xbd0009f6,
749 0x81008004,
750 0x0001f602,
751 0x010204bd,
752 0x02880080,
753 0xbd0002f6, 734 0xbd0002f6,
754 0x01004104, 735 0x7e070f04,
755 0xfa0613f0, 736 0x80000718,
756 0x03f80501, 737 0xf602c000,
738 0x04bd0002,
739 0xf0000bfe,
740 0x24b61f2a,
741 0x0220b604,
757 0x99f094bd, 742 0x99f094bd,
758 0x17008009, 743 0x37008008,
759 0x0009f602, 744 0x0009f602,
760 0x94bd04bd, 745 0x008004bd,
761 0x800599f0, 746 0x02f60281,
747 0xd204bd00,
748 0x80000000,
749 0x800225f0,
750 0xf6028800,
751 0x04bd0002,
752 0x00421001,
753 0x0223f002,
754 0xf80512fa,
755 0xf094bd03,
756 0x00800899,
757 0x09f60217,
758 0x9804bd00,
759 0x14b68101,
760 0x80029818,
761 0xfd0825b6,
762 0x01b50512,
763 0xf094bd16,
764 0x00800999,
765 0x09f60237,
766 0x8004bd00,
767 0xf6028100,
768 0x04bd0001,
769 0x00800102,
770 0x02f60288,
771 0x4104bd00,
772 0x13f00100,
773 0x0501fa06,
774 0x94bd03f8,
775 0x800999f0,
762 0xf6021700, 776 0xf6021700,
763 0x04bd0009, 777 0x04bd0009,
764/* 0x07d6: ctx_chan */ 778 0x99f094bd,
765 0xea7e00f8, 779 0x17008005,
766 0x0c0a0006, 780 0x0009f602,
767 0x0000b87e, 781 0x00f804bd,
768 0xd27e050f, 782/* 0x081c: ctx_chan */
769 0x00f80006, 783 0x0007307e,
770/* 0x07e8: ctx_mmio_exec */ 784 0xb87e0c0a,
771 0x80410398, 785 0x050f0000,
786 0x0007187e,
787/* 0x082e: ctx_mmio_exec */
788 0x039800f8,
789 0x81008041,
790 0x0003f602,
791 0x34bd04bd,
792/* 0x083c: ctx_mmio_loop */
793 0xf4ff34c4,
794 0x00450e1b,
795 0x0653f002,
796 0xf80535fa,
797/* 0x084d: ctx_mmio_pull */
798 0x804e9803,
799 0x7e814f98,
800 0xb600008f,
801 0x12b60830,
802 0xdf1bf401,
803/* 0x0860: ctx_mmio_done */
804 0x80160398,
772 0xf6028100, 805 0xf6028100,
773 0x04bd0003, 806 0x04bd0003,
774/* 0x07f6: ctx_mmio_loop */ 807 0x414000b5,
775 0x34c434bd, 808 0x13f00100,
776 0x0e1bf4ff, 809 0x0601fa06,
777 0xf0020045, 810 0x00f803f8,
778 0x35fa0653, 811/* 0x087c: ctx_xfer */
779/* 0x0807: ctx_mmio_pull */ 812 0x0080040e,
780 0x9803f805, 813 0x0ef60302,
781 0x4f98804e, 814/* 0x0887: ctx_xfer_idle */
782 0x008f7e81, 815 0x8e04bd00,
783 0x0830b600, 816 0xcf030000,
784 0xf40112b6, 817 0xe4f100ee,
785/* 0x081a: ctx_mmio_done */ 818 0x1bf42000,
786 0x0398df1b, 819 0x0611f4f5,
787 0x81008016, 820/* 0x089b: ctx_xfer_pre */
788 0x0003f602, 821 0x0f0c02f4,
789 0x00b504bd, 822 0x06f97e10,
790 0x01004140, 823 0x1b11f400,
791 0xfa0613f0, 824/* 0x08a4: ctx_xfer_pre_load */
792 0x03f80601, 825 0xa87e020f,
793/* 0x0836: ctx_xfer */ 826 0xb77e0006,
794 0x040e00f8, 827 0xc97e0006,
795 0x03020080, 828 0xf4bd0006,
796 0xbd000ef6, 829 0x0006a87e,
797/* 0x0841: ctx_xfer_idle */ 830 0x0007307e,
798 0x00008e04, 831/* 0x08bc: ctx_xfer_exec */
799 0x00eecf03, 832 0xbd160198,
800 0x2000e4f1, 833 0x05008024,
801 0xf4f51bf4, 834 0x0002f601,
802 0x02f40611, 835 0x1fb204bd,
803/* 0x0855: ctx_xfer_pre */ 836 0x41a5008e,
804 0x7e100f0c,
805 0xf40006b3,
806/* 0x085e: ctx_xfer_pre_load */
807 0x020f1b11,
808 0x0006627e,
809 0x0006717e,
810 0x0006837e,
811 0x627ef4bd,
812 0xea7e0006,
813/* 0x0876: ctx_xfer_exec */
814 0x01980006,
815 0x8024bd16,
816 0xf6010500,
817 0x04bd0002,
818 0x008e1fb2,
819 0x8f7e41a5,
820 0xfcf00000,
821 0x022cf001,
822 0xfd0124b6,
823 0xffb205f2,
824 0x41a5048e,
825 0x00008f7e, 837 0x00008f7e,
826 0x0002167e, 838 0xf001fcf0,
827 0xfc8024bd, 839 0x24b6022c,
828 0x02f60247, 840 0x05f2fd01,
829 0xf004bd00, 841 0x048effb2,
830 0x20b6012c, 842 0x8f7e41a5,
831 0x4afc8003, 843 0x167e0000,
832 0x0002f602, 844 0x24bd0002,
833 0xacf004bd, 845 0x0247fc80,
834 0x06a5f001, 846 0xbd0002f6,
835 0x0c98000b, 847 0x012cf004,
836 0x010d9800, 848 0x800320b6,
837 0x3d7e000e, 849 0xf6024afc,
838 0x080a0001, 850 0x04bd0002,
839 0x0000ec7e, 851 0xf001acf0,
840 0x00020a7e, 852 0x000b06a5,
841 0x0a1201f4, 853 0x98000c98,
842 0x00b87e0c, 854 0x000e010d,
843 0x7e050f00, 855 0x00013d7e,
844 0xf40006d2, 856 0xec7e080a,
845/* 0x08f2: ctx_xfer_post */ 857 0x0a7e0000,
846 0x020f2d02, 858 0x01f40002,
847 0x0006627e, 859 0x7e0c0a12,
848 0xb37ef4bd, 860 0x0f0000b8,
849 0x277e0006, 861 0x07187e05,
850 0x717e0002, 862 0x2d02f400,
863/* 0x0938: ctx_xfer_post */
864 0xa87e020f,
851 0xf4bd0006, 865 0xf4bd0006,
852 0x0006627e, 866 0x0006f97e,
853 0x981011f4, 867 0x0002277e,
854 0x11fd4001, 868 0x0006b77e,
855 0x070bf405, 869 0xa87ef4bd,
856 0x0007e87e, 870 0x11f40006,
857/* 0x091c: ctx_xfer_no_post_mmio */ 871 0x40019810,
858/* 0x091c: ctx_xfer_done */ 872 0xf40511fd,
859 0x000000f8, 873 0x2e7e070b,
860 0x00000000, 874/* 0x0962: ctx_xfer_no_post_mmio */
861 0x00000000, 875/* 0x0962: ctx_xfer_done */
862 0x00000000, 876 0x00f80008,
863 0x00000000,
864 0x00000000,
865 0x00000000,
866 0x00000000,
867 0x00000000,
868 0x00000000,
869 0x00000000,
870 0x00000000,
871 0x00000000,
872 0x00000000,
873 0x00000000,
874 0x00000000,
875 0x00000000,
876 0x00000000,
877 0x00000000, 877 0x00000000,
878 0x00000000, 878 0x00000000,
879 0x00000000, 879 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index f8f7b278a13f..92dfe6a4ac87 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvc0_grhub_code[] = {
528 0x0001d001, 528 0x0001d001,
529 0x17f104bd, 529 0x17f104bd,
530 0xf7f00100, 530 0xf7f00100,
531 0xb521f502, 531 0x0d21f502,
532 0xc721f507, 532 0x1f21f508,
533 0x10f7f007, 533 0x10f7f008,
534 0x081421f5, 534 0x086c21f5,
535 0x98000e98, 535 0x98000e98,
536 0x21f5010f, 536 0x21f5010f,
537 0x14950150, 537 0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvc0_grhub_code[] = {
574 0xb6800040, 574 0xb6800040,
575 0x1bf40132, 575 0x1bf40132,
576 0x00f7f0be, 576 0x00f7f0be,
577 0x081421f5, 577 0x086c21f5,
578 0xf500f7f0, 578 0xf500f7f0,
579 0xf107b521, 579 0xf1080d21,
580 0xf0010007, 580 0xf0010007,
581 0x01d00203, 581 0x01d00203,
582 0xbd04bd00, 582 0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvc0_grhub_code[] = {
610 0x09d00203, 610 0x09d00203,
611 0xf404bd00, 611 0xf404bd00,
612 0x31f40132, 612 0x31f40132,
613 0xe821f502, 613 0x4021f502,
614 0xf094bd09, 614 0xf094bd0a,
615 0x07f10799, 615 0x07f10799,
616 0x03f01700, 616 0x03f01700,
617 0x0009d002, 617 0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvc0_grhub_code[] = {
621 0x0203f00f, 621 0x0203f00f,
622 0xbd0009d0, 622 0xbd0009d0,
623 0x0131f404, 623 0x0131f404,
624 0x09e821f5, 624 0x0a4021f5,
625 0x99f094bd, 625 0x99f094bd,
626 0x0007f106, 626 0x0007f106,
627 0x0203f017, 627 0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvc0_grhub_code[] = {
631 0x12b920f9, 631 0x12b920f9,
632 0x0132f402, 632 0x0132f402,
633 0xf50232f4, 633 0xf50232f4,
634 0xfc09e821, 634 0xfc0a4021,
635 0x0007f120, 635 0x0007f120,
636 0x0203f0c0, 636 0x0203f0c0,
637 0xbd0002d0, 637 0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvc0_grhub_code[] = {
640 0xf41f23c8, 640 0xf41f23c8,
641 0x31f40d0b, 641 0x31f40d0b,
642 0x0232f401, 642 0x0232f401,
643 0x09e821f5, 643 0x0a4021f5,
644/* 0x063c: chsw_done */ 644/* 0x063c: chsw_done */
645 0xf10127f0, 645 0xf10127f0,
646 0xf0c30007, 646 0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvc0_grhub_code[] = {
654/* 0x0660: main_not_ctx_switch */ 654/* 0x0660: main_not_ctx_switch */
655 0xf401e4b0, 655 0xf401e4b0,
656 0xf2b90d1b, 656 0xf2b90d1b,
657 0x7821f502, 657 0xd021f502,
658 0x460ef409, 658 0x460ef409,
659/* 0x0670: main_not_ctx_chan */ 659/* 0x0670: main_not_ctx_chan */
660 0xf402e4b0, 660 0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvc0_grhub_code[] = {
664 0x09d00203, 664 0x09d00203,
665 0xf404bd00, 665 0xf404bd00,
666 0x32f40132, 666 0x32f40132,
667 0xe821f502, 667 0x4021f502,
668 0xf094bd09, 668 0xf094bd0a,
669 0x07f10799, 669 0x07f10799,
670 0x03f01700, 670 0x03f01700,
671 0x0009d002, 671 0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvc0_grhub_code[] = {
710/* 0x072b: ih_no_ctxsw */ 710/* 0x072b: ih_no_ctxsw */
711 0xe40421f4, 711 0xe40421f4,
712 0xf40400ab, 712 0xf40400ab,
713 0xb7f1140b, 713 0xe7f16c0b,
714 0xe3f00708,
715 0x6821f440,
716 0xf102ffb9,
717 0xf0040007,
718 0x0fd00203,
719 0xf104bd00,
720 0xf00704e7,
721 0x21f440e3,
722 0x02ffb968,
723 0x030007f1,
724 0xd00203f0,
725 0x04bd000f,
726 0x9450fec7,
727 0xf7f102ee,
728 0xf3f00700,
729 0x00efbb40,
730 0xf16821f4,
731 0xf0020007,
732 0x0fd00203,
733 0xf004bd00,
734 0x21f503f7,
735 0xb7f1037e,
714 0xbfb90100, 736 0xbfb90100,
715 0x44e7f102, 737 0x44e7f102,
716 0x40e3f001, 738 0x40e3f001,
717/* 0x0743: ih_no_fwmthd */ 739/* 0x079b: ih_no_fwmthd */
718 0xf19d21f4, 740 0xf19d21f4,
719 0xbd0104b7, 741 0xbd0504b7,
720 0xb4abffb0, 742 0xb4abffb0,
721 0xf10f0bf4, 743 0xf10f0bf4,
722 0xf0070007, 744 0xf0070007,
723 0x0bd00303, 745 0x0bd00303,
724/* 0x075b: ih_no_other */ 746/* 0x07b3: ih_no_other */
725 0xf104bd00, 747 0xf104bd00,
726 0xf0010007, 748 0xf0010007,
727 0x0ad00003, 749 0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvc0_grhub_code[] = {
731 0xfc90fca0, 753 0xfc90fca0,
732 0x0088fe80, 754 0x0088fe80,
733 0x32f480fc, 755 0x32f480fc,
734/* 0x077f: ctx_4160s */ 756/* 0x07d7: ctx_4160s */
735 0xf001f800, 757 0xf001f800,
736 0xffb901f7, 758 0xffb901f7,
737 0x60e7f102, 759 0x60e7f102,
738 0x40e3f041, 760 0x40e3f041,
739/* 0x078f: ctx_4160s_wait */ 761/* 0x07e7: ctx_4160s_wait */
740 0xf19d21f4, 762 0xf19d21f4,
741 0xf04160e7, 763 0xf04160e7,
742 0x21f440e3, 764 0x21f440e3,
743 0x02ffb968, 765 0x02ffb968,
744 0xf404ffc8, 766 0xf404ffc8,
745 0x00f8f00b, 767 0x00f8f00b,
746/* 0x07a4: ctx_4160c */ 768/* 0x07fc: ctx_4160c */
747 0xffb9f4bd, 769 0xffb9f4bd,
748 0x60e7f102, 770 0x60e7f102,
749 0x40e3f041, 771 0x40e3f041,
750 0xf89d21f4, 772 0xf89d21f4,
751/* 0x07b5: ctx_4170s */ 773/* 0x080d: ctx_4170s */
752 0x10f5f000, 774 0x10f5f000,
753 0xf102ffb9, 775 0xf102ffb9,
754 0xf04170e7, 776 0xf04170e7,
755 0x21f440e3, 777 0x21f440e3,
756/* 0x07c7: ctx_4170w */ 778/* 0x081f: ctx_4170w */
757 0xf100f89d, 779 0xf100f89d,
758 0xf04170e7, 780 0xf04170e7,
759 0x21f440e3, 781 0x21f440e3,
760 0x02ffb968, 782 0x02ffb968,
761 0xf410f4f0, 783 0xf410f4f0,
762 0x00f8f01b, 784 0x00f8f01b,
763/* 0x07dc: ctx_redswitch */ 785/* 0x0834: ctx_redswitch */
764 0x0200e7f1, 786 0x0200e7f1,
765 0xf040e5f0, 787 0xf040e5f0,
766 0xe5f020e5, 788 0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvc0_grhub_code[] = {
768 0x0103f085, 790 0x0103f085,
769 0xbd000ed0, 791 0xbd000ed0,
770 0x08f7f004, 792 0x08f7f004,
771/* 0x07f8: ctx_redswitch_delay */ 793/* 0x0850: ctx_redswitch_delay */
772 0xf401f2b6, 794 0xf401f2b6,
773 0xe5f1fd1b, 795 0xe5f1fd1b,
774 0xe5f10400, 796 0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvc0_grhub_code[] = {
776 0x03f08500, 798 0x03f08500,
777 0x000ed001, 799 0x000ed001,
778 0x00f804bd, 800 0x00f804bd,
779/* 0x0814: ctx_86c */ 801/* 0x086c: ctx_86c */
780 0x1b0007f1, 802 0x1b0007f1,
781 0xd00203f0, 803 0xd00203f0,
782 0x04bd000f, 804 0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvc0_grhub_code[] = {
787 0xa86ce7f1, 809 0xa86ce7f1,
788 0xf441e3f0, 810 0xf441e3f0,
789 0x00f89d21, 811 0x00f89d21,
790/* 0x083c: ctx_mem */ 812/* 0x0894: ctx_mem */
791 0x840007f1, 813 0x840007f1,
792 0xd00203f0, 814 0xd00203f0,
793 0x04bd000f, 815 0x04bd000f,
794/* 0x0848: ctx_mem_wait */ 816/* 0x08a0: ctx_mem_wait */
795 0x8400f7f1, 817 0x8400f7f1,
796 0xcf02f3f0, 818 0xcf02f3f0,
797 0xfffd00ff, 819 0xfffd00ff,
798 0xf31bf405, 820 0xf31bf405,
799/* 0x085a: ctx_load */ 821/* 0x08b2: ctx_load */
800 0x94bd00f8, 822 0x94bd00f8,
801 0xf10599f0, 823 0xf10599f0,
802 0xf00f0007, 824 0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvc0_grhub_code[] = {
814 0x02d00203, 836 0x02d00203,
815 0xf004bd00, 837 0xf004bd00,
816 0x21f507f7, 838 0x21f507f7,
817 0x07f1083c, 839 0x07f10894,
818 0x03f0c000, 840 0x03f0c000,
819 0x0002d002, 841 0x0002d002,
820 0x0bfe04bd, 842 0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvc0_grhub_code[] = {
869 0x03f01700, 891 0x03f01700,
870 0x0009d002, 892 0x0009d002,
871 0x00f804bd, 893 0x00f804bd,
872/* 0x0978: ctx_chan */ 894/* 0x09d0: ctx_chan */
873 0x077f21f5, 895 0x07d721f5,
874 0x085a21f5, 896 0x08b221f5,
875 0xf40ca7f0, 897 0xf40ca7f0,
876 0xf7f0d021, 898 0xf7f0d021,
877 0x3c21f505, 899 0x9421f505,
878 0xa421f508, 900 0xfc21f508,
879/* 0x0993: ctx_mmio_exec */ 901/* 0x09eb: ctx_mmio_exec */
880 0x9800f807, 902 0x9800f807,
881 0x07f14103, 903 0x07f14103,
882 0x03f08100, 904 0x03f08100,
883 0x0003d002, 905 0x0003d002,
884 0x34bd04bd, 906 0x34bd04bd,
885/* 0x09a4: ctx_mmio_loop */ 907/* 0x09fc: ctx_mmio_loop */
886 0xf4ff34c4, 908 0xf4ff34c4,
887 0x57f10f1b, 909 0x57f10f1b,
888 0x53f00200, 910 0x53f00200,
889 0x0535fa06, 911 0x0535fa06,
890/* 0x09b6: ctx_mmio_pull */ 912/* 0x0a0e: ctx_mmio_pull */
891 0x4e9803f8, 913 0x4e9803f8,
892 0x814f9880, 914 0x814f9880,
893 0xb69d21f4, 915 0xb69d21f4,
894 0x12b60830, 916 0x12b60830,
895 0xdf1bf401, 917 0xdf1bf401,
896/* 0x09c8: ctx_mmio_done */ 918/* 0x0a20: ctx_mmio_done */
897 0xf1160398, 919 0xf1160398,
898 0xf0810007, 920 0xf0810007,
899 0x03d00203, 921 0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvc0_grhub_code[] = {
902 0x13f00100, 924 0x13f00100,
903 0x0601fa06, 925 0x0601fa06,
904 0x00f803f8, 926 0x00f803f8,
905/* 0x09e8: ctx_xfer */ 927/* 0x0a40: ctx_xfer */
906 0xf104e7f0, 928 0xf104e7f0,
907 0xf0020007, 929 0xf0020007,
908 0x0ed00303, 930 0x0ed00303,
909/* 0x09f7: ctx_xfer_idle */ 931/* 0x0a4f: ctx_xfer_idle */
910 0xf104bd00, 932 0xf104bd00,
911 0xf00000e7, 933 0xf00000e7,
912 0xeecf03e3, 934 0xeecf03e3,
913 0x00e4f100, 935 0x00e4f100,
914 0xf21bf420, 936 0xf21bf420,
915 0xf40611f4, 937 0xf40611f4,
916/* 0x0a0e: ctx_xfer_pre */ 938/* 0x0a66: ctx_xfer_pre */
917 0xf7f01102, 939 0xf7f01102,
918 0x1421f510, 940 0x6c21f510,
919 0x7f21f508, 941 0xd721f508,
920 0x1c11f407, 942 0x1c11f407,
921/* 0x0a1c: ctx_xfer_pre_load */ 943/* 0x0a74: ctx_xfer_pre_load */
922 0xf502f7f0, 944 0xf502f7f0,
923 0xf507b521, 945 0xf5080d21,
924 0xf507c721, 946 0xf5081f21,
925 0xbd07dc21, 947 0xbd083421,
926 0xb521f5f4, 948 0x0d21f5f4,
927 0x5a21f507, 949 0xb221f508,
928/* 0x0a35: ctx_xfer_exec */ 950/* 0x0a8d: ctx_xfer_exec */
929 0x16019808, 951 0x16019808,
930 0x07f124bd, 952 0x07f124bd,
931 0x03f00500, 953 0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvc0_grhub_code[] = {
960 0x1301f402, 982 0x1301f402,
961 0xf40ca7f0, 983 0xf40ca7f0,
962 0xf7f0d021, 984 0xf7f0d021,
963 0x3c21f505, 985 0x9421f505,
964 0x3202f408, 986 0x3202f408,
965/* 0x0ac4: ctx_xfer_post */ 987/* 0x0b1c: ctx_xfer_post */
966 0xf502f7f0, 988 0xf502f7f0,
967 0xbd07b521, 989 0xbd080d21,
968 0x1421f5f4, 990 0x6c21f5f4,
969 0x7f21f508, 991 0x7f21f508,
970 0xc721f502, 992 0x1f21f502,
971 0xf5f4bd07, 993 0xf5f4bd08,
972 0xf407b521, 994 0xf4080d21,
973 0x01981011, 995 0x01981011,
974 0x0511fd40, 996 0x0511fd40,
975 0xf5070bf4, 997 0xf5070bf4,
976/* 0x0aef: ctx_xfer_no_post_mmio */ 998/* 0x0b47: ctx_xfer_no_post_mmio */
977 0xf5099321, 999 0xf509eb21,
978/* 0x0af3: ctx_xfer_done */ 1000/* 0x0b4b: ctx_xfer_done */
979 0xf807a421, 1001 0xf807fc21,
1002 0x00000000,
1003 0x00000000,
1004 0x00000000,
1005 0x00000000,
1006 0x00000000,
1007 0x00000000,
1008 0x00000000,
1009 0x00000000,
1010 0x00000000,
1011 0x00000000,
1012 0x00000000,
1013 0x00000000,
1014 0x00000000,
1015 0x00000000,
1016 0x00000000,
1017 0x00000000,
1018 0x00000000,
1019 0x00000000,
1020 0x00000000,
1021 0x00000000,
1022 0x00000000,
1023 0x00000000,
1024 0x00000000,
1025 0x00000000,
1026 0x00000000,
1027 0x00000000,
1028 0x00000000,
1029 0x00000000,
1030 0x00000000,
1031 0x00000000,
1032 0x00000000,
1033 0x00000000,
1034 0x00000000,
1035 0x00000000,
1036 0x00000000,
1037 0x00000000,
1038 0x00000000,
1039 0x00000000,
1040 0x00000000,
1041 0x00000000,
1042 0x00000000,
1043 0x00000000,
980 0x00000000, 1044 0x00000000,
981 0x00000000, 1045 0x00000000,
982 0x00000000, 1046 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index 624215a005b0..62b0c7601d8b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvd7_grhub_code[] = {
528 0x0001d001, 528 0x0001d001,
529 0x17f104bd, 529 0x17f104bd,
530 0xf7f00100, 530 0xf7f00100,
531 0xb521f502, 531 0x0d21f502,
532 0xc721f507, 532 0x1f21f508,
533 0x10f7f007, 533 0x10f7f008,
534 0x081421f5, 534 0x086c21f5,
535 0x98000e98, 535 0x98000e98,
536 0x21f5010f, 536 0x21f5010f,
537 0x14950150, 537 0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvd7_grhub_code[] = {
574 0xb6800040, 574 0xb6800040,
575 0x1bf40132, 575 0x1bf40132,
576 0x00f7f0be, 576 0x00f7f0be,
577 0x081421f5, 577 0x086c21f5,
578 0xf500f7f0, 578 0xf500f7f0,
579 0xf107b521, 579 0xf1080d21,
580 0xf0010007, 580 0xf0010007,
581 0x01d00203, 581 0x01d00203,
582 0xbd04bd00, 582 0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvd7_grhub_code[] = {
610 0x09d00203, 610 0x09d00203,
611 0xf404bd00, 611 0xf404bd00,
612 0x31f40132, 612 0x31f40132,
613 0xe821f502, 613 0x4021f502,
614 0xf094bd09, 614 0xf094bd0a,
615 0x07f10799, 615 0x07f10799,
616 0x03f01700, 616 0x03f01700,
617 0x0009d002, 617 0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvd7_grhub_code[] = {
621 0x0203f00f, 621 0x0203f00f,
622 0xbd0009d0, 622 0xbd0009d0,
623 0x0131f404, 623 0x0131f404,
624 0x09e821f5, 624 0x0a4021f5,
625 0x99f094bd, 625 0x99f094bd,
626 0x0007f106, 626 0x0007f106,
627 0x0203f017, 627 0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvd7_grhub_code[] = {
631 0x12b920f9, 631 0x12b920f9,
632 0x0132f402, 632 0x0132f402,
633 0xf50232f4, 633 0xf50232f4,
634 0xfc09e821, 634 0xfc0a4021,
635 0x0007f120, 635 0x0007f120,
636 0x0203f0c0, 636 0x0203f0c0,
637 0xbd0002d0, 637 0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvd7_grhub_code[] = {
640 0xf41f23c8, 640 0xf41f23c8,
641 0x31f40d0b, 641 0x31f40d0b,
642 0x0232f401, 642 0x0232f401,
643 0x09e821f5, 643 0x0a4021f5,
644/* 0x063c: chsw_done */ 644/* 0x063c: chsw_done */
645 0xf10127f0, 645 0xf10127f0,
646 0xf0c30007, 646 0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvd7_grhub_code[] = {
654/* 0x0660: main_not_ctx_switch */ 654/* 0x0660: main_not_ctx_switch */
655 0xf401e4b0, 655 0xf401e4b0,
656 0xf2b90d1b, 656 0xf2b90d1b,
657 0x7821f502, 657 0xd021f502,
658 0x460ef409, 658 0x460ef409,
659/* 0x0670: main_not_ctx_chan */ 659/* 0x0670: main_not_ctx_chan */
660 0xf402e4b0, 660 0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvd7_grhub_code[] = {
664 0x09d00203, 664 0x09d00203,
665 0xf404bd00, 665 0xf404bd00,
666 0x32f40132, 666 0x32f40132,
667 0xe821f502, 667 0x4021f502,
668 0xf094bd09, 668 0xf094bd0a,
669 0x07f10799, 669 0x07f10799,
670 0x03f01700, 670 0x03f01700,
671 0x0009d002, 671 0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvd7_grhub_code[] = {
710/* 0x072b: ih_no_ctxsw */ 710/* 0x072b: ih_no_ctxsw */
711 0xe40421f4, 711 0xe40421f4,
712 0xf40400ab, 712 0xf40400ab,
713 0xb7f1140b, 713 0xe7f16c0b,
714 0xe3f00708,
715 0x6821f440,
716 0xf102ffb9,
717 0xf0040007,
718 0x0fd00203,
719 0xf104bd00,
720 0xf00704e7,
721 0x21f440e3,
722 0x02ffb968,
723 0x030007f1,
724 0xd00203f0,
725 0x04bd000f,
726 0x9450fec7,
727 0xf7f102ee,
728 0xf3f00700,
729 0x00efbb40,
730 0xf16821f4,
731 0xf0020007,
732 0x0fd00203,
733 0xf004bd00,
734 0x21f503f7,
735 0xb7f1037e,
714 0xbfb90100, 736 0xbfb90100,
715 0x44e7f102, 737 0x44e7f102,
716 0x40e3f001, 738 0x40e3f001,
717/* 0x0743: ih_no_fwmthd */ 739/* 0x079b: ih_no_fwmthd */
718 0xf19d21f4, 740 0xf19d21f4,
719 0xbd0104b7, 741 0xbd0504b7,
720 0xb4abffb0, 742 0xb4abffb0,
721 0xf10f0bf4, 743 0xf10f0bf4,
722 0xf0070007, 744 0xf0070007,
723 0x0bd00303, 745 0x0bd00303,
724/* 0x075b: ih_no_other */ 746/* 0x07b3: ih_no_other */
725 0xf104bd00, 747 0xf104bd00,
726 0xf0010007, 748 0xf0010007,
727 0x0ad00003, 749 0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvd7_grhub_code[] = {
731 0xfc90fca0, 753 0xfc90fca0,
732 0x0088fe80, 754 0x0088fe80,
733 0x32f480fc, 755 0x32f480fc,
734/* 0x077f: ctx_4160s */ 756/* 0x07d7: ctx_4160s */
735 0xf001f800, 757 0xf001f800,
736 0xffb901f7, 758 0xffb901f7,
737 0x60e7f102, 759 0x60e7f102,
738 0x40e3f041, 760 0x40e3f041,
739/* 0x078f: ctx_4160s_wait */ 761/* 0x07e7: ctx_4160s_wait */
740 0xf19d21f4, 762 0xf19d21f4,
741 0xf04160e7, 763 0xf04160e7,
742 0x21f440e3, 764 0x21f440e3,
743 0x02ffb968, 765 0x02ffb968,
744 0xf404ffc8, 766 0xf404ffc8,
745 0x00f8f00b, 767 0x00f8f00b,
746/* 0x07a4: ctx_4160c */ 768/* 0x07fc: ctx_4160c */
747 0xffb9f4bd, 769 0xffb9f4bd,
748 0x60e7f102, 770 0x60e7f102,
749 0x40e3f041, 771 0x40e3f041,
750 0xf89d21f4, 772 0xf89d21f4,
751/* 0x07b5: ctx_4170s */ 773/* 0x080d: ctx_4170s */
752 0x10f5f000, 774 0x10f5f000,
753 0xf102ffb9, 775 0xf102ffb9,
754 0xf04170e7, 776 0xf04170e7,
755 0x21f440e3, 777 0x21f440e3,
756/* 0x07c7: ctx_4170w */ 778/* 0x081f: ctx_4170w */
757 0xf100f89d, 779 0xf100f89d,
758 0xf04170e7, 780 0xf04170e7,
759 0x21f440e3, 781 0x21f440e3,
760 0x02ffb968, 782 0x02ffb968,
761 0xf410f4f0, 783 0xf410f4f0,
762 0x00f8f01b, 784 0x00f8f01b,
763/* 0x07dc: ctx_redswitch */ 785/* 0x0834: ctx_redswitch */
764 0x0200e7f1, 786 0x0200e7f1,
765 0xf040e5f0, 787 0xf040e5f0,
766 0xe5f020e5, 788 0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvd7_grhub_code[] = {
768 0x0103f085, 790 0x0103f085,
769 0xbd000ed0, 791 0xbd000ed0,
770 0x08f7f004, 792 0x08f7f004,
771/* 0x07f8: ctx_redswitch_delay */ 793/* 0x0850: ctx_redswitch_delay */
772 0xf401f2b6, 794 0xf401f2b6,
773 0xe5f1fd1b, 795 0xe5f1fd1b,
774 0xe5f10400, 796 0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvd7_grhub_code[] = {
776 0x03f08500, 798 0x03f08500,
777 0x000ed001, 799 0x000ed001,
778 0x00f804bd, 800 0x00f804bd,
779/* 0x0814: ctx_86c */ 801/* 0x086c: ctx_86c */
780 0x1b0007f1, 802 0x1b0007f1,
781 0xd00203f0, 803 0xd00203f0,
782 0x04bd000f, 804 0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvd7_grhub_code[] = {
787 0xa86ce7f1, 809 0xa86ce7f1,
788 0xf441e3f0, 810 0xf441e3f0,
789 0x00f89d21, 811 0x00f89d21,
790/* 0x083c: ctx_mem */ 812/* 0x0894: ctx_mem */
791 0x840007f1, 813 0x840007f1,
792 0xd00203f0, 814 0xd00203f0,
793 0x04bd000f, 815 0x04bd000f,
794/* 0x0848: ctx_mem_wait */ 816/* 0x08a0: ctx_mem_wait */
795 0x8400f7f1, 817 0x8400f7f1,
796 0xcf02f3f0, 818 0xcf02f3f0,
797 0xfffd00ff, 819 0xfffd00ff,
798 0xf31bf405, 820 0xf31bf405,
799/* 0x085a: ctx_load */ 821/* 0x08b2: ctx_load */
800 0x94bd00f8, 822 0x94bd00f8,
801 0xf10599f0, 823 0xf10599f0,
802 0xf00f0007, 824 0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvd7_grhub_code[] = {
814 0x02d00203, 836 0x02d00203,
815 0xf004bd00, 837 0xf004bd00,
816 0x21f507f7, 838 0x21f507f7,
817 0x07f1083c, 839 0x07f10894,
818 0x03f0c000, 840 0x03f0c000,
819 0x0002d002, 841 0x0002d002,
820 0x0bfe04bd, 842 0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvd7_grhub_code[] = {
869 0x03f01700, 891 0x03f01700,
870 0x0009d002, 892 0x0009d002,
871 0x00f804bd, 893 0x00f804bd,
872/* 0x0978: ctx_chan */ 894/* 0x09d0: ctx_chan */
873 0x077f21f5, 895 0x07d721f5,
874 0x085a21f5, 896 0x08b221f5,
875 0xf40ca7f0, 897 0xf40ca7f0,
876 0xf7f0d021, 898 0xf7f0d021,
877 0x3c21f505, 899 0x9421f505,
878 0xa421f508, 900 0xfc21f508,
879/* 0x0993: ctx_mmio_exec */ 901/* 0x09eb: ctx_mmio_exec */
880 0x9800f807, 902 0x9800f807,
881 0x07f14103, 903 0x07f14103,
882 0x03f08100, 904 0x03f08100,
883 0x0003d002, 905 0x0003d002,
884 0x34bd04bd, 906 0x34bd04bd,
885/* 0x09a4: ctx_mmio_loop */ 907/* 0x09fc: ctx_mmio_loop */
886 0xf4ff34c4, 908 0xf4ff34c4,
887 0x57f10f1b, 909 0x57f10f1b,
888 0x53f00200, 910 0x53f00200,
889 0x0535fa06, 911 0x0535fa06,
890/* 0x09b6: ctx_mmio_pull */ 912/* 0x0a0e: ctx_mmio_pull */
891 0x4e9803f8, 913 0x4e9803f8,
892 0x814f9880, 914 0x814f9880,
893 0xb69d21f4, 915 0xb69d21f4,
894 0x12b60830, 916 0x12b60830,
895 0xdf1bf401, 917 0xdf1bf401,
896/* 0x09c8: ctx_mmio_done */ 918/* 0x0a20: ctx_mmio_done */
897 0xf1160398, 919 0xf1160398,
898 0xf0810007, 920 0xf0810007,
899 0x03d00203, 921 0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvd7_grhub_code[] = {
902 0x13f00100, 924 0x13f00100,
903 0x0601fa06, 925 0x0601fa06,
904 0x00f803f8, 926 0x00f803f8,
905/* 0x09e8: ctx_xfer */ 927/* 0x0a40: ctx_xfer */
906 0xf104e7f0, 928 0xf104e7f0,
907 0xf0020007, 929 0xf0020007,
908 0x0ed00303, 930 0x0ed00303,
909/* 0x09f7: ctx_xfer_idle */ 931/* 0x0a4f: ctx_xfer_idle */
910 0xf104bd00, 932 0xf104bd00,
911 0xf00000e7, 933 0xf00000e7,
912 0xeecf03e3, 934 0xeecf03e3,
913 0x00e4f100, 935 0x00e4f100,
914 0xf21bf420, 936 0xf21bf420,
915 0xf40611f4, 937 0xf40611f4,
916/* 0x0a0e: ctx_xfer_pre */ 938/* 0x0a66: ctx_xfer_pre */
917 0xf7f01102, 939 0xf7f01102,
918 0x1421f510, 940 0x6c21f510,
919 0x7f21f508, 941 0xd721f508,
920 0x1c11f407, 942 0x1c11f407,
921/* 0x0a1c: ctx_xfer_pre_load */ 943/* 0x0a74: ctx_xfer_pre_load */
922 0xf502f7f0, 944 0xf502f7f0,
923 0xf507b521, 945 0xf5080d21,
924 0xf507c721, 946 0xf5081f21,
925 0xbd07dc21, 947 0xbd083421,
926 0xb521f5f4, 948 0x0d21f5f4,
927 0x5a21f507, 949 0xb221f508,
928/* 0x0a35: ctx_xfer_exec */ 950/* 0x0a8d: ctx_xfer_exec */
929 0x16019808, 951 0x16019808,
930 0x07f124bd, 952 0x07f124bd,
931 0x03f00500, 953 0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvd7_grhub_code[] = {
960 0x1301f402, 982 0x1301f402,
961 0xf40ca7f0, 983 0xf40ca7f0,
962 0xf7f0d021, 984 0xf7f0d021,
963 0x3c21f505, 985 0x9421f505,
964 0x3202f408, 986 0x3202f408,
965/* 0x0ac4: ctx_xfer_post */ 987/* 0x0b1c: ctx_xfer_post */
966 0xf502f7f0, 988 0xf502f7f0,
967 0xbd07b521, 989 0xbd080d21,
968 0x1421f5f4, 990 0x6c21f5f4,
969 0x7f21f508, 991 0x7f21f508,
970 0xc721f502, 992 0x1f21f502,
971 0xf5f4bd07, 993 0xf5f4bd08,
972 0xf407b521, 994 0xf4080d21,
973 0x01981011, 995 0x01981011,
974 0x0511fd40, 996 0x0511fd40,
975 0xf5070bf4, 997 0xf5070bf4,
976/* 0x0aef: ctx_xfer_no_post_mmio */ 998/* 0x0b47: ctx_xfer_no_post_mmio */
977 0xf5099321, 999 0xf509eb21,
978/* 0x0af3: ctx_xfer_done */ 1000/* 0x0b4b: ctx_xfer_done */
979 0xf807a421, 1001 0xf807fc21,
1002 0x00000000,
1003 0x00000000,
1004 0x00000000,
1005 0x00000000,
1006 0x00000000,
1007 0x00000000,
1008 0x00000000,
1009 0x00000000,
1010 0x00000000,
1011 0x00000000,
1012 0x00000000,
1013 0x00000000,
1014 0x00000000,
1015 0x00000000,
1016 0x00000000,
1017 0x00000000,
1018 0x00000000,
1019 0x00000000,
1020 0x00000000,
1021 0x00000000,
1022 0x00000000,
1023 0x00000000,
1024 0x00000000,
1025 0x00000000,
1026 0x00000000,
1027 0x00000000,
1028 0x00000000,
1029 0x00000000,
1030 0x00000000,
1031 0x00000000,
1032 0x00000000,
1033 0x00000000,
1034 0x00000000,
1035 0x00000000,
1036 0x00000000,
1037 0x00000000,
1038 0x00000000,
1039 0x00000000,
1040 0x00000000,
1041 0x00000000,
1042 0x00000000,
1043 0x00000000,
980 0x00000000, 1044 0x00000000,
981 0x00000000, 1045 0x00000000,
982 0x00000000, 1046 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index 6547b3dfc7ed..51c3797d8537 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nve0_grhub_code[] = {
528 0x0001d001, 528 0x0001d001,
529 0x17f104bd, 529 0x17f104bd,
530 0xf7f00100, 530 0xf7f00100,
531 0x7f21f502, 531 0xd721f502,
532 0x9121f507, 532 0xe921f507,
533 0x10f7f007, 533 0x10f7f007,
534 0x07de21f5, 534 0x083621f5,
535 0x98000e98, 535 0x98000e98,
536 0x21f5010f, 536 0x21f5010f,
537 0x14950150, 537 0x14950150,
@@ -574,9 +574,9 @@ uint32_t nve0_grhub_code[] = {
574 0xb6800040, 574 0xb6800040,
575 0x1bf40132, 575 0x1bf40132,
576 0x00f7f0be, 576 0x00f7f0be,
577 0x07de21f5, 577 0x083621f5,
578 0xf500f7f0, 578 0xf500f7f0,
579 0xf1077f21, 579 0xf107d721,
580 0xf0010007, 580 0xf0010007,
581 0x01d00203, 581 0x01d00203,
582 0xbd04bd00, 582 0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nve0_grhub_code[] = {
610 0x09d00203, 610 0x09d00203,
611 0xf404bd00, 611 0xf404bd00,
612 0x31f40132, 612 0x31f40132,
613 0xaa21f502, 613 0x0221f502,
614 0xf094bd09, 614 0xf094bd0a,
615 0x07f10799, 615 0x07f10799,
616 0x03f01700, 616 0x03f01700,
617 0x0009d002, 617 0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nve0_grhub_code[] = {
621 0x0203f00f, 621 0x0203f00f,
622 0xbd0009d0, 622 0xbd0009d0,
623 0x0131f404, 623 0x0131f404,
624 0x09aa21f5, 624 0x0a0221f5,
625 0x99f094bd, 625 0x99f094bd,
626 0x0007f106, 626 0x0007f106,
627 0x0203f017, 627 0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nve0_grhub_code[] = {
631 0x12b920f9, 631 0x12b920f9,
632 0x0132f402, 632 0x0132f402,
633 0xf50232f4, 633 0xf50232f4,
634 0xfc09aa21, 634 0xfc0a0221,
635 0x0007f120, 635 0x0007f120,
636 0x0203f0c0, 636 0x0203f0c0,
637 0xbd0002d0, 637 0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nve0_grhub_code[] = {
640 0xf41f23c8, 640 0xf41f23c8,
641 0x31f40d0b, 641 0x31f40d0b,
642 0x0232f401, 642 0x0232f401,
643 0x09aa21f5, 643 0x0a0221f5,
644/* 0x063c: chsw_done */ 644/* 0x063c: chsw_done */
645 0xf10127f0, 645 0xf10127f0,
646 0xf0c30007, 646 0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nve0_grhub_code[] = {
654/* 0x0660: main_not_ctx_switch */ 654/* 0x0660: main_not_ctx_switch */
655 0xf401e4b0, 655 0xf401e4b0,
656 0xf2b90d1b, 656 0xf2b90d1b,
657 0x4221f502, 657 0x9a21f502,
658 0x460ef409, 658 0x460ef409,
659/* 0x0670: main_not_ctx_chan */ 659/* 0x0670: main_not_ctx_chan */
660 0xf402e4b0, 660 0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nve0_grhub_code[] = {
664 0x09d00203, 664 0x09d00203,
665 0xf404bd00, 665 0xf404bd00,
666 0x32f40132, 666 0x32f40132,
667 0xaa21f502, 667 0x0221f502,
668 0xf094bd09, 668 0xf094bd0a,
669 0x07f10799, 669 0x07f10799,
670 0x03f01700, 670 0x03f01700,
671 0x0009d002, 671 0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nve0_grhub_code[] = {
710/* 0x072b: ih_no_ctxsw */ 710/* 0x072b: ih_no_ctxsw */
711 0xe40421f4, 711 0xe40421f4,
712 0xf40400ab, 712 0xf40400ab,
713 0xb7f1140b, 713 0xe7f16c0b,
714 0xe3f00708,
715 0x6821f440,
716 0xf102ffb9,
717 0xf0040007,
718 0x0fd00203,
719 0xf104bd00,
720 0xf00704e7,
721 0x21f440e3,
722 0x02ffb968,
723 0x030007f1,
724 0xd00203f0,
725 0x04bd000f,
726 0x9450fec7,
727 0xf7f102ee,
728 0xf3f00700,
729 0x00efbb40,
730 0xf16821f4,
731 0xf0020007,
732 0x0fd00203,
733 0xf004bd00,
734 0x21f503f7,
735 0xb7f1037e,
714 0xbfb90100, 736 0xbfb90100,
715 0x44e7f102, 737 0x44e7f102,
716 0x40e3f001, 738 0x40e3f001,
717/* 0x0743: ih_no_fwmthd */ 739/* 0x079b: ih_no_fwmthd */
718 0xf19d21f4, 740 0xf19d21f4,
719 0xbd0104b7, 741 0xbd0504b7,
720 0xb4abffb0, 742 0xb4abffb0,
721 0xf10f0bf4, 743 0xf10f0bf4,
722 0xf0070007, 744 0xf0070007,
723 0x0bd00303, 745 0x0bd00303,
724/* 0x075b: ih_no_other */ 746/* 0x07b3: ih_no_other */
725 0xf104bd00, 747 0xf104bd00,
726 0xf0010007, 748 0xf0010007,
727 0x0ad00003, 749 0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nve0_grhub_code[] = {
731 0xfc90fca0, 753 0xfc90fca0,
732 0x0088fe80, 754 0x0088fe80,
733 0x32f480fc, 755 0x32f480fc,
734/* 0x077f: ctx_4170s */ 756/* 0x07d7: ctx_4170s */
735 0xf001f800, 757 0xf001f800,
736 0xffb910f5, 758 0xffb910f5,
737 0x70e7f102, 759 0x70e7f102,
738 0x40e3f041, 760 0x40e3f041,
739 0xf89d21f4, 761 0xf89d21f4,
740/* 0x0791: ctx_4170w */ 762/* 0x07e9: ctx_4170w */
741 0x70e7f100, 763 0x70e7f100,
742 0x40e3f041, 764 0x40e3f041,
743 0xb96821f4, 765 0xb96821f4,
744 0xf4f002ff, 766 0xf4f002ff,
745 0xf01bf410, 767 0xf01bf410,
746/* 0x07a6: ctx_redswitch */ 768/* 0x07fe: ctx_redswitch */
747 0xe7f100f8, 769 0xe7f100f8,
748 0xe5f00200, 770 0xe5f00200,
749 0x20e5f040, 771 0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nve0_grhub_code[] = {
751 0xf0850007, 773 0xf0850007,
752 0x0ed00103, 774 0x0ed00103,
753 0xf004bd00, 775 0xf004bd00,
754/* 0x07c2: ctx_redswitch_delay */ 776/* 0x081a: ctx_redswitch_delay */
755 0xf2b608f7, 777 0xf2b608f7,
756 0xfd1bf401, 778 0xfd1bf401,
757 0x0400e5f1, 779 0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nve0_grhub_code[] = {
759 0x850007f1, 781 0x850007f1,
760 0xd00103f0, 782 0xd00103f0,
761 0x04bd000e, 783 0x04bd000e,
762/* 0x07de: ctx_86c */ 784/* 0x0836: ctx_86c */
763 0x07f100f8, 785 0x07f100f8,
764 0x03f01b00, 786 0x03f01b00,
765 0x000fd002, 787 0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nve0_grhub_code[] = {
770 0xe7f102ff, 792 0xe7f102ff,
771 0xe3f0a86c, 793 0xe3f0a86c,
772 0x9d21f441, 794 0x9d21f441,
773/* 0x0806: ctx_mem */ 795/* 0x085e: ctx_mem */
774 0x07f100f8, 796 0x07f100f8,
775 0x03f08400, 797 0x03f08400,
776 0x000fd002, 798 0x000fd002,
777/* 0x0812: ctx_mem_wait */ 799/* 0x086a: ctx_mem_wait */
778 0xf7f104bd, 800 0xf7f104bd,
779 0xf3f08400, 801 0xf3f08400,
780 0x00ffcf02, 802 0x00ffcf02,
781 0xf405fffd, 803 0xf405fffd,
782 0x00f8f31b, 804 0x00f8f31b,
783/* 0x0824: ctx_load */ 805/* 0x087c: ctx_load */
784 0x99f094bd, 806 0x99f094bd,
785 0x0007f105, 807 0x0007f105,
786 0x0203f00f, 808 0x0203f00f,
@@ -797,7 +819,7 @@ uint32_t nve0_grhub_code[] = {
797 0x0203f083, 819 0x0203f083,
798 0xbd0002d0, 820 0xbd0002d0,
799 0x07f7f004, 821 0x07f7f004,
800 0x080621f5, 822 0x085e21f5,
801 0xc00007f1, 823 0xc00007f1,
802 0xd00203f0, 824 0xd00203f0,
803 0x04bd0002, 825 0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nve0_grhub_code[] = {
852 0x170007f1, 874 0x170007f1,
853 0xd00203f0, 875 0xd00203f0,
854 0x04bd0009, 876 0x04bd0009,
855/* 0x0942: ctx_chan */ 877/* 0x099a: ctx_chan */
856 0x21f500f8, 878 0x21f500f8,
857 0xa7f00824, 879 0xa7f0087c,
858 0xd021f40c, 880 0xd021f40c,
859 0xf505f7f0, 881 0xf505f7f0,
860 0xf8080621, 882 0xf8085e21,
861/* 0x0955: ctx_mmio_exec */ 883/* 0x09ad: ctx_mmio_exec */
862 0x41039800, 884 0x41039800,
863 0x810007f1, 885 0x810007f1,
864 0xd00203f0, 886 0xd00203f0,
865 0x04bd0003, 887 0x04bd0003,
866/* 0x0966: ctx_mmio_loop */ 888/* 0x09be: ctx_mmio_loop */
867 0x34c434bd, 889 0x34c434bd,
868 0x0f1bf4ff, 890 0x0f1bf4ff,
869 0x020057f1, 891 0x020057f1,
870 0xfa0653f0, 892 0xfa0653f0,
871 0x03f80535, 893 0x03f80535,
872/* 0x0978: ctx_mmio_pull */ 894/* 0x09d0: ctx_mmio_pull */
873 0x98804e98, 895 0x98804e98,
874 0x21f4814f, 896 0x21f4814f,
875 0x0830b69d, 897 0x0830b69d,
876 0xf40112b6, 898 0xf40112b6,
877/* 0x098a: ctx_mmio_done */ 899/* 0x09e2: ctx_mmio_done */
878 0x0398df1b, 900 0x0398df1b,
879 0x0007f116, 901 0x0007f116,
880 0x0203f081, 902 0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nve0_grhub_code[] = {
883 0x010017f1, 905 0x010017f1,
884 0xfa0613f0, 906 0xfa0613f0,
885 0x03f80601, 907 0x03f80601,
886/* 0x09aa: ctx_xfer */ 908/* 0x0a02: ctx_xfer */
887 0xe7f000f8, 909 0xe7f000f8,
888 0x0007f104, 910 0x0007f104,
889 0x0303f002, 911 0x0303f002,
890 0xbd000ed0, 912 0xbd000ed0,
891/* 0x09b9: ctx_xfer_idle */ 913/* 0x0a11: ctx_xfer_idle */
892 0x00e7f104, 914 0x00e7f104,
893 0x03e3f000, 915 0x03e3f000,
894 0xf100eecf, 916 0xf100eecf,
895 0xf42000e4, 917 0xf42000e4,
896 0x11f4f21b, 918 0x11f4f21b,
897 0x0d02f406, 919 0x0d02f406,
898/* 0x09d0: ctx_xfer_pre */ 920/* 0x0a28: ctx_xfer_pre */
899 0xf510f7f0, 921 0xf510f7f0,
900 0xf407de21, 922 0xf4083621,
901/* 0x09da: ctx_xfer_pre_load */ 923/* 0x0a32: ctx_xfer_pre_load */
902 0xf7f01c11, 924 0xf7f01c11,
903 0x7f21f502, 925 0xd721f502,
904 0x9121f507, 926 0xe921f507,
905 0xa621f507, 927 0xfe21f507,
906 0xf5f4bd07, 928 0xf5f4bd07,
907 0xf5077f21, 929 0xf507d721,
908/* 0x09f3: ctx_xfer_exec */ 930/* 0x0a4b: ctx_xfer_exec */
909 0x98082421, 931 0x98087c21,
910 0x24bd1601, 932 0x24bd1601,
911 0x050007f1, 933 0x050007f1,
912 0xd00103f0, 934 0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nve0_grhub_code[] = {
941 0xa7f01301, 963 0xa7f01301,
942 0xd021f40c, 964 0xd021f40c,
943 0xf505f7f0, 965 0xf505f7f0,
944 0xf4080621, 966 0xf4085e21,
945/* 0x0a82: ctx_xfer_post */ 967/* 0x0ada: ctx_xfer_post */
946 0xf7f02e02, 968 0xf7f02e02,
947 0x7f21f502, 969 0xd721f502,
948 0xf5f4bd07, 970 0xf5f4bd07,
949 0xf507de21, 971 0xf5083621,
950 0xf5027f21, 972 0xf5027f21,
951 0xbd079121, 973 0xbd07e921,
952 0x7f21f5f4, 974 0xd721f5f4,
953 0x1011f407, 975 0x1011f407,
954 0xfd400198, 976 0xfd400198,
955 0x0bf40511, 977 0x0bf40511,
956 0x5521f507, 978 0xad21f507,
957/* 0x0aad: ctx_xfer_no_post_mmio */ 979/* 0x0b05: ctx_xfer_no_post_mmio */
958/* 0x0aad: ctx_xfer_done */ 980/* 0x0b05: ctx_xfer_done */
959 0x0000f809, 981 0x0000f809,
960 0x00000000, 982 0x00000000,
961 0x00000000, 983 0x00000000,
@@ -977,4 +999,46 @@ uint32_t nve0_grhub_code[] = {
977 0x00000000, 999 0x00000000,
978 0x00000000, 1000 0x00000000,
979 0x00000000, 1001 0x00000000,
1002 0x00000000,
1003 0x00000000,
1004 0x00000000,
1005 0x00000000,
1006 0x00000000,
1007 0x00000000,
1008 0x00000000,
1009 0x00000000,
1010 0x00000000,
1011 0x00000000,
1012 0x00000000,
1013 0x00000000,
1014 0x00000000,
1015 0x00000000,
1016 0x00000000,
1017 0x00000000,
1018 0x00000000,
1019 0x00000000,
1020 0x00000000,
1021 0x00000000,
1022 0x00000000,
1023 0x00000000,
1024 0x00000000,
1025 0x00000000,
1026 0x00000000,
1027 0x00000000,
1028 0x00000000,
1029 0x00000000,
1030 0x00000000,
1031 0x00000000,
1032 0x00000000,
1033 0x00000000,
1034 0x00000000,
1035 0x00000000,
1036 0x00000000,
1037 0x00000000,
1038 0x00000000,
1039 0x00000000,
1040 0x00000000,
1041 0x00000000,
1042 0x00000000,
1043 0x00000000,
980}; 1044};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index a5aee5a4302f..a0af4b703a8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvf0_grhub_code[] = {
528 0x0001d001, 528 0x0001d001,
529 0x17f104bd, 529 0x17f104bd,
530 0xf7f00100, 530 0xf7f00100,
531 0x7f21f502, 531 0xd721f502,
532 0x9121f507, 532 0xe921f507,
533 0x10f7f007, 533 0x10f7f007,
534 0x07de21f5, 534 0x083621f5,
535 0x98000e98, 535 0x98000e98,
536 0x21f5010f, 536 0x21f5010f,
537 0x14950150, 537 0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvf0_grhub_code[] = {
574 0xb6800040, 574 0xb6800040,
575 0x1bf40132, 575 0x1bf40132,
576 0x00f7f0be, 576 0x00f7f0be,
577 0x07de21f5, 577 0x083621f5,
578 0xf500f7f0, 578 0xf500f7f0,
579 0xf1077f21, 579 0xf107d721,
580 0xf0010007, 580 0xf0010007,
581 0x01d00203, 581 0x01d00203,
582 0xbd04bd00, 582 0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvf0_grhub_code[] = {
610 0x09d00203, 610 0x09d00203,
611 0xf404bd00, 611 0xf404bd00,
612 0x31f40132, 612 0x31f40132,
613 0xaa21f502, 613 0x0221f502,
614 0xf094bd09, 614 0xf094bd0a,
615 0x07f10799, 615 0x07f10799,
616 0x03f01700, 616 0x03f01700,
617 0x0009d002, 617 0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvf0_grhub_code[] = {
621 0x0203f037, 621 0x0203f037,
622 0xbd0009d0, 622 0xbd0009d0,
623 0x0131f404, 623 0x0131f404,
624 0x09aa21f5, 624 0x0a0221f5,
625 0x99f094bd, 625 0x99f094bd,
626 0x0007f106, 626 0x0007f106,
627 0x0203f017, 627 0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvf0_grhub_code[] = {
631 0x12b920f9, 631 0x12b920f9,
632 0x0132f402, 632 0x0132f402,
633 0xf50232f4, 633 0xf50232f4,
634 0xfc09aa21, 634 0xfc0a0221,
635 0x0007f120, 635 0x0007f120,
636 0x0203f0c0, 636 0x0203f0c0,
637 0xbd0002d0, 637 0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvf0_grhub_code[] = {
640 0xf41f23c8, 640 0xf41f23c8,
641 0x31f40d0b, 641 0x31f40d0b,
642 0x0232f401, 642 0x0232f401,
643 0x09aa21f5, 643 0x0a0221f5,
644/* 0x063c: chsw_done */ 644/* 0x063c: chsw_done */
645 0xf10127f0, 645 0xf10127f0,
646 0xf0c30007, 646 0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvf0_grhub_code[] = {
654/* 0x0660: main_not_ctx_switch */ 654/* 0x0660: main_not_ctx_switch */
655 0xf401e4b0, 655 0xf401e4b0,
656 0xf2b90d1b, 656 0xf2b90d1b,
657 0x4221f502, 657 0x9a21f502,
658 0x460ef409, 658 0x460ef409,
659/* 0x0670: main_not_ctx_chan */ 659/* 0x0670: main_not_ctx_chan */
660 0xf402e4b0, 660 0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvf0_grhub_code[] = {
664 0x09d00203, 664 0x09d00203,
665 0xf404bd00, 665 0xf404bd00,
666 0x32f40132, 666 0x32f40132,
667 0xaa21f502, 667 0x0221f502,
668 0xf094bd09, 668 0xf094bd0a,
669 0x07f10799, 669 0x07f10799,
670 0x03f01700, 670 0x03f01700,
671 0x0009d002, 671 0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvf0_grhub_code[] = {
710/* 0x072b: ih_no_ctxsw */ 710/* 0x072b: ih_no_ctxsw */
711 0xe40421f4, 711 0xe40421f4,
712 0xf40400ab, 712 0xf40400ab,
713 0xb7f1140b, 713 0xe7f16c0b,
714 0xe3f00708,
715 0x6821f440,
716 0xf102ffb9,
717 0xf0040007,
718 0x0fd00203,
719 0xf104bd00,
720 0xf00704e7,
721 0x21f440e3,
722 0x02ffb968,
723 0x030007f1,
724 0xd00203f0,
725 0x04bd000f,
726 0x9450fec7,
727 0xf7f102ee,
728 0xf3f00700,
729 0x00efbb40,
730 0xf16821f4,
731 0xf0020007,
732 0x0fd00203,
733 0xf004bd00,
734 0x21f503f7,
735 0xb7f1037e,
714 0xbfb90100, 736 0xbfb90100,
715 0x44e7f102, 737 0x44e7f102,
716 0x40e3f001, 738 0x40e3f001,
717/* 0x0743: ih_no_fwmthd */ 739/* 0x079b: ih_no_fwmthd */
718 0xf19d21f4, 740 0xf19d21f4,
719 0xbd0104b7, 741 0xbd0504b7,
720 0xb4abffb0, 742 0xb4abffb0,
721 0xf10f0bf4, 743 0xf10f0bf4,
722 0xf0070007, 744 0xf0070007,
723 0x0bd00303, 745 0x0bd00303,
724/* 0x075b: ih_no_other */ 746/* 0x07b3: ih_no_other */
725 0xf104bd00, 747 0xf104bd00,
726 0xf0010007, 748 0xf0010007,
727 0x0ad00003, 749 0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nvf0_grhub_code[] = {
731 0xfc90fca0, 753 0xfc90fca0,
732 0x0088fe80, 754 0x0088fe80,
733 0x32f480fc, 755 0x32f480fc,
734/* 0x077f: ctx_4170s */ 756/* 0x07d7: ctx_4170s */
735 0xf001f800, 757 0xf001f800,
736 0xffb910f5, 758 0xffb910f5,
737 0x70e7f102, 759 0x70e7f102,
738 0x40e3f041, 760 0x40e3f041,
739 0xf89d21f4, 761 0xf89d21f4,
740/* 0x0791: ctx_4170w */ 762/* 0x07e9: ctx_4170w */
741 0x70e7f100, 763 0x70e7f100,
742 0x40e3f041, 764 0x40e3f041,
743 0xb96821f4, 765 0xb96821f4,
744 0xf4f002ff, 766 0xf4f002ff,
745 0xf01bf410, 767 0xf01bf410,
746/* 0x07a6: ctx_redswitch */ 768/* 0x07fe: ctx_redswitch */
747 0xe7f100f8, 769 0xe7f100f8,
748 0xe5f00200, 770 0xe5f00200,
749 0x20e5f040, 771 0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nvf0_grhub_code[] = {
751 0xf0850007, 773 0xf0850007,
752 0x0ed00103, 774 0x0ed00103,
753 0xf004bd00, 775 0xf004bd00,
754/* 0x07c2: ctx_redswitch_delay */ 776/* 0x081a: ctx_redswitch_delay */
755 0xf2b608f7, 777 0xf2b608f7,
756 0xfd1bf401, 778 0xfd1bf401,
757 0x0400e5f1, 779 0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nvf0_grhub_code[] = {
759 0x850007f1, 781 0x850007f1,
760 0xd00103f0, 782 0xd00103f0,
761 0x04bd000e, 783 0x04bd000e,
762/* 0x07de: ctx_86c */ 784/* 0x0836: ctx_86c */
763 0x07f100f8, 785 0x07f100f8,
764 0x03f02300, 786 0x03f02300,
765 0x000fd002, 787 0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nvf0_grhub_code[] = {
770 0xe7f102ff, 792 0xe7f102ff,
771 0xe3f0a88c, 793 0xe3f0a88c,
772 0x9d21f441, 794 0x9d21f441,
773/* 0x0806: ctx_mem */ 795/* 0x085e: ctx_mem */
774 0x07f100f8, 796 0x07f100f8,
775 0x03f08400, 797 0x03f08400,
776 0x000fd002, 798 0x000fd002,
777/* 0x0812: ctx_mem_wait */ 799/* 0x086a: ctx_mem_wait */
778 0xf7f104bd, 800 0xf7f104bd,
779 0xf3f08400, 801 0xf3f08400,
780 0x00ffcf02, 802 0x00ffcf02,
781 0xf405fffd, 803 0xf405fffd,
782 0x00f8f31b, 804 0x00f8f31b,
783/* 0x0824: ctx_load */ 805/* 0x087c: ctx_load */
784 0x99f094bd, 806 0x99f094bd,
785 0x0007f105, 807 0x0007f105,
786 0x0203f037, 808 0x0203f037,
@@ -797,7 +819,7 @@ uint32_t nvf0_grhub_code[] = {
797 0x0203f083, 819 0x0203f083,
798 0xbd0002d0, 820 0xbd0002d0,
799 0x07f7f004, 821 0x07f7f004,
800 0x080621f5, 822 0x085e21f5,
801 0xc00007f1, 823 0xc00007f1,
802 0xd00203f0, 824 0xd00203f0,
803 0x04bd0002, 825 0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nvf0_grhub_code[] = {
852 0x170007f1, 874 0x170007f1,
853 0xd00203f0, 875 0xd00203f0,
854 0x04bd0009, 876 0x04bd0009,
855/* 0x0942: ctx_chan */ 877/* 0x099a: ctx_chan */
856 0x21f500f8, 878 0x21f500f8,
857 0xa7f00824, 879 0xa7f0087c,
858 0xd021f40c, 880 0xd021f40c,
859 0xf505f7f0, 881 0xf505f7f0,
860 0xf8080621, 882 0xf8085e21,
861/* 0x0955: ctx_mmio_exec */ 883/* 0x09ad: ctx_mmio_exec */
862 0x41039800, 884 0x41039800,
863 0x810007f1, 885 0x810007f1,
864 0xd00203f0, 886 0xd00203f0,
865 0x04bd0003, 887 0x04bd0003,
866/* 0x0966: ctx_mmio_loop */ 888/* 0x09be: ctx_mmio_loop */
867 0x34c434bd, 889 0x34c434bd,
868 0x0f1bf4ff, 890 0x0f1bf4ff,
869 0x020057f1, 891 0x020057f1,
870 0xfa0653f0, 892 0xfa0653f0,
871 0x03f80535, 893 0x03f80535,
872/* 0x0978: ctx_mmio_pull */ 894/* 0x09d0: ctx_mmio_pull */
873 0x98804e98, 895 0x98804e98,
874 0x21f4814f, 896 0x21f4814f,
875 0x0830b69d, 897 0x0830b69d,
876 0xf40112b6, 898 0xf40112b6,
877/* 0x098a: ctx_mmio_done */ 899/* 0x09e2: ctx_mmio_done */
878 0x0398df1b, 900 0x0398df1b,
879 0x0007f116, 901 0x0007f116,
880 0x0203f081, 902 0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nvf0_grhub_code[] = {
883 0x010017f1, 905 0x010017f1,
884 0xfa0613f0, 906 0xfa0613f0,
885 0x03f80601, 907 0x03f80601,
886/* 0x09aa: ctx_xfer */ 908/* 0x0a02: ctx_xfer */
887 0xe7f000f8, 909 0xe7f000f8,
888 0x0007f104, 910 0x0007f104,
889 0x0303f002, 911 0x0303f002,
890 0xbd000ed0, 912 0xbd000ed0,
891/* 0x09b9: ctx_xfer_idle */ 913/* 0x0a11: ctx_xfer_idle */
892 0x00e7f104, 914 0x00e7f104,
893 0x03e3f000, 915 0x03e3f000,
894 0xf100eecf, 916 0xf100eecf,
895 0xf42000e4, 917 0xf42000e4,
896 0x11f4f21b, 918 0x11f4f21b,
897 0x0d02f406, 919 0x0d02f406,
898/* 0x09d0: ctx_xfer_pre */ 920/* 0x0a28: ctx_xfer_pre */
899 0xf510f7f0, 921 0xf510f7f0,
900 0xf407de21, 922 0xf4083621,
901/* 0x09da: ctx_xfer_pre_load */ 923/* 0x0a32: ctx_xfer_pre_load */
902 0xf7f01c11, 924 0xf7f01c11,
903 0x7f21f502, 925 0xd721f502,
904 0x9121f507, 926 0xe921f507,
905 0xa621f507, 927 0xfe21f507,
906 0xf5f4bd07, 928 0xf5f4bd07,
907 0xf5077f21, 929 0xf507d721,
908/* 0x09f3: ctx_xfer_exec */ 930/* 0x0a4b: ctx_xfer_exec */
909 0x98082421, 931 0x98087c21,
910 0x24bd1601, 932 0x24bd1601,
911 0x050007f1, 933 0x050007f1,
912 0xd00103f0, 934 0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nvf0_grhub_code[] = {
941 0xa7f01301, 963 0xa7f01301,
942 0xd021f40c, 964 0xd021f40c,
943 0xf505f7f0, 965 0xf505f7f0,
944 0xf4080621, 966 0xf4085e21,
945/* 0x0a82: ctx_xfer_post */ 967/* 0x0ada: ctx_xfer_post */
946 0xf7f02e02, 968 0xf7f02e02,
947 0x7f21f502, 969 0xd721f502,
948 0xf5f4bd07, 970 0xf5f4bd07,
949 0xf507de21, 971 0xf5083621,
950 0xf5027f21, 972 0xf5027f21,
951 0xbd079121, 973 0xbd07e921,
952 0x7f21f5f4, 974 0xd721f5f4,
953 0x1011f407, 975 0x1011f407,
954 0xfd400198, 976 0xfd400198,
955 0x0bf40511, 977 0x0bf40511,
956 0x5521f507, 978 0xad21f507,
957/* 0x0aad: ctx_xfer_no_post_mmio */ 979/* 0x0b05: ctx_xfer_no_post_mmio */
958/* 0x0aad: ctx_xfer_done */ 980/* 0x0b05: ctx_xfer_done */
959 0x0000f809, 981 0x0000f809,
960 0x00000000, 982 0x00000000,
961 0x00000000, 983 0x00000000,
@@ -977,4 +999,46 @@ uint32_t nvf0_grhub_code[] = {
977 0x00000000, 999 0x00000000,
978 0x00000000, 1000 0x00000000,
979 0x00000000, 1001 0x00000000,
1002 0x00000000,
1003 0x00000000,
1004 0x00000000,
1005 0x00000000,
1006 0x00000000,
1007 0x00000000,
1008 0x00000000,
1009 0x00000000,
1010 0x00000000,
1011 0x00000000,
1012 0x00000000,
1013 0x00000000,
1014 0x00000000,
1015 0x00000000,
1016 0x00000000,
1017 0x00000000,
1018 0x00000000,
1019 0x00000000,
1020 0x00000000,
1021 0x00000000,
1022 0x00000000,
1023 0x00000000,
1024 0x00000000,
1025 0x00000000,
1026 0x00000000,
1027 0x00000000,
1028 0x00000000,
1029 0x00000000,
1030 0x00000000,
1031 0x00000000,
1032 0x00000000,
1033 0x00000000,
1034 0x00000000,
1035 0x00000000,
1036 0x00000000,
1037 0x00000000,
1038 0x00000000,
1039 0x00000000,
1040 0x00000000,
1041 0x00000000,
1042 0x00000000,
1043 0x00000000,
980}; 1044};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index a47d49db5232..2a0b0f844299 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -30,6 +30,12 @@
30#define GK110 0xf0 30#define GK110 0xf0
31#define GK208 0x108 31#define GK208 0x108
32 32
33#define NV_PGRAPH_TRAPPED_ADDR 0x400704
34#define NV_PGRAPH_TRAPPED_DATA_LO 0x400708
35#define NV_PGRAPH_TRAPPED_DATA_HI 0x40070c
36
37#define NV_PGRAPH_FE_OBJECT_TABLE(n) ((n) * 4 + 0x400700)
38
33#define NV_PGRAPH_FECS_INTR_ACK 0x409004 39#define NV_PGRAPH_FECS_INTR_ACK 0x409004
34#define NV_PGRAPH_FECS_INTR 0x409008 40#define NV_PGRAPH_FECS_INTR 0x409008
35#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400 41#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
index fd1d380de094..1718ae4e8224 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
@@ -3,5 +3,6 @@
3 3
4#define E_BAD_COMMAND 0x00000001 4#define E_BAD_COMMAND 0x00000001
5#define E_CMD_OVERFLOW 0x00000002 5#define E_CMD_OVERFLOW 0x00000002
6#define E_BAD_FWMTHD 0x00000003
6 7
7#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 1a2d56493cf6..20665c21d80e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -976,7 +976,6 @@ nv50_graph_init(struct nouveau_object *object)
976 break; 976 break;
977 case 0xa0: 977 case 0xa0:
978 default: 978 default:
979 nv_wr32(priv, 0x402cc0, 0x00000000);
980 if (nv_device(priv)->chipset == 0xa0 || 979 if (nv_device(priv)->chipset == 0xa0 ||
981 nv_device(priv)->chipset == 0xaa || 980 nv_device(priv)->chipset == 0xaa ||
982 nv_device(priv)->chipset == 0xac) { 981 nv_device(priv)->chipset == 0xac) {
@@ -991,10 +990,10 @@ nv50_graph_init(struct nouveau_object *object)
991 990
992 /* zero out zcull regions */ 991 /* zero out zcull regions */
993 for (i = 0; i < 8; i++) { 992 for (i = 0; i < 8; i++) {
994 nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000); 993 nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
995 nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000); 994 nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
996 nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000); 995 nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
997 nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000); 996 nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
998 } 997 }
999 return 0; 998 return 0;
1000} 999}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index bf7bdb1f291e..aa0838916354 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -789,17 +789,40 @@ nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
789static void 789static void
790nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv) 790nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
791{ 791{
792 u32 ustat = nv_rd32(priv, 0x409c18); 792 u32 stat = nv_rd32(priv, 0x409c18);
793 793
794 if (ustat & 0x00000001) 794 if (stat & 0x00000001) {
795 nv_error(priv, "CTXCTL ucode error\n"); 795 u32 code = nv_rd32(priv, 0x409814);
796 if (ustat & 0x00080000) 796 if (code == E_BAD_FWMTHD) {
797 nv_error(priv, "CTXCTL watchdog timeout\n"); 797 u32 class = nv_rd32(priv, 0x409808);
798 if (ustat & ~0x00080001) 798 u32 addr = nv_rd32(priv, 0x40980c);
799 nv_error(priv, "CTXCTL 0x%08x\n", ustat); 799 u32 subc = (addr & 0x00070000) >> 16;
800 u32 mthd = (addr & 0x00003ffc);
801 u32 data = nv_rd32(priv, 0x409810);
802
803 nv_error(priv, "FECS MTHD subc %d class 0x%04x "
804 "mthd 0x%04x data 0x%08x\n",
805 subc, class, mthd, data);
800 806
801 nvc0_graph_ctxctl_debug(priv); 807 nv_wr32(priv, 0x409c20, 0x00000001);
802 nv_wr32(priv, 0x409c20, ustat); 808 stat &= ~0x00000001;
809 } else {
810 nv_error(priv, "FECS ucode error %d\n", code);
811 }
812 }
813
814 if (stat & 0x00080000) {
815 nv_error(priv, "FECS watchdog timeout\n");
816 nvc0_graph_ctxctl_debug(priv);
817 nv_wr32(priv, 0x409c20, 0x00080000);
818 stat &= ~0x00080000;
819 }
820
821 if (stat) {
822 nv_error(priv, "FECS 0x%08x\n", stat);
823 nvc0_graph_ctxctl_debug(priv);
824 nv_wr32(priv, 0x409c20, stat);
825 }
803} 826}
804 827
805static void 828static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 75203a99d902..ffc289198dd8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -38,6 +38,8 @@
38#include <engine/fifo.h> 38#include <engine/fifo.h>
39#include <engine/graph.h> 39#include <engine/graph.h>
40 40
41#include "fuc/os.h"
42
41#define GPC_MAX 32 43#define GPC_MAX 32
42#define TPC_MAX (GPC_MAX * 8) 44#define TPC_MAX (GPC_MAX * 8)
43 45
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index db1b39d08013..825f7bb46b67 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -84,6 +84,7 @@ extern struct nouveau_oclass *nv4e_i2c_oclass;
84extern struct nouveau_oclass *nv50_i2c_oclass; 84extern struct nouveau_oclass *nv50_i2c_oclass;
85extern struct nouveau_oclass *nv94_i2c_oclass; 85extern struct nouveau_oclass *nv94_i2c_oclass;
86extern struct nouveau_oclass *nvd0_i2c_oclass; 86extern struct nouveau_oclass *nvd0_i2c_oclass;
87extern struct nouveau_oclass *gf117_i2c_oclass;
87extern struct nouveau_oclass *nve0_i2c_oclass; 88extern struct nouveau_oclass *nve0_i2c_oclass;
88 89
89static inline int 90static inline int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 4ac1aa30ea11..0e62a3240144 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -307,7 +307,6 @@ calc_clk(struct nve0_clock_priv *priv,
307 info->dsrc = src0; 307 info->dsrc = src0;
308 if (div0) { 308 if (div0) {
309 info->ddiv |= 0x80000000; 309 info->ddiv |= 0x80000000;
310 info->ddiv |= div0 << 8;
311 info->ddiv |= div0; 310 info->ddiv |= div0;
312 } 311 }
313 if (div1D) { 312 if (div1D) {
@@ -352,7 +351,7 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
352{ 351{
353 struct nve0_clock_info *info = &priv->eng[clk]; 352 struct nve0_clock_info *info = &priv->eng[clk];
354 if (!info->ssel) { 353 if (!info->ssel) {
355 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv); 354 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
356 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc); 355 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
357 } 356 }
358} 357}
@@ -389,7 +388,10 @@ static void
389nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk) 388nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
390{ 389{
391 struct nve0_clock_info *info = &priv->eng[clk]; 390 struct nve0_clock_info *info = &priv->eng[clk];
392 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv); 391 if (info->ssel)
392 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
393 else
394 nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
393} 395}
394 396
395static void 397static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 84c7efbc4f38..1ad3ea503133 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -262,8 +262,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
262 struct nve0_ram *ram = (void *)pfb->ram; 262 struct nve0_ram *ram = (void *)pfb->ram;
263 struct nve0_ramfuc *fuc = &ram->fuc; 263 struct nve0_ramfuc *fuc = &ram->fuc;
264 struct nouveau_ram_data *next = ram->base.next; 264 struct nouveau_ram_data *next = ram->base.next;
265 int vc = !(next->bios.ramcfg_11_02_08); 265 int vc = !next->bios.ramcfg_11_02_08;
266 int mv = !(next->bios.ramcfg_11_02_04); 266 int mv = !next->bios.ramcfg_11_02_04;
267 u32 mask, data; 267 u32 mask, data;
268 268
269 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); 269 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -370,8 +370,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
370 } 370 }
371 } 371 }
372 372
373 if ( (next->bios.ramcfg_11_02_40) || 373 if (next->bios.ramcfg_11_02_40 ||
374 (next->bios.ramcfg_11_07_10)) { 374 next->bios.ramcfg_11_07_10) {
375 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); 375 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
376 ram_nsec(fuc, 20000); 376 ram_nsec(fuc, 20000);
377 } 377 }
@@ -417,7 +417,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
417 ram_mask(fuc, 0x10f694, 0xff00ff00, data); 417 ram_mask(fuc, 0x10f694, 0xff00ff00, data);
418 } 418 }
419 419
420 if (ram->mode == 2 && (next->bios.ramcfg_11_08_10)) 420 if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
421 data = 0x00000080; 421 data = 0x00000080;
422 else 422 else
423 data = 0x00000000; 423 data = 0x00000000;
@@ -425,13 +425,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
425 425
426 mask = 0x00070000; 426 mask = 0x00070000;
427 data = 0x00000000; 427 data = 0x00000000;
428 if (!(next->bios.ramcfg_11_02_80)) 428 if (!next->bios.ramcfg_11_02_80)
429 data |= 0x03000000; 429 data |= 0x03000000;
430 if (!(next->bios.ramcfg_11_02_40)) 430 if (!next->bios.ramcfg_11_02_40)
431 data |= 0x00002000; 431 data |= 0x00002000;
432 if (!(next->bios.ramcfg_11_07_10)) 432 if (!next->bios.ramcfg_11_07_10)
433 data |= 0x00004000; 433 data |= 0x00004000;
434 if (!(next->bios.ramcfg_11_07_08)) 434 if (!next->bios.ramcfg_11_07_08)
435 data |= 0x00000003; 435 data |= 0x00000003;
436 else 436 else
437 data |= 0x74000000; 437 data |= 0x74000000;
@@ -486,7 +486,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
486 486
487 data = mask = 0x00000000; 487 data = mask = 0x00000000;
488 if (NOTE00(ramcfg_02_03 != 0)) { 488 if (NOTE00(ramcfg_02_03 != 0)) {
489 data |= (next->bios.ramcfg_11_02_03) << 8; 489 data |= next->bios.ramcfg_11_02_03 << 8;
490 mask |= 0x00000300; 490 mask |= 0x00000300;
491 } 491 }
492 if (NOTE00(ramcfg_01_10)) { 492 if (NOTE00(ramcfg_01_10)) {
@@ -498,7 +498,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
498 498
499 data = mask = 0x00000000; 499 data = mask = 0x00000000;
500 if (NOTE00(timing_30_07 != 0)) { 500 if (NOTE00(timing_30_07 != 0)) {
501 data |= (next->bios.timing_20_30_07) << 28; 501 data |= next->bios.timing_20_30_07 << 28;
502 mask |= 0x70000000; 502 mask |= 0x70000000;
503 } 503 }
504 if (NOTE00(ramcfg_01_01)) { 504 if (NOTE00(ramcfg_01_01)) {
@@ -510,7 +510,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
510 510
511 data = mask = 0x00000000; 511 data = mask = 0x00000000;
512 if (NOTE00(timing_30_07 != 0)) { 512 if (NOTE00(timing_30_07 != 0)) {
513 data |= (next->bios.timing_20_30_07) << 28; 513 data |= next->bios.timing_20_30_07 << 28;
514 mask |= 0x70000000; 514 mask |= 0x70000000;
515 } 515 }
516 if (NOTE00(ramcfg_01_02)) { 516 if (NOTE00(ramcfg_01_02)) {
@@ -522,16 +522,16 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
522 522
523 mask = 0x33f00000; 523 mask = 0x33f00000;
524 data = 0x00000000; 524 data = 0x00000000;
525 if (!(next->bios.ramcfg_11_01_04)) 525 if (!next->bios.ramcfg_11_01_04)
526 data |= 0x20200000; 526 data |= 0x20200000;
527 if (!(next->bios.ramcfg_11_07_80)) 527 if (!next->bios.ramcfg_11_07_80)
528 data |= 0x12800000; 528 data |= 0x12800000;
529 /*XXX: see note above about there probably being some condition 529 /*XXX: see note above about there probably being some condition
530 * for the 10f824 stuff that uses ramcfg 3... 530 * for the 10f824 stuff that uses ramcfg 3...
531 */ 531 */
532 if ( (next->bios.ramcfg_11_03_f0)) { 532 if (next->bios.ramcfg_11_03_f0) {
533 if (next->bios.rammap_11_08_0c) { 533 if (next->bios.rammap_11_08_0c) {
534 if (!(next->bios.ramcfg_11_07_80)) 534 if (!next->bios.ramcfg_11_07_80)
535 mask |= 0x00000020; 535 mask |= 0x00000020;
536 else 536 else
537 data |= 0x00000020; 537 data |= 0x00000020;
@@ -563,7 +563,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
563 ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000); 563 ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
564 } 564 }
565 565
566 data = (next->bios.timing_20_30_07) << 8; 566 data = next->bios.timing_20_30_07 << 8;
567 if (next->bios.ramcfg_11_01_01) 567 if (next->bios.ramcfg_11_01_01)
568 data |= 0x80000000; 568 data |= 0x80000000;
569 ram_mask(fuc, 0x100778, 0x00000700, data); 569 ram_mask(fuc, 0x100778, 0x00000700, data);
@@ -588,7 +588,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
588 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */ 588 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
589 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */ 589 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
590 590
591 if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) { 591 if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
592 u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000); 592 u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
593 nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/ 593 nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
594 ram_nsec(fuc, 1000); 594 ram_nsec(fuc, 1000);
@@ -621,8 +621,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
621 data = ram_rd32(fuc, 0x10f978); 621 data = ram_rd32(fuc, 0x10f978);
622 data &= ~0x00046144; 622 data &= ~0x00046144;
623 data |= 0x0000000b; 623 data |= 0x0000000b;
624 if (!(next->bios.ramcfg_11_07_08)) { 624 if (!next->bios.ramcfg_11_07_08) {
625 if (!(next->bios.ramcfg_11_07_04)) 625 if (!next->bios.ramcfg_11_07_04)
626 data |= 0x0000200c; 626 data |= 0x0000200c;
627 else 627 else
628 data |= 0x00000000; 628 data |= 0x00000000;
@@ -636,11 +636,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
636 ram_wr32(fuc, 0x10f830, data); 636 ram_wr32(fuc, 0x10f830, data);
637 } 637 }
638 638
639 if (!(next->bios.ramcfg_11_07_08)) { 639 if (!next->bios.ramcfg_11_07_08) {
640 data = 0x88020000; 640 data = 0x88020000;
641 if ( (next->bios.ramcfg_11_07_04)) 641 if ( next->bios.ramcfg_11_07_04)
642 data |= 0x10000000; 642 data |= 0x10000000;
643 if (!(next->bios.rammap_11_08_10)) 643 if (!next->bios.rammap_11_08_10)
644 data |= 0x00080000; 644 data |= 0x00080000;
645 } else { 645 } else {
646 data = 0xa40e0000; 646 data = 0xa40e0000;
@@ -689,8 +689,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
689 const u32 runk0 = ram->fN1 << 16; 689 const u32 runk0 = ram->fN1 << 16;
690 const u32 runk1 = ram->fN1; 690 const u32 runk1 = ram->fN1;
691 struct nouveau_ram_data *next = ram->base.next; 691 struct nouveau_ram_data *next = ram->base.next;
692 int vc = !(next->bios.ramcfg_11_02_08); 692 int vc = !next->bios.ramcfg_11_02_08;
693 int mv = !(next->bios.ramcfg_11_02_04); 693 int mv = !next->bios.ramcfg_11_02_04;
694 u32 mask, data; 694 u32 mask, data;
695 695
696 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); 696 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -705,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
705 } 705 }
706 706
707 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000); 707 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
708 if ((next->bios.ramcfg_11_03_f0)) 708 if (next->bios.ramcfg_11_03_f0)
709 ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000); 709 ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
710 710
711 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ 711 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -761,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
761 761
762 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010); 762 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
763 data = ram_rd32(fuc, 0x1373ec) & ~0x00030000; 763 data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
764 data |= (next->bios.ramcfg_11_03_30) << 12; 764 data |= next->bios.ramcfg_11_03_30 << 16;
765 ram_wr32(fuc, 0x1373ec, data); 765 ram_wr32(fuc, 0x1373ec, data);
766 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000); 766 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
767 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000); 767 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -793,8 +793,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
793 } 793 }
794 } 794 }
795 795
796 if ( (next->bios.ramcfg_11_02_40) || 796 if (next->bios.ramcfg_11_02_40 ||
797 (next->bios.ramcfg_11_07_10)) { 797 next->bios.ramcfg_11_07_10) {
798 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); 798 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
799 ram_nsec(fuc, 20000); 799 ram_nsec(fuc, 20000);
800 } 800 }
@@ -810,13 +810,13 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
810 810
811 mask = 0x00010000; 811 mask = 0x00010000;
812 data = 0x00000000; 812 data = 0x00000000;
813 if (!(next->bios.ramcfg_11_02_80)) 813 if (!next->bios.ramcfg_11_02_80)
814 data |= 0x03000000; 814 data |= 0x03000000;
815 if (!(next->bios.ramcfg_11_02_40)) 815 if (!next->bios.ramcfg_11_02_40)
816 data |= 0x00002000; 816 data |= 0x00002000;
817 if (!(next->bios.ramcfg_11_07_10)) 817 if (!next->bios.ramcfg_11_07_10)
818 data |= 0x00004000; 818 data |= 0x00004000;
819 if (!(next->bios.ramcfg_11_07_08)) 819 if (!next->bios.ramcfg_11_07_08)
820 data |= 0x00000003; 820 data |= 0x00000003;
821 else 821 else
822 data |= 0x14000000; 822 data |= 0x14000000;
@@ -844,16 +844,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
844 844
845 mask = 0x33f00000; 845 mask = 0x33f00000;
846 data = 0x00000000; 846 data = 0x00000000;
847 if (!(next->bios.ramcfg_11_01_04)) 847 if (!next->bios.ramcfg_11_01_04)
848 data |= 0x20200000; 848 data |= 0x20200000;
849 if (!(next->bios.ramcfg_11_07_80)) 849 if (!next->bios.ramcfg_11_07_80)
850 data |= 0x12800000; 850 data |= 0x12800000;
851 /*XXX: see note above about there probably being some condition 851 /*XXX: see note above about there probably being some condition
852 * for the 10f824 stuff that uses ramcfg 3... 852 * for the 10f824 stuff that uses ramcfg 3...
853 */ 853 */
854 if ( (next->bios.ramcfg_11_03_f0)) { 854 if (next->bios.ramcfg_11_03_f0) {
855 if (next->bios.rammap_11_08_0c) { 855 if (next->bios.rammap_11_08_0c) {
856 if (!(next->bios.ramcfg_11_07_80)) 856 if (!next->bios.ramcfg_11_07_80)
857 mask |= 0x00000020; 857 mask |= 0x00000020;
858 else 858 else
859 data |= 0x00000020; 859 data |= 0x00000020;
@@ -876,7 +876,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
876 data = next->bios.timing_20_2c_1fc0; 876 data = next->bios.timing_20_2c_1fc0;
877 ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24); 877 ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
878 878
879 ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8); 879 ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
880 880
881 ram_wr32(fuc, 0x10f090, 0x4000007f); 881 ram_wr32(fuc, 0x10f090, 0x4000007f);
882 ram_nsec(fuc, 1000); 882 ram_nsec(fuc, 1000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
new file mode 100644
index 000000000000..fa891c39866b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
29 .base.handle = NV_SUBDEV(I2C, 0xd7),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = _nouveau_i2c_ctor,
32 .dtor = _nouveau_i2c_dtor,
33 .init = _nouveau_i2c_init,
34 .fini = _nouveau_i2c_fini,
35 },
36 .sclass = nvd0_i2c_sclass,
37 .pad_x = &nv04_i2c_pad_oclass,
38 .pad_s = &nv04_i2c_pad_oclass,
39}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
index 7120124dceac..ebef970a0645 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -95,6 +95,23 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
95} 95}
96 96
97static int 97static int
98nve0_ibus_init(struct nouveau_object *object)
99{
100 struct nve0_ibus_priv *priv = (void *)object;
101 int ret = nouveau_ibus_init(&priv->base);
102 if (ret == 0) {
103 nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
104 nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
105 nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
106 nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
107 nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
108 nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
109 nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
110 }
111 return ret;
112}
113
114static int
98nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 115nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
99 struct nouveau_oclass *oclass, void *data, u32 size, 116 struct nouveau_oclass *oclass, void *data, u32 size,
100 struct nouveau_object **pobject) 117 struct nouveau_object **pobject)
@@ -117,7 +134,7 @@ nve0_ibus_oclass = {
117 .ofuncs = &(struct nouveau_ofuncs) { 134 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nve0_ibus_ctor, 135 .ctor = nve0_ibus_ctor,
119 .dtor = _nouveau_ibus_dtor, 136 .dtor = _nouveau_ibus_dtor,
120 .init = _nouveau_ibus_init, 137 .init = nve0_ibus_init,
121 .fini = _nouveau_ibus_fini, 138 .fini = _nouveau_ibus_fini,
122 }, 139 },
123}; 140};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
index 2284ecb1c9b8..c2bb616a8da5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -83,7 +83,7 @@ host_send:
83 // increment GET 83 // increment GET
84 add b32 $r1 0x1 84 add b32 $r1 0x1
85 and $r14 $r1 #fifo_qmaskf 85 and $r14 $r1 #fifo_qmaskf
86 nv_iowr(NV_PPWR_FIFO_GET(0), $r1) 86 nv_iowr(NV_PPWR_FIFO_GET(0), $r14)
87 bra #host_send 87 bra #host_send
88 host_send_done: 88 host_send_done:
89 ret 89 ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4bd43a99fdcc..39a5dc150a05 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -1018,7 +1018,7 @@ uint32_t nv108_pwr_code[] = {
1018 0xb600023f, 1018 0xb600023f,
1019 0x1ec40110, 1019 0x1ec40110,
1020 0x04b0400f, 1020 0x04b0400f,
1021 0xbd0001f6, 1021 0xbd000ef6,
1022 0xc70ef404, 1022 0xc70ef404,
1023/* 0x0328: host_send_done */ 1023/* 0x0328: host_send_done */
1024/* 0x032a: host_recv */ 1024/* 0x032a: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 5a73fa620978..254205cd5166 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nva3_pwr_code[] = {
1124 0x0f1ec401, 1124 0x0f1ec401,
1125 0x04b007f1, 1125 0x04b007f1,
1126 0xd00604b6, 1126 0xd00604b6,
1127 0x04bd0001, 1127 0x04bd000e,
1128/* 0x03cb: host_send_done */ 1128/* 0x03cb: host_send_done */
1129 0xf8ba0ef4, 1129 0xf8ba0ef4,
1130/* 0x03cd: host_recv */ 1130/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 4dba00d2dd1a..7ac87405d01b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nvc0_pwr_code[] = {
1124 0x0f1ec401, 1124 0x0f1ec401,
1125 0x04b007f1, 1125 0x04b007f1,
1126 0xd00604b6, 1126 0xd00604b6,
1127 0x04bd0001, 1127 0x04bd000e,
1128/* 0x03cb: host_send_done */ 1128/* 0x03cb: host_send_done */
1129 0xf8ba0ef4, 1129 0xf8ba0ef4,
1130/* 0x03cd: host_recv */ 1130/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 5e24c6bc041d..cd9ff1a73284 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -1033,7 +1033,7 @@ uint32_t nvd0_pwr_code[] = {
1033 0xb6026b21, 1033 0xb6026b21,
1034 0x1ec40110, 1034 0x1ec40110,
1035 0xb007f10f, 1035 0xb007f10f,
1036 0x0001d004, 1036 0x000ed004,
1037 0x0ef404bd, 1037 0x0ef404bd,
1038/* 0x0365: host_send_done */ 1038/* 0x0365: host_send_done */
1039/* 0x0367: host_recv */ 1039/* 0x0367: host_recv */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 26b5647188ef..47ad74255bf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -736,6 +736,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
736 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, 736 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
737 new_bo->bo.offset }; 737 new_bo->bo.offset };
738 738
739 /* Keep vblanks on during flip, for the target crtc of this flip */
740 drm_vblank_get(dev, nouveau_crtc(crtc)->index);
741
739 /* Emit a page flip */ 742 /* Emit a page flip */
740 if (nv_device(drm->device)->card_type >= NV_50) { 743 if (nv_device(drm->device)->card_type >= NV_50) {
741 ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); 744 ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
@@ -779,6 +782,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
779 return 0; 782 return 0;
780 783
781fail_unreserve: 784fail_unreserve:
785 drm_vblank_put(dev, nouveau_crtc(crtc)->index);
782 ttm_bo_unreserve(&old_bo->bo); 786 ttm_bo_unreserve(&old_bo->bo);
783fail_unpin: 787fail_unpin:
784 mutex_unlock(&chan->cli->mutex); 788 mutex_unlock(&chan->cli->mutex);
@@ -817,6 +821,9 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
817 drm_send_vblank_event(dev, crtcid, s->event); 821 drm_send_vblank_event(dev, crtcid, s->event);
818 } 822 }
819 823
824 /* Give up ownership of vblank for page-flipped crtc */
825 drm_vblank_put(dev, s->crtc);
826
820 list_del(&s->head); 827 list_del(&s->head);
821 if (ps) 828 if (ps)
822 *ps = *s; 829 *ps = *s;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 26c12a3fe430..a03c73411a56 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1052,7 +1052,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1052 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); 1052 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
1053 1053
1054 /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */ 1054 /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
1055 if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) && 1055 if (ASIC_IS_DCE5(rdev) &&
1056 (encoder_mode == ATOM_ENCODER_MODE_HDMI) && 1056 (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
1057 (radeon_crtc->bpc > 8)) 1057 (radeon_crtc->bpc > 8))
1058 clock = radeon_crtc->adjusted_clock; 1058 clock = radeon_crtc->adjusted_clock;
@@ -1136,6 +1136,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1136 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1136 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1137 u32 tmp, viewport_w, viewport_h; 1137 u32 tmp, viewport_w, viewport_h;
1138 int r; 1138 int r;
1139 bool bypass_lut = false;
1139 1140
1140 /* no fb bound */ 1141 /* no fb bound */
1141 if (!atomic && !crtc->primary->fb) { 1142 if (!atomic && !crtc->primary->fb) {
@@ -1174,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1174 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 1175 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
1175 radeon_bo_unreserve(rbo); 1176 radeon_bo_unreserve(rbo);
1176 1177
1177 switch (target_fb->bits_per_pixel) { 1178 switch (target_fb->pixel_format) {
1178 case 8: 1179 case DRM_FORMAT_C8:
1179 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | 1180 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
1180 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); 1181 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
1181 break; 1182 break;
1182 case 15: 1183 case DRM_FORMAT_XRGB4444:
1184 case DRM_FORMAT_ARGB4444:
1185 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1186 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
1187#ifdef __BIG_ENDIAN
1188 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1189#endif
1190 break;
1191 case DRM_FORMAT_XRGB1555:
1192 case DRM_FORMAT_ARGB1555:
1183 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1193 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1184 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); 1194 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
1195#ifdef __BIG_ENDIAN
1196 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1197#endif
1198 break;
1199 case DRM_FORMAT_BGRX5551:
1200 case DRM_FORMAT_BGRA5551:
1201 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1202 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
1203#ifdef __BIG_ENDIAN
1204 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1205#endif
1185 break; 1206 break;
1186 case 16: 1207 case DRM_FORMAT_RGB565:
1187 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1208 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1188 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); 1209 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
1189#ifdef __BIG_ENDIAN 1210#ifdef __BIG_ENDIAN
1190 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1211 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1191#endif 1212#endif
1192 break; 1213 break;
1193 case 24: 1214 case DRM_FORMAT_XRGB8888:
1194 case 32: 1215 case DRM_FORMAT_ARGB8888:
1195 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1216 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1196 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); 1217 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
1197#ifdef __BIG_ENDIAN 1218#ifdef __BIG_ENDIAN
1198 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); 1219 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1199#endif 1220#endif
1200 break; 1221 break;
1222 case DRM_FORMAT_XRGB2101010:
1223 case DRM_FORMAT_ARGB2101010:
1224 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1225 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
1226#ifdef __BIG_ENDIAN
1227 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1228#endif
1229 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1230 bypass_lut = true;
1231 break;
1232 case DRM_FORMAT_BGRX1010102:
1233 case DRM_FORMAT_BGRA1010102:
1234 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1235 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
1236#ifdef __BIG_ENDIAN
1237 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1238#endif
1239 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1240 bypass_lut = true;
1241 break;
1201 default: 1242 default:
1202 DRM_ERROR("Unsupported screen depth %d\n", 1243 DRM_ERROR("Unsupported screen format %s\n",
1203 target_fb->bits_per_pixel); 1244 drm_get_format_name(target_fb->pixel_format));
1204 return -EINVAL; 1245 return -EINVAL;
1205 } 1246 }
1206 1247
@@ -1329,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1329 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1370 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1330 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); 1371 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1331 1372
1373 /*
1374 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1375 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1376 * retain the full precision throughout the pipeline.
1377 */
1378 WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
1379 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
1380 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
1381
1382 if (bypass_lut)
1383 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1384
1332 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1385 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1333 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1386 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
1334 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); 1387 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1396,6 +1449,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1396 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; 1449 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1397 u32 tmp, viewport_w, viewport_h; 1450 u32 tmp, viewport_w, viewport_h;
1398 int r; 1451 int r;
1452 bool bypass_lut = false;
1399 1453
1400 /* no fb bound */ 1454 /* no fb bound */
1401 if (!atomic && !crtc->primary->fb) { 1455 if (!atomic && !crtc->primary->fb) {
@@ -1433,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1433 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 1487 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
1434 radeon_bo_unreserve(rbo); 1488 radeon_bo_unreserve(rbo);
1435 1489
1436 switch (target_fb->bits_per_pixel) { 1490 switch (target_fb->pixel_format) {
1437 case 8: 1491 case DRM_FORMAT_C8:
1438 fb_format = 1492 fb_format =
1439 AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | 1493 AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
1440 AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; 1494 AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
1441 break; 1495 break;
1442 case 15: 1496 case DRM_FORMAT_XRGB4444:
1497 case DRM_FORMAT_ARGB4444:
1498 fb_format =
1499 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1500 AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
1501#ifdef __BIG_ENDIAN
1502 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1503#endif
1504 break;
1505 case DRM_FORMAT_XRGB1555:
1443 fb_format = 1506 fb_format =
1444 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1507 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1445 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555; 1508 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
1509#ifdef __BIG_ENDIAN
1510 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1511#endif
1446 break; 1512 break;
1447 case 16: 1513 case DRM_FORMAT_RGB565:
1448 fb_format = 1514 fb_format =
1449 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1515 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1450 AVIVO_D1GRPH_CONTROL_16BPP_RGB565; 1516 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1452,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1452 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; 1518 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1453#endif 1519#endif
1454 break; 1520 break;
1455 case 24: 1521 case DRM_FORMAT_XRGB8888:
1456 case 32: 1522 case DRM_FORMAT_ARGB8888:
1457 fb_format = 1523 fb_format =
1458 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | 1524 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
1459 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; 1525 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1461,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1461 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; 1527 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
1462#endif 1528#endif
1463 break; 1529 break;
1530 case DRM_FORMAT_XRGB2101010:
1531 case DRM_FORMAT_ARGB2101010:
1532 fb_format =
1533 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
1534 AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
1535#ifdef __BIG_ENDIAN
1536 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
1537#endif
1538 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1539 bypass_lut = true;
1540 break;
1464 default: 1541 default:
1465 DRM_ERROR("Unsupported screen depth %d\n", 1542 DRM_ERROR("Unsupported screen format %s\n",
1466 target_fb->bits_per_pixel); 1543 drm_get_format_name(target_fb->pixel_format));
1467 return -EINVAL; 1544 return -EINVAL;
1468 } 1545 }
1469 1546
@@ -1502,6 +1579,13 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1502 if (rdev->family >= CHIP_R600) 1579 if (rdev->family >= CHIP_R600)
1503 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); 1580 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1504 1581
1582 /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
1583 WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
1584 (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
1585
1586 if (bypass_lut)
1587 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1588
1505 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1589 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1506 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1590 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
1507 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); 1591 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index a0f63ff5a5e9..333d143fca2c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -116,6 +116,8 @@
116# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 116# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
117# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2 117# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
118# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4 118# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
119#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808
120# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
119#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c 121#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
120# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) 122# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
121# define EVERGREEN_GRPH_ENDIAN_NONE 0 123# define EVERGREEN_GRPH_ENDIAN_NONE 0
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 1dd0d32993d5..136b7bc7cd20 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -402,6 +402,7 @@
402 * block and vice versa. This applies to GRPH, CUR, etc. 402 * block and vice versa. This applies to GRPH, CUR, etc.
403 */ 403 */
404#define AVIVO_D1GRPH_LUT_SEL 0x6108 404#define AVIVO_D1GRPH_LUT_SEL 0x6108
405# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8)
405#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 406#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
406#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 407#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
407#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 408#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 933c5c39654d..1b9177ed181f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1288,17 +1288,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
1288 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || 1288 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
1289 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) 1289 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
1290 return MODE_OK; 1290 return MODE_OK;
1291 else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) { 1291 else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
1292 if (ASIC_IS_DCE6(rdev)) { 1292 /* HDMI 1.3+ supports max clock of 340 Mhz */
1293 /* HDMI 1.3+ supports max clock of 340 Mhz */ 1293 if (mode->clock > 340000)
1294 if (mode->clock > 340000)
1295 return MODE_CLOCK_HIGH;
1296 else
1297 return MODE_OK;
1298 } else
1299 return MODE_CLOCK_HIGH; 1294 return MODE_CLOCK_HIGH;
1300 } else 1295 else
1296 return MODE_OK;
1297 } else {
1301 return MODE_CLOCK_HIGH; 1298 return MODE_CLOCK_HIGH;
1299 }
1302 } 1300 }
1303 1301
1304 /* check against the max pixel clock */ 1302 /* check against the max pixel clock */
@@ -1549,6 +1547,8 @@ out:
1549static int radeon_dp_mode_valid(struct drm_connector *connector, 1547static int radeon_dp_mode_valid(struct drm_connector *connector,
1550 struct drm_display_mode *mode) 1548 struct drm_display_mode *mode)
1551{ 1549{
1550 struct drm_device *dev = connector->dev;
1551 struct radeon_device *rdev = dev->dev_private;
1552 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1552 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1553 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1553 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1554 1554
@@ -1579,14 +1579,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1579 return MODE_PANEL; 1579 return MODE_PANEL;
1580 } 1580 }
1581 } 1581 }
1582 return MODE_OK;
1583 } else { 1582 } else {
1584 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 1583 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1585 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 1584 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
1586 return radeon_dp_mode_valid_helper(connector, mode); 1585 return radeon_dp_mode_valid_helper(connector, mode);
1587 else 1586 } else {
1588 return MODE_OK; 1587 if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
1588 /* HDMI 1.3+ supports max clock of 340 Mhz */
1589 if (mode->clock > 340000)
1590 return MODE_CLOCK_HIGH;
1591 } else {
1592 if (mode->clock > 165000)
1593 return MODE_CLOCK_HIGH;
1594 }
1595 }
1589 } 1596 }
1597
1598 return MODE_OK;
1590} 1599}
1591 1600
1592static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { 1601static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5ed617056b9c..8fc362aa6a1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -66,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
66 (radeon_crtc->lut_b[i] << 0)); 66 (radeon_crtc->lut_b[i] << 0));
67 } 67 }
68 68
69 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 69 /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
70 WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
70} 71}
71 72
72static void dce4_crtc_load_lut(struct drm_crtc *crtc) 73static void dce4_crtc_load_lut(struct drm_crtc *crtc)
@@ -357,8 +358,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
357 358
358 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 359 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
359 360
361 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
360 radeon_fence_unref(&work->fence); 362 radeon_fence_unref(&work->fence);
361 radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id); 363 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
362 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 364 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
363} 365}
364 366
@@ -459,6 +461,12 @@ static void radeon_flip_work_func(struct work_struct *__work)
459 base &= ~7; 461 base &= ~7;
460 } 462 }
461 463
464 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
465 if (r) {
466 DRM_ERROR("failed to get vblank before flip\n");
467 goto pflip_cleanup;
468 }
469
462 /* We borrow the event spin lock for protecting flip_work */ 470 /* We borrow the event spin lock for protecting flip_work */
463 spin_lock_irqsave(&crtc->dev->event_lock, flags); 471 spin_lock_irqsave(&crtc->dev->event_lock, flags);
464 472
@@ -473,6 +481,16 @@ static void radeon_flip_work_func(struct work_struct *__work)
473 481
474 return; 482 return;
475 483
484pflip_cleanup:
485 if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
486 DRM_ERROR("failed to reserve new rbo in error path\n");
487 goto cleanup;
488 }
489 if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
490 DRM_ERROR("failed to unpin new rbo in error path\n");
491 }
492 radeon_bo_unreserve(work->new_rbo);
493
476cleanup: 494cleanup:
477 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 495 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
478 radeon_fence_unref(&work->fence); 496 radeon_fence_unref(&work->fence);
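The flip path above now takes a vblank reference before arming the flip and drops it when the flip completes, alongside switching the pflip IRQ get to a put. A sketch of the reference pairing being relied on; the wrapper function name is hypothetical:

#include <drm/drmP.h>

/* Sketch: vblank reference pairing assumed by the hunks above. */
static int example_arm_flip(struct drm_device *dev, int crtc_id)
{
	int r = drm_vblank_get(dev, crtc_id);	/* before programming the flip */
	if (r)
		return r;			/* vblank interrupts unavailable */
	/* ... program the flip; the completion handler later calls ... */
	drm_vblank_put(dev, crtc_id);
	return 0;
}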
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
index 71b9f9ab86e4..bc60dec3f586 100644
--- a/drivers/hsi/clients/Kconfig
+++ b/drivers/hsi/clients/Kconfig
@@ -15,7 +15,7 @@ config NOKIA_MODEM
15 15
16config SSI_PROTOCOL 16config SSI_PROTOCOL
17 tristate "SSI protocol" 17 tristate "SSI protocol"
18 depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m) 18 depends on HSI && PHONET && OMAP_SSI
19 help 19 help
20 If you say Y here, you will enable the SSI protocol aka McSAAB. 20 If you say Y here, you will enable the SSI protocol aka McSAAB.
21 21
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index b8693f0b27fe..29aea0b93360 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1116,8 +1116,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
1116 1116
1117 dev_dbg(&pd->dev, "init ssi port...\n"); 1117 dev_dbg(&pd->dev, "init ssi port...\n");
1118 1118
1119 err = ref_module(THIS_MODULE, ssi->owner); 1119 if (!try_module_get(ssi->owner)) {
1120 if (err) {
1121 dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n", 1120 dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n",
1122 err); 1121 err);
1123 return -ENODEV; 1122 return -ENODEV;
@@ -1254,6 +1253,7 @@ static int __exit ssi_port_remove(struct platform_device *pd)
1254 1253
1255 omap_ssi->port[omap_port->port_id] = NULL; 1254 omap_ssi->port[omap_port->port_id] = NULL;
1256 platform_set_drvdata(pd, NULL); 1255 platform_set_drvdata(pd, NULL);
1256 module_put(ssi->owner);
1257 pm_runtime_disable(&pd->dev); 1257 pm_runtime_disable(&pd->dev);
1258 1258
1259 return 0; 1259 return 0;
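ref_module() is replaced here by try_module_get(), and the balancing module_put() is added to the remove path. A minimal sketch of that pairing; the probe/remove function names are illustrative, not the driver's:

#include <linux/module.h>

/* Sketch: take a reference on the parent module in probe, drop it in remove. */
static int example_probe(struct module *owner)
{
	if (!try_module_get(owner))	/* fails while the parent is unloading */
		return -ENODEV;
	return 0;
}

static void example_remove(struct module *owner)
{
	module_put(owner);		/* balances the probe-time reference */
}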
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 00343166feb1..08531a128f53 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1124,6 +1124,16 @@ config SENSORS_SHT21
1124 This driver can also be built as a module. If so, the module 1124 This driver can also be built as a module. If so, the module
1125 will be called sht21. 1125 will be called sht21.
1126 1126
1127config SENSORS_SHTC1
1127config SENSORS_SHTC1
 1128 tristate "Sensirion humidity and temperature sensors. SHTC1 and compat."
1129 depends on I2C
1130 help
1131 If you say yes here you get support for the Sensirion SHTC1 and SHTW1
1132 humidity and temperature sensors.
1133
1134 This driver can also be built as a module. If so, the module
1135 will be called shtc1.
1136
1127config SENSORS_S3C 1137config SENSORS_S3C
1128 tristate "Samsung built-in ADC" 1138 tristate "Samsung built-in ADC"
1129 depends on S3C_ADC 1139 depends on S3C_ADC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 11798ad7e801..3dc0f02f71d2 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
126obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o 126obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
127obj-$(CONFIG_SENSORS_SHT15) += sht15.o 127obj-$(CONFIG_SENSORS_SHT15) += sht15.o
128obj-$(CONFIG_SENSORS_SHT21) += sht21.o 128obj-$(CONFIG_SENSORS_SHT21) += sht21.o
129obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
129obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o 130obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
130obj-$(CONFIG_SENSORS_SMM665) += smm665.o 131obj-$(CONFIG_SENSORS_SMM665) += smm665.o
131obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o 132obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 6edce42c61d5..2ae8a304b5ef 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -45,30 +45,6 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
45 45
46static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; 46static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
47 47
48static int atxp1_probe(struct i2c_client *client,
49 const struct i2c_device_id *id);
50static int atxp1_remove(struct i2c_client *client);
51static struct atxp1_data *atxp1_update_device(struct device *dev);
52static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
53
54static const struct i2c_device_id atxp1_id[] = {
55 { "atxp1", 0 },
56 { }
57};
58MODULE_DEVICE_TABLE(i2c, atxp1_id);
59
60static struct i2c_driver atxp1_driver = {
61 .class = I2C_CLASS_HWMON,
62 .driver = {
63 .name = "atxp1",
64 },
65 .probe = atxp1_probe,
66 .remove = atxp1_remove,
67 .id_table = atxp1_id,
68 .detect = atxp1_detect,
69 .address_list = normal_i2c,
70};
71
72struct atxp1_data { 48struct atxp1_data {
73 struct device *hwmon_dev; 49 struct device *hwmon_dev;
74 struct mutex update_lock; 50 struct mutex update_lock;
@@ -386,4 +362,22 @@ static int atxp1_remove(struct i2c_client *client)
386 return 0; 362 return 0;
387}; 363};
388 364
365static const struct i2c_device_id atxp1_id[] = {
366 { "atxp1", 0 },
367 { }
368};
369MODULE_DEVICE_TABLE(i2c, atxp1_id);
370
371static struct i2c_driver atxp1_driver = {
372 .class = I2C_CLASS_HWMON,
373 .driver = {
374 .name = "atxp1",
375 },
376 .probe = atxp1_probe,
377 .remove = atxp1_remove,
378 .id_table = atxp1_id,
379 .detect = atxp1_detect,
380 .address_list = normal_i2c,
381};
382
389module_i2c_driver(atxp1_driver); 383module_i2c_driver(atxp1_driver);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 93d26e8af3e2..bfd3f3eeabcd 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -148,7 +148,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
148 148
149 switch (reg) { 149 switch (reg) {
150 case INA2XX_SHUNT_VOLTAGE: 150 case INA2XX_SHUNT_VOLTAGE:
151 val = DIV_ROUND_CLOSEST(data->regs[reg], 151 /* signed register */
152 val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
152 data->config->shunt_div); 153 data->config->shunt_div);
153 break; 154 break;
154 case INA2XX_BUS_VOLTAGE: 155 case INA2XX_BUS_VOLTAGE:
@@ -160,8 +161,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
160 val = data->regs[reg] * data->config->power_lsb; 161 val = data->regs[reg] * data->config->power_lsb;
161 break; 162 break;
162 case INA2XX_CURRENT: 163 case INA2XX_CURRENT:
163 /* LSB=1mA (selected). Is in mA */ 164 /* signed register, LSB=1mA (selected), in mA */
164 val = data->regs[reg]; 165 val = (s16)data->regs[reg];
165 break; 166 break;
166 default: 167 default:
167 /* programmer goofed */ 168 /* programmer goofed */
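The shunt-voltage and current registers on these parts are two's-complement (the new comments say "signed register"), so the raw bus word must be reinterpreted as s16 before scaling; otherwise a negative, reversed-current reading shows up as a huge positive value. A worked example with an assumed register value:

/* Sketch: effect of the (s16) cast for an assumed raw reading of 0xfff6. */
u16 raw = 0xfff6;
int as_unsigned = raw;		/* 65526 -- wrong for a reversed current */
int as_signed = (s16)raw;	/* -10   -- correct two's-complement value */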
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index bed4af358308..b0129a54e1a6 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -5,7 +5,7 @@
5 * Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com> 5 * Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
6 * Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de> 6 * Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
7 * Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com> 7 * Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com>
8 * Copyright (C) 2007--2009 Jean Delvare <jdelvare@suse.de> 8 * Copyright (C) 2007--2014 Jean Delvare <jdelvare@suse.de>
9 * 9 *
10 * Chip details at <http://www.national.com/ds/LM/LM85.pdf> 10 * Chip details at <http://www.national.com/ds/LM/LM85.pdf>
11 * 11 *
@@ -39,7 +39,7 @@
39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
40 40
41enum chips { 41enum chips {
42 any_chip, lm85b, lm85c, 42 lm85,
43 adm1027, adt7463, adt7468, 43 adm1027, adt7463, adt7468,
44 emc6d100, emc6d102, emc6d103, emc6d103s 44 emc6d100, emc6d102, emc6d103, emc6d103s
45}; 45};
@@ -75,9 +75,6 @@ enum chips {
75#define LM85_COMPANY_NATIONAL 0x01 75#define LM85_COMPANY_NATIONAL 0x01
76#define LM85_COMPANY_ANALOG_DEV 0x41 76#define LM85_COMPANY_ANALOG_DEV 0x41
77#define LM85_COMPANY_SMSC 0x5c 77#define LM85_COMPANY_SMSC 0x5c
78#define LM85_VERSTEP_VMASK 0xf0
79#define LM85_VERSTEP_GENERIC 0x60
80#define LM85_VERSTEP_GENERIC2 0x70
81#define LM85_VERSTEP_LM85C 0x60 78#define LM85_VERSTEP_LM85C 0x60
82#define LM85_VERSTEP_LM85B 0x62 79#define LM85_VERSTEP_LM85B 0x62
83#define LM85_VERSTEP_LM96000_1 0x68 80#define LM85_VERSTEP_LM96000_1 0x68
@@ -351,9 +348,9 @@ static const struct i2c_device_id lm85_id[] = {
351 { "adm1027", adm1027 }, 348 { "adm1027", adm1027 },
352 { "adt7463", adt7463 }, 349 { "adt7463", adt7463 },
353 { "adt7468", adt7468 }, 350 { "adt7468", adt7468 },
354 { "lm85", any_chip }, 351 { "lm85", lm85 },
355 { "lm85b", lm85b }, 352 { "lm85b", lm85 },
356 { "lm85c", lm85c }, 353 { "lm85c", lm85 },
357 { "emc6d100", emc6d100 }, 354 { "emc6d100", emc6d100 },
358 { "emc6d101", emc6d100 }, 355 { "emc6d101", emc6d100 },
359 { "emc6d102", emc6d102 }, 356 { "emc6d102", emc6d102 },
@@ -1281,7 +1278,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1281{ 1278{
1282 struct i2c_adapter *adapter = client->adapter; 1279 struct i2c_adapter *adapter = client->adapter;
1283 int address = client->addr; 1280 int address = client->addr;
1284 const char *type_name; 1281 const char *type_name = NULL;
1285 int company, verstep; 1282 int company, verstep;
1286 1283
1287 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1284 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1297,16 +1294,6 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1297 "Detecting device at 0x%02x with COMPANY: 0x%02x and VERSTEP: 0x%02x\n", 1294 "Detecting device at 0x%02x with COMPANY: 0x%02x and VERSTEP: 0x%02x\n",
1298 address, company, verstep); 1295 address, company, verstep);
1299 1296
1300 /* All supported chips have the version in common */
1301 if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC &&
1302 (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) {
1303 dev_dbg(&adapter->dev,
1304 "Autodetection failed: unsupported version\n");
1305 return -ENODEV;
1306 }
1307 type_name = "lm85";
1308
1309 /* Now, refine the detection */
1310 if (company == LM85_COMPANY_NATIONAL) { 1297 if (company == LM85_COMPANY_NATIONAL) {
1311 switch (verstep) { 1298 switch (verstep) {
1312 case LM85_VERSTEP_LM85C: 1299 case LM85_VERSTEP_LM85C:
@@ -1323,6 +1310,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1323 "Found Winbond WPCD377I, ignoring\n"); 1310 "Found Winbond WPCD377I, ignoring\n");
1324 return -ENODEV; 1311 return -ENODEV;
1325 } 1312 }
1313 type_name = "lm85";
1326 break; 1314 break;
1327 } 1315 }
1328 } else if (company == LM85_COMPANY_ANALOG_DEV) { 1316 } else if (company == LM85_COMPANY_ANALOG_DEV) {
@@ -1357,12 +1345,11 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1357 type_name = "emc6d103s"; 1345 type_name = "emc6d103s";
1358 break; 1346 break;
1359 } 1347 }
1360 } else {
1361 dev_dbg(&adapter->dev,
1362 "Autodetection failed: unknown vendor\n");
1363 return -ENODEV;
1364 } 1348 }
1365 1349
1350 if (!type_name)
1351 return -ENODEV;
1352
1366 strlcpy(info->type, type_name, I2C_NAME_SIZE); 1353 strlcpy(info->type, type_name, I2C_NAME_SIZE);
1367 1354
1368 return 0; 1355 return 0;
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index af81be1237c9..c86a18402496 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -47,7 +47,7 @@
47#define LTC4151_ADIN_L 0x05 47#define LTC4151_ADIN_L 0x05
48 48
49struct ltc4151_data { 49struct ltc4151_data {
50 struct device *hwmon_dev; 50 struct i2c_client *client;
51 51
52 struct mutex update_lock; 52 struct mutex update_lock;
53 bool valid; 53 bool valid;
@@ -59,8 +59,8 @@ struct ltc4151_data {
59 59
60static struct ltc4151_data *ltc4151_update_device(struct device *dev) 60static struct ltc4151_data *ltc4151_update_device(struct device *dev)
61{ 61{
62 struct i2c_client *client = to_i2c_client(dev); 62 struct ltc4151_data *data = dev_get_drvdata(dev);
63 struct ltc4151_data *data = i2c_get_clientdata(client); 63 struct i2c_client *client = data->client;
64 struct ltc4151_data *ret = data; 64 struct ltc4151_data *ret = data;
65 65
66 mutex_lock(&data->update_lock); 66 mutex_lock(&data->update_lock);
@@ -159,7 +159,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4151_show_value, NULL,
159 * Finally, construct an array of pointers to members of the above objects, 159 * Finally, construct an array of pointers to members of the above objects,
160 * as required for sysfs_create_group() 160 * as required for sysfs_create_group()
161 */ 161 */
162static struct attribute *ltc4151_attributes[] = { 162static struct attribute *ltc4151_attrs[] = {
163 &sensor_dev_attr_in1_input.dev_attr.attr, 163 &sensor_dev_attr_in1_input.dev_attr.attr,
164 &sensor_dev_attr_in2_input.dev_attr.attr, 164 &sensor_dev_attr_in2_input.dev_attr.attr,
165 165
@@ -167,54 +167,30 @@ static struct attribute *ltc4151_attributes[] = {
167 167
168 NULL, 168 NULL,
169}; 169};
170 170ATTRIBUTE_GROUPS(ltc4151);
171static const struct attribute_group ltc4151_group = {
172 .attrs = ltc4151_attributes,
173};
174 171
175static int ltc4151_probe(struct i2c_client *client, 172static int ltc4151_probe(struct i2c_client *client,
176 const struct i2c_device_id *id) 173 const struct i2c_device_id *id)
177{ 174{
178 struct i2c_adapter *adapter = client->adapter; 175 struct i2c_adapter *adapter = client->adapter;
176 struct device *dev = &client->dev;
179 struct ltc4151_data *data; 177 struct ltc4151_data *data;
180 int ret; 178 struct device *hwmon_dev;
181 179
182 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 180 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
183 return -ENODEV; 181 return -ENODEV;
184 182
185 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); 183 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
186 if (!data) 184 if (!data)
187 return -ENOMEM; 185 return -ENOMEM;
188 186
189 i2c_set_clientdata(client, data); 187 data->client = client;
190 mutex_init(&data->update_lock); 188 mutex_init(&data->update_lock);
191 189
192 /* Register sysfs hooks */ 190 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
193 ret = sysfs_create_group(&client->dev.kobj, &ltc4151_group); 191 data,
194 if (ret) 192 ltc4151_groups);
195 return ret; 193 return PTR_ERR_OR_ZERO(hwmon_dev);
196
197 data->hwmon_dev = hwmon_device_register(&client->dev);
198 if (IS_ERR(data->hwmon_dev)) {
199 ret = PTR_ERR(data->hwmon_dev);
200 goto out_hwmon_device_register;
201 }
202
203 return 0;
204
205out_hwmon_device_register:
206 sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
207 return ret;
208}
209
210static int ltc4151_remove(struct i2c_client *client)
211{
212 struct ltc4151_data *data = i2c_get_clientdata(client);
213
214 hwmon_device_unregister(data->hwmon_dev);
215 sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
216
217 return 0;
218} 194}
219 195
220static const struct i2c_device_id ltc4151_id[] = { 196static const struct i2c_device_id ltc4151_id[] = {
@@ -229,7 +205,6 @@ static struct i2c_driver ltc4151_driver = {
229 .name = "ltc4151", 205 .name = "ltc4151",
230 }, 206 },
231 .probe = ltc4151_probe, 207 .probe = ltc4151_probe,
232 .remove = ltc4151_remove,
233 .id_table = ltc4151_id, 208 .id_table = ltc4151_id,
234}; 209};
235 210
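ATTRIBUTE_GROUPS(ltc4151) stands in for the hand-written attribute_group, and devm_hwmon_device_register_with_groups() removes the need for a remove() callback. Roughly what the macro provides (an approximation of the sysfs.h helper, not its exact definition):

/* Sketch: approximate expansion of ATTRIBUTE_GROUPS(ltc4151). */
static const struct attribute_group ltc4151_group = {
	.attrs = ltc4151_attrs,
};

static const struct attribute_group *ltc4151_groups[] = {
	&ltc4151_group,
	NULL,
};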
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
new file mode 100644
index 000000000000..decd7df995ab
--- /dev/null
+++ b/drivers/hwmon/shtc1.c
@@ -0,0 +1,251 @@
1/* Sensirion SHTC1 humidity and temperature sensor driver
2 *
3 * Copyright (C) 2014 Sensirion AG, Switzerland
4 * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/i2c.h>
22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h>
24#include <linux/err.h>
25#include <linux/delay.h>
26#include <linux/platform_data/shtc1.h>
27
28/* commands (high precision mode) */
29static const unsigned char shtc1_cmd_measure_blocking_hpm[] = { 0x7C, 0xA2 };
30static const unsigned char shtc1_cmd_measure_nonblocking_hpm[] = { 0x78, 0x66 };
31
32/* commands (low precision mode) */
33static const unsigned char shtc1_cmd_measure_blocking_lpm[] = { 0x64, 0x58 };
34static const unsigned char shtc1_cmd_measure_nonblocking_lpm[] = { 0x60, 0x9c };
35
36/* command for reading the ID register */
37static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
38
39/* constants for reading the ID register */
40#define SHTC1_ID 0x07
41#define SHTC1_ID_REG_MASK 0x1f
42
43/* delays for non-blocking i2c commands, both in us */
44#define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
45#define SHTC1_NONBLOCKING_WAIT_TIME_LPM 1000
46
47#define SHTC1_CMD_LENGTH 2
48#define SHTC1_RESPONSE_LENGTH 6
49
50struct shtc1_data {
51 struct i2c_client *client;
52 struct mutex update_lock;
53 bool valid;
54 unsigned long last_updated; /* in jiffies */
55
56 const unsigned char *command;
57 unsigned int nonblocking_wait_time; /* in us */
58
59 struct shtc1_platform_data setup;
60
61 int temperature; /* 1000 * temperature in degrees C */
62 int humidity; /* 1000 * relative humidity in %RH */
63};
64
65static int shtc1_update_values(struct i2c_client *client,
66 struct shtc1_data *data,
67 char *buf, int bufsize)
68{
69 int ret = i2c_master_send(client, data->command, SHTC1_CMD_LENGTH);
70 if (ret != SHTC1_CMD_LENGTH) {
71 dev_err(&client->dev, "failed to send command: %d\n", ret);
72 return ret < 0 ? ret : -EIO;
73 }
74
75 /*
76 * In blocking mode (clock stretching mode) the I2C bus
77 * is blocked for other traffic, thus the call to i2c_master_recv()
78 * will wait until the data is ready. For non-blocking mode, we
79 * have to wait ourselves.
80 */
81 if (!data->setup.blocking_io)
82 usleep_range(data->nonblocking_wait_time,
83 data->nonblocking_wait_time + 1000);
84
85 ret = i2c_master_recv(client, buf, bufsize);
86 if (ret != bufsize) {
87 dev_err(&client->dev, "failed to read values: %d\n", ret);
88 return ret < 0 ? ret : -EIO;
89 }
90
91 return 0;
92}
93
94/* sysfs attributes */
95static struct shtc1_data *shtc1_update_client(struct device *dev)
96{
97 struct shtc1_data *data = dev_get_drvdata(dev);
98 struct i2c_client *client = data->client;
99 unsigned char buf[SHTC1_RESPONSE_LENGTH];
100 int val;
101 int ret = 0;
102
103 mutex_lock(&data->update_lock);
104
105 if (time_after(jiffies, data->last_updated + HZ / 10) || !data->valid) {
106 ret = shtc1_update_values(client, data, buf, sizeof(buf));
107 if (ret)
108 goto out;
109
110 /*
111 * From datasheet:
112 * T = -45 + 175 * ST / 2^16
113 * RH = 100 * SRH / 2^16
114 *
115 * Adapted for integer fixed point (3 digit) arithmetic.
116 */
117 val = be16_to_cpup((__be16 *)buf);
118 data->temperature = ((21875 * val) >> 13) - 45000;
119 val = be16_to_cpup((__be16 *)(buf + 3));
120 data->humidity = ((12500 * val) >> 13);
121
122 data->last_updated = jiffies;
123 data->valid = true;
124 }
125
126out:
127 mutex_unlock(&data->update_lock);
128
129 return ret == 0 ? data : ERR_PTR(ret);
130}
131
132static ssize_t temp1_input_show(struct device *dev,
133 struct device_attribute *attr,
134 char *buf)
135{
136 struct shtc1_data *data = shtc1_update_client(dev);
137 if (IS_ERR(data))
138 return PTR_ERR(data);
139
140 return sprintf(buf, "%d\n", data->temperature);
141}
142
143static ssize_t humidity1_input_show(struct device *dev,
144 struct device_attribute *attr, char *buf)
145{
146 struct shtc1_data *data = shtc1_update_client(dev);
147 if (IS_ERR(data))
148 return PTR_ERR(data);
149
150 return sprintf(buf, "%d\n", data->humidity);
151}
152
153static DEVICE_ATTR_RO(temp1_input);
154static DEVICE_ATTR_RO(humidity1_input);
155
156static struct attribute *shtc1_attrs[] = {
157 &dev_attr_temp1_input.attr,
158 &dev_attr_humidity1_input.attr,
159 NULL
160};
161
162ATTRIBUTE_GROUPS(shtc1);
163
164static void shtc1_select_command(struct shtc1_data *data)
165{
166 if (data->setup.high_precision) {
167 data->command = data->setup.blocking_io ?
168 shtc1_cmd_measure_blocking_hpm :
169 shtc1_cmd_measure_nonblocking_hpm;
170 data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_HPM;
171
172 } else {
173 data->command = data->setup.blocking_io ?
174 shtc1_cmd_measure_blocking_lpm :
175 shtc1_cmd_measure_nonblocking_lpm;
176 data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_LPM;
177 }
178}
179
180static int shtc1_probe(struct i2c_client *client,
181 const struct i2c_device_id *id)
182{
183 int ret;
184 char id_reg[2];
185 struct shtc1_data *data;
186 struct device *hwmon_dev;
187 struct i2c_adapter *adap = client->adapter;
188 struct device *dev = &client->dev;
189
190 if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) {
191 dev_err(dev, "plain i2c transactions not supported\n");
192 return -ENODEV;
193 }
194
195 ret = i2c_master_send(client, shtc1_cmd_read_id_reg, SHTC1_CMD_LENGTH);
196 if (ret != SHTC1_CMD_LENGTH) {
197 dev_err(dev, "could not send read_id_reg command: %d\n", ret);
198 return ret < 0 ? ret : -ENODEV;
199 }
200 ret = i2c_master_recv(client, id_reg, sizeof(id_reg));
201 if (ret != sizeof(id_reg)) {
202 dev_err(dev, "could not read ID register: %d\n", ret);
203 return -ENODEV;
204 }
205 if ((id_reg[1] & SHTC1_ID_REG_MASK) != SHTC1_ID) {
206 dev_err(dev, "ID register doesn't match\n");
207 return -ENODEV;
208 }
209
210 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
211 if (!data)
212 return -ENOMEM;
213
214 data->setup.blocking_io = false;
215 data->setup.high_precision = true;
216 data->client = client;
217
218 if (client->dev.platform_data)
219 data->setup = *(struct shtc1_platform_data *)dev->platform_data;
220 shtc1_select_command(data);
221 mutex_init(&data->update_lock);
222
223 hwmon_dev = devm_hwmon_device_register_with_groups(dev,
224 client->name,
225 data,
226 shtc1_groups);
227 if (IS_ERR(hwmon_dev))
228 dev_dbg(dev, "unable to register hwmon device\n");
229
230 return PTR_ERR_OR_ZERO(hwmon_dev);
231}
232
233/* device ID table */
234static const struct i2c_device_id shtc1_id[] = {
235 { "shtc1", 0 },
236 { "shtw1", 0 },
237 { }
238};
239MODULE_DEVICE_TABLE(i2c, shtc1_id);
240
241static struct i2c_driver shtc1_i2c_driver = {
242 .driver.name = "shtc1",
243 .probe = shtc1_probe,
244 .id_table = shtc1_id,
245};
246
247module_i2c_driver(shtc1_i2c_driver);
248
249MODULE_AUTHOR("Johannes Winkelmann <johannes.winkelmann@sensirion.com>");
250MODULE_DESCRIPTION("Sensirion SHTC1 humidity and temperature sensor driver");
251MODULE_LICENSE("GPL");
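The conversion constants in shtc1_update_client() follow directly from the quoted datasheet formulas T = -45 + 175 * ST / 2^16 and RH = 100 * SRH / 2^16, scaled by 1000 for milli-units: 175000 / 2^16 reduces to 21875 / 2^13, and 100000 / 2^16 to 12500 / 2^13. A worked example with an assumed raw temperature word:

/* Sketch: millidegree conversion for an assumed raw word ST = 0x6000 (24576). */
int val = 0x6000;
int temp = ((21875 * val) >> 13) - 45000;	/* 65625 - 45000 = 20625, i.e. 20.625 degC */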
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 611f34c7333d..c53619086f33 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -27,17 +27,8 @@
27struct vexpress_hwmon_data { 27struct vexpress_hwmon_data {
28 struct device *hwmon_dev; 28 struct device *hwmon_dev;
29 struct regmap *reg; 29 struct regmap *reg;
30 const char *name;
31}; 30};
32 31
33static ssize_t vexpress_hwmon_name_show(struct device *dev,
34 struct device_attribute *dev_attr, char *buffer)
35{
36 struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
37
38 return sprintf(buffer, "%s\n", data->name);
39}
40
41static ssize_t vexpress_hwmon_label_show(struct device *dev, 32static ssize_t vexpress_hwmon_label_show(struct device *dev,
42 struct device_attribute *dev_attr, char *buffer) 33 struct device_attribute *dev_attr, char *buffer)
43{ 34{
@@ -95,16 +86,6 @@ static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
95 return attr->mode; 86 return attr->mode;
96} 87}
97 88
98static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
99
100#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
101struct attribute *vexpress_hwmon_attrs_##_name[] = { \
102 &dev_attr_name.attr, \
103 &dev_attr_##_label_attr.attr, \
104 &sensor_dev_attr_##_input_attr.dev_attr.attr, \
105 NULL \
106}
107
108struct vexpress_hwmon_type { 89struct vexpress_hwmon_type {
109 const char *name; 90 const char *name;
110 const struct attribute_group **attr_groups; 91 const struct attribute_group **attr_groups;
@@ -114,7 +95,11 @@ struct vexpress_hwmon_type {
114static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 95static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
115static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show, 96static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
116 NULL, 1000); 97 NULL, 1000);
117static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input); 98static struct attribute *vexpress_hwmon_attrs_volt[] = {
99 &dev_attr_in1_label.attr,
100 &sensor_dev_attr_in1_input.dev_attr.attr,
101 NULL
102};
118static struct attribute_group vexpress_hwmon_group_volt = { 103static struct attribute_group vexpress_hwmon_group_volt = {
119 .is_visible = vexpress_hwmon_attr_is_visible, 104 .is_visible = vexpress_hwmon_attr_is_visible,
120 .attrs = vexpress_hwmon_attrs_volt, 105 .attrs = vexpress_hwmon_attrs_volt,
@@ -131,7 +116,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_volt = {
131static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 116static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
132static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show, 117static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
133 NULL, 1000); 118 NULL, 1000);
134static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input); 119static struct attribute *vexpress_hwmon_attrs_amp[] = {
120 &dev_attr_curr1_label.attr,
121 &sensor_dev_attr_curr1_input.dev_attr.attr,
122 NULL
123};
135static struct attribute_group vexpress_hwmon_group_amp = { 124static struct attribute_group vexpress_hwmon_group_amp = {
136 .is_visible = vexpress_hwmon_attr_is_visible, 125 .is_visible = vexpress_hwmon_attr_is_visible,
137 .attrs = vexpress_hwmon_attrs_amp, 126 .attrs = vexpress_hwmon_attrs_amp,
@@ -147,7 +136,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_amp = {
147static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 136static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
148static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show, 137static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
149 NULL, 1000); 138 NULL, 1000);
150static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input); 139static struct attribute *vexpress_hwmon_attrs_temp[] = {
140 &dev_attr_temp1_label.attr,
141 &sensor_dev_attr_temp1_input.dev_attr.attr,
142 NULL
143};
151static struct attribute_group vexpress_hwmon_group_temp = { 144static struct attribute_group vexpress_hwmon_group_temp = {
152 .is_visible = vexpress_hwmon_attr_is_visible, 145 .is_visible = vexpress_hwmon_attr_is_visible,
153 .attrs = vexpress_hwmon_attrs_temp, 146 .attrs = vexpress_hwmon_attrs_temp,
@@ -163,7 +156,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_temp = {
163static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 156static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
164static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show, 157static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
165 NULL, 1); 158 NULL, 1);
166static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input); 159static struct attribute *vexpress_hwmon_attrs_power[] = {
160 &dev_attr_power1_label.attr,
161 &sensor_dev_attr_power1_input.dev_attr.attr,
162 NULL
163};
167static struct attribute_group vexpress_hwmon_group_power = { 164static struct attribute_group vexpress_hwmon_group_power = {
168 .is_visible = vexpress_hwmon_attr_is_visible, 165 .is_visible = vexpress_hwmon_attr_is_visible,
169 .attrs = vexpress_hwmon_attrs_power, 166 .attrs = vexpress_hwmon_attrs_power,
@@ -179,7 +176,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_power = {
179static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 176static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
180static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show, 177static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
181 NULL, 1); 178 NULL, 1);
182static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input); 179static struct attribute *vexpress_hwmon_attrs_energy[] = {
180 &dev_attr_energy1_label.attr,
181 &sensor_dev_attr_energy1_input.dev_attr.attr,
182 NULL
183};
183static struct attribute_group vexpress_hwmon_group_energy = { 184static struct attribute_group vexpress_hwmon_group_energy = {
184 .is_visible = vexpress_hwmon_attr_is_visible, 185 .is_visible = vexpress_hwmon_attr_is_visible,
185 .attrs = vexpress_hwmon_attrs_energy, 186 .attrs = vexpress_hwmon_attrs_energy,
@@ -218,7 +219,6 @@ MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
218 219
219static int vexpress_hwmon_probe(struct platform_device *pdev) 220static int vexpress_hwmon_probe(struct platform_device *pdev)
220{ 221{
221 int err;
222 const struct of_device_id *match; 222 const struct of_device_id *match;
223 struct vexpress_hwmon_data *data; 223 struct vexpress_hwmon_data *data;
224 const struct vexpress_hwmon_type *type; 224 const struct vexpress_hwmon_type *type;
@@ -232,45 +232,19 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
232 if (!match) 232 if (!match)
233 return -ENODEV; 233 return -ENODEV;
234 type = match->data; 234 type = match->data;
235 data->name = type->name;
236 235
237 data->reg = devm_regmap_init_vexpress_config(&pdev->dev); 236 data->reg = devm_regmap_init_vexpress_config(&pdev->dev);
238 if (IS_ERR(data->reg)) 237 if (IS_ERR(data->reg))
239 return PTR_ERR(data->reg); 238 return PTR_ERR(data->reg);
240 239
241 err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups); 240 data->hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
242 if (err) 241 type->name, data, type->attr_groups);
243 goto error;
244
245 data->hwmon_dev = hwmon_device_register(&pdev->dev);
246 if (IS_ERR(data->hwmon_dev)) {
247 err = PTR_ERR(data->hwmon_dev);
248 goto error;
249 }
250
251 return 0;
252
253error:
254 sysfs_remove_group(&pdev->dev.kobj, match->data);
255 return err;
256}
257
258static int vexpress_hwmon_remove(struct platform_device *pdev)
259{
260 struct vexpress_hwmon_data *data = platform_get_drvdata(pdev);
261 const struct of_device_id *match;
262
263 hwmon_device_unregister(data->hwmon_dev);
264
265 match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
266 sysfs_remove_group(&pdev->dev.kobj, match->data);
267 242
268 return 0; 243 return PTR_ERR_OR_ZERO(data->hwmon_dev);
269} 244}
270 245
271static struct platform_driver vexpress_hwmon_driver = { 246static struct platform_driver vexpress_hwmon_driver = {
272 .probe = vexpress_hwmon_probe, 247 .probe = vexpress_hwmon_probe,
273 .remove = vexpress_hwmon_remove,
274 .driver = { 248 .driver = {
275 .name = DRVNAME, 249 .name = DRVNAME,
276 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 620d1004a1e7..9f7d5859cf65 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -676,6 +676,16 @@ config I2C_RIIC
676 This driver can also be built as a module. If so, the module 676 This driver can also be built as a module. If so, the module
677 will be called i2c-riic. 677 will be called i2c-riic.
678 678
679config I2C_RK3X
680 tristate "Rockchip RK3xxx I2C adapter"
681 depends on OF
682 help
683 Say Y here to include support for the I2C adapter in Rockchip RK3xxx
684 SoCs.
685
686 This driver can also be built as a module. If so, the module will
687 be called i2c-rk3x.
688
679config HAVE_S3C2410_I2C 689config HAVE_S3C2410_I2C
680 bool 690 bool
681 help 691 help
@@ -764,6 +774,19 @@ config I2C_STU300
764 This driver can also be built as a module. If so, the module 774 This driver can also be built as a module. If so, the module
765 will be called i2c-stu300. 775 will be called i2c-stu300.
766 776
777config I2C_SUN6I_P2WI
778 tristate "Allwinner sun6i internal P2WI controller"
779 depends on RESET_CONTROLLER
780 depends on MACH_SUN6I || COMPILE_TEST
781 help
782 If you say yes to this option, support will be included for the
783 P2WI (Push/Pull 2 Wire Interface) controller embedded in some sunxi
784 SOCs.
785 The P2WI looks like an SMBus controller (which supports only byte
786 accesses), except that it only supports one slave device.
787 This interface is used to connect to specific PMIC devices (like the
788 AXP221).
789
767config I2C_TEGRA 790config I2C_TEGRA
768 tristate "NVIDIA Tegra internal I2C controller" 791 tristate "NVIDIA Tegra internal I2C controller"
769 depends on ARCH_TEGRA 792 depends on ARCH_TEGRA
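Because the P2WI block described in the new Kconfig entry presents itself as an SMBus-style byte-data controller with a single fixed slave, a client driver for the attached PMIC would go through the ordinary SMBus byte helpers. A hedged sketch, assuming a struct i2c_client *client bound to the adapter; the register offset 0x03 is an arbitrary example, not taken from the patch:

/* Sketch: reading one register of the single P2WI slave. */
s32 val = i2c_smbus_read_byte_data(client, 0x03);	/* 0x03 is only an example */
if (val < 0)
	return val;		/* bus or parity error reported by the controller */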
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 298692cc6000..dd9a7f8e873f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
66obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o 66obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
67obj-$(CONFIG_I2C_QUP) += i2c-qup.o 67obj-$(CONFIG_I2C_QUP) += i2c-qup.o
68obj-$(CONFIG_I2C_RIIC) += i2c-riic.o 68obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
69obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
69obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o 70obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
70obj-$(CONFIG_I2C_S6000) += i2c-s6000.o 71obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
71obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o 72obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
74obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o 75obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
75obj-$(CONFIG_I2C_ST) += i2c-st.o 76obj-$(CONFIG_I2C_ST) += i2c-st.o
76obj-$(CONFIG_I2C_STU300) += i2c-stu300.o 77obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
78obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
77obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o 79obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
78obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o 80obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
79obj-$(CONFIG_I2C_WMT) += i2c-wmt.o 81obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
new file mode 100644
index 000000000000..a9791509966a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -0,0 +1,763 @@
1/*
2 * Driver for I2C adapter in Rockchip RK3xxx SoC
3 *
4 * Max Schwarz <max.schwarz@online.de>
5 * based on the patches by Rockchip Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/i2c.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/platform_device.h>
19#include <linux/io.h>
20#include <linux/of_address.h>
21#include <linux/of_irq.h>
22#include <linux/spinlock.h>
23#include <linux/clk.h>
24#include <linux/wait.h>
25#include <linux/mfd/syscon.h>
26#include <linux/regmap.h>
27
28
29/* Register Map */
30#define REG_CON 0x00 /* control register */
31#define REG_CLKDIV 0x04 /* clock divisor register */
32#define REG_MRXADDR 0x08 /* slave address for REGISTER_TX */
33#define REG_MRXRADDR 0x0c /* slave register address for REGISTER_TX */
34#define REG_MTXCNT 0x10 /* number of bytes to be transmitted */
35#define REG_MRXCNT 0x14 /* number of bytes to be received */
36#define REG_IEN 0x18 /* interrupt enable */
37#define REG_IPD 0x1c /* interrupt pending */
38#define REG_FCNT 0x20 /* finished count */
39
40/* Data buffer offsets */
41#define TXBUFFER_BASE 0x100
42#define RXBUFFER_BASE 0x200
43
44/* REG_CON bits */
45#define REG_CON_EN BIT(0)
46enum {
47 REG_CON_MOD_TX = 0, /* transmit data */
48 REG_CON_MOD_REGISTER_TX, /* select register and restart */
49 REG_CON_MOD_RX, /* receive data */
50 REG_CON_MOD_REGISTER_RX, /* broken: transmits read addr AND writes
51 * register addr */
52};
53#define REG_CON_MOD(mod) ((mod) << 1)
54#define REG_CON_MOD_MASK (BIT(1) | BIT(2))
55#define REG_CON_START BIT(3)
56#define REG_CON_STOP BIT(4)
57#define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */
58#define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */
59
60/* REG_MRXADDR bits */
61#define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
62
63/* REG_IEN/REG_IPD bits */
64#define REG_INT_BTF BIT(0) /* a byte was transmitted */
65#define REG_INT_BRF BIT(1) /* a byte was received */
66#define REG_INT_MBTF BIT(2) /* master data transmit finished */
67#define REG_INT_MBRF BIT(3) /* master data receive finished */
68#define REG_INT_START BIT(4) /* START condition generated */
69#define REG_INT_STOP BIT(5) /* STOP condition generated */
70#define REG_INT_NAKRCV BIT(6) /* NACK received */
71#define REG_INT_ALL 0x7f
72
73/* Constants */
74#define WAIT_TIMEOUT 200 /* ms */
75#define DEFAULT_SCL_RATE (100 * 1000) /* Hz */
76
77enum rk3x_i2c_state {
78 STATE_IDLE,
79 STATE_START,
80 STATE_READ,
81 STATE_WRITE,
82 STATE_STOP
83};
84
85/**
86 * @grf_offset: offset inside the grf regmap for setting the i2c type
87 */
88struct rk3x_i2c_soc_data {
89 int grf_offset;
90};
91
92struct rk3x_i2c {
93 struct i2c_adapter adap;
94 struct device *dev;
95 struct rk3x_i2c_soc_data *soc_data;
96
97 /* Hardware resources */
98 void __iomem *regs;
99 struct clk *clk;
100
101 /* Settings */
102 unsigned int scl_frequency;
103
104 /* Synchronization & notification */
105 spinlock_t lock;
106 wait_queue_head_t wait;
107 bool busy;
108
109 /* Current message */
110 struct i2c_msg *msg;
111 u8 addr;
112 unsigned int mode;
113 bool is_last_msg;
114
115 /* I2C state machine */
116 enum rk3x_i2c_state state;
117 unsigned int processed; /* sent/received bytes */
118 int error;
119};
120
121static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value,
122 unsigned int offset)
123{
124 writel(value, i2c->regs + offset);
125}
126
127static inline u32 i2c_readl(struct rk3x_i2c *i2c, unsigned int offset)
128{
129 return readl(i2c->regs + offset);
130}
131
132/* Reset all interrupt pending bits */
133static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
134{
135 i2c_writel(i2c, REG_INT_ALL, REG_IPD);
136}
137
138/**
139 * Generate a START condition, which triggers a REG_INT_START interrupt.
140 */
141static void rk3x_i2c_start(struct rk3x_i2c *i2c)
142{
143 u32 val;
144
145 rk3x_i2c_clean_ipd(i2c);
146 i2c_writel(i2c, REG_INT_START, REG_IEN);
147
148 /* enable adapter with correct mode, send START condition */
149 val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
150
151 /* if we want to react to NACK, set ACTACK bit */
152 if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
153 val |= REG_CON_ACTACK;
154
155 i2c_writel(i2c, val, REG_CON);
156}
157
158/**
159 * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
160 *
161 * @error: Error code to return in rk3x_i2c_xfer
162 */
163static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
164{
165 unsigned int ctrl;
166
167 i2c->processed = 0;
168 i2c->msg = NULL;
169 i2c->error = error;
170
171 if (i2c->is_last_msg) {
172 /* Enable stop interrupt */
173 i2c_writel(i2c, REG_INT_STOP, REG_IEN);
174
175 i2c->state = STATE_STOP;
176
177 ctrl = i2c_readl(i2c, REG_CON);
178 ctrl |= REG_CON_STOP;
179 i2c_writel(i2c, ctrl, REG_CON);
180 } else {
181 /* Signal rk3x_i2c_xfer to start the next message. */
182 i2c->busy = false;
183 i2c->state = STATE_IDLE;
184
185 /*
186 * The HW is actually not capable of REPEATED START. But we can
187 * get the intended effect by resetting its internal state
188 * and issuing an ordinary START.
189 */
190 i2c_writel(i2c, 0, REG_CON);
191
192 /* signal that we are finished with the current msg */
193 wake_up(&i2c->wait);
194 }
195}
196
197/**
198 * Setup a read according to i2c->msg
199 */
200static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
201{
202 unsigned int len = i2c->msg->len - i2c->processed;
203 u32 con;
204
205 con = i2c_readl(i2c, REG_CON);
206
207 /*
208 * The hw can read up to 32 bytes at a time. If we need more than one
209 * chunk, send an ACK after the last byte of the current chunk.
210 */
211 if (unlikely(len > 32)) {
212 len = 32;
213 con &= ~REG_CON_LASTACK;
214 } else {
215 con |= REG_CON_LASTACK;
216 }
217
218 /* make sure we are in plain RX mode if we read a second chunk */
219 if (i2c->processed != 0) {
220 con &= ~REG_CON_MOD_MASK;
221 con |= REG_CON_MOD(REG_CON_MOD_RX);
222 }
223
224 i2c_writel(i2c, con, REG_CON);
225 i2c_writel(i2c, len, REG_MRXCNT);
226}
227
228/**
229 * Fill the transmit buffer with data from i2c->msg
230 */
231static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
232{
233 unsigned int i, j;
234 u32 cnt = 0;
235 u32 val;
236 u8 byte;
237
238 for (i = 0; i < 8; ++i) {
239 val = 0;
240 for (j = 0; j < 4; ++j) {
241 if (i2c->processed == i2c->msg->len)
242 break;
243
244 if (i2c->processed == 0 && cnt == 0)
245 byte = (i2c->addr & 0x7f) << 1;
246 else
247 byte = i2c->msg->buf[i2c->processed++];
248
249 val |= byte << (j * 8);
250 cnt++;
251 }
252
253 i2c_writel(i2c, val, TXBUFFER_BASE + 4 * i);
254
255 if (i2c->processed == i2c->msg->len)
256 break;
257 }
258
259 i2c_writel(i2c, cnt, REG_MTXCNT);
260}
261
262
263/* IRQ handlers for individual states */
264
265static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd)
266{
267 if (!(ipd & REG_INT_START)) {
268 rk3x_i2c_stop(i2c, -EIO);
269 dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd);
270 rk3x_i2c_clean_ipd(i2c);
271 return;
272 }
273
274 /* ack interrupt */
275 i2c_writel(i2c, REG_INT_START, REG_IPD);
276
277 /* disable start bit */
278 i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON);
279
280 /* enable appropriate interrupts and transition */
281 if (i2c->mode == REG_CON_MOD_TX) {
282 i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
283 i2c->state = STATE_WRITE;
284 rk3x_i2c_fill_transmit_buf(i2c);
285 } else {
286 /* in any other case, we are going to be reading. */
287 i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
288 i2c->state = STATE_READ;
289 rk3x_i2c_prepare_read(i2c);
290 }
291}
292
293static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
294{
295 if (!(ipd & REG_INT_MBTF)) {
296 rk3x_i2c_stop(i2c, -EIO);
297 dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
298 rk3x_i2c_clean_ipd(i2c);
299 return;
300 }
301
302 /* ack interrupt */
303 i2c_writel(i2c, REG_INT_MBTF, REG_IPD);
304
305 /* are we finished? */
306 if (i2c->processed == i2c->msg->len)
307 rk3x_i2c_stop(i2c, i2c->error);
308 else
309 rk3x_i2c_fill_transmit_buf(i2c);
310}
311
312static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
313{
314 unsigned int i;
315 unsigned int len = i2c->msg->len - i2c->processed;
316 u32 uninitialized_var(val);
317 u8 byte;
318
319 /* we only care for MBRF here. */
320 if (!(ipd & REG_INT_MBRF))
321 return;
322
323 /* ack interrupt */
324 i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
325
326 /* read the data from receive buffer */
327 for (i = 0; i < len; ++i) {
328 if (i % 4 == 0)
329 val = i2c_readl(i2c, RXBUFFER_BASE + (i / 4) * 4);
330
331 byte = (val >> ((i % 4) * 8)) & 0xff;
332 i2c->msg->buf[i2c->processed++] = byte;
333 }
334
335 /* are we finished? */
336 if (i2c->processed == i2c->msg->len)
337 rk3x_i2c_stop(i2c, i2c->error);
338 else
339 rk3x_i2c_prepare_read(i2c);
340}
341
342static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd)
343{
344 unsigned int con;
345
346 if (!(ipd & REG_INT_STOP)) {
347 rk3x_i2c_stop(i2c, -EIO);
348 dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
349 rk3x_i2c_clean_ipd(i2c);
350 return;
351 }
352
353 /* ack interrupt */
354 i2c_writel(i2c, REG_INT_STOP, REG_IPD);
355
356 /* disable STOP bit */
357 con = i2c_readl(i2c, REG_CON);
358 con &= ~REG_CON_STOP;
359 i2c_writel(i2c, con, REG_CON);
360
361 i2c->busy = false;
362 i2c->state = STATE_IDLE;
363
364 /* signal rk3x_i2c_xfer that we are finished */
365 wake_up(&i2c->wait);
366}
367
368static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
369{
370 struct rk3x_i2c *i2c = dev_id;
371 unsigned int ipd;
372
373 spin_lock(&i2c->lock);
374
375 ipd = i2c_readl(i2c, REG_IPD);
376 if (i2c->state == STATE_IDLE) {
377 dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd);
378 rk3x_i2c_clean_ipd(i2c);
379 goto out;
380 }
381
382 dev_dbg(i2c->dev, "IRQ: state %d, ipd: %x\n", i2c->state, ipd);
383
384 /* Clean interrupt bits we don't care about */
385 ipd &= ~(REG_INT_BRF | REG_INT_BTF);
386
387 if (ipd & REG_INT_NAKRCV) {
388 /*
389 * We got a NACK in the last operation. Depending on whether
390 * IGNORE_NAK is set, we have to stop the operation and report
391 * an error.
392 */
393 i2c_writel(i2c, REG_INT_NAKRCV, REG_IPD);
394
395 ipd &= ~REG_INT_NAKRCV;
396
397 if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
398 rk3x_i2c_stop(i2c, -ENXIO);
399 }
400
401 /* is there anything left to handle? */
402 if (unlikely(ipd == 0))
403 goto out;
404
405 switch (i2c->state) {
406 case STATE_START:
407 rk3x_i2c_handle_start(i2c, ipd);
408 break;
409 case STATE_WRITE:
410 rk3x_i2c_handle_write(i2c, ipd);
411 break;
412 case STATE_READ:
413 rk3x_i2c_handle_read(i2c, ipd);
414 break;
415 case STATE_STOP:
416 rk3x_i2c_handle_stop(i2c, ipd);
417 break;
418 case STATE_IDLE:
419 break;
420 }
421
422out:
423 spin_unlock(&i2c->lock);
424 return IRQ_HANDLED;
425}
426
427static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
428{
429 unsigned long i2c_rate = clk_get_rate(i2c->clk);
430 unsigned int div;
431
432 /* SCL rate = (clk rate) / (8 * DIV) */
433 div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
434
435 /* The lower and upper half of the CLKDIV reg describe the length of
436 * SCL low & high periods. */
437 div = DIV_ROUND_UP(div, 2);
438
439 i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
440}
441
442/**
443 * Setup I2C registers for an I2C operation specified by msgs, num.
444 *
445 * Must be called with i2c->lock held.
446 *
447 * @msgs: I2C msgs to process
448 * @num: Number of msgs
449 *
450 * returns: Number of I2C msgs processed or negative in case of error
451 */
452static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
453{
454 u32 addr = (msgs[0].addr & 0x7f) << 1;
455 int ret = 0;
456
457 /*
458 * The I2C adapter can issue a small (len < 4) write packet before
459 * reading. This speeds up SMBus-style register reads.
460 * The MRXADDR/MRXRADDR hold the slave address and the slave register
461 * address in this case.
462 */
463
464 if (num >= 2 && msgs[0].len < 4 &&
465 !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
466 u32 reg_addr = 0;
467 int i;
468
469 dev_dbg(i2c->dev, "Combined write/read from addr 0x%x\n",
470 addr >> 1);
471
472 /* Fill MRXRADDR with the register address(es) */
473 for (i = 0; i < msgs[0].len; ++i) {
474 reg_addr |= msgs[0].buf[i] << (i * 8);
475 reg_addr |= REG_MRXADDR_VALID(i);
476 }
477
478 /* msgs[0] is handled by hw. */
479 i2c->msg = &msgs[1];
480
481 i2c->mode = REG_CON_MOD_REGISTER_TX;
482
483 i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR);
484 i2c_writel(i2c, reg_addr, REG_MRXRADDR);
485
486 ret = 2;
487 } else {
488 /*
489 * We'll have to do it the boring way and process the msgs
490 * one-by-one.
491 */
492
493 if (msgs[0].flags & I2C_M_RD) {
494 addr |= 1; /* set read bit */
495
496 /*
497 * We have to transmit the slave addr first. Use
498 * MOD_REGISTER_TX for that purpose.
499 */
500 i2c->mode = REG_CON_MOD_REGISTER_TX;
501 i2c_writel(i2c, addr | REG_MRXADDR_VALID(0),
502 REG_MRXADDR);
503 i2c_writel(i2c, 0, REG_MRXRADDR);
504 } else {
505 i2c->mode = REG_CON_MOD_TX;
506 }
507
508 i2c->msg = &msgs[0];
509
510 ret = 1;
511 }
512
513 i2c->addr = msgs[0].addr;
514 i2c->busy = true;
515 i2c->state = STATE_START;
516 i2c->processed = 0;
517 i2c->error = 0;
518
519 rk3x_i2c_clean_ipd(i2c);
520
521 return ret;
522}
523
524static int rk3x_i2c_xfer(struct i2c_adapter *adap,
525 struct i2c_msg *msgs, int num)
526{
527 struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
528 unsigned long timeout, flags;
529 int ret = 0;
530 int i;
531
532 spin_lock_irqsave(&i2c->lock, flags);
533
534 clk_enable(i2c->clk);
535
536 /* The clock rate might have changed, so setup the divider again */
537 rk3x_i2c_set_scl_rate(i2c, i2c->scl_frequency);
538
539 i2c->is_last_msg = false;
540
541 /*
542 * Process msgs. We can handle more than one message at once (see
543 * rk3x_i2c_setup()).
544 */
545 for (i = 0; i < num; i += ret) {
546 ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
547
548 if (ret < 0) {
549 dev_err(i2c->dev, "rk3x_i2c_setup() failed\n");
550 break;
551 }
552
553 if (i + ret >= num)
554 i2c->is_last_msg = true;
555
556 spin_unlock_irqrestore(&i2c->lock, flags);
557
558 rk3x_i2c_start(i2c);
559
560 timeout = wait_event_timeout(i2c->wait, !i2c->busy,
561 msecs_to_jiffies(WAIT_TIMEOUT));
562
563 spin_lock_irqsave(&i2c->lock, flags);
564
565 if (timeout == 0) {
566 dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n",
567 i2c_readl(i2c, REG_IPD), i2c->state);
568
569 /* Force a STOP condition without interrupt */
570 i2c_writel(i2c, 0, REG_IEN);
571 i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON);
572
573 i2c->state = STATE_IDLE;
574
575 ret = -ETIMEDOUT;
576 break;
577 }
578
579 if (i2c->error) {
580 ret = i2c->error;
581 break;
582 }
583 }
584
585 clk_disable(i2c->clk);
586 spin_unlock_irqrestore(&i2c->lock, flags);
587
588 return ret;
589}
590
591static u32 rk3x_i2c_func(struct i2c_adapter *adap)
592{
593 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
594}
595
596static const struct i2c_algorithm rk3x_i2c_algorithm = {
597 .master_xfer = rk3x_i2c_xfer,
598 .functionality = rk3x_i2c_func,
599};
600
601static struct rk3x_i2c_soc_data soc_data[3] = {
602 { .grf_offset = 0x154 }, /* rk3066 */
603 { .grf_offset = 0x0a4 }, /* rk3188 */
604 { .grf_offset = -1 }, /* no I2C switching needed */
605};
606
607static const struct of_device_id rk3x_i2c_match[] = {
608 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
609 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
610 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
611 {},
612};
613
614static int rk3x_i2c_probe(struct platform_device *pdev)
615{
616 struct device_node *np = pdev->dev.of_node;
617 const struct of_device_id *match;
618 struct rk3x_i2c *i2c;
619 struct resource *mem;
620 int ret = 0;
621 int bus_nr;
622 u32 value;
623 int irq;
624
625 i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL);
626 if (!i2c)
627 return -ENOMEM;
628
629 match = of_match_node(rk3x_i2c_match, np);
630 i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
631
632 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
633 &i2c->scl_frequency)) {
634 dev_info(&pdev->dev, "using default SCL frequency: %d\n",
635 DEFAULT_SCL_RATE);
636 i2c->scl_frequency = DEFAULT_SCL_RATE;
637 }
638
639 if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
640 dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
641 dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
642 DEFAULT_SCL_RATE);
643 i2c->scl_frequency = DEFAULT_SCL_RATE;
644 }
645
646 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
647 i2c->adap.owner = THIS_MODULE;
648 i2c->adap.algo = &rk3x_i2c_algorithm;
649 i2c->adap.retries = 3;
650 i2c->adap.dev.of_node = np;
651 i2c->adap.algo_data = i2c;
652 i2c->adap.dev.parent = &pdev->dev;
653
654 i2c->dev = &pdev->dev;
655
656 spin_lock_init(&i2c->lock);
657 init_waitqueue_head(&i2c->wait);
658
659 i2c->clk = devm_clk_get(&pdev->dev, NULL);
660 if (IS_ERR(i2c->clk)) {
661 dev_err(&pdev->dev, "cannot get clock\n");
662 return PTR_ERR(i2c->clk);
663 }
664
665 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
666 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
667 if (IS_ERR(i2c->regs))
668 return PTR_ERR(i2c->regs);
669
670 /* Try to set the I2C adapter number from dt */
671 bus_nr = of_alias_get_id(np, "i2c");
672
673 /*
674 * Switch to new interface if the SoC also offers the old one.
675 * The control bit is located in the GRF register space.
676 */
677 if (i2c->soc_data->grf_offset >= 0) {
678 struct regmap *grf;
679
680 grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
681 if (IS_ERR(grf)) {
682 dev_err(&pdev->dev,
683 "rk3x-i2c needs 'rockchip,grf' property\n");
684 return PTR_ERR(grf);
685 }
686
687 if (bus_nr < 0) {
688 dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias");
689 return -EINVAL;
690 }
691
692 /* 27+i: write mask, 11+i: value */
693 value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
694
695 ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
696 if (ret != 0) {
697 dev_err(i2c->dev, "Could not write to GRF: %d\n", ret);
698 return ret;
699 }
700 }
701
702 /* IRQ setup */
703 irq = platform_get_irq(pdev, 0);
704 if (irq < 0) {
705 dev_err(&pdev->dev, "cannot find rk3x IRQ\n");
706 return irq;
707 }
708
709 ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq,
710 0, dev_name(&pdev->dev), i2c);
711 if (ret < 0) {
712 dev_err(&pdev->dev, "cannot request IRQ\n");
713 return ret;
714 }
715
716 platform_set_drvdata(pdev, i2c);
717
718 ret = clk_prepare(i2c->clk);
719 if (ret < 0) {
720 dev_err(&pdev->dev, "Could not prepare clock\n");
721 return ret;
722 }
723
724 ret = i2c_add_adapter(&i2c->adap);
725 if (ret < 0) {
726 dev_err(&pdev->dev, "Could not register adapter\n");
727 goto err_clk;
728 }
729
730 dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
731
732 return 0;
733
734err_clk:
735 clk_unprepare(i2c->clk);
736 return ret;
737}
738
739static int rk3x_i2c_remove(struct platform_device *pdev)
740{
741 struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
742
743 i2c_del_adapter(&i2c->adap);
744 clk_unprepare(i2c->clk);
745
746 return 0;
747}
748
749static struct platform_driver rk3x_i2c_driver = {
750 .probe = rk3x_i2c_probe,
751 .remove = rk3x_i2c_remove,
752 .driver = {
753 .owner = THIS_MODULE,
754 .name = "rk3x-i2c",
755 .of_match_table = rk3x_i2c_match,
756 },
757};
758
759module_platform_driver(rk3x_i2c_driver);
760
761MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver");
762MODULE_AUTHOR("Max Schwarz <max.schwarz@online.de>");
763MODULE_LICENSE("GPL v2");
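rk3x_i2c_set_scl_rate() derives its divider from SCL = clk / (8 * DIV) and then splits DIV evenly across the low and high halves of REG_CLKDIV. A worked example with assumed clock rates:

/* Sketch: divider arithmetic for an assumed 74.25 MHz input clock and 100 kHz SCL. */
unsigned long i2c_rate = 74250000, scl_rate = 100000;
unsigned int div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);	/* 92.8... rounds up to 93 */
div = DIV_ROUND_UP(div, 2);					/* 47 per SCL half-period */
u32 clkdiv = (div << 16) | (div & 0xffff);			/* value written to REG_CLKDIV */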
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
new file mode 100644
index 000000000000..09de4fd12d57
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -0,0 +1,345 @@
1/*
2 * P2WI (Push-Pull Two Wire Interface) bus driver.
3 *
4 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 * The P2WI controller looks like an SMBus controller which only supports byte
 11 * data transfers. However, it differs from the standard SMBus protocol in
 12 * several respects:
 13 * - it supports only one slave device, and thus drops the address field
 14 * - it adds a parity bit every 8 bits of data
 15 * - only one read access is required to read a byte (instead of a write
 16 *   followed by a read access in the standard SMBus protocol)
17 * - there's no Ack bit after each byte transfer
18 *
19 * This means this bus cannot be used to interface with standard SMBus
20 * devices (the only known device to support this interface is the AXP221
21 * PMIC).
22 *
23 */
24#include <linux/clk.h>
25#include <linux/module.h>
26#include <linux/i2c.h>
27#include <linux/io.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/of.h>
31#include <linux/platform_device.h>
32#include <linux/reset.h>
33
34
35/* P2WI registers */
36#define P2WI_CTRL 0x0
37#define P2WI_CCR 0x4
38#define P2WI_INTE 0x8
39#define P2WI_INTS 0xc
40#define P2WI_DADDR0 0x10
41#define P2WI_DADDR1 0x14
42#define P2WI_DLEN 0x18
43#define P2WI_DATA0 0x1c
44#define P2WI_DATA1 0x20
45#define P2WI_LCR 0x24
46#define P2WI_PMCR 0x28
47
48/* CTRL fields */
49#define P2WI_CTRL_START_TRANS BIT(7)
50#define P2WI_CTRL_ABORT_TRANS BIT(6)
51#define P2WI_CTRL_GLOBAL_INT_ENB BIT(1)
52#define P2WI_CTRL_SOFT_RST BIT(0)
53
54/* CLK CTRL fields */
55#define P2WI_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8)
56#define P2WI_CCR_MAX_CLK_DIV 0xff
57#define P2WI_CCR_CLK_DIV(v) ((v) & P2WI_CCR_MAX_CLK_DIV)
58
59/* STATUS fields */
60#define P2WI_INTS_TRANS_ERR_ID(v) (((v) >> 8) & 0xff)
61#define P2WI_INTS_LOAD_BSY BIT(2)
62#define P2WI_INTS_TRANS_ERR BIT(1)
63#define P2WI_INTS_TRANS_OVER BIT(0)
64
 65/* DATA LENGTH fields */
 66#define P2WI_DLEN_READ			BIT(4)
 67#define P2WI_DLEN_DATA_LENGTH(v)	(((v) - 1) & 0x7)
68
 69/* LINE CTRL fields */
70#define P2WI_LCR_SCL_STATE BIT(5)
71#define P2WI_LCR_SDA_STATE BIT(4)
72#define P2WI_LCR_SCL_CTL BIT(3)
73#define P2WI_LCR_SCL_CTL_EN BIT(2)
74#define P2WI_LCR_SDA_CTL BIT(1)
75#define P2WI_LCR_SDA_CTL_EN BIT(0)
76
77/* PMU MODE CTRL fields */
78#define P2WI_PMCR_PMU_INIT_SEND BIT(31)
79#define P2WI_PMCR_PMU_INIT_DATA(v) (((v) & 0xff) << 16)
80#define P2WI_PMCR_PMU_MODE_REG(v) (((v) & 0xff) << 8)
81#define P2WI_PMCR_PMU_DEV_ADDR(v) ((v) & 0xff)
82
83#define P2WI_MAX_FREQ 6000000
84
85struct p2wi {
86 struct i2c_adapter adapter;
87 struct completion complete;
88 unsigned int status;
89 void __iomem *regs;
90 struct clk *clk;
91 struct reset_control *rstc;
92 int slave_addr;
93};
94
95static irqreturn_t p2wi_interrupt(int irq, void *dev_id)
96{
97 struct p2wi *p2wi = dev_id;
98 unsigned long status;
99
100 status = readl(p2wi->regs + P2WI_INTS);
101 p2wi->status = status;
102
103 /* Clear interrupts */
104 status &= (P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR |
105 P2WI_INTS_TRANS_OVER);
106 writel(status, p2wi->regs + P2WI_INTS);
107
108 complete(&p2wi->complete);
109
110 return IRQ_HANDLED;
111}
112
113static u32 p2wi_functionality(struct i2c_adapter *adap)
114{
115 return I2C_FUNC_SMBUS_BYTE_DATA;
116}
117
118static int p2wi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
119 unsigned short flags, char read_write,
120 u8 command, int size, union i2c_smbus_data *data)
121{
122 struct p2wi *p2wi = i2c_get_adapdata(adap);
123 unsigned long dlen = P2WI_DLEN_DATA_LENGTH(1);
124
125 if (p2wi->slave_addr >= 0 && addr != p2wi->slave_addr) {
126 dev_err(&adap->dev, "invalid P2WI address\n");
127 return -EINVAL;
128 }
129
130 if (!data)
131 return -EINVAL;
132
133 writel(command, p2wi->regs + P2WI_DADDR0);
134
135 if (read_write == I2C_SMBUS_READ)
136 dlen |= P2WI_DLEN_READ;
137 else
138 writel(data->byte, p2wi->regs + P2WI_DATA0);
139
140 writel(dlen, p2wi->regs + P2WI_DLEN);
141
142 if (readl(p2wi->regs + P2WI_CTRL) & P2WI_CTRL_START_TRANS) {
143 dev_err(&adap->dev, "P2WI bus busy\n");
144 return -EBUSY;
145 }
146
147 reinit_completion(&p2wi->complete);
148
149 writel(P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER,
150 p2wi->regs + P2WI_INTE);
151
152 writel(P2WI_CTRL_START_TRANS | P2WI_CTRL_GLOBAL_INT_ENB,
153 p2wi->regs + P2WI_CTRL);
154
155 wait_for_completion(&p2wi->complete);
156
157 if (p2wi->status & P2WI_INTS_LOAD_BSY) {
158 dev_err(&adap->dev, "P2WI bus busy\n");
159 return -EBUSY;
160 }
161
162 if (p2wi->status & P2WI_INTS_TRANS_ERR) {
163 dev_err(&adap->dev, "P2WI bus xfer error\n");
164 return -ENXIO;
165 }
166
167 if (read_write == I2C_SMBUS_READ)
168 data->byte = readl(p2wi->regs + P2WI_DATA0);
169
170 return 0;
171}
172
173static const struct i2c_algorithm p2wi_algo = {
174 .smbus_xfer = p2wi_smbus_xfer,
175 .functionality = p2wi_functionality,
176};
177
178static const struct of_device_id p2wi_of_match_table[] = {
179 { .compatible = "allwinner,sun6i-a31-p2wi" },
180 {}
181};
182MODULE_DEVICE_TABLE(of, p2wi_of_match_table);
183
184static int p2wi_probe(struct platform_device *pdev)
185{
186 struct device *dev = &pdev->dev;
187 struct device_node *np = dev->of_node;
188 struct device_node *childnp;
189 unsigned long parent_clk_freq;
190 u32 clk_freq = 100000;
191 struct resource *r;
192 struct p2wi *p2wi;
193 u32 slave_addr;
194 int clk_div;
195 int irq;
196 int ret;
197
198 of_property_read_u32(np, "clock-frequency", &clk_freq);
199 if (clk_freq > P2WI_MAX_FREQ) {
200 dev_err(dev,
 201			"requested clock-frequency (%u Hz) is too high (max = 6 MHz)\n",
202 clk_freq);
203 return -EINVAL;
204 }
205
206 if (of_get_child_count(np) > 1) {
207 dev_err(dev, "P2WI only supports one slave device\n");
208 return -EINVAL;
209 }
210
211 p2wi = devm_kzalloc(dev, sizeof(struct p2wi), GFP_KERNEL);
212 if (!p2wi)
213 return -ENOMEM;
214
215 p2wi->slave_addr = -1;
216
217 /*
 218	 * Allow a P2WI node without any children so that the bus can still be
 219	 * used from userspace through i2c-dev.
220 * In this case the slave_addr is set to -1 and won't be checked when
221 * launching a P2WI transfer.
222 */
223 childnp = of_get_next_available_child(np, NULL);
224 if (childnp) {
225 ret = of_property_read_u32(childnp, "reg", &slave_addr);
226 if (ret) {
227 dev_err(dev, "invalid slave address on node %s\n",
228 childnp->full_name);
229 return -EINVAL;
230 }
231
232 p2wi->slave_addr = slave_addr;
233 }
234
235 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
236 p2wi->regs = devm_ioremap_resource(dev, r);
237 if (IS_ERR(p2wi->regs))
238 return PTR_ERR(p2wi->regs);
239
240 strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
241 irq = platform_get_irq(pdev, 0);
242 if (irq < 0) {
243 dev_err(dev, "failed to retrieve irq: %d\n", irq);
244 return irq;
245 }
246
247 p2wi->clk = devm_clk_get(dev, NULL);
248 if (IS_ERR(p2wi->clk)) {
249 ret = PTR_ERR(p2wi->clk);
250 dev_err(dev, "failed to retrieve clk: %d\n", ret);
251 return ret;
252 }
253
254 ret = clk_prepare_enable(p2wi->clk);
255 if (ret) {
256 dev_err(dev, "failed to enable clk: %d\n", ret);
257 return ret;
258 }
259
260 parent_clk_freq = clk_get_rate(p2wi->clk);
261
262 p2wi->rstc = devm_reset_control_get(dev, NULL);
263 if (IS_ERR(p2wi->rstc)) {
264 ret = PTR_ERR(p2wi->rstc);
265 dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
266 goto err_clk_disable;
267 }
268
269 ret = reset_control_deassert(p2wi->rstc);
270 if (ret) {
271 dev_err(dev, "failed to deassert reset line: %d\n", ret);
272 goto err_clk_disable;
273 }
274
275 init_completion(&p2wi->complete);
276 p2wi->adapter.dev.parent = dev;
277 p2wi->adapter.algo = &p2wi_algo;
278 p2wi->adapter.owner = THIS_MODULE;
279 p2wi->adapter.dev.of_node = pdev->dev.of_node;
280 platform_set_drvdata(pdev, p2wi);
281 i2c_set_adapdata(&p2wi->adapter, p2wi);
282
283 ret = devm_request_irq(dev, irq, p2wi_interrupt, 0, pdev->name, p2wi);
284 if (ret) {
285 dev_err(dev, "can't register interrupt handler irq%d: %d\n",
286 irq, ret);
287 goto err_reset_assert;
288 }
289
290 writel(P2WI_CTRL_SOFT_RST, p2wi->regs + P2WI_CTRL);
291
292 clk_div = parent_clk_freq / clk_freq;
293 if (!clk_div) {
294 dev_warn(dev,
295 "clock-frequency is too high, setting it to %lu Hz\n",
296 parent_clk_freq);
297 clk_div = 1;
298 } else if (clk_div > P2WI_CCR_MAX_CLK_DIV) {
299 dev_warn(dev,
300 "clock-frequency is too low, setting it to %lu Hz\n",
301 parent_clk_freq / P2WI_CCR_MAX_CLK_DIV);
302 clk_div = P2WI_CCR_MAX_CLK_DIV;
303 }
304
305 writel(P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div),
306 p2wi->regs + P2WI_CCR);
307
308 ret = i2c_add_adapter(&p2wi->adapter);
309 if (!ret)
310 return 0;
311
312err_reset_assert:
313 reset_control_assert(p2wi->rstc);
314
315err_clk_disable:
316 clk_disable_unprepare(p2wi->clk);
317
318 return ret;
319}
320
321static int p2wi_remove(struct platform_device *dev)
322{
323 struct p2wi *p2wi = platform_get_drvdata(dev);
324
325 reset_control_assert(p2wi->rstc);
326 clk_disable_unprepare(p2wi->clk);
327 i2c_del_adapter(&p2wi->adapter);
328
329 return 0;
330}
331
332static struct platform_driver p2wi_driver = {
333 .probe = p2wi_probe,
334 .remove = p2wi_remove,
335 .driver = {
336 .owner = THIS_MODULE,
337 .name = "i2c-sunxi-p2wi",
338 .of_match_table = p2wi_of_match_table,
339 },
340};
341module_platform_driver(p2wi_driver);
342
343MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
344MODULE_DESCRIPTION("Allwinner P2WI driver");
345MODULE_LICENSE("GPL v2");
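Since the adapter only advertises I2C_FUNC_SMBUS_BYTE_DATA, a client driver sitting on this bus talks to the single slave through the standard SMBus byte-data helpers. A minimal usage sketch, not part of the patch; the register offset 0x03 and the function name are made up for illustration and do not correspond to a documented AXP221 register:

	#include <linux/i2c.h>

	/* Illustrative only: read a byte, set bit 0, and write it back. */
	static int p2wi_client_example(struct i2c_client *client)
	{
		s32 val;

		val = i2c_smbus_read_byte_data(client, 0x03);
		if (val < 0)
			return val;

		return i2c_smbus_write_byte_data(client, 0x03, val | 0x01);
	}
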
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 3b5bacd4d8da..2b6a9ce9927c 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -510,12 +510,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
510 return idev->num_channels; 510 return idev->num_channels;
511} 511}
512 512
513static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev, 513static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
514 struct at91_adc_trigger *triggers, 514 struct at91_adc_trigger *triggers,
515 const char *trigger_name) 515 const char *trigger_name)
516{ 516{
517 struct at91_adc_state *st = iio_priv(idev); 517 struct at91_adc_state *st = iio_priv(idev);
518 u8 value = 0;
519 int i; 518 int i;
520 519
521 for (i = 0; i < st->trigger_number; i++) { 520 for (i = 0; i < st->trigger_number; i++) {
@@ -528,15 +527,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
528 return -ENOMEM; 527 return -ENOMEM;
529 528
530 if (strcmp(trigger_name, name) == 0) { 529 if (strcmp(trigger_name, name) == 0) {
531 value = triggers[i].value;
532 kfree(name); 530 kfree(name);
533 break; 531 if (triggers[i].value == 0)
532 return -EINVAL;
533 return triggers[i].value;
534 } 534 }
535 535
536 kfree(name); 536 kfree(name);
537 } 537 }
538 538
539 return value; 539 return -EINVAL;
540} 540}
541 541
542static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) 542static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
@@ -546,14 +546,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
546 struct iio_buffer *buffer = idev->buffer; 546 struct iio_buffer *buffer = idev->buffer;
547 struct at91_adc_reg_desc *reg = st->registers; 547 struct at91_adc_reg_desc *reg = st->registers;
548 u32 status = at91_adc_readl(st, reg->trigger_register); 548 u32 status = at91_adc_readl(st, reg->trigger_register);
549 u8 value; 549 int value;
550 u8 bit; 550 u8 bit;
551 551
552 value = at91_adc_get_trigger_value_by_name(idev, 552 value = at91_adc_get_trigger_value_by_name(idev,
553 st->trigger_list, 553 st->trigger_list,
554 idev->trig->name); 554 idev->trig->name);
555 if (value == 0) 555 if (value < 0)
556 return -EINVAL; 556 return value;
557 557
558 if (state) { 558 if (state) {
559 st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL); 559 st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 6989c16aec2b..b58d6302521f 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
121 indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels); 121 indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
122 122
123 mem = mcb_request_mem(dev, "z188-adc"); 123 mem = mcb_request_mem(dev, "z188-adc");
124 if (!mem) 124 if (IS_ERR(mem))
125 return -ENOMEM; 125 return PTR_ERR(mem);
126 126
127 adc->base = ioremap(mem->start, resource_size(mem)); 127 adc->base = ioremap(mem->start, resource_size(mem));
128 if (adc->base == NULL) 128 if (adc->base == NULL)
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 7de1c4c87942..eb86786e698e 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -645,6 +645,7 @@ int twl4030_get_madc_conversion(int channel_no)
645 req.channels = (1 << channel_no); 645 req.channels = (1 << channel_no);
646 req.method = TWL4030_MADC_SW2; 646 req.method = TWL4030_MADC_SW2;
647 req.active = 0; 647 req.active = 0;
648 req.raw = 0;
648 req.func_cb = NULL; 649 req.func_cb = NULL;
649 ret = twl4030_madc_conversion(&req); 650 ret = twl4030_madc_conversion(&req);
650 if (ret < 0) 651 if (ret < 0)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 73282cee0c81..a3109a6f4d86 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -75,6 +75,9 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
75 (s32)report_val); 75 (s32)report_val);
76 } 76 }
77 77
78 sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
79 st->power_state.index,
80 &state_val);
78 return 0; 81 return 0;
79} 82}
80EXPORT_SYMBOL(hid_sensor_power_state); 83EXPORT_SYMBOL(hid_sensor_power_state);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 09ea5c481f4c..ea08313af0d2 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -373,8 +373,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
373{ 373{
374 struct ak8975_data *data = iio_priv(indio_dev); 374 struct ak8975_data *data = iio_priv(indio_dev);
375 struct i2c_client *client = data->client; 375 struct i2c_client *client = data->client;
376 u16 meas_reg;
377 s16 raw;
378 int ret; 376 int ret;
379 377
380 mutex_lock(&data->lock); 378 mutex_lock(&data->lock);
@@ -422,16 +420,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
422 dev_err(&client->dev, "Read axis data fails\n"); 420 dev_err(&client->dev, "Read axis data fails\n");
423 goto exit; 421 goto exit;
424 } 422 }
425 meas_reg = ret;
426 423
427 mutex_unlock(&data->lock); 424 mutex_unlock(&data->lock);
428 425
429 /* Endian conversion of the measured values. */
430 raw = (s16) (le16_to_cpu(meas_reg));
431
432 /* Clamp to valid range. */ 426 /* Clamp to valid range. */
433 raw = clamp_t(s16, raw, -4096, 4095); 427 *val = clamp_t(s16, ret, -4096, 4095);
434 *val = raw;
435 return IIO_VAL_INT; 428 return IIO_VAL_INT;
436 429
437exit: 430exit:
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index ba6d0c520e63..01b2e0b18878 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
98 mutex_unlock(&data->lock); 98 mutex_unlock(&data->lock);
99 if (ret < 0) 99 if (ret < 0)
100 return ret; 100 return ret;
101 *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23); 101 *val = be32_to_cpu(tmp) >> 12;
102 return IIO_VAL_INT; 102 return IIO_VAL_INT;
103 case IIO_TEMP: /* in 0.0625 celsius / LSB */ 103 case IIO_TEMP: /* in 0.0625 celsius / LSB */
104 mutex_lock(&data->lock); 104 mutex_lock(&data->lock);
@@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
112 mutex_unlock(&data->lock); 112 mutex_unlock(&data->lock);
113 if (ret < 0) 113 if (ret < 0)
114 return ret; 114 return ret;
115 *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15); 115 *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
116 return IIO_VAL_INT; 116 return IIO_VAL_INT;
117 default: 117 default:
118 return -EINVAL; 118 return -EINVAL;
@@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
185 BIT(IIO_CHAN_INFO_SCALE), 185 BIT(IIO_CHAN_INFO_SCALE),
186 .scan_index = 0, 186 .scan_index = 0,
187 .scan_type = { 187 .scan_type = {
188 .sign = 's', 188 .sign = 'u',
189 .realbits = 20, 189 .realbits = 20,
190 .storagebits = 32, 190 .storagebits = 32,
191 .shift = 12, 191 .shift = 12,
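The sign-handling changes above follow from how sign_extend32() is defined: its second argument is the 0-based index of the sign bit, so the 12-bit temperature field uses index 11, while the 20-bit pressure value, which this patch treats as unsigned, needs no extension at all. A small sketch of the resulting extraction, not part of the patch; the helper name is hypothetical:

	#include <linux/types.h>
	#include <linux/bitops.h>

	/* Illustrative helper: extract the signed 12-bit temperature held in bits [31:20]. */
	static inline int mpl3115_temp_from_reg(u32 reg)
	{
		return sign_extend32(reg >> 20, 11);
	}
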
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 20f1655e6d75..8108c698b548 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,9 @@ config VIDEO_M32R_AR_M64278
93 93
94config VIDEO_OMAP3 94config VIDEO_OMAP3
95 tristate "OMAP 3 Camera support" 95 tristate "OMAP 3 Camera support"
96 depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 96 depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
97 select ARM_DMA_USE_IOMMU
98 select OMAP_IOMMU
97 ---help--- 99 ---help---
98 Driver for an OMAP 3 camera controller. 100 Driver for an OMAP 3 camera controller.
99 101
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
index e8847e79e31a..254975a9174e 100644
--- a/drivers/media/platform/omap3isp/Makefile
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -3,7 +3,7 @@
3ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG 3ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
4 4
5omap3-isp-objs += \ 5omap3-isp-objs += \
6 isp.o ispqueue.o ispvideo.o \ 6 isp.o ispvideo.o \
7 ispcsiphy.o ispccp2.o ispcsi2.o \ 7 ispcsiphy.o ispccp2.o ispcsi2.o \
8 ispccdc.o isppreview.o ispresizer.o \ 8 ispccdc.o isppreview.o ispresizer.o \
9 ispstat.o isph3a_aewb.o isph3a_af.o isphist.o 9 ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 06a0df434249..2c7aa6720569 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -69,6 +69,8 @@
69#include <linux/sched.h> 69#include <linux/sched.h>
70#include <linux/vmalloc.h> 70#include <linux/vmalloc.h>
71 71
72#include <asm/dma-iommu.h>
73
72#include <media/v4l2-common.h> 74#include <media/v4l2-common.h>
73#include <media/v4l2-device.h> 75#include <media/v4l2-device.h>
74 76
@@ -1397,14 +1399,14 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
1397 if (isp_pipeline_is_last(me)) { 1399 if (isp_pipeline_is_last(me)) {
1398 struct isp_video *video = pipe->output; 1400 struct isp_video *video = pipe->output;
1399 unsigned long flags; 1401 unsigned long flags;
1400 spin_lock_irqsave(&video->queue->irqlock, flags); 1402 spin_lock_irqsave(&video->irqlock, flags);
1401 if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) { 1403 if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
1402 spin_unlock_irqrestore(&video->queue->irqlock, flags); 1404 spin_unlock_irqrestore(&video->irqlock, flags);
1403 atomic_set(stopping, 0); 1405 atomic_set(stopping, 0);
1404 smp_mb(); 1406 smp_mb();
1405 return 0; 1407 return 0;
1406 } 1408 }
1407 spin_unlock_irqrestore(&video->queue->irqlock, flags); 1409 spin_unlock_irqrestore(&video->irqlock, flags);
1408 if (!wait_event_timeout(*wait, !atomic_read(stopping), 1410 if (!wait_event_timeout(*wait, !atomic_read(stopping),
1409 msecs_to_jiffies(1000))) { 1411 msecs_to_jiffies(1000))) {
1410 atomic_set(stopping, 0); 1412 atomic_set(stopping, 0);
@@ -1625,7 +1627,7 @@ struct isp_device *omap3isp_get(struct isp_device *isp)
1625 * Decrement the reference count on the ISP. If the last reference is released, 1627 * Decrement the reference count on the ISP. If the last reference is released,
1626 * power-down all submodules, disable clocks and free temporary buffers. 1628 * power-down all submodules, disable clocks and free temporary buffers.
1627 */ 1629 */
1628void omap3isp_put(struct isp_device *isp) 1630static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
1629{ 1631{
1630 if (isp == NULL) 1632 if (isp == NULL)
1631 return; 1633 return;
@@ -1634,7 +1636,7 @@ void omap3isp_put(struct isp_device *isp)
1634 BUG_ON(isp->ref_count == 0); 1636 BUG_ON(isp->ref_count == 0);
1635 if (--isp->ref_count == 0) { 1637 if (--isp->ref_count == 0) {
1636 isp_disable_interrupts(isp); 1638 isp_disable_interrupts(isp);
1637 if (isp->domain) { 1639 if (save_ctx) {
1638 isp_save_ctx(isp); 1640 isp_save_ctx(isp);
1639 isp->has_context = 1; 1641 isp->has_context = 1;
1640 } 1642 }
@@ -1648,6 +1650,11 @@ void omap3isp_put(struct isp_device *isp)
1648 mutex_unlock(&isp->isp_mutex); 1650 mutex_unlock(&isp->isp_mutex);
1649} 1651}
1650 1652
1653void omap3isp_put(struct isp_device *isp)
1654{
1655 __omap3isp_put(isp, true);
1656}
1657
1651/* -------------------------------------------------------------------------- 1658/* --------------------------------------------------------------------------
1652 * Platform device driver 1659 * Platform device driver
1653 */ 1660 */
@@ -2120,6 +2127,61 @@ error_csiphy:
2120 return ret; 2127 return ret;
2121} 2128}
2122 2129
2130static void isp_detach_iommu(struct isp_device *isp)
2131{
2132 arm_iommu_release_mapping(isp->mapping);
2133 isp->mapping = NULL;
2134 iommu_group_remove_device(isp->dev);
2135}
2136
2137static int isp_attach_iommu(struct isp_device *isp)
2138{
2139 struct dma_iommu_mapping *mapping;
2140 struct iommu_group *group;
2141 int ret;
2142
2143 /* Create a device group and add the device to it. */
2144 group = iommu_group_alloc();
2145 if (IS_ERR(group)) {
2146 dev_err(isp->dev, "failed to allocate IOMMU group\n");
2147 return PTR_ERR(group);
2148 }
2149
2150 ret = iommu_group_add_device(group, isp->dev);
2151 iommu_group_put(group);
2152
2153 if (ret < 0) {
 2154		dev_err(isp->dev, "failed to add device to IOMMU group\n");
2155 return ret;
2156 }
2157
2158 /*
2159 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
2160 * VAs. This will allocate a corresponding IOMMU domain.
2161 */
2162 mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
2163 if (IS_ERR(mapping)) {
2164 dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
2165 ret = PTR_ERR(mapping);
2166 goto error;
2167 }
2168
2169 isp->mapping = mapping;
2170
2171 /* Attach the ARM VA mapping to the device. */
2172 ret = arm_iommu_attach_device(isp->dev, mapping);
2173 if (ret < 0) {
2174 dev_err(isp->dev, "failed to attach device to VA mapping\n");
2175 goto error;
2176 }
2177
2178 return 0;
2179
2180error:
2181 isp_detach_iommu(isp);
2182 return ret;
2183}
2184
2123/* 2185/*
2124 * isp_remove - Remove ISP platform device 2186 * isp_remove - Remove ISP platform device
2125 * @pdev: Pointer to ISP platform device 2187 * @pdev: Pointer to ISP platform device
@@ -2135,10 +2197,8 @@ static int isp_remove(struct platform_device *pdev)
2135 isp_xclk_cleanup(isp); 2197 isp_xclk_cleanup(isp);
2136 2198
2137 __omap3isp_get(isp, false); 2199 __omap3isp_get(isp, false);
2138 iommu_detach_device(isp->domain, &pdev->dev); 2200 isp_detach_iommu(isp);
2139 iommu_domain_free(isp->domain); 2201 __omap3isp_put(isp, false);
2140 isp->domain = NULL;
2141 omap3isp_put(isp);
2142 2202
2143 return 0; 2203 return 0;
2144} 2204}
@@ -2265,39 +2325,32 @@ static int isp_probe(struct platform_device *pdev)
2265 } 2325 }
2266 } 2326 }
2267 2327
2268 isp->domain = iommu_domain_alloc(pdev->dev.bus); 2328 /* IOMMU */
2269 if (!isp->domain) { 2329 ret = isp_attach_iommu(isp);
2270 dev_err(isp->dev, "can't alloc iommu domain\n"); 2330 if (ret < 0) {
2271 ret = -ENOMEM; 2331 dev_err(&pdev->dev, "unable to attach to IOMMU\n");
2272 goto error_isp; 2332 goto error_isp;
2273 } 2333 }
2274 2334
2275 ret = iommu_attach_device(isp->domain, &pdev->dev);
2276 if (ret) {
2277 dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
2278 ret = -EPROBE_DEFER;
2279 goto free_domain;
2280 }
2281
2282 /* Interrupt */ 2335 /* Interrupt */
2283 isp->irq_num = platform_get_irq(pdev, 0); 2336 isp->irq_num = platform_get_irq(pdev, 0);
2284 if (isp->irq_num <= 0) { 2337 if (isp->irq_num <= 0) {
2285 dev_err(isp->dev, "No IRQ resource\n"); 2338 dev_err(isp->dev, "No IRQ resource\n");
2286 ret = -ENODEV; 2339 ret = -ENODEV;
2287 goto detach_dev; 2340 goto error_iommu;
2288 } 2341 }
2289 2342
2290 if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED, 2343 if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
2291 "OMAP3 ISP", isp)) { 2344 "OMAP3 ISP", isp)) {
2292 dev_err(isp->dev, "Unable to request IRQ\n"); 2345 dev_err(isp->dev, "Unable to request IRQ\n");
2293 ret = -EINVAL; 2346 ret = -EINVAL;
2294 goto detach_dev; 2347 goto error_iommu;
2295 } 2348 }
2296 2349
2297 /* Entities */ 2350 /* Entities */
2298 ret = isp_initialize_modules(isp); 2351 ret = isp_initialize_modules(isp);
2299 if (ret < 0) 2352 if (ret < 0)
2300 goto detach_dev; 2353 goto error_iommu;
2301 2354
2302 ret = isp_register_entities(isp); 2355 ret = isp_register_entities(isp);
2303 if (ret < 0) 2356 if (ret < 0)
@@ -2310,14 +2363,11 @@ static int isp_probe(struct platform_device *pdev)
2310 2363
2311error_modules: 2364error_modules:
2312 isp_cleanup_modules(isp); 2365 isp_cleanup_modules(isp);
2313detach_dev: 2366error_iommu:
2314 iommu_detach_device(isp->domain, &pdev->dev); 2367 isp_detach_iommu(isp);
2315free_domain:
2316 iommu_domain_free(isp->domain);
2317 isp->domain = NULL;
2318error_isp: 2368error_isp:
2319 isp_xclk_cleanup(isp); 2369 isp_xclk_cleanup(isp);
2320 omap3isp_put(isp); 2370 __omap3isp_put(isp, false);
2321error: 2371error:
2322 mutex_destroy(&isp->isp_mutex); 2372 mutex_destroy(&isp->isp_mutex);
2323 2373
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 6d5e69711907..2c314eea1252 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -45,8 +45,6 @@
45#include "ispcsi2.h" 45#include "ispcsi2.h"
46#include "ispccp2.h" 46#include "ispccp2.h"
47 47
48#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
49
50#define ISP_TOK_TERM 0xFFFFFFFF /* 48#define ISP_TOK_TERM 0xFFFFFFFF /*
51 * terminating token for ISP 49 * terminating token for ISP
52 * modules reg list 50 * modules reg list
@@ -152,6 +150,7 @@ struct isp_xclk {
152 * regions. 150 * regions.
153 * @mmio_base_phys: Array with physical L4 bus addresses for ISP register 151 * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
154 * regions. 152 * regions.
153 * @mapping: IOMMU mapping
155 * @stat_lock: Spinlock for handling statistics 154 * @stat_lock: Spinlock for handling statistics
156 * @isp_mutex: Mutex for serializing requests to ISP. 155 * @isp_mutex: Mutex for serializing requests to ISP.
157 * @stop_failure: Indicates that an entity failed to stop. 156 * @stop_failure: Indicates that an entity failed to stop.
@@ -171,7 +170,6 @@ struct isp_xclk {
171 * @isp_res: Pointer to current settings for ISP Resizer. 170 * @isp_res: Pointer to current settings for ISP Resizer.
172 * @isp_prev: Pointer to current settings for ISP Preview. 171 * @isp_prev: Pointer to current settings for ISP Preview.
173 * @isp_ccdc: Pointer to current settings for ISP CCDC. 172 * @isp_ccdc: Pointer to current settings for ISP CCDC.
174 * @iommu: Pointer to requested IOMMU instance for ISP.
175 * @platform_cb: ISP driver callback function pointers for platform code 173 * @platform_cb: ISP driver callback function pointers for platform code
176 * 174 *
177 * This structure is used to store the OMAP ISP Information. 175 * This structure is used to store the OMAP ISP Information.
@@ -189,6 +187,8 @@ struct isp_device {
189 void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST]; 187 void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
190 unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST]; 188 unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
191 189
190 struct dma_iommu_mapping *mapping;
191
192 /* ISP Obj */ 192 /* ISP Obj */
193 spinlock_t stat_lock; /* common lock for statistic drivers */ 193 spinlock_t stat_lock; /* common lock for statistic drivers */
194 struct mutex isp_mutex; /* For handling ref_count field */ 194 struct mutex isp_mutex; /* For handling ref_count field */
@@ -219,8 +219,6 @@ struct isp_device {
219 219
220 unsigned int sbl_resources; 220 unsigned int sbl_resources;
221 unsigned int subclk_resources; 221 unsigned int subclk_resources;
222
223 struct iommu_domain *domain;
224}; 222};
225 223
226#define v4l2_dev_to_isp_device(dev) \ 224#define v4l2_dev_to_isp_device(dev) \
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 4d920c800ff5..9f727d20f06d 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -30,7 +30,6 @@
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/omap-iommu.h>
34#include <linux/sched.h> 33#include <linux/sched.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <media/v4l2-event.h> 35#include <media/v4l2-event.h>
@@ -206,7 +205,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
206 * ccdc_lsc_program_table - Program Lens Shading Compensation table address. 205 * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
207 * @ccdc: Pointer to ISP CCDC device. 206 * @ccdc: Pointer to ISP CCDC device.
208 */ 207 */
209static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr) 208static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
209 dma_addr_t addr)
210{ 210{
211 isp_reg_writel(to_isp_device(ccdc), addr, 211 isp_reg_writel(to_isp_device(ccdc), addr,
212 OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); 212 OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
@@ -333,7 +333,7 @@ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
333 return -EBUSY; 333 return -EBUSY;
334 334
335 ccdc_lsc_setup_regs(ccdc, &req->config); 335 ccdc_lsc_setup_regs(ccdc, &req->config);
336 ccdc_lsc_program_table(ccdc, req->table); 336 ccdc_lsc_program_table(ccdc, req->table.dma);
337 return 0; 337 return 0;
338} 338}
339 339
@@ -368,11 +368,12 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
368 if (req == NULL) 368 if (req == NULL)
369 return; 369 return;
370 370
371 if (req->iovm) 371 if (req->table.addr) {
372 dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, 372 sg_free_table(&req->table.sgt);
373 req->iovm->sgt->nents, DMA_TO_DEVICE); 373 dma_free_coherent(isp->dev, req->config.size, req->table.addr,
374 if (req->table) 374 req->table.dma);
375 omap_iommu_vfree(isp->domain, isp->dev, req->table); 375 }
376
376 kfree(req); 377 kfree(req);
377} 378}
378 379
@@ -416,7 +417,6 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
416 struct isp_device *isp = to_isp_device(ccdc); 417 struct isp_device *isp = to_isp_device(ccdc);
417 struct ispccdc_lsc_config_req *req; 418 struct ispccdc_lsc_config_req *req;
418 unsigned long flags; 419 unsigned long flags;
419 void *table;
420 u16 update; 420 u16 update;
421 int ret; 421 int ret;
422 422
@@ -444,38 +444,31 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
444 444
445 req->enable = 1; 445 req->enable = 1;
446 446
447 req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0, 447 req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
448 req->config.size, IOMMU_FLAG); 448 &req->table.dma,
449 if (IS_ERR_VALUE(req->table)) { 449 GFP_KERNEL);
450 req->table = 0; 450 if (req->table.addr == NULL) {
451 ret = -ENOMEM;
452 goto done;
453 }
454
455 req->iovm = omap_find_iovm_area(isp->dev, req->table);
456 if (req->iovm == NULL) {
457 ret = -ENOMEM; 451 ret = -ENOMEM;
458 goto done; 452 goto done;
459 } 453 }
460 454
461 if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl, 455 ret = dma_get_sgtable(isp->dev, &req->table.sgt,
462 req->iovm->sgt->nents, DMA_TO_DEVICE)) { 456 req->table.addr, req->table.dma,
463 ret = -ENOMEM; 457 req->config.size);
464 req->iovm = NULL; 458 if (ret < 0)
465 goto done; 459 goto done;
466 }
467 460
468 dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, 461 dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
469 req->iovm->sgt->nents, DMA_TO_DEVICE); 462 req->table.sgt.nents, DMA_TO_DEVICE);
470 463
471 table = omap_da_to_va(isp->dev, req->table); 464 if (copy_from_user(req->table.addr, config->lsc,
472 if (copy_from_user(table, config->lsc, req->config.size)) { 465 req->config.size)) {
473 ret = -EFAULT; 466 ret = -EFAULT;
474 goto done; 467 goto done;
475 } 468 }
476 469
477 dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl, 470 dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
478 req->iovm->sgt->nents, DMA_TO_DEVICE); 471 req->table.sgt.nents, DMA_TO_DEVICE);
479 } 472 }
480 473
481 spin_lock_irqsave(&ccdc->lsc.req_lock, flags); 474 spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
@@ -584,7 +577,7 @@ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
584 if (!ccdc->fpc_en) 577 if (!ccdc->fpc_en)
585 return; 578 return;
586 579
587 isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC, 580 isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
588 ISPCCDC_FPC_ADDR); 581 ISPCCDC_FPC_ADDR);
589 /* The FPNUM field must be set before enabling FPC. */ 582 /* The FPNUM field must be set before enabling FPC. */
590 isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), 583 isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
@@ -724,8 +717,9 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
724 ccdc->shadow_update = 0; 717 ccdc->shadow_update = 0;
725 718
726 if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) { 719 if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
727 u32 table_old = 0; 720 struct omap3isp_ccdc_fpc fpc;
728 u32 table_new; 721 struct ispccdc_fpc fpc_old = { .addr = NULL, };
722 struct ispccdc_fpc fpc_new;
729 u32 size; 723 u32 size;
730 724
731 if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED) 725 if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
@@ -734,35 +728,39 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
734 ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag); 728 ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
735 729
736 if (ccdc->fpc_en) { 730 if (ccdc->fpc_en) {
737 if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc, 731 if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
738 sizeof(ccdc->fpc)))
739 return -EFAULT; 732 return -EFAULT;
740 733
734 size = fpc.fpnum * 4;
735
741 /* 736 /*
 742			 * table_new must be 64-byte aligned, but it's	 737			 * The table address must be 64-byte aligned, which is
743 * already done by omap_iommu_vmalloc(). 738 * guaranteed by dma_alloc_coherent().
744 */ 739 */
745 size = ccdc->fpc.fpnum * 4; 740 fpc_new.fpnum = fpc.fpnum;
746 table_new = omap_iommu_vmalloc(isp->domain, isp->dev, 741 fpc_new.addr = dma_alloc_coherent(isp->dev, size,
747 0, size, IOMMU_FLAG); 742 &fpc_new.dma,
748 if (IS_ERR_VALUE(table_new)) 743 GFP_KERNEL);
744 if (fpc_new.addr == NULL)
749 return -ENOMEM; 745 return -ENOMEM;
750 746
751 if (copy_from_user(omap_da_to_va(isp->dev, table_new), 747 if (copy_from_user(fpc_new.addr,
752 (__force void __user *) 748 (__force void __user *)fpc.fpcaddr,
753 ccdc->fpc.fpcaddr, size)) { 749 size)) {
754 omap_iommu_vfree(isp->domain, isp->dev, 750 dma_free_coherent(isp->dev, size, fpc_new.addr,
755 table_new); 751 fpc_new.dma);
756 return -EFAULT; 752 return -EFAULT;
757 } 753 }
758 754
759 table_old = ccdc->fpc.fpcaddr; 755 fpc_old = ccdc->fpc;
760 ccdc->fpc.fpcaddr = table_new; 756 ccdc->fpc = fpc_new;
761 } 757 }
762 758
763 ccdc_configure_fpc(ccdc); 759 ccdc_configure_fpc(ccdc);
764 if (table_old != 0) 760
765 omap_iommu_vfree(isp->domain, isp->dev, table_old); 761 if (fpc_old.addr != NULL)
762 dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
763 fpc_old.addr, fpc_old.dma);
766 } 764 }
767 765
768 return ccdc_lsc_config(ccdc, ccdc_struct); 766 return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1523,7 +1521,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
1523 1521
1524 buffer = omap3isp_video_buffer_next(&ccdc->video_out); 1522 buffer = omap3isp_video_buffer_next(&ccdc->video_out);
1525 if (buffer != NULL) { 1523 if (buffer != NULL) {
1526 ccdc_set_outaddr(ccdc, buffer->isp_addr); 1524 ccdc_set_outaddr(ccdc, buffer->dma);
1527 restart = 1; 1525 restart = 1;
1528 } 1526 }
1529 1527
@@ -1662,7 +1660,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
1662 if (!(ccdc->output & CCDC_OUTPUT_MEMORY)) 1660 if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
1663 return -ENODEV; 1661 return -ENODEV;
1664 1662
1665 ccdc_set_outaddr(ccdc, buffer->isp_addr); 1663 ccdc_set_outaddr(ccdc, buffer->dma);
1666 1664
1667 /* We now have a buffer queued on the output, restart the pipeline 1665 /* We now have a buffer queued on the output, restart the pipeline
1668 * on the next CCDC interrupt if running in continuous mode (or when 1666 * on the next CCDC interrupt if running in continuous mode (or when
@@ -2580,8 +2578,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
2580 cancel_work_sync(&ccdc->lsc.table_work); 2578 cancel_work_sync(&ccdc->lsc.table_work);
2581 ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); 2579 ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
2582 2580
2583 if (ccdc->fpc.fpcaddr != 0) 2581 if (ccdc->fpc.addr != NULL)
2584 omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr); 2582 dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
2583 ccdc->fpc.dma);
2585 2584
2586 mutex_destroy(&ccdc->ioctl_lock); 2585 mutex_destroy(&ccdc->ioctl_lock);
2587} 2586}
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
index 9d24e4107864..f65061602c71 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -46,6 +46,12 @@ enum ccdc_input_entity {
46 46
47#define OMAP3ISP_CCDC_NEVENTS 16 47#define OMAP3ISP_CCDC_NEVENTS 16
48 48
49struct ispccdc_fpc {
50 void *addr;
51 dma_addr_t dma;
52 unsigned int fpnum;
53};
54
49enum ispccdc_lsc_state { 55enum ispccdc_lsc_state {
50 LSC_STATE_STOPPED = 0, 56 LSC_STATE_STOPPED = 0,
51 LSC_STATE_STOPPING = 1, 57 LSC_STATE_STOPPING = 1,
@@ -57,8 +63,12 @@ struct ispccdc_lsc_config_req {
57 struct list_head list; 63 struct list_head list;
58 struct omap3isp_ccdc_lsc_config config; 64 struct omap3isp_ccdc_lsc_config config;
59 unsigned char enable; 65 unsigned char enable;
60 u32 table; 66
61 struct iovm_struct *iovm; 67 struct {
68 void *addr;
69 dma_addr_t dma;
70 struct sg_table sgt;
71 } table;
62}; 72};
63 73
64/* 74/*
@@ -136,7 +146,7 @@ struct isp_ccdc_device {
136 fpc_en:1; 146 fpc_en:1;
137 struct omap3isp_ccdc_blcomp blcomp; 147 struct omap3isp_ccdc_blcomp blcomp;
138 struct omap3isp_ccdc_bclamp clamp; 148 struct omap3isp_ccdc_bclamp clamp;
139 struct omap3isp_ccdc_fpc fpc; 149 struct ispccdc_fpc fpc;
140 struct ispccdc_lsc lsc; 150 struct ispccdc_lsc lsc;
141 unsigned int update; 151 unsigned int update;
142 unsigned int shadow_update; 152 unsigned int shadow_update;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index b30b67d22a58..f3801db9095c 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -549,7 +549,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
549 549
550 buffer = omap3isp_video_buffer_next(&ccp2->video_in); 550 buffer = omap3isp_video_buffer_next(&ccp2->video_in);
551 if (buffer != NULL) 551 if (buffer != NULL)
552 ccp2_set_inaddr(ccp2, buffer->isp_addr); 552 ccp2_set_inaddr(ccp2, buffer->dma);
553 553
554 pipe->state |= ISP_PIPELINE_IDLE_INPUT; 554 pipe->state |= ISP_PIPELINE_IDLE_INPUT;
555 555
@@ -940,7 +940,7 @@ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
940{ 940{
941 struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2; 941 struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
942 942
943 ccp2_set_inaddr(ccp2, buffer->isp_addr); 943 ccp2_set_inaddr(ccp2, buffer->dma);
944 return 0; 944 return 0;
945} 945}
946 946
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 620560828a48..5a2e47e58b84 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -695,7 +695,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2)
695 if (buffer == NULL) 695 if (buffer == NULL)
696 return; 696 return;
697 697
698 csi2_set_outaddr(csi2, buffer->isp_addr); 698 csi2_set_outaddr(csi2, buffer->dma);
699 csi2_ctx_enable(isp, csi2, 0, 1); 699 csi2_ctx_enable(isp, csi2, 0, 1);
700} 700}
701 701
@@ -812,7 +812,7 @@ static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
812 struct isp_device *isp = video->isp; 812 struct isp_device *isp = video->isp;
813 struct isp_csi2_device *csi2 = &isp->isp_csi2a; 813 struct isp_csi2_device *csi2 = &isp->isp_csi2a;
814 814
815 csi2_set_outaddr(csi2, buffer->isp_addr); 815 csi2_set_outaddr(csi2, buffer->dma);
816 816
817 /* 817 /*
818 * If streaming was enabled before there was a buffer queued 818 * If streaming was enabled before there was a buffer queued
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index 75fd82b152ba..d6811ce263eb 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -47,7 +47,7 @@ static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
47 if (aewb->state == ISPSTAT_DISABLED) 47 if (aewb->state == ISPSTAT_DISABLED)
48 return; 48 return;
49 49
50 isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr, 50 isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
51 OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST); 51 OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
52 52
53 if (!aewb->update) 53 if (!aewb->update)
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index a0bf5af32438..6fc960cd30f5 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -51,7 +51,7 @@ static void h3a_af_setup_regs(struct ispstat *af, void *priv)
51 if (af->state == ISPSTAT_DISABLED) 51 if (af->state == ISPSTAT_DISABLED)
52 return; 52 return;
53 53
54 isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A, 54 isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
55 ISPH3A_AFBUFST); 55 ISPH3A_AFBUFST);
56 56
57 if (!af->update) 57 if (!af->update)
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 395b2b068c75..720809b07e75 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1499,14 +1499,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
1499 if (prev->input == PREVIEW_INPUT_MEMORY) { 1499 if (prev->input == PREVIEW_INPUT_MEMORY) {
1500 buffer = omap3isp_video_buffer_next(&prev->video_in); 1500 buffer = omap3isp_video_buffer_next(&prev->video_in);
1501 if (buffer != NULL) 1501 if (buffer != NULL)
1502 preview_set_inaddr(prev, buffer->isp_addr); 1502 preview_set_inaddr(prev, buffer->dma);
1503 pipe->state |= ISP_PIPELINE_IDLE_INPUT; 1503 pipe->state |= ISP_PIPELINE_IDLE_INPUT;
1504 } 1504 }
1505 1505
1506 if (prev->output & PREVIEW_OUTPUT_MEMORY) { 1506 if (prev->output & PREVIEW_OUTPUT_MEMORY) {
1507 buffer = omap3isp_video_buffer_next(&prev->video_out); 1507 buffer = omap3isp_video_buffer_next(&prev->video_out);
1508 if (buffer != NULL) { 1508 if (buffer != NULL) {
1509 preview_set_outaddr(prev, buffer->isp_addr); 1509 preview_set_outaddr(prev, buffer->dma);
1510 restart = 1; 1510 restart = 1;
1511 } 1511 }
1512 pipe->state |= ISP_PIPELINE_IDLE_OUTPUT; 1512 pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
@@ -1577,10 +1577,10 @@ static int preview_video_queue(struct isp_video *video,
1577 struct isp_prev_device *prev = &video->isp->isp_prev; 1577 struct isp_prev_device *prev = &video->isp->isp_prev;
1578 1578
1579 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 1579 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1580 preview_set_inaddr(prev, buffer->isp_addr); 1580 preview_set_inaddr(prev, buffer->dma);
1581 1581
1582 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1582 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1583 preview_set_outaddr(prev, buffer->isp_addr); 1583 preview_set_outaddr(prev, buffer->dma);
1584 1584
1585 return 0; 1585 return 0;
1586} 1586}
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
deleted file mode 100644
index a5e65858e799..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ /dev/null
@@ -1,1161 +0,0 @@
1/*
2 * ispqueue.c
3 *
4 * TI OMAP3 ISP - Video buffers queue handling
5 *
6 * Copyright (C) 2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <asm/cacheflush.h>
27#include <linux/dma-mapping.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/poll.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35
36#include "ispqueue.h"
37
38/* -----------------------------------------------------------------------------
39 * Video buffers management
40 */
41
42/*
43 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
44 *
45 * The typical operation required here is Cache Invalidation across
46 * the (user space) buffer address range. And this _must_ be done
47 * at QBUF stage (and *only* at QBUF).
48 *
 49 * We try to use the optimal cache invalidation function:
 50 * - dmac_map_area:
 51 *    - used when the number of pages is _low_.
 52 *    - it becomes quite slow as the number of pages increases.
 53 *    - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
 54 *    - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 55 *
 56 * - flush_cache_all:
 57 *    - used when the number of pages is _high_.
 58 *    - time taken is in the range of 500-900 us.
 59 *    - has a higher penalty, as the whole dcache + icache is invalidated
60 */
61/*
62 * FIXME: dmac_inv_range crashes randomly on the user space buffer
63 * address. Fall back to flush_cache_all for now.
64 */
65#define ISP_CACHE_FLUSH_PAGES_MAX 0
66
67static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
68{
69 if (buf->skip_cache)
70 return;
71
72 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
73 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
74 flush_cache_all();
75 else {
76 dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
77 DMA_FROM_DEVICE);
78 outer_inv_range(buf->vbuf.m.userptr,
79 buf->vbuf.m.userptr + buf->vbuf.length);
80 }
81}
82
83/*
84 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
85 *
86 * Lock the VMAs underlying the given buffer into memory. This avoids the
87 * userspace buffer mapping from being swapped out, making VIPT cache handling
88 * easier.
89 *
 90 * Note that the pages will not be freed as the buffers have been locked into
 91 * memory by a call to get_user_pages(), but the userspace mapping could
92 * still disappear if the VMAs are not locked. This is caused by the memory
93 * management code trying to be as lock-less as possible, which results in the
94 * userspace mapping manager not finding out that the pages are locked under
95 * some conditions.
96 */
97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
98{
99 struct vm_area_struct *vma;
100 unsigned long start;
101 unsigned long end;
102 int ret = 0;
103
104 if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
105 return 0;
106
 107	/* We can be called from workqueue context to unlock the VMAs when the
 108	 * current task dies. In that case there's no current memory management
109 * context so unlocking can't be performed, but the VMAs have been or
110 * are getting destroyed anyway so it doesn't really matter.
111 */
112 if (!current || !current->mm)
113 return lock ? -EINVAL : 0;
114
115 start = buf->vbuf.m.userptr;
116 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
117
118 down_write(&current->mm->mmap_sem);
119 spin_lock(&current->mm->page_table_lock);
120
121 do {
122 vma = find_vma(current->mm, start);
123 if (vma == NULL) {
124 ret = -EFAULT;
125 goto out;
126 }
127
128 if (lock)
129 vma->vm_flags |= VM_LOCKED;
130 else
131 vma->vm_flags &= ~VM_LOCKED;
132
133 start = vma->vm_end + 1;
134 } while (vma->vm_end < end);
135
136 if (lock)
137 buf->vm_flags |= VM_LOCKED;
138 else
139 buf->vm_flags &= ~VM_LOCKED;
140
141out:
142 spin_unlock(&current->mm->page_table_lock);
143 up_write(&current->mm->mmap_sem);
144 return ret;
145}
146
147/*
148 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
149 *
150 * Iterate over the vmalloc'ed area and create a scatter list entry for every
151 * page.
152 */
153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
154{
155 struct scatterlist *sglist;
156 unsigned int npages;
157 unsigned int i;
158 void *addr;
159
160 addr = buf->vaddr;
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
162
163 sglist = vmalloc(npages * sizeof(*sglist));
164 if (sglist == NULL)
165 return -ENOMEM;
166
167 sg_init_table(sglist, npages);
168
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
170 struct page *page = vmalloc_to_page(addr);
171
172 if (page == NULL || PageHighMem(page)) {
173 vfree(sglist);
174 return -EINVAL;
175 }
176
177 sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
178 }
179
180 buf->sglen = npages;
181 buf->sglist = sglist;
182
183 return 0;
184}
185
186/*
187 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
188 *
189 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
190 */
191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
192{
193 struct scatterlist *sglist;
194 unsigned int offset = buf->offset;
195 unsigned int i;
196
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
198 if (sglist == NULL)
199 return -ENOMEM;
200
201 sg_init_table(sglist, buf->npages);
202
203 for (i = 0; i < buf->npages; ++i) {
204 if (PageHighMem(buf->pages[i])) {
205 vfree(sglist);
206 return -EINVAL;
207 }
208
209 sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
210 offset);
211 offset = 0;
212 }
213
214 buf->sglen = buf->npages;
215 buf->sglist = sglist;
216
217 return 0;
218}
219
220/*
221 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
222 *
223 * Create a scatter list of physically contiguous pages starting at the buffer
224 * memory physical address.
225 */
226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
227{
228 struct scatterlist *sglist;
229 unsigned int offset = buf->offset;
230 unsigned long pfn = buf->paddr >> PAGE_SHIFT;
231 unsigned int i;
232
233 sglist = vmalloc(buf->npages * sizeof(*sglist));
234 if (sglist == NULL)
235 return -ENOMEM;
236
237 sg_init_table(sglist, buf->npages);
238
239 for (i = 0; i < buf->npages; ++i, ++pfn) {
240 sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
241 offset);
242 /* PFNMAP buffers will not get DMA-mapped, set the DMA address
243 * manually.
244 */
245 sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
246 offset = 0;
247 }
248
249 buf->sglen = buf->npages;
250 buf->sglist = sglist;
251
252 return 0;
253}
254
255/*
256 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
257 *
 258 * Release pages locked by a call to isp_video_buffer_prepare_user() and free the
259 * pages table.
260 */
261static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
262{
263 enum dma_data_direction direction;
264 unsigned int i;
265
266 if (buf->queue->ops->buffer_cleanup)
267 buf->queue->ops->buffer_cleanup(buf);
268
269 if (!(buf->vm_flags & VM_PFNMAP)) {
270 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
271 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
272 dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
273 direction);
274 }
275
276 vfree(buf->sglist);
277 buf->sglist = NULL;
278 buf->sglen = 0;
279
280 if (buf->pages != NULL) {
281 isp_video_buffer_lock_vma(buf, 0);
282
283 for (i = 0; i < buf->npages; ++i)
284 page_cache_release(buf->pages[i]);
285
286 vfree(buf->pages);
287 buf->pages = NULL;
288 }
289
290 buf->npages = 0;
291 buf->skip_cache = false;
292}
293
294/*
295 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
296 *
297 * This function creates a list of pages for a userspace VMA. The number of
298 * pages is first computed based on the buffer size, and pages are then
299 * retrieved by a call to get_user_pages.
300 *
301 * Pages are pinned to memory by get_user_pages, making them available for DMA
 302 * transfers. However, due to memory management optimizations, it seems that
 303 * get_user_pages() doesn't guarantee that the pinned pages will not be written
304 * to swap and removed from the userspace mapping(s). When this happens, a page
305 * fault can be generated when accessing those unmapped pages.
306 *
307 * If the fault is triggered by a page table walk caused by VIPT cache
308 * management operations, the page fault handler might oops if the MM semaphore
309 * is held, as it can't handle kernel page faults in that case. To fix that, a
310 * fixup entry needs to be added to the cache management code, or the userspace
311 * VMA must be locked to avoid removing pages from the userspace mapping in the
312 * first place.
313 *
314 * If the number of pages retrieved is smaller than the number required by the
315 * buffer size, the function returns -EFAULT.
316 */
317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
318{
319 unsigned long data;
320 unsigned int first;
321 unsigned int last;
322 int ret;
323
324 data = buf->vbuf.m.userptr;
325 first = (data & PAGE_MASK) >> PAGE_SHIFT;
326 last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
327
328 buf->offset = data & ~PAGE_MASK;
329 buf->npages = last - first + 1;
330 buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
331 if (buf->pages == NULL)
332 return -ENOMEM;
333
334 down_read(&current->mm->mmap_sem);
335 ret = get_user_pages(current, current->mm, data & PAGE_MASK,
336 buf->npages,
337 buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
338 buf->pages, NULL);
339 up_read(&current->mm->mmap_sem);
340
341 if (ret != buf->npages) {
342 buf->npages = ret < 0 ? 0 : ret;
343 isp_video_buffer_cleanup(buf);
344 return -EFAULT;
345 }
346
347 ret = isp_video_buffer_lock_vma(buf, 1);
348 if (ret < 0)
349 isp_video_buffer_cleanup(buf);
350
351 return ret;
352}
353
354/*
355 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
356 *
357 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
358 * memory and if they span a single VMA.
359 *
360 * Return 0 if the buffer is valid, or -EFAULT otherwise.
361 */
362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
363{
364 struct vm_area_struct *vma;
365 unsigned long prev_pfn;
366 unsigned long this_pfn;
367 unsigned long start;
368 unsigned long end;
369 dma_addr_t pa = 0;
370 int ret = -EFAULT;
371
372 start = buf->vbuf.m.userptr;
373 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
374
375 buf->offset = start & ~PAGE_MASK;
376 buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
377 buf->pages = NULL;
378
379 down_read(&current->mm->mmap_sem);
380 vma = find_vma(current->mm, start);
381 if (vma == NULL || vma->vm_end < end)
382 goto done;
383
384 for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
385 ret = follow_pfn(vma, start, &this_pfn);
386 if (ret)
387 goto done;
388
389 if (prev_pfn == 0)
390 pa = this_pfn << PAGE_SHIFT;
391 else if (this_pfn != prev_pfn + 1) {
392 ret = -EFAULT;
393 goto done;
394 }
395
396 prev_pfn = this_pfn;
397 }
398
399 buf->paddr = pa + buf->offset;
400 ret = 0;
401
402done:
403 up_read(&current->mm->mmap_sem);
404 return ret;
405}
406
407/*
408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
409 *
410 * This function locates the VMAs for the buffer's userspace address and checks
411 * that their flags match. The only flag we need to care about at the moment
412 * is VM_PFNMAP.
413 *
414 * The buffer vm_flags field is set to the flags of the first VMA.
415 *
416 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
417 * have incompatible flags.
418 */
419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
420{
421 struct vm_area_struct *vma;
422 pgprot_t uninitialized_var(vm_page_prot);
423 unsigned long start;
424 unsigned long end;
425 int ret = -EFAULT;
426
427 start = buf->vbuf.m.userptr;
428 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
429
430 down_read(&current->mm->mmap_sem);
431
432 do {
433 vma = find_vma(current->mm, start);
434 if (vma == NULL)
435 goto done;
436
437 if (start == buf->vbuf.m.userptr) {
438 buf->vm_flags = vma->vm_flags;
439 vm_page_prot = vma->vm_page_prot;
440 }
441
442 if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
443 goto done;
444
445 if (vm_page_prot != vma->vm_page_prot)
446 goto done;
447
448 start = vma->vm_end + 1;
449 } while (vma->vm_end < end);
450
451	/* Skip cache management to enhance performance for non-cached or
452 * write-combining buffers.
453 */
454 if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
455 vm_page_prot == pgprot_writecombine(vm_page_prot))
456 buf->skip_cache = true;
457
458 ret = 0;
459
460done:
461 up_read(&current->mm->mmap_sem);
462 return ret;
463}
464
465/*
466 * isp_video_buffer_prepare - Make a buffer ready for operation
467 *
468 * Preparing a buffer involves:
469 *
470 * - validating VMAs (userspace buffers only)
471 * - locking pages and VMAs into memory (userspace buffers only)
472 * - building page and scatter-gather lists
473 * - mapping buffers for DMA operation
474 * - performing driver-specific preparation
475 *
476 * The function must be called in userspace context with a valid mm context
477 * (this excludes cleanup paths such as sys_close when the userspace process
478 * segfaults).
479 */
480static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
481{
482 enum dma_data_direction direction;
483 int ret;
484
485 switch (buf->vbuf.memory) {
486 case V4L2_MEMORY_MMAP:
487 ret = isp_video_buffer_sglist_kernel(buf);
488 break;
489
490 case V4L2_MEMORY_USERPTR:
491 ret = isp_video_buffer_prepare_vm_flags(buf);
492 if (ret < 0)
493 return ret;
494
495 if (buf->vm_flags & VM_PFNMAP) {
496 ret = isp_video_buffer_prepare_pfnmap(buf);
497 if (ret < 0)
498 return ret;
499
500 ret = isp_video_buffer_sglist_pfnmap(buf);
501 } else {
502 ret = isp_video_buffer_prepare_user(buf);
503 if (ret < 0)
504 return ret;
505
506 ret = isp_video_buffer_sglist_user(buf);
507 }
508 break;
509
510 default:
511 return -EINVAL;
512 }
513
514 if (ret < 0)
515 goto done;
516
517 if (!(buf->vm_flags & VM_PFNMAP)) {
518 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
519 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
520 ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
521 direction);
522 if (ret != buf->sglen) {
523 ret = -EFAULT;
524 goto done;
525 }
526 }
527
528 if (buf->queue->ops->buffer_prepare)
529 ret = buf->queue->ops->buffer_prepare(buf);
530
531done:
532 if (ret < 0) {
533 isp_video_buffer_cleanup(buf);
534 return ret;
535 }
536
537 return ret;
538}
539
540/*
541 * isp_video_buffer_query - Query the status of a given buffer
542 *
543 * Locking: must be called with the queue lock held.
544 */
545static void isp_video_buffer_query(struct isp_video_buffer *buf,
546 struct v4l2_buffer *vbuf)
547{
548 memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
549
550 if (buf->vma_use_count)
551 vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
552
553 switch (buf->state) {
554 case ISP_BUF_STATE_ERROR:
555 vbuf->flags |= V4L2_BUF_FLAG_ERROR;
556 /* Fallthrough */
557 case ISP_BUF_STATE_DONE:
558 vbuf->flags |= V4L2_BUF_FLAG_DONE;
559 break;
560 case ISP_BUF_STATE_QUEUED:
561 case ISP_BUF_STATE_ACTIVE:
562 vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
563 break;
564 case ISP_BUF_STATE_IDLE:
565 default:
566 break;
567 }
568}
569
570/*
571 * isp_video_buffer_wait - Wait for a buffer to be ready
572 *
573 * In non-blocking mode, return immediately with 0 if the buffer is ready or
574 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
575 *
576 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
577 * queue using the same condition.
578 */
579static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
580{
581 if (nonblocking) {
582 return (buf->state != ISP_BUF_STATE_QUEUED &&
583 buf->state != ISP_BUF_STATE_ACTIVE)
584 ? 0 : -EAGAIN;
585 }
586
587 return wait_event_interruptible(buf->wait,
588 buf->state != ISP_BUF_STATE_QUEUED &&
589 buf->state != ISP_BUF_STATE_ACTIVE);
590}
591
592/* -----------------------------------------------------------------------------
593 * Queue management
594 */
595
596/*
597 * isp_video_queue_free - Free video buffers memory
598 *
599 * Buffers can only be freed if the queue isn't streaming and if no buffer is
600 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
601 *
602 * This function must be called with the queue lock held.
603 */
604static int isp_video_queue_free(struct isp_video_queue *queue)
605{
606 unsigned int i;
607
608 if (queue->streaming)
609 return -EBUSY;
610
611 for (i = 0; i < queue->count; ++i) {
612 if (queue->buffers[i]->vma_use_count != 0)
613 return -EBUSY;
614 }
615
616 for (i = 0; i < queue->count; ++i) {
617 struct isp_video_buffer *buf = queue->buffers[i];
618
619 isp_video_buffer_cleanup(buf);
620
621 vfree(buf->vaddr);
622 buf->vaddr = NULL;
623
624 kfree(buf);
625 queue->buffers[i] = NULL;
626 }
627
628 INIT_LIST_HEAD(&queue->queue);
629 queue->count = 0;
630 return 0;
631}
632
633/*
634 * isp_video_queue_alloc - Allocate video buffers memory
635 *
636 * This function must be called with the queue lock held.
637 */
638static int isp_video_queue_alloc(struct isp_video_queue *queue,
639 unsigned int nbuffers,
640 unsigned int size, enum v4l2_memory memory)
641{
642 struct isp_video_buffer *buf;
643 unsigned int i;
644 void *mem;
645 int ret;
646
647 /* Start by freeing the buffers. */
648 ret = isp_video_queue_free(queue);
649 if (ret < 0)
650 return ret;
651
652 /* Bail out if no buffers should be allocated. */
653 if (nbuffers == 0)
654 return 0;
655
656	/* Allocate and initialize the buffers. */
657 for (i = 0; i < nbuffers; ++i) {
658 buf = kzalloc(queue->bufsize, GFP_KERNEL);
659 if (buf == NULL)
660 break;
661
662 if (memory == V4L2_MEMORY_MMAP) {
663 /* Allocate video buffers memory for mmap mode. Align
664 * the size to the page size.
665 */
666 mem = vmalloc_32_user(PAGE_ALIGN(size));
667 if (mem == NULL) {
668 kfree(buf);
669 break;
670 }
671
672 buf->vbuf.m.offset = i * PAGE_ALIGN(size);
673 buf->vaddr = mem;
674 }
675
676 buf->vbuf.index = i;
677 buf->vbuf.length = size;
678 buf->vbuf.type = queue->type;
679 buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
680 buf->vbuf.field = V4L2_FIELD_NONE;
681 buf->vbuf.memory = memory;
682
683 buf->queue = queue;
684 init_waitqueue_head(&buf->wait);
685
686 queue->buffers[i] = buf;
687 }
688
689 if (i == 0)
690 return -ENOMEM;
691
692 queue->count = i;
693 return nbuffers;
694}
695
696/**
697 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
698 * @queue: Video buffers queue
699 *
700 * Free all allocated resources and clean up the video buffers queue. The queue
701 * must not be busy (no ongoing video stream) and buffers must have been
702 * unmapped.
703 *
704 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
705 * unmapped.
706 */
707int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
708{
709 return isp_video_queue_free(queue);
710}
711
712/**
713 * omap3isp_video_queue_init - Initialize the video buffers queue
714 * @queue: Video buffers queue
715 * @type: V4L2 buffer type (capture or output)
716 * @ops: Driver-specific queue operations
717 * @dev: Device used for DMA operations
718 * @bufsize: Size of the driver-specific buffer structure
719 *
720 * Initialize the video buffers queue with the supplied parameters.
721 *
722 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
723 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
724 *
725 * Buffer objects will be allocated using the given buffer size to allow room
726 * for driver-specific fields. Driver-specific buffer structures must start
727 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
728 * structure must pass the size of the isp_video_buffer structure in the bufsize
729 * parameter.
730 *
731 * Return 0 on success.
732 */
733int omap3isp_video_queue_init(struct isp_video_queue *queue,
734 enum v4l2_buf_type type,
735 const struct isp_video_queue_operations *ops,
736 struct device *dev, unsigned int bufsize)
737{
738 INIT_LIST_HEAD(&queue->queue);
739 mutex_init(&queue->lock);
740 spin_lock_init(&queue->irqlock);
741
742 queue->type = type;
743 queue->ops = ops;
744 queue->dev = dev;
745 queue->bufsize = bufsize;
746
747 return 0;
748}
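/*
 * Illustrative driver-side usage of the queue API documented above (a sketch,
 * not part of this patch; all my_* names are hypothetical). The driver embeds
 * struct isp_video_buffer as the first member of its own buffer structure and
 * passes that structure's size as the bufsize argument.
 */
struct my_driver_buffer {
	struct isp_video_buffer buf;		/* must be the first member */
	dma_addr_t hw_addr;			/* driver-specific data */
};

static void my_queue_prepare(struct isp_video_queue *queue,
			     unsigned int *nbuffers, unsigned int *size)
{
	*size = 640 * 480 * 2;			/* e.g. VGA YUYV, driver-specific */
	*nbuffers = min(*nbuffers, 4U);		/* clamp to the driver's limit */
}

static void my_buffer_queue(struct isp_video_buffer *buf)
{
	/* Hand the buffer to the hardware; called with the irqlock held. */
}

static const struct isp_video_queue_operations my_queue_ops = {
	.queue_prepare = my_queue_prepare,
	.buffer_queue = my_buffer_queue,
};

static int my_setup_queue(struct isp_video_queue *queue, struct device *dev)
{
	return omap3isp_video_queue_init(queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
					 &my_queue_ops, dev,
					 sizeof(struct my_driver_buffer));
}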
749
750/* -----------------------------------------------------------------------------
751 * V4L2 operations
752 */
753
754/**
755 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
756 *
757 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
758 * allocates video buffer objects and, for MMAP buffers, buffer memory.
759 *
760 * If the number of buffers is 0, all buffers are freed and the function returns
761 * without performing any allocation.
762 *
763 * If the number of buffers is not 0, currently allocated buffers (if any) are
764 * freed and the requested number of buffers are allocated. Depending on
765 * driver-specific requirements and on memory availability, a number of buffers
766 * smaller or larger than requested can be allocated. This isn't considered an
767 * error.
768 *
769 * Return 0 on success or one of the following error codes:
770 *
771 * -EINVAL if the buffer type or index is invalid
772 * -EBUSY if the queue is busy (streaming or buffers mapped)
773 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
774 */
775int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
776 struct v4l2_requestbuffers *rb)
777{
778 unsigned int nbuffers = rb->count;
779 unsigned int size;
780 int ret;
781
782 if (rb->type != queue->type)
783 return -EINVAL;
784
785 queue->ops->queue_prepare(queue, &nbuffers, &size);
786 if (size == 0)
787 return -EINVAL;
788
789 nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
790
791 mutex_lock(&queue->lock);
792
793 ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
794 if (ret < 0)
795 goto done;
796
797 rb->count = ret;
798 ret = 0;
799
800done:
801 mutex_unlock(&queue->lock);
802 return ret;
803}
804
805/**
806 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
807 *
808 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
809 * returns the status of a given video buffer.
810 *
811 * Return 0 on success or -EINVAL if the buffer type or index is invalid.
812 */
813int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
814 struct v4l2_buffer *vbuf)
815{
816 struct isp_video_buffer *buf;
817 int ret = 0;
818
819 if (vbuf->type != queue->type)
820 return -EINVAL;
821
822 mutex_lock(&queue->lock);
823
824 if (vbuf->index >= queue->count) {
825 ret = -EINVAL;
826 goto done;
827 }
828
829 buf = queue->buffers[vbuf->index];
830 isp_video_buffer_query(buf, vbuf);
831
832done:
833 mutex_unlock(&queue->lock);
834 return ret;
835}
836
837/**
838 * omap3isp_video_queue_qbuf - Queue a buffer
839 *
840 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
841 *
842 * The v4l2_buffer structure passed from userspace is first sanity checked. If
843 * sane, the buffer is then processed and added to the main queue and, if the
844 * queue is streaming, to the IRQ queue.
845 *
846 * Before being enqueued, USERPTR buffers are checked for address changes. If
847 * the buffer has a different userspace address, the old memory area is unlocked
848 * and the new memory area is locked.
849 */
850int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
851 struct v4l2_buffer *vbuf)
852{
853 struct isp_video_buffer *buf;
854 unsigned long flags;
855 int ret = -EINVAL;
856
857 if (vbuf->type != queue->type)
858 goto done;
859
860 mutex_lock(&queue->lock);
861
862 if (vbuf->index >= queue->count)
863 goto done;
864
865 buf = queue->buffers[vbuf->index];
866
867 if (vbuf->memory != buf->vbuf.memory)
868 goto done;
869
870 if (buf->state != ISP_BUF_STATE_IDLE)
871 goto done;
872
873 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
874 vbuf->length < buf->vbuf.length)
875 goto done;
876
877 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
878 vbuf->m.userptr != buf->vbuf.m.userptr) {
879 isp_video_buffer_cleanup(buf);
880 buf->vbuf.m.userptr = vbuf->m.userptr;
881 buf->prepared = 0;
882 }
883
884 if (!buf->prepared) {
885 ret = isp_video_buffer_prepare(buf);
886 if (ret < 0)
887 goto done;
888 buf->prepared = 1;
889 }
890
891 isp_video_buffer_cache_sync(buf);
892
893 buf->state = ISP_BUF_STATE_QUEUED;
894 list_add_tail(&buf->stream, &queue->queue);
895
896 if (queue->streaming) {
897 spin_lock_irqsave(&queue->irqlock, flags);
898 queue->ops->buffer_queue(buf);
899 spin_unlock_irqrestore(&queue->irqlock, flags);
900 }
901
902 ret = 0;
903
904done:
905 mutex_unlock(&queue->lock);
906 return ret;
907}
908
909/**
910 * omap3isp_video_queue_dqbuf - Dequeue a buffer
911 *
912 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
913 *
914 * Wait until a buffer is ready to be dequeued, remove it from the queue and
915 * copy its information to the v4l2_buffer structure.
916 *
917 * If the nonblocking argument is not zero and no buffer is ready, return
918 * -EAGAIN immediately instead of waiting.
919 *
920 * If no buffer has been enqueued, or if the requested buffer type doesn't match
921 * the queue type, return -EINVAL.
922 */
923int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
924 struct v4l2_buffer *vbuf, int nonblocking)
925{
926 struct isp_video_buffer *buf;
927 int ret;
928
929 if (vbuf->type != queue->type)
930 return -EINVAL;
931
932 mutex_lock(&queue->lock);
933
934 if (list_empty(&queue->queue)) {
935 ret = -EINVAL;
936 goto done;
937 }
938
939 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
940 ret = isp_video_buffer_wait(buf, nonblocking);
941 if (ret < 0)
942 goto done;
943
944 list_del(&buf->stream);
945
946 isp_video_buffer_query(buf, vbuf);
947 buf->state = ISP_BUF_STATE_IDLE;
948 vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
949
950done:
951 mutex_unlock(&queue->lock);
952 return ret;
953}
954
955/**
956 * omap3isp_video_queue_streamon - Start streaming
957 *
958 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
959 * starts streaming on the queue and calls the buffer_queue operation for all
960 * queued buffers.
961 *
962 * Return 0 on success.
963 */
964int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
965{
966 struct isp_video_buffer *buf;
967 unsigned long flags;
968
969 mutex_lock(&queue->lock);
970
971 if (queue->streaming)
972 goto done;
973
974 queue->streaming = 1;
975
976 spin_lock_irqsave(&queue->irqlock, flags);
977 list_for_each_entry(buf, &queue->queue, stream)
978 queue->ops->buffer_queue(buf);
979 spin_unlock_irqrestore(&queue->irqlock, flags);
980
981done:
982 mutex_unlock(&queue->lock);
983 return 0;
984}
985
986/**
987 * omap3isp_video_queue_streamoff - Stop streaming
988 *
989 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
990 * stops streaming on the queue and wakes up all the buffers.
991 *
992 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
993 * delayed works before calling this function to make sure no buffer will be
994 * touched by the driver and/or hardware.
995 */
996void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
997{
998 struct isp_video_buffer *buf;
999 unsigned long flags;
1000 unsigned int i;
1001
1002 mutex_lock(&queue->lock);
1003
1004 if (!queue->streaming)
1005 goto done;
1006
1007 queue->streaming = 0;
1008
1009 spin_lock_irqsave(&queue->irqlock, flags);
1010 for (i = 0; i < queue->count; ++i) {
1011 buf = queue->buffers[i];
1012
1013 if (buf->state == ISP_BUF_STATE_ACTIVE)
1014 wake_up(&buf->wait);
1015
1016 buf->state = ISP_BUF_STATE_IDLE;
1017 }
1018 spin_unlock_irqrestore(&queue->irqlock, flags);
1019
1020 INIT_LIST_HEAD(&queue->queue);
1021
1022done:
1023 mutex_unlock(&queue->lock);
1024}
1025
1026/**
1027 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
1028 *
1029 * This function is intended to be used with suspend/resume operations. It
1030 * discards all 'done' buffers as they would be too old to be requested after
1031 * resume.
1032 *
1033 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
1034 * delayed works before calling this function to make sure no buffer will be
1035 * touched by the driver and/or hardware.
1036 */
1037void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1038{
1039 struct isp_video_buffer *buf;
1040 unsigned int i;
1041
1042 mutex_lock(&queue->lock);
1043
1044 if (!queue->streaming)
1045 goto done;
1046
1047 for (i = 0; i < queue->count; ++i) {
1048 buf = queue->buffers[i];
1049
1050 if (buf->state == ISP_BUF_STATE_DONE)
1051 buf->state = ISP_BUF_STATE_ERROR;
1052 }
1053
1054done:
1055 mutex_unlock(&queue->lock);
1056}
1057
1058static void isp_video_queue_vm_open(struct vm_area_struct *vma)
1059{
1060 struct isp_video_buffer *buf = vma->vm_private_data;
1061
1062 buf->vma_use_count++;
1063}
1064
1065static void isp_video_queue_vm_close(struct vm_area_struct *vma)
1066{
1067 struct isp_video_buffer *buf = vma->vm_private_data;
1068
1069 buf->vma_use_count--;
1070}
1071
1072static const struct vm_operations_struct isp_video_queue_vm_ops = {
1073 .open = isp_video_queue_vm_open,
1074 .close = isp_video_queue_vm_close,
1075};
1076
1077/**
1078 * omap3isp_video_queue_mmap - Map buffers to userspace
1079 *
1080 * This function is intended to be used as an mmap() file operation handler. It
1081 * maps a buffer to userspace based on the VMA offset.
1082 *
1083 * Only buffers of memory type MMAP are supported.
1084 */
1085int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
1086 struct vm_area_struct *vma)
1087{
1088 struct isp_video_buffer *uninitialized_var(buf);
1089 unsigned long size;
1090 unsigned int i;
1091 int ret = 0;
1092
1093 mutex_lock(&queue->lock);
1094
1095 for (i = 0; i < queue->count; ++i) {
1096 buf = queue->buffers[i];
1097 if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1098 break;
1099 }
1100
1101 if (i == queue->count) {
1102 ret = -EINVAL;
1103 goto done;
1104 }
1105
1106 size = vma->vm_end - vma->vm_start;
1107
1108 if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
1109 size != PAGE_ALIGN(buf->vbuf.length)) {
1110 ret = -EINVAL;
1111 goto done;
1112 }
1113
1114 ret = remap_vmalloc_range(vma, buf->vaddr, 0);
1115 if (ret < 0)
1116 goto done;
1117
1118 vma->vm_ops = &isp_video_queue_vm_ops;
1119 vma->vm_private_data = buf;
1120 isp_video_queue_vm_open(vma);
1121
1122done:
1123 mutex_unlock(&queue->lock);
1124 return ret;
1125}
1126
1127/**
1128 * omap3isp_video_queue_poll - Poll video queue state
1129 *
1130 * This function is intended to be used as a poll() file operation handler. It
1131 * polls the state of the video buffer at the front of the queue and returns an
1132 * events mask.
1133 *
1134 * If no buffer is present at the front of the queue, POLLERR is returned.
1135 */
1136unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1137 struct file *file, poll_table *wait)
1138{
1139 struct isp_video_buffer *buf;
1140 unsigned int mask = 0;
1141
1142 mutex_lock(&queue->lock);
1143 if (list_empty(&queue->queue)) {
1144 mask |= POLLERR;
1145 goto done;
1146 }
1147 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1148
1149 poll_wait(file, &buf->wait, wait);
1150 if (buf->state == ISP_BUF_STATE_DONE ||
1151 buf->state == ISP_BUF_STATE_ERROR) {
1152 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1153 mask |= POLLIN | POLLRDNORM;
1154 else
1155 mask |= POLLOUT | POLLWRNORM;
1156 }
1157
1158done:
1159 mutex_unlock(&queue->lock);
1160 return mask;
1161}
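/*
 * Illustrative userspace sketch, not part of the kernel sources: the standard
 * V4L2 MMAP streaming sequence that the reqbufs/querybuf/qbuf/streamon/dqbuf
 * handlers above are designed to serve. Error handling is reduced to a minimum
 * and the function name is hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int capture_one_frame(const char *devnode)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers rb;
	struct v4l2_buffer b;
	void *mem;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;

	/* Allocate kernel buffers (VIDIOC_REQBUFS handler above). */
	memset(&rb, 0, sizeof(rb));
	rb.count = 4;
	rb.type = type;
	rb.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &rb);

	/* Query and map the first buffer (VIDIOC_QUERYBUF + mmap handlers). */
	memset(&b, 0, sizeof(b));
	b.index = 0;
	b.type = type;
	ioctl(fd, VIDIOC_QUERYBUF, &b);
	mem = mmap(NULL, b.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, b.m.offset);

	/* Queue the buffer, start streaming, wait for a filled frame. */
	ioctl(fd, VIDIOC_QBUF, &b);
	ioctl(fd, VIDIOC_STREAMON, &type);
	ioctl(fd, VIDIOC_DQBUF, &b);

	/* ... process b.bytesused bytes at 'mem' ... */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	munmap(mem, b.length);
	close(fd);
	return 0;
}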
diff --git a/drivers/media/platform/omap3isp/ispqueue.h b/drivers/media/platform/omap3isp/ispqueue.h
deleted file mode 100644
index 3e048ad65647..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.h
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 * ispqueue.h
3 *
4 * TI OMAP3 ISP - Video buffers queue handling
5 *
6 * Copyright (C) 2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#ifndef OMAP3_ISP_QUEUE_H
27#define OMAP3_ISP_QUEUE_H
28
29#include <linux/kernel.h>
30#include <linux/list.h>
31#include <linux/mm_types.h>
32#include <linux/mutex.h>
33#include <linux/videodev2.h>
34#include <linux/wait.h>
35
36struct isp_video_queue;
37struct page;
38struct scatterlist;
39
40#define ISP_VIDEO_MAX_BUFFERS 16
41
42/**
43 * enum isp_video_buffer_state - ISP video buffer state
44 * @ISP_BUF_STATE_IDLE: The buffer is under userspace control (dequeued
45 * or not queued yet).
46 * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
47 * device yet.
48 * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
49 * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
50 * occurred. For capture devices the buffer likely contains corrupted data or
51 * no data at all.
52 * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error occurred.
53 * For capture devices the buffer contains valid data.
54 */
55enum isp_video_buffer_state {
56 ISP_BUF_STATE_IDLE,
57 ISP_BUF_STATE_QUEUED,
58 ISP_BUF_STATE_ACTIVE,
59 ISP_BUF_STATE_ERROR,
60 ISP_BUF_STATE_DONE,
61};
62
63/**
64 * struct isp_video_buffer - ISP video buffer
65 * @vma_use_count: Number of times the buffer is mmap'ed to userspace
66 * @stream: List head for insertion into main queue
67 * @queue: ISP buffers queue this buffer belongs to
68 * @prepared: Whether the buffer has been prepared
69 * @skip_cache: Whether to skip cache management operations for this buffer
70 * @vaddr: Memory virtual address (for kernel buffers)
71 * @vm_flags: Buffer VMA flags (for userspace buffers)
72 * @offset: Offset inside the first page (for userspace buffers)
73 * @npages: Number of pages (for userspace buffers)
74 * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
75 * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
76 * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
77 * @sglist: Scatter list (for non-VM_PFNMAP buffers)
78 * @vbuf: V4L2 buffer
79 * @irqlist: List head for insertion into IRQ queue
80 * @state: Current buffer state
81 * @wait: Wait queue to signal buffer completion
82 */
83struct isp_video_buffer {
84 unsigned long vma_use_count;
85 struct list_head stream;
86 struct isp_video_queue *queue;
87 unsigned int prepared:1;
88 bool skip_cache;
89
90 /* For kernel buffers. */
91 void *vaddr;
92
93 /* For userspace buffers. */
94 vm_flags_t vm_flags;
95 unsigned long offset;
96 unsigned int npages;
97 struct page **pages;
98 dma_addr_t paddr;
99
100 /* For all buffers except VM_PFNMAP. */
101 unsigned int sglen;
102 struct scatterlist *sglist;
103
104 /* Touched by the interrupt handler. */
105 struct v4l2_buffer vbuf;
106 struct list_head irqlist;
107 enum isp_video_buffer_state state;
108 wait_queue_head_t wait;
109};
110
111#define to_isp_video_buffer(vb) container_of(vb, struct isp_video_buffer, vb)
112
113/**
114 * struct isp_video_queue_operations - Driver-specific operations
115 * @queue_prepare: Called before allocating buffers. Drivers should clamp the
116 * number of buffers according to their requirements, and must return the
117 * buffer size in bytes.
118 * @buffer_prepare: Called the first time a buffer is queued, or after changing
119 * the userspace memory address for a USERPTR buffer, with the queue lock
120 * held. Drivers should perform device-specific buffer preparation (such as
121 * mapping the buffer memory in an IOMMU). This operation is optional.
122 * @buffer_queue: Called when a buffer is being added to the queue with the
123 * queue irqlock spinlock held.
124 * @buffer_cleanup: Called before freeing buffers, or before changing the
125 * userspace memory address for a USERPTR buffer, with the queue lock held.
126 * Drivers must perform cleanup operations required to undo the
127 * buffer_prepare call. This operation is optional.
128 */
129struct isp_video_queue_operations {
130 void (*queue_prepare)(struct isp_video_queue *queue,
131 unsigned int *nbuffers, unsigned int *size);
132 int (*buffer_prepare)(struct isp_video_buffer *buf);
133 void (*buffer_queue)(struct isp_video_buffer *buf);
134 void (*buffer_cleanup)(struct isp_video_buffer *buf);
135};
136
137/**
138 * struct isp_video_queue - ISP video buffers queue
139 * @type: Type of video buffers handled by this queue
140 * @ops: Queue operations
141 * @dev: Device used for DMA operations
142 * @bufsize: Size of a driver-specific buffer object
143 * @count: Number of currently allocated buffers
144 * @buffers: ISP video buffers
145 * @lock: Mutex to protect access to the buffers, main queue and state
146 * @irqlock: Spinlock to protect access to the IRQ queue
147 * @streaming: Queue state, indicates whether the queue is streaming
148 * @queue: List of all queued buffers
149 */
150struct isp_video_queue {
151 enum v4l2_buf_type type;
152 const struct isp_video_queue_operations *ops;
153 struct device *dev;
154 unsigned int bufsize;
155
156 unsigned int count;
157 struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
158 struct mutex lock;
159 spinlock_t irqlock;
160
161 unsigned int streaming:1;
162
163 struct list_head queue;
164};
165
166int omap3isp_video_queue_cleanup(struct isp_video_queue *queue);
167int omap3isp_video_queue_init(struct isp_video_queue *queue,
168 enum v4l2_buf_type type,
169 const struct isp_video_queue_operations *ops,
170 struct device *dev, unsigned int bufsize);
171
172int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
173 struct v4l2_requestbuffers *rb);
174int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
175 struct v4l2_buffer *vbuf);
176int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
177 struct v4l2_buffer *vbuf);
178int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
179 struct v4l2_buffer *vbuf, int nonblocking);
180int omap3isp_video_queue_streamon(struct isp_video_queue *queue);
181void omap3isp_video_queue_streamoff(struct isp_video_queue *queue);
182void omap3isp_video_queue_discard_done(struct isp_video_queue *queue);
183int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
184 struct vm_area_struct *vma);
185unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
186 struct file *file, poll_table *wait);
187
188#endif /* OMAP3_ISP_QUEUE_H */
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 86369df81d74..6f077c2377db 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1040,7 +1040,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
1040 */ 1040 */
1041 buffer = omap3isp_video_buffer_next(&res->video_out); 1041 buffer = omap3isp_video_buffer_next(&res->video_out);
1042 if (buffer != NULL) { 1042 if (buffer != NULL) {
1043 resizer_set_outaddr(res, buffer->isp_addr); 1043 resizer_set_outaddr(res, buffer->dma);
1044 restart = 1; 1044 restart = 1;
1045 } 1045 }
1046 1046
@@ -1049,7 +1049,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
1049 if (res->input == RESIZER_INPUT_MEMORY) { 1049 if (res->input == RESIZER_INPUT_MEMORY) {
1050 buffer = omap3isp_video_buffer_next(&res->video_in); 1050 buffer = omap3isp_video_buffer_next(&res->video_in);
1051 if (buffer != NULL) 1051 if (buffer != NULL)
1052 resizer_set_inaddr(res, buffer->isp_addr); 1052 resizer_set_inaddr(res, buffer->dma);
1053 pipe->state |= ISP_PIPELINE_IDLE_INPUT; 1053 pipe->state |= ISP_PIPELINE_IDLE_INPUT;
1054 } 1054 }
1055 1055
@@ -1101,7 +1101,7 @@ static int resizer_video_queue(struct isp_video *video,
1101 struct isp_res_device *res = &video->isp->isp_res; 1101 struct isp_res_device *res = &video->isp->isp_res;
1102 1102
1103 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 1103 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1104 resizer_set_inaddr(res, buffer->isp_addr); 1104 resizer_set_inaddr(res, buffer->dma);
1105 1105
1106 /* 1106 /*
1107 * We now have a buffer queued on the output. Despite what the 1107 * We now have a buffer queued on the output. Despite what the
@@ -1116,7 +1116,7 @@ static int resizer_video_queue(struct isp_video *video,
1116 * continuous mode or when starting the stream. 1116 * continuous mode or when starting the stream.
1117 */ 1117 */
1118 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1118 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1119 resizer_set_outaddr(res, buffer->isp_addr); 1119 resizer_set_outaddr(res, buffer->dma);
1120 1120
1121 return 0; 1121 return 0;
1122} 1122}
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 5707f85c4cc4..e6cbc1eaf4ca 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,13 +26,12 @@
26 */ 26 */
27 27
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/omap-iommu.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <linux/uaccess.h> 30#include <linux/uaccess.h>
32 31
33#include "isp.h" 32#include "isp.h"
34 33
35#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0) 34#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch >= 0)
36 35
37/* 36/*
38 * MAGIC_SIZE must always be the greatest common divisor of 37 * MAGIC_SIZE must always be the greatest common divisor of
@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat,
77 dma_addr_t, unsigned long, size_t, 76 dma_addr_t, unsigned long, size_t,
78 enum dma_data_direction)) 77 enum dma_data_direction))
79{ 78{
80 struct device *dev = stat->isp->dev; 79 /* Sync the initial and final magic words. */
81 struct page *pg; 80 dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
82 dma_addr_t dma_addr; 81 dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
83 u32 offset; 82 buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
84
85 /* Initial magic words */
86 pg = vmalloc_to_page(buf->virt_addr);
87 dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
88 dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
89
90 /* Final magic words */
91 pg = vmalloc_to_page(buf->virt_addr + buf_size);
92 dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
93 offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
94 dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
95} 83}
96 84
97static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, 85static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
@@ -99,7 +87,7 @@ static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
99 u32 buf_size, 87 u32 buf_size,
100 enum dma_data_direction dir) 88 enum dma_data_direction dir)
101{ 89{
102 if (IS_COHERENT_BUF(stat)) 90 if (ISP_STAT_USES_DMAENGINE(stat))
103 return; 91 return;
104 92
105 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 93 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -111,7 +99,7 @@ static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
111 u32 buf_size, 99 u32 buf_size,
112 enum dma_data_direction dir) 100 enum dma_data_direction dir)
113{ 101{
114 if (IS_COHERENT_BUF(stat)) 102 if (ISP_STAT_USES_DMAENGINE(stat))
115 return; 103 return;
116 104
117 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 105 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -180,21 +168,21 @@ static void isp_stat_buf_insert_magic(struct ispstat *stat,
180static void isp_stat_buf_sync_for_device(struct ispstat *stat, 168static void isp_stat_buf_sync_for_device(struct ispstat *stat,
181 struct ispstat_buffer *buf) 169 struct ispstat_buffer *buf)
182{ 170{
183 if (IS_COHERENT_BUF(stat)) 171 if (ISP_STAT_USES_DMAENGINE(stat))
184 return; 172 return;
185 173
186 dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, 174 dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
187 buf->iovm->sgt->nents, DMA_FROM_DEVICE); 175 buf->sgt.nents, DMA_FROM_DEVICE);
188} 176}
189 177
190static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, 178static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
191 struct ispstat_buffer *buf) 179 struct ispstat_buffer *buf)
192{ 180{
193 if (IS_COHERENT_BUF(stat)) 181 if (ISP_STAT_USES_DMAENGINE(stat))
194 return; 182 return;
195 183
196 dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, 184 dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
197 buf->iovm->sgt->nents, DMA_FROM_DEVICE); 185 buf->sgt.nents, DMA_FROM_DEVICE);
198} 186}
199 187
200static void isp_stat_buf_clear(struct ispstat *stat) 188static void isp_stat_buf_clear(struct ispstat *stat)
@@ -354,29 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
354 342
355static void isp_stat_bufs_free(struct ispstat *stat) 343static void isp_stat_bufs_free(struct ispstat *stat)
356{ 344{
357 struct isp_device *isp = stat->isp; 345 struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
358 int i; 346 ? NULL : stat->isp->dev;
347 unsigned int i;
359 348
360 for (i = 0; i < STAT_MAX_BUFS; i++) { 349 for (i = 0; i < STAT_MAX_BUFS; i++) {
361 struct ispstat_buffer *buf = &stat->buf[i]; 350 struct ispstat_buffer *buf = &stat->buf[i];
362 351
363 if (!IS_COHERENT_BUF(stat)) { 352 if (!buf->virt_addr)
364 if (IS_ERR_OR_NULL((void *)buf->iommu_addr)) 353 continue;
365 continue; 354
366 if (buf->iovm) 355 sg_free_table(&buf->sgt);
367 dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, 356
368 buf->iovm->sgt->nents, 357 dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
369 DMA_FROM_DEVICE); 358 buf->dma_addr);
370 omap_iommu_vfree(isp->domain, isp->dev, 359
371 buf->iommu_addr);
372 } else {
373 if (!buf->virt_addr)
374 continue;
375 dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
376 buf->virt_addr, buf->dma_addr);
377 }
378 buf->iommu_addr = 0;
379 buf->iovm = NULL;
380 buf->dma_addr = 0; 360 buf->dma_addr = 0;
381 buf->virt_addr = NULL; 361 buf->virt_addr = NULL;
382 buf->empty = 1; 362 buf->empty = 1;
@@ -389,83 +369,51 @@ static void isp_stat_bufs_free(struct ispstat *stat)
389 stat->active_buf = NULL; 369 stat->active_buf = NULL;
390} 370}
391 371
392static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) 372static int isp_stat_bufs_alloc_one(struct device *dev,
393{ 373 struct ispstat_buffer *buf,
394 struct isp_device *isp = stat->isp; 374 unsigned int size)
395 int i;
396
397 stat->buf_alloc_size = size;
398
399 for (i = 0; i < STAT_MAX_BUFS; i++) {
400 struct ispstat_buffer *buf = &stat->buf[i];
401 struct iovm_struct *iovm;
402
403 WARN_ON(buf->dma_addr);
404 buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
405 size, IOMMU_FLAG);
406 if (IS_ERR((void *)buf->iommu_addr)) {
407 dev_err(stat->isp->dev,
408 "%s: Can't acquire memory for "
409 "buffer %d\n", stat->subdev.name, i);
410 isp_stat_bufs_free(stat);
411 return -ENOMEM;
412 }
413
414 iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
415 if (!iovm ||
416 !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
417 DMA_FROM_DEVICE)) {
418 isp_stat_bufs_free(stat);
419 return -ENOMEM;
420 }
421 buf->iovm = iovm;
422
423 buf->virt_addr = omap_da_to_va(stat->isp->dev,
424 (u32)buf->iommu_addr);
425 buf->empty = 1;
426 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
427 "iommu_addr=0x%08lx virt_addr=0x%08lx",
428 stat->subdev.name, i, buf->iommu_addr,
429 (unsigned long)buf->virt_addr);
430 }
431
432 return 0;
433}
434
435static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
436{ 375{
437 int i; 376 int ret;
438
439 stat->buf_alloc_size = size;
440
441 for (i = 0; i < STAT_MAX_BUFS; i++) {
442 struct ispstat_buffer *buf = &stat->buf[i];
443
444 WARN_ON(buf->iommu_addr);
445 buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
446 &buf->dma_addr, GFP_KERNEL | GFP_DMA);
447 377
448 if (!buf->virt_addr || !buf->dma_addr) { 378 buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
449 dev_info(stat->isp->dev, 379 GFP_KERNEL | GFP_DMA);
450 "%s: Can't acquire memory for " 380 if (!buf->virt_addr)
451 "DMA buffer %d\n", stat->subdev.name, i); 381 return -ENOMEM;
452 isp_stat_bufs_free(stat);
453 return -ENOMEM;
454 }
455 buf->empty = 1;
456 382
457 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." 383 ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
458 "dma_addr=0x%08lx virt_addr=0x%08lx\n", 384 size);
459 stat->subdev.name, i, (unsigned long)buf->dma_addr, 385 if (ret < 0) {
460 (unsigned long)buf->virt_addr); 386 dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
387 buf->virt_addr = NULL;
388 buf->dma_addr = 0;
389 return ret;
461 } 390 }
462 391
463 return 0; 392 return 0;
464} 393}
465 394
395/*
396 * The device passed to the DMA API depends on whether the statistics block uses
397 * ISP DMA, external DMA or PIO to transfer data.
398 *
399 * The first case (for the AEWB and AF engines) passes the ISP device, resulting
400 * in the DMA buffers being mapped through the ISP IOMMU.
401 *
402 * The second case (for the histogram engine) should pass the DMA engine device.
403 * As that device isn't accessible through the OMAP DMA engine API the driver
404 * passes NULL instead, resulting in the buffers being mapped directly as
405 * physical pages.
406 *
407 * The third case (for the histogram engine) doesn't require any mapping. The
408 * buffers could be allocated with kmalloc/vmalloc, but we still use
409 * dma_alloc_coherent() for consistency purposes.
410 */
466static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) 411static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
467{ 412{
413 struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
414 ? NULL : stat->isp->dev;
468 unsigned long flags; 415 unsigned long flags;
416 unsigned int i;
469 417
470 spin_lock_irqsave(&stat->isp->stat_lock, flags); 418 spin_lock_irqsave(&stat->isp->stat_lock, flags);
471 419
@@ -489,10 +437,31 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
489 437
490 isp_stat_bufs_free(stat); 438 isp_stat_bufs_free(stat);
491 439
492 if (IS_COHERENT_BUF(stat)) 440 stat->buf_alloc_size = size;
493 return isp_stat_bufs_alloc_dma(stat, size); 441
494 else 442 for (i = 0; i < STAT_MAX_BUFS; i++) {
495 return isp_stat_bufs_alloc_iommu(stat, size); 443 struct ispstat_buffer *buf = &stat->buf[i];
444 int ret;
445
446 ret = isp_stat_bufs_alloc_one(dev, buf, size);
447 if (ret < 0) {
448 dev_err(stat->isp->dev,
449 "%s: Failed to allocate DMA buffer %u\n",
450 stat->subdev.name, i);
451 isp_stat_bufs_free(stat);
452 return ret;
453 }
454
455 buf->empty = 1;
456
457 dev_dbg(stat->isp->dev,
458 "%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx",
459 stat->subdev.name, i,
460 (unsigned long)buf->dma_addr,
461 (unsigned long)buf->virt_addr);
462 }
463
464 return 0;
496} 465}
497 466
498static void isp_stat_queue_event(struct ispstat *stat, int err) 467static void isp_stat_queue_event(struct ispstat *stat, int err)
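/*
 * Illustrative sketch, not part of this patch: the allocation/synchronization
 * lifecycle that the new isp_stat_bufs_alloc_one() and the sync helpers rely
 * on, condensed into one hypothetical function. As explained in the comment
 * above isp_stat_bufs_alloc(), 'dev' may be NULL for the DMA-engine/PIO cases.
 */
static int example_stat_buffer_lifecycle(struct device *dev, size_t size)
{
	struct sg_table sgt;
	dma_addr_t dma_addr;
	void *virt;
	int ret;

	virt = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
	if (!virt)
		return -ENOMEM;

	ret = dma_get_sgtable(dev, &sgt, virt, dma_addr, size);
	if (ret < 0)
		goto free;

	/* Sync around CPU accesses when the mapping isn't truly coherent. */
	dma_sync_sg_for_cpu(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);
	/* ... read the statistics data at 'virt' ... */
	dma_sync_sg_for_device(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);

	sg_free_table(&sgt);
free:
	dma_free_coherent(dev, size, virt, dma_addr);
	return ret;
}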
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 9a047c929b9f..58d6ac7cb664 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -46,8 +46,7 @@
46struct ispstat; 46struct ispstat;
47 47
48struct ispstat_buffer { 48struct ispstat_buffer {
49 unsigned long iommu_addr; 49 struct sg_table sgt;
50 struct iovm_struct *iovm;
51 void *virt_addr; 50 void *virt_addr;
52 dma_addr_t dma_addr; 51 dma_addr_t dma_addr;
53 struct timespec ts; 52 struct timespec ts;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 85b4036ba5e4..e36bac26476c 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -27,7 +27,6 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/omap-iommu.h>
31#include <linux/pagemap.h> 30#include <linux/pagemap.h>
32#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
33#include <linux/sched.h> 32#include <linux/sched.h>
@@ -35,6 +34,7 @@
35#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
36#include <media/v4l2-dev.h> 35#include <media/v4l2-dev.h>
37#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
37#include <media/videobuf2-dma-contig.h>
38 38
39#include "ispvideo.h" 39#include "ispvideo.h"
40#include "isp.h" 40#include "isp.h"
@@ -326,90 +326,36 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
326} 326}
327 327
328/* ----------------------------------------------------------------------------- 328/* -----------------------------------------------------------------------------
329 * IOMMU management
330 */
331
332#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
333
334/*
335 * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
336 * @isp: Device pointer specific to the OMAP3 ISP.
337 * @sglist: Pointer to source Scatter gather list to allocate.
338 * @sglen: Number of elements of the scatter-gatter list.
339 *
340 * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
341 * we ran out of memory.
342 */
343static dma_addr_t
344ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
345{
346 struct sg_table *sgt;
347 u32 da;
348
349 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
350 if (sgt == NULL)
351 return -ENOMEM;
352
353 sgt->sgl = (struct scatterlist *)sglist;
354 sgt->nents = sglen;
355 sgt->orig_nents = sglen;
356
357 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
358 if (IS_ERR_VALUE(da))
359 kfree(sgt);
360
361 return da;
362}
363
364/*
365 * ispmmu_vunmap - Unmap a device address from the ISP MMU
366 * @isp: Device pointer specific to the OMAP3 ISP.
367 * @da: Device address generated from a ispmmu_vmap call.
368 */
369static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
370{
371 struct sg_table *sgt;
372
373 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
374 kfree(sgt);
375}
376
377/* -----------------------------------------------------------------------------
378 * Video queue operations 329 * Video queue operations
379 */ 330 */
380 331
381static void isp_video_queue_prepare(struct isp_video_queue *queue, 332static int isp_video_queue_setup(struct vb2_queue *queue,
382 unsigned int *nbuffers, unsigned int *size) 333 const struct v4l2_format *fmt,
334 unsigned int *count, unsigned int *num_planes,
335 unsigned int sizes[], void *alloc_ctxs[])
383{ 336{
384 struct isp_video_fh *vfh = 337 struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
385 container_of(queue, struct isp_video_fh, queue);
386 struct isp_video *video = vfh->video; 338 struct isp_video *video = vfh->video;
387 339
388 *size = vfh->format.fmt.pix.sizeimage; 340 *num_planes = 1;
389 if (*size == 0)
390 return;
391 341
392 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size)); 342 sizes[0] = vfh->format.fmt.pix.sizeimage;
393} 343 if (sizes[0] == 0)
344 return -EINVAL;
394 345
395static void isp_video_buffer_cleanup(struct isp_video_buffer *buf) 346 alloc_ctxs[0] = video->alloc_ctx;
396{
397 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
398 struct isp_buffer *buffer = to_isp_buffer(buf);
399 struct isp_video *video = vfh->video;
400 347
401 if (buffer->isp_addr) { 348 *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
402 ispmmu_vunmap(video->isp, buffer->isp_addr); 349
403 buffer->isp_addr = 0; 350 return 0;
404 }
405} 351}
406 352
407static int isp_video_buffer_prepare(struct isp_video_buffer *buf) 353static int isp_video_buffer_prepare(struct vb2_buffer *buf)
408{ 354{
409 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); 355 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
410 struct isp_buffer *buffer = to_isp_buffer(buf); 356 struct isp_buffer *buffer = to_isp_buffer(buf);
411 struct isp_video *video = vfh->video; 357 struct isp_video *video = vfh->video;
412 unsigned long addr; 358 dma_addr_t addr;
413 359
414	/* Refuse to prepare the buffer if the video node has registered an 360	/* Refuse to prepare the buffer if the video node has registered an
415 * error. We don't need to take any lock here as the operation is 361 * error. We don't need to take any lock here as the operation is
@@ -420,19 +366,16 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
420 if (unlikely(video->error)) 366 if (unlikely(video->error))
421 return -EIO; 367 return -EIO;
422 368
423 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen); 369 addr = vb2_dma_contig_plane_dma_addr(buf, 0);
424 if (IS_ERR_VALUE(addr))
425 return -EIO;
426
427 if (!IS_ALIGNED(addr, 32)) { 370 if (!IS_ALIGNED(addr, 32)) {
428 dev_dbg(video->isp->dev, "Buffer address must be " 371 dev_dbg(video->isp->dev,
429 "aligned to 32 bytes boundary.\n"); 372 "Buffer address must be aligned to 32 bytes boundary.\n");
430 ispmmu_vunmap(video->isp, buffer->isp_addr);
431 return -EINVAL; 373 return -EINVAL;
432 } 374 }
433 375
434 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage; 376 vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
435 buffer->isp_addr = addr; 377 buffer->dma = addr;
378
436 return 0; 379 return 0;
437} 380}
438 381
@@ -445,9 +388,9 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
445 * If the pipeline is busy, it will be restarted in the output module interrupt 388 * If the pipeline is busy, it will be restarted in the output module interrupt
446 * handler. 389 * handler.
447 */ 390 */
448static void isp_video_buffer_queue(struct isp_video_buffer *buf) 391static void isp_video_buffer_queue(struct vb2_buffer *buf)
449{ 392{
450 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); 393 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
451 struct isp_buffer *buffer = to_isp_buffer(buf); 394 struct isp_buffer *buffer = to_isp_buffer(buf);
452 struct isp_video *video = vfh->video; 395 struct isp_video *video = vfh->video;
453 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 396 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
@@ -456,14 +399,18 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
456 unsigned int empty; 399 unsigned int empty;
457 unsigned int start; 400 unsigned int start;
458 401
402 spin_lock_irqsave(&video->irqlock, flags);
403
459 if (unlikely(video->error)) { 404 if (unlikely(video->error)) {
460 buf->state = ISP_BUF_STATE_ERROR; 405 vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
461 wake_up(&buf->wait); 406 spin_unlock_irqrestore(&video->irqlock, flags);
462 return; 407 return;
463 } 408 }
464 409
465 empty = list_empty(&video->dmaqueue); 410 empty = list_empty(&video->dmaqueue);
466 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue); 411 list_add_tail(&buffer->irqlist, &video->dmaqueue);
412
413 spin_unlock_irqrestore(&video->irqlock, flags);
467 414
468 if (empty) { 415 if (empty) {
469 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 416 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -487,23 +434,22 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
487 } 434 }
488} 435}
489 436
490static const struct isp_video_queue_operations isp_video_queue_ops = { 437static const struct vb2_ops isp_video_queue_ops = {
491 .queue_prepare = &isp_video_queue_prepare, 438 .queue_setup = isp_video_queue_setup,
492 .buffer_prepare = &isp_video_buffer_prepare, 439 .buf_prepare = isp_video_buffer_prepare,
493 .buffer_queue = &isp_video_buffer_queue, 440 .buf_queue = isp_video_buffer_queue,
494 .buffer_cleanup = &isp_video_buffer_cleanup,
495}; 441};
496 442
497/* 443/*
498 * omap3isp_video_buffer_next - Complete the current buffer and return the next 444 * omap3isp_video_buffer_next - Complete the current buffer and return the next
499 * @video: ISP video object 445 * @video: ISP video object
500 * 446 *
501 * Remove the current video buffer from the DMA queue and fill its timestamp, 447 * Remove the current video buffer from the DMA queue and fill its timestamp and
502 * field count and state fields before waking up its completion handler. 448 * field count before handing it back to videobuf2.
503 * 449 *
504 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no 450 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
505 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise. 451 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
506 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE. 452 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
507 * 453 *
508 * The DMA queue is expected to contain at least one buffer. 454 * The DMA queue is expected to contain at least one buffer.
509 * 455 *
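/*
 * Illustrative sketch, not part of this patch: after the conversion to the
 * vb2_ops above, the driver needs a matching vb2 queue initialization roughly
 * along these lines (the function name is hypothetical; the actual setup code
 * lives in a part of the patch not shown in this hunk).
 */
static int example_vb2_queue_setup(struct isp_video_fh *vfh,
				   struct isp_video *video)
{
	struct vb2_queue *q = &vfh->queue;

	q->type = video->type;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = vfh;
	q->ops = &isp_video_queue_ops;		/* the vb2_ops defined above */
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct isp_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	return vb2_queue_init(q);
}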
@@ -513,26 +459,25 @@ static const struct isp_video_queue_operations isp_video_queue_ops = {
513struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) 459struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
514{ 460{
515 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 461 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
516 struct isp_video_queue *queue = video->queue;
517 enum isp_pipeline_state state; 462 enum isp_pipeline_state state;
518 struct isp_video_buffer *buf; 463 struct isp_buffer *buf;
519 unsigned long flags; 464 unsigned long flags;
520 struct timespec ts; 465 struct timespec ts;
521 466
522 spin_lock_irqsave(&queue->irqlock, flags); 467 spin_lock_irqsave(&video->irqlock, flags);
523 if (WARN_ON(list_empty(&video->dmaqueue))) { 468 if (WARN_ON(list_empty(&video->dmaqueue))) {
524 spin_unlock_irqrestore(&queue->irqlock, flags); 469 spin_unlock_irqrestore(&video->irqlock, flags);
525 return NULL; 470 return NULL;
526 } 471 }
527 472
528 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, 473 buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
529 irqlist); 474 irqlist);
530 list_del(&buf->irqlist); 475 list_del(&buf->irqlist);
531 spin_unlock_irqrestore(&queue->irqlock, flags); 476 spin_unlock_irqrestore(&video->irqlock, flags);
532 477
533 ktime_get_ts(&ts); 478 ktime_get_ts(&ts);
534 buf->vbuf.timestamp.tv_sec = ts.tv_sec; 479 buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
535 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; 480 buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
536 481
537 /* Do frame number propagation only if this is the output video node. 482 /* Do frame number propagation only if this is the output video node.
538 * Frame number either comes from the CSI receivers or it gets 483 * Frame number either comes from the CSI receivers or it gets
@@ -541,22 +486,27 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
541 * first, so the input number might lag behind by 1 in some cases. 486 * first, so the input number might lag behind by 1 in some cases.
542 */ 487 */
543 if (video == pipe->output && !pipe->do_propagation) 488 if (video == pipe->output && !pipe->do_propagation)
544 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number); 489 buf->vb.v4l2_buf.sequence =
490 atomic_inc_return(&pipe->frame_number);
545 else 491 else
546 buf->vbuf.sequence = atomic_read(&pipe->frame_number); 492 buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
547 493
548 /* Report pipeline errors to userspace on the capture device side. */ 494 /* Report pipeline errors to userspace on the capture device side. */
549 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { 495 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
550 buf->state = ISP_BUF_STATE_ERROR; 496 state = VB2_BUF_STATE_ERROR;
551 pipe->error = false; 497 pipe->error = false;
552 } else { 498 } else {
553 buf->state = ISP_BUF_STATE_DONE; 499 state = VB2_BUF_STATE_DONE;
554 } 500 }
555 501
556 wake_up(&buf->wait); 502 vb2_buffer_done(&buf->vb, state);
503
504 spin_lock_irqsave(&video->irqlock, flags);
557 505
558 if (list_empty(&video->dmaqueue)) { 506 if (list_empty(&video->dmaqueue)) {
559 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 507 spin_unlock_irqrestore(&video->irqlock, flags);
508
509 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
560 state = ISP_PIPELINE_QUEUE_OUTPUT 510 state = ISP_PIPELINE_QUEUE_OUTPUT
561 | ISP_PIPELINE_STREAM; 511 | ISP_PIPELINE_STREAM;
562 else 512 else
@@ -571,16 +521,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
571 return NULL; 521 return NULL;
572 } 522 }
573 523
574 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) { 524 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
575 spin_lock_irqsave(&pipe->lock, flags); 525 spin_lock(&pipe->lock);
576 pipe->state &= ~ISP_PIPELINE_STREAM; 526 pipe->state &= ~ISP_PIPELINE_STREAM;
577 spin_unlock_irqrestore(&pipe->lock, flags); 527 spin_unlock(&pipe->lock);
578 } 528 }
579 529
580 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, 530 buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
581 irqlist); 531 irqlist);
582 buf->state = ISP_BUF_STATE_ACTIVE; 532 buf->vb.state = VB2_BUF_STATE_ACTIVE;
583 return to_isp_buffer(buf); 533
534 spin_unlock_irqrestore(&video->irqlock, flags);
535
536 return buf;
584} 537}
585 538
586/* 539/*
@@ -592,25 +545,22 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
592 */ 545 */
593void omap3isp_video_cancel_stream(struct isp_video *video) 546void omap3isp_video_cancel_stream(struct isp_video *video)
594{ 547{
595 struct isp_video_queue *queue = video->queue;
596 unsigned long flags; 548 unsigned long flags;
597 549
598 spin_lock_irqsave(&queue->irqlock, flags); 550 spin_lock_irqsave(&video->irqlock, flags);
599 551
600 while (!list_empty(&video->dmaqueue)) { 552 while (!list_empty(&video->dmaqueue)) {
601 struct isp_video_buffer *buf; 553 struct isp_buffer *buf;
602 554
603 buf = list_first_entry(&video->dmaqueue, 555 buf = list_first_entry(&video->dmaqueue,
604 struct isp_video_buffer, irqlist); 556 struct isp_buffer, irqlist);
605 list_del(&buf->irqlist); 557 list_del(&buf->irqlist);
606 558 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
607 buf->state = ISP_BUF_STATE_ERROR;
608 wake_up(&buf->wait);
609 } 559 }
610 560
611 video->error = true; 561 video->error = true;
612 562
613 spin_unlock_irqrestore(&queue->irqlock, flags); 563 spin_unlock_irqrestore(&video->irqlock, flags);
614} 564}
615 565
616/* 566/*
@@ -627,12 +577,15 @@ void omap3isp_video_resume(struct isp_video *video, int continuous)
627{ 577{
628 struct isp_buffer *buf = NULL; 578 struct isp_buffer *buf = NULL;
629 579
630 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 580 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
631 omap3isp_video_queue_discard_done(video->queue); 581 mutex_lock(&video->queue_lock);
582 vb2_discard_done(video->queue);
583 mutex_unlock(&video->queue_lock);
584 }
632 585
633 if (!list_empty(&video->dmaqueue)) { 586 if (!list_empty(&video->dmaqueue)) {
634 buf = list_first_entry(&video->dmaqueue, 587 buf = list_first_entry(&video->dmaqueue,
635 struct isp_buffer, buffer.irqlist); 588 struct isp_buffer, irqlist);
636 video->ops->queue(video, buf); 589 video->ops->queue(video, buf);
637 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; 590 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
638 } else { 591 } else {
@@ -840,33 +793,56 @@ static int
840isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) 793isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
841{ 794{
842 struct isp_video_fh *vfh = to_isp_video_fh(fh); 795 struct isp_video_fh *vfh = to_isp_video_fh(fh);
796 struct isp_video *video = video_drvdata(file);
797 int ret;
843 798
844 return omap3isp_video_queue_reqbufs(&vfh->queue, rb); 799 mutex_lock(&video->queue_lock);
800 ret = vb2_reqbufs(&vfh->queue, rb);
801 mutex_unlock(&video->queue_lock);
802
803 return ret;
845} 804}
846 805
847static int 806static int
848isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) 807isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
849{ 808{
850 struct isp_video_fh *vfh = to_isp_video_fh(fh); 809 struct isp_video_fh *vfh = to_isp_video_fh(fh);
810 struct isp_video *video = video_drvdata(file);
811 int ret;
812
813 mutex_lock(&video->queue_lock);
814 ret = vb2_querybuf(&vfh->queue, b);
815 mutex_unlock(&video->queue_lock);
851 816
852 return omap3isp_video_queue_querybuf(&vfh->queue, b); 817 return ret;
853} 818}
854 819
855static int 820static int
856isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) 821isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
857{ 822{
858 struct isp_video_fh *vfh = to_isp_video_fh(fh); 823 struct isp_video_fh *vfh = to_isp_video_fh(fh);
824 struct isp_video *video = video_drvdata(file);
825 int ret;
859 826
860 return omap3isp_video_queue_qbuf(&vfh->queue, b); 827 mutex_lock(&video->queue_lock);
828 ret = vb2_qbuf(&vfh->queue, b);
829 mutex_unlock(&video->queue_lock);
830
831 return ret;
861} 832}
862 833
863static int 834static int
864isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) 835isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
865{ 836{
866 struct isp_video_fh *vfh = to_isp_video_fh(fh); 837 struct isp_video_fh *vfh = to_isp_video_fh(fh);
838 struct isp_video *video = video_drvdata(file);
839 int ret;
840
841 mutex_lock(&video->queue_lock);
842 ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
843 mutex_unlock(&video->queue_lock);
867 844
868 return omap3isp_video_queue_dqbuf(&vfh->queue, b, 845 return ret;
869 file->f_flags & O_NONBLOCK);
870} 846}
871 847
872static int isp_video_check_external_subdevs(struct isp_video *video, 848static int isp_video_check_external_subdevs(struct isp_video *video,
@@ -1006,11 +982,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1006 982
1007 mutex_lock(&video->stream_lock); 983 mutex_lock(&video->stream_lock);
1008 984
1009 if (video->streaming) {
1010 mutex_unlock(&video->stream_lock);
1011 return -EBUSY;
1012 }
1013
1014 /* Start streaming on the pipeline. No link touching an entity in the 985 /* Start streaming on the pipeline. No link touching an entity in the
1015 * pipeline can be activated or deactivated once streaming is started. 986 * pipeline can be activated or deactivated once streaming is started.
1016 */ 987 */
@@ -1069,7 +1040,9 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1069 INIT_LIST_HEAD(&video->dmaqueue); 1040 INIT_LIST_HEAD(&video->dmaqueue);
1070 atomic_set(&pipe->frame_number, -1); 1041 atomic_set(&pipe->frame_number, -1);
1071 1042
1072 ret = omap3isp_video_queue_streamon(&vfh->queue); 1043 mutex_lock(&video->queue_lock);
1044 ret = vb2_streamon(&vfh->queue, type);
1045 mutex_unlock(&video->queue_lock);
1073 if (ret < 0) 1046 if (ret < 0)
1074 goto err_check_format; 1047 goto err_check_format;
1075 1048
@@ -1082,19 +1055,19 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1082 ISP_PIPELINE_STREAM_CONTINUOUS); 1055 ISP_PIPELINE_STREAM_CONTINUOUS);
1083 if (ret < 0) 1056 if (ret < 0)
1084 goto err_set_stream; 1057 goto err_set_stream;
1085 spin_lock_irqsave(&video->queue->irqlock, flags); 1058 spin_lock_irqsave(&video->irqlock, flags);
1086 if (list_empty(&video->dmaqueue)) 1059 if (list_empty(&video->dmaqueue))
1087 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; 1060 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1088 spin_unlock_irqrestore(&video->queue->irqlock, flags); 1061 spin_unlock_irqrestore(&video->irqlock, flags);
1089 } 1062 }
1090 1063
1091 video->streaming = 1;
1092
1093 mutex_unlock(&video->stream_lock); 1064 mutex_unlock(&video->stream_lock);
1094 return 0; 1065 return 0;
1095 1066
1096err_set_stream: 1067err_set_stream:
1097 omap3isp_video_queue_streamoff(&vfh->queue); 1068 mutex_lock(&video->queue_lock);
1069 vb2_streamoff(&vfh->queue, type);
1070 mutex_unlock(&video->queue_lock);
1098err_check_format: 1071err_check_format:
1099 media_entity_pipeline_stop(&video->video.entity); 1072 media_entity_pipeline_stop(&video->video.entity);
1100err_pipeline_start: 1073err_pipeline_start:
@@ -1130,9 +1103,9 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1130 mutex_lock(&video->stream_lock); 1103 mutex_lock(&video->stream_lock);
1131 1104
1132 /* Make sure we're not streaming yet. */ 1105 /* Make sure we're not streaming yet. */
1133 mutex_lock(&vfh->queue.lock); 1106 mutex_lock(&video->queue_lock);
1134 streaming = vfh->queue.streaming; 1107 streaming = vb2_is_streaming(&vfh->queue);
1135 mutex_unlock(&vfh->queue.lock); 1108 mutex_unlock(&video->queue_lock);
1136 1109
1137 if (!streaming) 1110 if (!streaming)
1138 goto done; 1111 goto done;
@@ -1151,9 +1124,12 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1151 1124
1152 /* Stop the stream. */ 1125 /* Stop the stream. */
1153 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED); 1126 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1154 omap3isp_video_queue_streamoff(&vfh->queue); 1127 omap3isp_video_cancel_stream(video);
1128
1129 mutex_lock(&video->queue_lock);
1130 vb2_streamoff(&vfh->queue, type);
1131 mutex_unlock(&video->queue_lock);
1155 video->queue = NULL; 1132 video->queue = NULL;
1156 video->streaming = 0;
1157 video->error = false; 1133 video->error = false;
1158 1134
1159 if (video->isp->pdata->set_constraints) 1135 if (video->isp->pdata->set_constraints)
@@ -1223,6 +1199,7 @@ static int isp_video_open(struct file *file)
1223{ 1199{
1224 struct isp_video *video = video_drvdata(file); 1200 struct isp_video *video = video_drvdata(file);
1225 struct isp_video_fh *handle; 1201 struct isp_video_fh *handle;
1202 struct vb2_queue *queue;
1226 int ret = 0; 1203 int ret = 0;
1227 1204
1228 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 1205 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
@@ -1244,9 +1221,20 @@ static int isp_video_open(struct file *file)
1244 goto done; 1221 goto done;
1245 } 1222 }
1246 1223
1247 omap3isp_video_queue_init(&handle->queue, video->type, 1224 queue = &handle->queue;
1248 &isp_video_queue_ops, video->isp->dev, 1225 queue->type = video->type;
1249 sizeof(struct isp_buffer)); 1226 queue->io_modes = VB2_MMAP | VB2_USERPTR;
1227 queue->drv_priv = handle;
1228 queue->ops = &isp_video_queue_ops;
1229 queue->mem_ops = &vb2_dma_contig_memops;
1230 queue->buf_struct_size = sizeof(struct isp_buffer);
1231 queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1232
1233 ret = vb2_queue_init(&handle->queue);
1234 if (ret < 0) {
1235 omap3isp_put(video->isp);
1236 goto done;
1237 }
1250 1238
1251 memset(&handle->format, 0, sizeof(handle->format)); 1239 memset(&handle->format, 0, sizeof(handle->format));
1252 handle->format.type = video->type; 1240 handle->format.type = video->type;
@@ -1273,9 +1261,9 @@ static int isp_video_release(struct file *file)
1273 /* Disable streaming and free the buffers queue resources. */ 1261 /* Disable streaming and free the buffers queue resources. */
1274 isp_video_streamoff(file, vfh, video->type); 1262 isp_video_streamoff(file, vfh, video->type);
1275 1263
1276 mutex_lock(&handle->queue.lock); 1264 mutex_lock(&video->queue_lock);
1277 omap3isp_video_queue_cleanup(&handle->queue); 1265 vb2_queue_release(&handle->queue);
1278 mutex_unlock(&handle->queue.lock); 1266 mutex_unlock(&video->queue_lock);
1279 1267
1280 omap3isp_pipeline_pm_use(&video->video.entity, 0); 1268 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1281 1269
@@ -1292,16 +1280,27 @@ static int isp_video_release(struct file *file)
1292static unsigned int isp_video_poll(struct file *file, poll_table *wait) 1280static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1293{ 1281{
1294 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); 1282 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1295 struct isp_video_queue *queue = &vfh->queue; 1283 struct isp_video *video = video_drvdata(file);
1284 int ret;
1296 1285
1297 return omap3isp_video_queue_poll(queue, file, wait); 1286 mutex_lock(&video->queue_lock);
1287 ret = vb2_poll(&vfh->queue, file, wait);
1288 mutex_unlock(&video->queue_lock);
1289
1290 return ret;
1298} 1291}
1299 1292
1300static int isp_video_mmap(struct file *file, struct vm_area_struct *vma) 1293static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1301{ 1294{
1302 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); 1295 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1296 struct isp_video *video = video_drvdata(file);
1297 int ret;
1298
1299 mutex_lock(&video->queue_lock);
1300 ret = vb2_mmap(&vfh->queue, vma);
1301 mutex_unlock(&video->queue_lock);
1303 1302
1304 return omap3isp_video_queue_mmap(&vfh->queue, vma); 1303 return ret;
1305} 1304}
1306 1305
1307static struct v4l2_file_operations isp_video_fops = { 1306static struct v4l2_file_operations isp_video_fops = {
@@ -1342,15 +1341,23 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
1342 return -EINVAL; 1341 return -EINVAL;
1343 } 1342 }
1344 1343
1344 video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
1345 if (IS_ERR(video->alloc_ctx))
1346 return PTR_ERR(video->alloc_ctx);
1347
1345 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0); 1348 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1346 if (ret < 0) 1349 if (ret < 0) {
1350 vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
1347 return ret; 1351 return ret;
1352 }
1348 1353
1349 mutex_init(&video->mutex); 1354 mutex_init(&video->mutex);
1350 atomic_set(&video->active, 0); 1355 atomic_set(&video->active, 0);
1351 1356
1352 spin_lock_init(&video->pipe.lock); 1357 spin_lock_init(&video->pipe.lock);
1353 mutex_init(&video->stream_lock); 1358 mutex_init(&video->stream_lock);
1359 mutex_init(&video->queue_lock);
1360 spin_lock_init(&video->irqlock);
1354 1361
1355 /* Initialize the video device. */ 1362 /* Initialize the video device. */
1356 if (video->ops == NULL) 1363 if (video->ops == NULL)
@@ -1371,7 +1378,9 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
1371 1378
1372void omap3isp_video_cleanup(struct isp_video *video) 1379void omap3isp_video_cleanup(struct isp_video *video)
1373{ 1380{
1381 vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
1374 media_entity_cleanup(&video->video.entity); 1382 media_entity_cleanup(&video->video.entity);
1383 mutex_destroy(&video->queue_lock);
1375 mutex_destroy(&video->stream_lock); 1384 mutex_destroy(&video->stream_lock);
1376 mutex_destroy(&video->mutex); 1385 mutex_destroy(&video->mutex);
1377} 1386}
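
Note on the hunks above: the OMAP3 ISP video nodes move from the driver's private buffer queue to videobuf2 — buffers complete through vb2_buffer_done(), the per-file vb2_queue is set up in open(), and every vb2 entry point is serialized by the new per-device queue_lock. Because the side-by-side rendering is hard to follow, here is a consolidated sketch of the open()-time queue setup taken from those hunks (the error label is illustrative):

	struct vb2_queue *queue = &handle->queue;
	int ret;

	queue->type = video->type;			/* capture or output node */
	queue->io_modes = VB2_MMAP | VB2_USERPTR;	/* memory models offered to userspace */
	queue->drv_priv = handle;			/* handed back to the queue ops */
	queue->ops = &isp_video_queue_ops;		/* driver's vb2_ops */
	queue->mem_ops = &vb2_dma_contig_memops;	/* physically contiguous DMA buffers */
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	ret = vb2_queue_init(queue);
	if (ret < 0)
		goto err_put_isp;			/* undo omap3isp_get(); label illustrative */
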
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 4e194076cc60..7d2e82122ecd 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -30,8 +30,7 @@
30#include <media/media-entity.h> 30#include <media/media-entity.h>
31#include <media/v4l2-dev.h> 31#include <media/v4l2-dev.h>
32#include <media/v4l2-fh.h> 32#include <media/v4l2-fh.h>
33 33#include <media/videobuf2-core.h>
34#include "ispqueue.h"
35 34
36#define ISP_VIDEO_DRIVER_NAME "ispvideo" 35#define ISP_VIDEO_DRIVER_NAME "ispvideo"
37#define ISP_VIDEO_DRIVER_VERSION "0.0.2" 36#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
@@ -124,17 +123,19 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
124 ISP_PIPELINE_IDLE_OUTPUT); 123 ISP_PIPELINE_IDLE_OUTPUT);
125} 124}
126 125
127/* 126/**
128 * struct isp_buffer - ISP buffer 127 * struct isp_buffer - ISP video buffer
129 * @buffer: ISP video buffer 128 * @vb: videobuf2 buffer
130 * @isp_addr: MMU mapped address (a.k.a. device address) of the buffer. 129 * @irqlist: List head for insertion into IRQ queue
130 * @dma: DMA address
131 */ 131 */
132struct isp_buffer { 132struct isp_buffer {
133 struct isp_video_buffer buffer; 133 struct vb2_buffer vb;
134 dma_addr_t isp_addr; 134 struct list_head irqlist;
135 dma_addr_t dma;
135}; 136};
136 137
137#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, buffer) 138#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, vb)
138 139
139enum isp_video_dmaqueue_flags { 140enum isp_video_dmaqueue_flags {
140 /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */ 141 /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
@@ -172,16 +173,16 @@ struct isp_video {
172 unsigned int bpl_value; /* bytes per line value */ 173 unsigned int bpl_value; /* bytes per line value */
173 unsigned int bpl_padding; /* padding at end of line */ 174 unsigned int bpl_padding; /* padding at end of line */
174 175
175 /* Entity video node streaming */
176 unsigned int streaming:1;
177
178 /* Pipeline state */ 176 /* Pipeline state */
179 struct isp_pipeline pipe; 177 struct isp_pipeline pipe;
180 struct mutex stream_lock; /* pipeline and stream states */ 178 struct mutex stream_lock; /* pipeline and stream states */
181 bool error; 179 bool error;
182 180
183 /* Video buffers queue */ 181 /* Video buffers queue */
184 struct isp_video_queue *queue; 182 void *alloc_ctx;
183 struct vb2_queue *queue;
184 struct mutex queue_lock; /* protects the queue */
185 spinlock_t irqlock; /* protects dmaqueue */
185 struct list_head dmaqueue; 186 struct list_head dmaqueue;
186 enum isp_video_dmaqueue_flags dmaqueue_flags; 187 enum isp_video_dmaqueue_flags dmaqueue_flags;
187 188
@@ -193,7 +194,7 @@ struct isp_video {
193struct isp_video_fh { 194struct isp_video_fh {
194 struct v4l2_fh vfh; 195 struct v4l2_fh vfh;
195 struct isp_video *video; 196 struct isp_video *video;
196 struct isp_video_queue queue; 197 struct vb2_queue queue;
197 struct v4l2_format format; 198 struct v4l2_format format;
198 struct v4l2_fract timeperframe; 199 struct v4l2_fract timeperframe;
199}; 200};
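
struct isp_buffer now embeds the vb2_buffer, and to_isp_buffer() recovers the driver structure from the vb2 pointer that the core passes to the queue operations. A small usage sketch, assuming a vb2 buf_queue callback (the callback name is illustrative):

	static void isp_video_buffer_queue(struct vb2_buffer *vb)
	{
		struct isp_buffer *buffer = to_isp_buffer(vb);	/* container_of(vb, struct isp_buffer, vb) */

		/* buffer->dma and buffer->irqlist are now available to the driver. */
	}
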
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 349e659d75fb..7c4489c42365 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1200,6 +1200,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1200EXPORT_SYMBOL_GPL(vb2_buffer_done); 1200EXPORT_SYMBOL_GPL(vb2_buffer_done);
1201 1201
1202/** 1202/**
1203 * vb2_discard_done() - discard all buffers marked as DONE
1204 * @q: videobuf2 queue
1205 *
1206 * This function is intended to be used with suspend/resume operations. It
1207 * discards all 'done' buffers as they would be too old to be requested after
1208 * resume.
1209 *
1210 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
1211 * delayed works before calling this function to make sure no buffer will be
1212 * touched by the driver and/or hardware.
1213 */
1214void vb2_discard_done(struct vb2_queue *q)
1215{
1216 struct vb2_buffer *vb;
1217 unsigned long flags;
1218
1219 spin_lock_irqsave(&q->done_lock, flags);
1220 list_for_each_entry(vb, &q->done_list, done_entry)
1221 vb->state = VB2_BUF_STATE_ERROR;
1222 spin_unlock_irqrestore(&q->done_lock, flags);
1223}
1224EXPORT_SYMBOL_GPL(vb2_discard_done);
1225
1226/**
1203 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a 1227 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
1204 * v4l2_buffer by the userspace. The caller has already verified that struct 1228 * v4l2_buffer by the userspace. The caller has already verified that struct
1205 * v4l2_buffer has a valid number of planes. 1229 * v4l2_buffer has a valid number of planes.
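
As the added code shows, vb2_discard_done() only walks the queue's done list under done_lock and marks each buffer VB2_BUF_STATE_ERROR; it neither wakes waiters nor touches buffers still owned by the driver. The omap3isp resume path earlier in this patch is the intended caller. A minimal usage sketch (the queue_lock convention is omap3isp's, not part of the vb2 API):

	/* Hardware already stopped and its interrupt handler synchronized. */
	mutex_lock(&video->queue_lock);
	vb2_discard_done(video->queue);		/* stale pre-suspend frames -> VB2_BUF_STATE_ERROR */
	mutex_unlock(&video->queue_lock);
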
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 73068e50e56d..3250fc1df0aa 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -199,7 +199,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
199 func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, 199 func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
200 GFP_KERNEL); 200 GFP_KERNEL);
201 if (!func) 201 if (!func)
202 return NULL; 202 return ERR_PTR(-ENOMEM);
203 203
204 func->syscfg = syscfg; 204 func->syscfg = syscfg;
205 func->num_templates = num; 205 func->num_templates = num;
@@ -231,10 +231,14 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
231 func->regmap = regmap_init(dev, NULL, func, 231 func->regmap = regmap_init(dev, NULL, func,
232 &vexpress_syscfg_regmap_config); 232 &vexpress_syscfg_regmap_config);
233 233
234 if (IS_ERR(func->regmap)) 234 if (IS_ERR(func->regmap)) {
235 void *err = func->regmap;
236
235 kfree(func); 237 kfree(func);
236 else 238 return err;
237 list_add(&func->list, &syscfg->funcs); 239 }
240
241 list_add(&func->list, &syscfg->funcs);
238 242
239 return func->regmap; 243 return func->regmap;
240} 244}
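
The vexpress-syscfg change makes the regmap init helper report allocation failure as ERR_PTR(-ENOMEM) instead of NULL, and it saves the regmap_init() error pointer before freeing the function structure rather than reading func->regmap after kfree(). A generic sketch of the idiom, with hypothetical names:

	struct example_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return ERR_PTR(-ENOMEM);	/* callers test IS_ERR(), not NULL */

	func->regmap = regmap_init(dev, NULL, func, &example_regmap_config);
	if (IS_ERR(func->regmap)) {
		void *err = func->regmap;	/* preserve the encoded errno before freeing */

		kfree(func);
		return err;
	}

	return func->regmap;
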
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2421835d5daf..191617492181 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -17,7 +17,8 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 * 19 *
20 * Maintained by: Dmitry Torokhov <dtor@vmware.com> 20 * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
21 * Philip Moltmann <moltmann@vmware.com>
21 */ 22 */
22 23
23/* 24/*
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1610d51dbb5c..ade33ef82823 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1156,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1156 if (!vs) 1156 if (!vs)
1157 goto drop; 1157 goto drop;
1158 1158
1159 /* If the NIC driver gave us an encapsulated packet 1159 skb_pop_rcv_encapsulation(skb);
1160 * with the encapsulation mark, the device checksummed it
1161 * for us. Otherwise force the upper layers to verify it.
1162 */
1163 if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
1164 !skb->encapsulation)
1165 skb->ip_summed = CHECKSUM_NONE;
1166
1167 skb->encapsulation = 0;
1168 1160
1169 vs->rcv(vs, skb, vxh->vx_vni); 1161 vs->rcv(vs, skb, vxh->vx_vni);
1170 return 0; 1162 return 0;
@@ -1201,6 +1193,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
1201 skb_reset_mac_header(skb); 1193 skb_reset_mac_header(skb);
1202 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); 1194 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1203 skb->protocol = eth_type_trans(skb, vxlan->dev); 1195 skb->protocol = eth_type_trans(skb, vxlan->dev);
1196 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1204 1197
1205 /* Ignore packet loops (and multicast echo) */ 1198 /* Ignore packet loops (and multicast echo) */
1206 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1199 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
@@ -2247,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
2247 eth_hw_addr_random(dev); 2240 eth_hw_addr_random(dev);
2248 ether_setup(dev); 2241 ether_setup(dev);
2249 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) 2242 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2250 dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM; 2243 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2251 else 2244 else
2252 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; 2245 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2253 2246
2254 dev->netdev_ops = &vxlan_netdev_ops; 2247 dev->netdev_ops = &vxlan_netdev_ops;
2255 dev->destructor = free_netdev; 2248 dev->destructor = free_netdev;
@@ -2646,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2646 if (!tb[IFLA_MTU]) 2639 if (!tb[IFLA_MTU])
2647 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2640 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2648 2641
2649 /* update header length based on lower device */ 2642 dev->needed_headroom = lowerdev->hard_header_len +
2650 dev->hard_header_len = lowerdev->hard_header_len +
2651 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2643 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2652 } else if (use_ipv6) 2644 } else if (use_ipv6)
2653 vxlan->flags |= VXLAN_F_IPV6; 2645 vxlan->flags |= VXLAN_F_IPV6;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8368d96ae7b4..b9864806e9b8 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -227,7 +227,8 @@ static int __of_node_add(struct device_node *np)
227 np->kobj.kset = of_kset; 227 np->kobj.kset = of_kset;
228 if (!np->parent) { 228 if (!np->parent) {
229 /* Nodes without parents are new top level trees */ 229 /* Nodes without parents are new top level trees */
230 rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base")); 230 rc = kobject_add(&np->kobj, NULL, "%s",
231 safe_name(&of_kset->kobj, "base"));
231 } else { 232 } else {
232 name = safe_name(&np->parent->kobj, kbasename(np->full_name)); 233 name = safe_name(&np->parent->kobj, kbasename(np->full_name));
233 if (!name || !name[0]) 234 if (!name || !name[0])
@@ -1960,9 +1961,9 @@ int of_attach_node(struct device_node *np)
1960 1961
1961 raw_spin_lock_irqsave(&devtree_lock, flags); 1962 raw_spin_lock_irqsave(&devtree_lock, flags);
1962 np->sibling = np->parent->child; 1963 np->sibling = np->parent->child;
1963 np->allnext = of_allnodes; 1964 np->allnext = np->parent->allnext;
1965 np->parent->allnext = np;
1964 np->parent->child = np; 1966 np->parent->child = np;
1965 of_allnodes = np;
1966 of_node_clear_flag(np, OF_DETACHED); 1967 of_node_clear_flag(np, OF_DETACHED);
1967 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1968 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1968 1969
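
kobject_add() and dev_set_name() both take printf-style format strings, so passing a derived name straight through is unsafe if it can contain '%'. The of/base.c fix (and the dcssblk and vmlogrdr fixes later in this patch) route the string through an explicit "%s". Sketch:

	/* Risky: 'name' is interpreted as a format string. */
	rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));

	/* Safe: the derived name is treated purely as data. */
	rc = kobject_add(&np->kobj, NULL, "%s", safe_name(&of_kset->kobj, "base"));
	rc = dev_set_name(dev, "%s", dev_info->segment_name);
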
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6c48d73a7fd7..500436f9be7f 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -166,10 +166,6 @@ static void of_dma_configure(struct platform_device *pdev)
166 int ret; 166 int ret;
167 struct device *dev = &pdev->dev; 167 struct device *dev = &pdev->dev;
168 168
169#if defined(CONFIG_MICROBLAZE)
170 pdev->archdata.dma_mask = 0xffffffffUL;
171#endif
172
173 /* 169 /*
174 * Set default dma-mask to 32 bit. Drivers are expected to setup 170 * Set default dma-mask to 32 bit. Drivers are expected to setup
175 * the correct supported dma_mask. 171 * the correct supported dma_mask.
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 85585219ce82..ad9e0c9b7daf 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -433,6 +433,7 @@ static struct regulator_ops as3722_ldo3_extcntrl_ops = {
433}; 433};
434 434
435static const struct regulator_linear_range as3722_ldo_ranges[] = { 435static const struct regulator_linear_range as3722_ldo_ranges[] = {
436 REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
436 REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000), 437 REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
437 REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000), 438 REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
438}; 439};
@@ -609,6 +610,7 @@ static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
609} 610}
610 611
611static const struct regulator_linear_range as3722_sd2345_ranges[] = { 612static const struct regulator_linear_range as3722_sd2345_ranges[] = {
613 REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
612 REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), 614 REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
613 REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), 615 REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
614 REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000), 616 REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 110a99ee1162..c8105182b8b8 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -255,7 +255,7 @@ static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
255 struct device_node *node; 255 struct device_node *node;
256 int i, ret; 256 int i, ret;
257 257
258 node = of_find_node_by_name(dev->of_node, "regulators"); 258 node = of_get_child_by_name(dev->of_node, "regulators");
259 if (!node) { 259 if (!node) {
260 dev_err(dev, "regulators node not found\n"); 260 dev_err(dev, "regulators node not found\n");
261 return -EINVAL; 261 return -EINVAL;
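
of_get_child_by_name() looks only at direct children of the given node, which is what a "regulators" sub-node lookup intends, whereas of_find_node_by_name() searches the whole tree from the starting node (and drops a reference on it), so it could match an unrelated node elsewhere. The returned child carries a reference the caller must eventually drop. Sketch:

	struct device_node *node;

	node = of_get_child_by_name(dev->of_node, "regulators");
	if (!node) {
		dev_err(dev, "regulators node not found\n");
		return -EINVAL;
	}

	/* ... match and register the regulators described by the child ... */

	of_node_put(node);	/* balance the reference taken by the lookup */
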
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 864ed02ce4b7..b982f0ff4e01 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -37,12 +37,14 @@ struct regs_info {
37}; 37};
38 38
39static const struct regulator_linear_range smps_low_ranges[] = { 39static const struct regulator_linear_range smps_low_ranges[] = {
40 REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
40 REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0), 41 REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0),
41 REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000), 42 REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000),
42 REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0), 43 REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),
43}; 44};
44 45
45static const struct regulator_linear_range smps_high_ranges[] = { 46static const struct regulator_linear_range smps_high_ranges[] = {
47 REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
46 REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0), 48 REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0),
47 REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000), 49 REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000),
48 REGULATOR_LINEAR_RANGE(3300000, 0x7A, 0x7f, 0), 50 REGULATOR_LINEAR_RANGE(3300000, 0x7A, 0x7f, 0),
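
Both regulator tables gain an explicit range for selector 0, so the lowest selector has a defined 0 µV mapping instead of falling outside every linear range (the intent appears to be covering selector 0 in list_voltage lookups). REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) describes one linear segment; the palmas low-range table reads as follows (annotations added for illustration):

	static const struct regulator_linear_range smps_low_ranges[] = {
		REGULATOR_LINEAR_RANGE(0,       0x0,  0x0,  0),		/* sel 0      -> 0 uV            */
		REGULATOR_LINEAR_RANGE(500000,  0x1,  0x6,  0),		/* sel 1..6   -> fixed 500 mV    */
		REGULATOR_LINEAR_RANGE(510000,  0x7,  0x79, 10000),	/* 510 mV, +10 mV per step       */
		REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),		/* plateau at 1.65 V             */
	};
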
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index ce1743d0b679..5e343bab9458 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -44,7 +44,7 @@ config STE_MODEM_RPROC
44config DA8XX_REMOTEPROC 44config DA8XX_REMOTEPROC
45 tristate "DA8xx/OMAP-L13x remoteproc support" 45 tristate "DA8xx/OMAP-L13x remoteproc support"
46 depends on ARCH_DAVINCI_DA8XX 46 depends on ARCH_DAVINCI_DA8XX
47 select CMA 47 select CMA if MMU
48 select REMOTEPROC 48 select REMOTEPROC
49 select RPMSG 49 select RPMSG
50 help 50 help
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 1ecfe3bd92ac..1cff2a21db67 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
71{ 71{
72 unsigned int tmp; 72 unsigned int tmp;
73 73
74 dev_debug(dev, "%s: pie=%d\n", __func__, enabled); 74 dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
75 75
76 spin_lock_irq(&puv3_rtc_pie_lock); 76 spin_lock_irq(&puv3_rtc_pie_lock);
77 tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE; 77 tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
140 rtc_tm_to_time(tm, &rtcalarm_count); 140 rtc_tm_to_time(tm, &rtcalarm_count);
141 writel(rtcalarm_count, RTC_RTAR); 141 writel(rtcalarm_count, RTC_RTAR);
142 142
143 puv3_rtc_setaie(&dev->dev, alrm->enabled); 143 puv3_rtc_setaie(dev, alrm->enabled);
144 144
145 if (alrm->enabled) 145 if (alrm->enabled)
146 enable_irq_wake(puv3_rtc_alarmno); 146 enable_irq_wake(puv3_rtc_alarmno);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ee0e85abe1fd..0f471750327e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -593,7 +593,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
593 dev_info->start = dcssblk_find_lowest_addr(dev_info); 593 dev_info->start = dcssblk_find_lowest_addr(dev_info);
594 dev_info->end = dcssblk_find_highest_addr(dev_info); 594 dev_info->end = dcssblk_find_highest_addr(dev_info);
595 595
596 dev_set_name(&dev_info->dev, dev_info->segment_name); 596 dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
597 dev_info->dev.release = dcssblk_release_segment; 597 dev_info->dev.release = dcssblk_release_segment;
598 dev_info->dev.groups = dcssblk_dev_attr_groups; 598 dev_info->dev.groups = dcssblk_dev_attr_groups;
599 INIT_LIST_HEAD(&dev_info->lh); 599 INIT_LIST_HEAD(&dev_info->lh);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 629fcc275e92..78b6ace7edcb 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
19obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o 19obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
20obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o 20obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
21 21
22obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
23obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o 22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
24obj-$(CONFIG_VMCP) += vmcp.o 23obj-$(CONFIG_VMCP) += vmcp.o
25 24
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index cd9c91909596..b9a9f721716d 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -838,8 +838,6 @@ sclp_vt220_con_init(void)
838{ 838{
839 int rc; 839 int rc;
840 840
841 if (!CONSOLE_IS_SCLP)
842 return 0;
843 rc = __sclp_vt220_init(sclp_console_pages); 841 rc = __sclp_vt220_init(sclp_console_pages);
844 if (rc) 842 if (rc)
845 return rc; 843 return rc;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index cf31d3321dab..a8848db7b09d 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -761,7 +761,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
761 761
762 dev = kzalloc(sizeof(struct device), GFP_KERNEL); 762 dev = kzalloc(sizeof(struct device), GFP_KERNEL);
763 if (dev) { 763 if (dev) {
764 dev_set_name(dev, priv->internal_name); 764 dev_set_name(dev, "%s", priv->internal_name);
765 dev->bus = &iucv_bus; 765 dev->bus = &iucv_bus;
766 dev->parent = iucv_root; 766 dev->parent = iucv_root;
767 dev->driver = &vmlogrdr_driver; 767 dev->driver = &vmlogrdr_driver;
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
deleted file mode 100644
index d5eac985976b..000000000000
--- a/drivers/s390/char/vmwatchdog.c
+++ /dev/null
@@ -1,338 +0,0 @@
1/*
2 * Watchdog implementation based on z/VM Watchdog Timer API
3 *
4 * Copyright IBM Corp. 2004, 2009
5 *
6 * The user space watchdog daemon can use this driver as
7 * /dev/vmwatchdog to have z/VM execute the specified CP
8 * command when the timeout expires. The default command is
9 * "IPL", which which cause an immediate reboot.
10 */
11#define KMSG_COMPONENT "vmwatchdog"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/init.h>
15#include <linux/fs.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/slab.h>
21#include <linux/suspend.h>
22#include <linux/watchdog.h>
23
24#include <asm/ebcdic.h>
25#include <asm/io.h>
26#include <asm/uaccess.h>
27
28#define MAX_CMDLEN 240
29#define MIN_INTERVAL 15
30static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
31static bool vmwdt_conceal;
32
33static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
34
35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
37MODULE_DESCRIPTION("z/VM Watchdog Timer");
38module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
39MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
40module_param_named(conceal, vmwdt_conceal, bool, 0644);
41MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
42 " is active");
43module_param_named(nowayout, vmwdt_nowayout, bool, 0);
44MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
45 " (default=CONFIG_WATCHDOG_NOWAYOUT)");
46MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
47
48static unsigned int vmwdt_interval = 60;
49static unsigned long vmwdt_is_open;
50static int vmwdt_expect_close;
51
52static DEFINE_MUTEX(vmwdt_mutex);
53
54#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
55#define VMWDT_RUNNING 1 /* The watchdog is armed */
56
57enum vmwdt_func {
58 /* function codes */
59 wdt_init = 0,
60 wdt_change = 1,
61 wdt_cancel = 2,
62 /* flags */
63 wdt_conceal = 0x80000000,
64};
65
66static int __diag288(enum vmwdt_func func, unsigned int timeout,
67 char *cmd, size_t len)
68{
69 register unsigned long __func asm("2") = func;
70 register unsigned long __timeout asm("3") = timeout;
71 register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
72 register unsigned long __cmdl asm("5") = len;
73 int err;
74
75 err = -EINVAL;
76 asm volatile(
77 " diag %1,%3,0x288\n"
78 "0: la %0,0\n"
79 "1:\n"
80 EX_TABLE(0b,1b)
81 : "+d" (err) : "d"(__func), "d"(__timeout),
82 "d"(__cmdp), "d"(__cmdl) : "1", "cc");
83 return err;
84}
85
86static int vmwdt_keepalive(void)
87{
88 /* we allocate new memory every time to avoid having
89 * to track the state. static allocation is not an
90 * option since that might not be contiguous in real
91 * storage in case of a modular build */
92 static char *ebc_cmd;
93 size_t len;
94 int ret;
95 unsigned int func;
96
97 ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
98 if (!ebc_cmd)
99 return -ENOMEM;
100
101 len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
102 ASCEBC(ebc_cmd, MAX_CMDLEN);
103 EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
104
105 func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
106 set_bit(VMWDT_RUNNING, &vmwdt_is_open);
107 ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
108 WARN_ON(ret != 0);
109 kfree(ebc_cmd);
110 return ret;
111}
112
113static int vmwdt_disable(void)
114{
115 char cmd[] = {'\0'};
116 int ret = __diag288(wdt_cancel, 0, cmd, 0);
117 WARN_ON(ret != 0);
118 clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
119 return ret;
120}
121
122static int __init vmwdt_probe(void)
123{
124 /* there is no real way to see if the watchdog is supported,
125 * so we try initializing it with a NOP command ("BEGIN")
126 * that won't cause any harm even if the following disable
127 * fails for some reason */
128 char ebc_begin[] = {
129 194, 197, 199, 201, 213
130 };
131 if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
132 return -EINVAL;
133 return vmwdt_disable();
134}
135
136static int vmwdt_open(struct inode *i, struct file *f)
137{
138 int ret;
139 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
140 return -EBUSY;
141 ret = vmwdt_keepalive();
142 if (ret)
143 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
144 return ret ? ret : nonseekable_open(i, f);
145}
146
147static int vmwdt_close(struct inode *i, struct file *f)
148{
149 if (vmwdt_expect_close == 42)
150 vmwdt_disable();
151 vmwdt_expect_close = 0;
152 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
153 return 0;
154}
155
156static struct watchdog_info vmwdt_info = {
157 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
158 .firmware_version = 0,
159 .identity = "z/VM Watchdog Timer",
160};
161
162static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
163{
164 switch (cmd) {
165 case WDIOC_GETSUPPORT:
166 if (copy_to_user((void __user *)arg, &vmwdt_info,
167 sizeof(vmwdt_info)))
168 return -EFAULT;
169 return 0;
170 case WDIOC_GETSTATUS:
171 case WDIOC_GETBOOTSTATUS:
172 return put_user(0, (int __user *)arg);
173 case WDIOC_GETTEMP:
174 return -EINVAL;
175 case WDIOC_SETOPTIONS:
176 {
177 int options, ret;
178 if (get_user(options, (int __user *)arg))
179 return -EFAULT;
180 ret = -EINVAL;
181 if (options & WDIOS_DISABLECARD) {
182 ret = vmwdt_disable();
183 if (ret)
184 return ret;
185 }
186 if (options & WDIOS_ENABLECARD) {
187 ret = vmwdt_keepalive();
188 }
189 return ret;
190 }
191 case WDIOC_GETTIMEOUT:
192 return put_user(vmwdt_interval, (int __user *)arg);
193 case WDIOC_SETTIMEOUT:
194 {
195 int interval;
196 if (get_user(interval, (int __user *)arg))
197 return -EFAULT;
198 if (interval < MIN_INTERVAL)
199 return -EINVAL;
200 vmwdt_interval = interval;
201 }
202 return vmwdt_keepalive();
203 case WDIOC_KEEPALIVE:
204 return vmwdt_keepalive();
205 }
206 return -EINVAL;
207}
208
209static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
210{
211 int rc;
212
213 mutex_lock(&vmwdt_mutex);
214 rc = __vmwdt_ioctl(cmd, arg);
215 mutex_unlock(&vmwdt_mutex);
216 return (long) rc;
217}
218
219static ssize_t vmwdt_write(struct file *f, const char __user *buf,
220 size_t count, loff_t *ppos)
221{
222 if(count) {
223 if (!vmwdt_nowayout) {
224 size_t i;
225
226 /* note: just in case someone wrote the magic character
227 * five months ago... */
228 vmwdt_expect_close = 0;
229
230 for (i = 0; i != count; i++) {
231 char c;
232 if (get_user(c, buf+i))
233 return -EFAULT;
234 if (c == 'V')
235 vmwdt_expect_close = 42;
236 }
237 }
238 /* someone wrote to us, we should restart timer */
239 vmwdt_keepalive();
240 }
241 return count;
242}
243
244static int vmwdt_resume(void)
245{
246 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
247 return NOTIFY_DONE;
248}
249
250/*
251 * It makes no sense to go into suspend while the watchdog is running.
252 * Depending on the memory size, the watchdog might trigger, while we
253 * are still saving the memory.
254 * We reuse the open flag to ensure that suspend and watchdog open are
255 * exclusive operations
256 */
257static int vmwdt_suspend(void)
258{
259 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
260 pr_err("The system cannot be suspended while the watchdog"
261 " is in use\n");
262 return notifier_from_errno(-EBUSY);
263 }
264 if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
265 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
266 pr_err("The system cannot be suspended while the watchdog"
267 " is running\n");
268 return notifier_from_errno(-EBUSY);
269 }
270 return NOTIFY_DONE;
271}
272
273/*
274 * This function is called for suspend and resume.
275 */
276static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
277 void *ptr)
278{
279 switch (event) {
280 case PM_POST_HIBERNATION:
281 case PM_POST_SUSPEND:
282 return vmwdt_resume();
283 case PM_HIBERNATION_PREPARE:
284 case PM_SUSPEND_PREPARE:
285 return vmwdt_suspend();
286 default:
287 return NOTIFY_DONE;
288 }
289}
290
291static struct notifier_block vmwdt_power_notifier = {
292 .notifier_call = vmwdt_power_event,
293};
294
295static const struct file_operations vmwdt_fops = {
296 .open = &vmwdt_open,
297 .release = &vmwdt_close,
298 .unlocked_ioctl = &vmwdt_ioctl,
299 .write = &vmwdt_write,
300 .owner = THIS_MODULE,
301 .llseek = noop_llseek,
302};
303
304static struct miscdevice vmwdt_dev = {
305 .minor = WATCHDOG_MINOR,
306 .name = "watchdog",
307 .fops = &vmwdt_fops,
308};
309
310static int __init vmwdt_init(void)
311{
312 int ret;
313
314 ret = vmwdt_probe();
315 if (ret)
316 return ret;
317 ret = register_pm_notifier(&vmwdt_power_notifier);
318 if (ret)
319 return ret;
320 /*
321 * misc_register() has to be the last action in module_init(), because
322 * file operations will be available right after this.
323 */
324 ret = misc_register(&vmwdt_dev);
325 if (ret) {
326 unregister_pm_notifier(&vmwdt_power_notifier);
327 return ret;
328 }
329 return 0;
330}
331module_init(vmwdt_init);
332
333static void __exit vmwdt_exit(void)
334{
335 unregister_pm_notifier(&vmwdt_power_notifier);
336 misc_deregister(&vmwdt_dev);
337}
338module_exit(vmwdt_exit);
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 445564c790f6..00bfbee0af9e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -196,11 +196,11 @@ EXPORT_SYMBOL(airq_iv_release);
196 */ 196 */
197unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num) 197unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
198{ 198{
199 unsigned long bit, i; 199 unsigned long bit, i, flags;
200 200
201 if (!iv->avail || num == 0) 201 if (!iv->avail || num == 0)
202 return -1UL; 202 return -1UL;
203 spin_lock(&iv->lock); 203 spin_lock_irqsave(&iv->lock, flags);
204 bit = find_first_bit_inv(iv->avail, iv->bits); 204 bit = find_first_bit_inv(iv->avail, iv->bits);
205 while (bit + num <= iv->bits) { 205 while (bit + num <= iv->bits) {
206 for (i = 1; i < num; i++) 206 for (i = 1; i < num; i++)
@@ -218,9 +218,8 @@ unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
218 } 218 }
219 if (bit + num > iv->bits) 219 if (bit + num > iv->bits)
220 bit = -1UL; 220 bit = -1UL;
221 spin_unlock(&iv->lock); 221 spin_unlock_irqrestore(&iv->lock, flags);
222 return bit; 222 return bit;
223
224} 223}
225EXPORT_SYMBOL(airq_iv_alloc); 224EXPORT_SYMBOL(airq_iv_alloc);
226 225
@@ -232,11 +231,11 @@ EXPORT_SYMBOL(airq_iv_alloc);
232 */ 231 */
233void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num) 232void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
234{ 233{
235 unsigned long i; 234 unsigned long i, flags;
236 235
237 if (!iv->avail || num == 0) 236 if (!iv->avail || num == 0)
238 return; 237 return;
239 spin_lock(&iv->lock); 238 spin_lock_irqsave(&iv->lock, flags);
240 for (i = 0; i < num; i++) { 239 for (i = 0; i < num; i++) {
241 /* Clear (possibly left over) interrupt bit */ 240 /* Clear (possibly left over) interrupt bit */
242 clear_bit_inv(bit + i, iv->vector); 241 clear_bit_inv(bit + i, iv->vector);
@@ -248,7 +247,7 @@ void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
248 while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail)) 247 while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
249 iv->end--; 248 iv->end--;
250 } 249 }
251 spin_unlock(&iv->lock); 250 spin_unlock_irqrestore(&iv->lock, flags);
252} 251}
253EXPORT_SYMBOL(airq_iv_free); 252EXPORT_SYMBOL(airq_iv_free);
254 253
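
The airq interrupt-vector allocator now disables interrupts while holding iv->lock, presumably so the alloc/free paths are safe when the same lock can also be taken with interrupts disabled or from interrupt context. Sketch of the pattern:

	unsigned long flags;

	spin_lock_irqsave(&iv->lock, flags);		/* usable regardless of the caller's IRQ state */
	/* ... scan and update iv->avail / iv->vector ... */
	spin_unlock_irqrestore(&iv->lock, flags);
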
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index dfd7bc681c25..e443b0d0b236 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -184,7 +184,7 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
184 const char *buf, size_t count) 184 const char *buf, size_t count)
185{ 185{
186 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 186 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
187 int rc; 187 int rc = 0;
188 188
189 /* Prevent concurrent online/offline processing and ungrouping. */ 189 /* Prevent concurrent online/offline processing and ungrouping. */
190 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) 190 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -196,11 +196,12 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
196 196
197 if (device_remove_file_self(dev, attr)) 197 if (device_remove_file_self(dev, attr))
198 ccwgroup_ungroup(gdev); 198 ccwgroup_ungroup(gdev);
199 else
200 rc = -ENODEV;
199out: 201out:
200 if (rc) { 202 if (rc) {
201 if (rc != -EAGAIN) 203 /* Release onoff "lock" when ungrouping failed. */
202 /* Release onoff "lock" when ungrouping failed. */ 204 atomic_set(&gdev->onoff, 0);
203 atomic_set(&gdev->onoff, 0);
204 return rc; 205 return rc;
205 } 206 }
206 return count; 207 return count;
@@ -227,6 +228,7 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
227 container_of(work, struct ccwgroup_device, ungroup_work); 228 container_of(work, struct ccwgroup_device, ungroup_work);
228 229
229 ccwgroup_ungroup(gdev); 230 ccwgroup_ungroup(gdev);
231 put_device(&gdev->dev);
230} 232}
231 233
232static void ccwgroup_release(struct device *dev) 234static void ccwgroup_release(struct device *dev)
@@ -412,8 +414,10 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
412{ 414{
413 struct ccwgroup_device *gdev = to_ccwgroupdev(data); 415 struct ccwgroup_device *gdev = to_ccwgroupdev(data);
414 416
415 if (action == BUS_NOTIFY_UNBIND_DRIVER) 417 if (action == BUS_NOTIFY_UNBIND_DRIVER) {
418 get_device(&gdev->dev);
416 schedule_work(&gdev->ungroup_work); 419 schedule_work(&gdev->ungroup_work);
420 }
417 421
418 return NOTIFY_OK; 422 return NOTIFY_OK;
419} 423}
@@ -582,11 +586,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
582 __ccwgroup_match_all))) { 586 __ccwgroup_match_all))) {
583 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 587 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
584 588
585 mutex_lock(&gdev->reg_mutex); 589 ccwgroup_ungroup(gdev);
586 __ccwgroup_remove_symlinks(gdev);
587 device_unregister(dev);
588 __ccwgroup_remove_cdev_refs(gdev);
589 mutex_unlock(&gdev->reg_mutex);
590 put_device(dev); 590 put_device(dev);
591 } 591 }
592 driver_unregister(&cdriver->driver); 592 driver_unregister(&cdriver->driver);
@@ -633,13 +633,7 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
633 get_device(&gdev->dev); 633 get_device(&gdev->dev);
634 spin_unlock_irq(cdev->ccwlock); 634 spin_unlock_irq(cdev->ccwlock);
635 /* Unregister group device. */ 635 /* Unregister group device. */
636 mutex_lock(&gdev->reg_mutex); 636 ccwgroup_ungroup(gdev);
637 if (device_is_registered(&gdev->dev)) {
638 __ccwgroup_remove_symlinks(gdev);
639 device_unregister(&gdev->dev);
640 __ccwgroup_remove_cdev_refs(gdev);
641 }
642 mutex_unlock(&gdev->reg_mutex);
643 /* Release ccwgroup device reference for local processing. */ 637 /* Release ccwgroup device reference for local processing. */
644 put_device(&gdev->dev); 638 put_device(&gdev->dev);
645} 639}
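
The ungroup notifier now pins the group device before scheduling the deferred work, and the work function drops that reference once the ungroup completes, so the device cannot be freed while the work item is still queued. Sketch:

	if (action == BUS_NOTIFY_UNBIND_DRIVER) {
		get_device(&gdev->dev);			/* keep gdev alive for the deferred work */
		schedule_work(&gdev->ungroup_work);
	}

	/* ... later, in the work function ... */
	ccwgroup_ungroup(gdev);
	put_device(&gdev->dev);				/* balance the notifier's get_device() */
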
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 77f9c92df4b9..2905d8b0ec95 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -602,6 +602,7 @@ void __init init_cio_interrupts(void)
602 602
603#ifdef CONFIG_CCW_CONSOLE 603#ifdef CONFIG_CCW_CONSOLE
604static struct subchannel *console_sch; 604static struct subchannel *console_sch;
605static struct lock_class_key console_sch_key;
605 606
606/* 607/*
607 * Use cio_tsch to update the subchannel status and call the interrupt handler 608 * Use cio_tsch to update the subchannel status and call the interrupt handler
@@ -686,6 +687,7 @@ struct subchannel *cio_probe_console(void)
686 if (IS_ERR(sch)) 687 if (IS_ERR(sch))
687 return sch; 688 return sch;
688 689
690 lockdep_set_class(sch->lock, &console_sch_key);
689 isc_register(CONSOLE_ISC); 691 isc_register(CONSOLE_ISC);
690 sch->config.isc = CONSOLE_ISC; 692 sch->config.isc = CONSOLE_ISC;
691 sch->config.intparm = (u32)(addr_t)sch; 693 sch->config.intparm = (u32)(addr_t)sch;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d8d9b5b5cc56..dfef5e63cb7b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -678,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
678 NULL, 678 NULL,
679}; 679};
680 680
681/* this is a simple abstraction for device_register that sets the 681static int ccw_device_add(struct ccw_device *cdev)
682 * correct bus type and adds the bus specific files */
683static int ccw_device_register(struct ccw_device *cdev)
684{ 682{
685 struct device *dev = &cdev->dev; 683 struct device *dev = &cdev->dev;
686 int ret;
687 684
688 dev->bus = &ccw_bus_type; 685 dev->bus = &ccw_bus_type;
689 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
690 cdev->private->dev_id.devno);
691 if (ret)
692 return ret;
693 return device_add(dev); 686 return device_add(dev);
694} 687}
695 688
@@ -764,22 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
764static int io_subchannel_initialize_dev(struct subchannel *sch, 757static int io_subchannel_initialize_dev(struct subchannel *sch,
765 struct ccw_device *cdev) 758 struct ccw_device *cdev)
766{ 759{
767 cdev->private->cdev = cdev; 760 struct ccw_device_private *priv = cdev->private;
768 cdev->private->int_class = IRQIO_CIO; 761 int ret;
769 atomic_set(&cdev->private->onoff, 0); 762
763 priv->cdev = cdev;
764 priv->int_class = IRQIO_CIO;
765 priv->state = DEV_STATE_NOT_OPER;
766 priv->dev_id.devno = sch->schib.pmcw.dev;
767 priv->dev_id.ssid = sch->schid.ssid;
768 priv->schid = sch->schid;
769
770 INIT_WORK(&priv->todo_work, ccw_device_todo);
771 INIT_LIST_HEAD(&priv->cmb_list);
772 init_waitqueue_head(&priv->wait_q);
773 init_timer(&priv->timer);
774
775 atomic_set(&priv->onoff, 0);
776 cdev->ccwlock = sch->lock;
770 cdev->dev.parent = &sch->dev; 777 cdev->dev.parent = &sch->dev;
771 cdev->dev.release = ccw_device_release; 778 cdev->dev.release = ccw_device_release;
772 INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
773 cdev->dev.groups = ccwdev_attr_groups; 779 cdev->dev.groups = ccwdev_attr_groups;
774 /* Do first half of device_register. */ 780 /* Do first half of device_register. */
775 device_initialize(&cdev->dev); 781 device_initialize(&cdev->dev);
782 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
783 cdev->private->dev_id.devno);
784 if (ret)
785 goto out_put;
776 if (!get_device(&sch->dev)) { 786 if (!get_device(&sch->dev)) {
777 /* Release reference from device_initialize(). */ 787 ret = -ENODEV;
778 put_device(&cdev->dev); 788 goto out_put;
779 return -ENODEV;
780 } 789 }
781 cdev->private->flags.initialized = 1; 790 priv->flags.initialized = 1;
791 spin_lock_irq(sch->lock);
792 sch_set_cdev(sch, cdev);
793 spin_unlock_irq(sch->lock);
782 return 0; 794 return 0;
795
796out_put:
797 /* Release reference from device_initialize(). */
798 put_device(&cdev->dev);
799 return ret;
783} 800}
784 801
785static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) 802static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -858,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
858 dev_set_uevent_suppress(&sch->dev, 0); 875 dev_set_uevent_suppress(&sch->dev, 0);
859 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 876 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
860 /* make it known to the system */ 877 /* make it known to the system */
861 ret = ccw_device_register(cdev); 878 ret = ccw_device_add(cdev);
862 if (ret) { 879 if (ret) {
863 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", 880 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
864 cdev->private->dev_id.ssid, 881 cdev->private->dev_id.ssid,
@@ -923,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)
923 940
924static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) 941static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
925{ 942{
926 struct ccw_device_private *priv;
927
928 cdev->ccwlock = sch->lock;
929
930 /* Init private data. */
931 priv = cdev->private;
932 priv->dev_id.devno = sch->schib.pmcw.dev;
933 priv->dev_id.ssid = sch->schid.ssid;
934 priv->schid = sch->schid;
935 priv->state = DEV_STATE_NOT_OPER;
936 INIT_LIST_HEAD(&priv->cmb_list);
937 init_waitqueue_head(&priv->wait_q);
938 init_timer(&priv->timer);
939
940 /* Increase counter of devices currently in recognition. */ 943 /* Increase counter of devices currently in recognition. */
941 atomic_inc(&ccw_device_init_count); 944 atomic_inc(&ccw_device_init_count);
942 945
943 /* Start async. device sensing. */ 946 /* Start async. device sensing. */
944 spin_lock_irq(sch->lock); 947 spin_lock_irq(sch->lock);
945 sch_set_cdev(sch, cdev);
946 ccw_device_recognition(cdev); 948 ccw_device_recognition(cdev);
947 spin_unlock_irq(sch->lock); 949 spin_unlock_irq(sch->lock);
948} 950}
@@ -1083,7 +1085,7 @@ static int io_subchannel_probe(struct subchannel *sch)
1083 dev_set_uevent_suppress(&sch->dev, 0); 1085 dev_set_uevent_suppress(&sch->dev, 0);
1084 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 1086 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1085 cdev = sch_get_cdev(sch); 1087 cdev = sch_get_cdev(sch);
1086 rc = ccw_device_register(cdev); 1088 rc = ccw_device_add(cdev);
1087 if (rc) { 1089 if (rc) {
1088 /* Release online reference. */ 1090 /* Release online reference. */
1089 put_device(&cdev->dev); 1091 put_device(&cdev->dev);
@@ -1597,7 +1599,6 @@ int __init ccw_device_enable_console(struct ccw_device *cdev)
1597 if (rc) 1599 if (rc)
1598 return rc; 1600 return rc;
1599 sch->driver = &io_subchannel_driver; 1601 sch->driver = &io_subchannel_driver;
1600 sch_set_cdev(sch, cdev);
1601 io_subchannel_recog(cdev, sch); 1602 io_subchannel_recog(cdev, sch);
1602 /* Now wait for the async. recognition to come to an end. */ 1603 /* Now wait for the async. recognition to come to an end. */
1603 spin_lock_irq(cdev->ccwlock); 1604 spin_lock_irq(cdev->ccwlock);
@@ -1639,6 +1640,7 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
1639 put_device(&sch->dev); 1640 put_device(&sch->dev);
1640 return ERR_PTR(-ENOMEM); 1641 return ERR_PTR(-ENOMEM);
1641 } 1642 }
1643 set_io_private(sch, io_priv);
1642 cdev = io_subchannel_create_ccwdev(sch); 1644 cdev = io_subchannel_create_ccwdev(sch);
1643 if (IS_ERR(cdev)) { 1645 if (IS_ERR(cdev)) {
1644 put_device(&sch->dev); 1646 put_device(&sch->dev);
@@ -1646,7 +1648,6 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
1646 return cdev; 1648 return cdev;
1647 } 1649 }
1648 cdev->drv = drv; 1650 cdev->drv = drv;
1649 set_io_private(sch, io_priv);
1650 ccw_device_set_int_class(cdev); 1651 ccw_device_set_int_class(cdev);
1651 return cdev; 1652 return cdev;
1652} 1653}
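
io_subchannel_initialize_dev() now performs the first half of registration itself: it fills the private data, calls device_initialize(), and sets the device name; device_add() still happens later via ccw_device_add(). After device_initialize(), any failure must be unwound with put_device() (which invokes the release callback) rather than kfree(). Sketch of that error discipline, condensed from the hunk above:

	device_initialize(&cdev->dev);			/* refcount held, nothing in sysfs yet */

	ret = dev_set_name(&cdev->dev, "0.%x.%04x",
			   priv->dev_id.ssid, priv->dev_id.devno);
	if (ret)
		goto out_put;

	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	return 0;

out_put:
	put_device(&cdev->dev);				/* runs ->release and frees cdev */
	return ret;
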
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 4221b02085ad..f1f3baa8e6e4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -7,6 +7,7 @@
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/uaccess.h> 8#include <linux/uaccess.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/slab.h>
10#include <asm/debug.h> 11#include <asm/debug.h>
11#include "qdio_debug.h" 12#include "qdio_debug.h"
12#include "qdio.h" 13#include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;
16 17
17static struct dentry *debugfs_root; 18static struct dentry *debugfs_root;
18#define QDIO_DEBUGFS_NAME_LEN 10 19#define QDIO_DEBUGFS_NAME_LEN 10
20#define QDIO_DBF_NAME_LEN 20
19 21
20void qdio_allocate_dbf(struct qdio_initialize *init_data, 22struct qdio_dbf_entry {
23 char dbf_name[QDIO_DBF_NAME_LEN];
24 debug_info_t *dbf_info;
25 struct list_head dbf_list;
26};
27
28static LIST_HEAD(qdio_dbf_list);
29static DEFINE_MUTEX(qdio_dbf_list_mutex);
30
31static debug_info_t *qdio_get_dbf_entry(char *name)
32{
33 struct qdio_dbf_entry *entry;
34 debug_info_t *rc = NULL;
35
36 mutex_lock(&qdio_dbf_list_mutex);
37 list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
38 if (strcmp(entry->dbf_name, name) == 0) {
39 rc = entry->dbf_info;
40 break;
41 }
42 }
43 mutex_unlock(&qdio_dbf_list_mutex);
44 return rc;
45}
46
47static void qdio_clear_dbf_list(void)
48{
49 struct qdio_dbf_entry *entry, *tmp;
50
51 mutex_lock(&qdio_dbf_list_mutex);
52 list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
53 list_del(&entry->dbf_list);
54 debug_unregister(entry->dbf_info);
55 kfree(entry);
56 }
57 mutex_unlock(&qdio_dbf_list_mutex);
58}
59
60int qdio_allocate_dbf(struct qdio_initialize *init_data,
21 struct qdio_irq *irq_ptr) 61 struct qdio_irq *irq_ptr)
22{ 62{
23 char text[20]; 63 char text[QDIO_DBF_NAME_LEN];
64 struct qdio_dbf_entry *new_entry;
24 65
25 DBF_EVENT("qfmt:%1d", init_data->q_format); 66 DBF_EVENT("qfmt:%1d", init_data->q_format);
26 DBF_HEX(init_data->adapter_name, 8); 67 DBF_HEX(init_data->adapter_name, 8);
@@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
38 DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); 79 DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
39 80
40 /* allocate trace view for the interface */ 81 /* allocate trace view for the interface */
41 snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); 82 snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
42 irq_ptr->debug_area = debug_register(text, 2, 1, 16); 83 dev_name(&init_data->cdev->dev));
43 debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); 84 irq_ptr->debug_area = qdio_get_dbf_entry(text);
44 debug_set_level(irq_ptr->debug_area, DBF_WARN); 85 if (irq_ptr->debug_area)
45 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); 86 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
87 else {
88 irq_ptr->debug_area = debug_register(text, 2, 1, 16);
89 if (!irq_ptr->debug_area)
90 return -ENOMEM;
91 if (debug_register_view(irq_ptr->debug_area,
92 &debug_hex_ascii_view)) {
93 debug_unregister(irq_ptr->debug_area);
94 return -ENOMEM;
95 }
96 debug_set_level(irq_ptr->debug_area, DBF_WARN);
97 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
98 new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
99 if (!new_entry) {
100 debug_unregister(irq_ptr->debug_area);
101 return -ENOMEM;
102 }
103 strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
104 new_entry->dbf_info = irq_ptr->debug_area;
105 mutex_lock(&qdio_dbf_list_mutex);
106 list_add(&new_entry->dbf_list, &qdio_dbf_list);
107 mutex_unlock(&qdio_dbf_list_mutex);
108 }
109 return 0;
46} 110}
47 111
48static int qstat_show(struct seq_file *m, void *v) 112static int qstat_show(struct seq_file *m, void *v)
@@ -300,6 +364,7 @@ int __init qdio_debug_init(void)
300 364
301void qdio_debug_exit(void) 365void qdio_debug_exit(void)
302{ 366{
367 qdio_clear_dbf_list();
303 debugfs_remove(debugfs_root); 368 debugfs_remove(debugfs_root);
304 if (qdio_dbf_setup) 369 if (qdio_dbf_setup)
305 debug_unregister(qdio_dbf_setup); 370 debug_unregister(qdio_dbf_setup);
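
The qdio_debug.c changes above keep debug areas alive across qdio_free()/qdio_allocate() cycles by caching them in a name-keyed list that is only torn down from qdio_debug_exit() via qdio_clear_dbf_list(). A minimal userspace sketch of that reuse-by-name pattern, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel's mutex and list_head (the helper names here are invented for illustration):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct dbf_entry {
	char name[20];
	void *dbf_info;                 /* opaque handle; debug_info_t in the driver */
	struct dbf_entry *next;
};

static struct dbf_entry *dbf_list;
static pthread_mutex_t dbf_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Return a previously registered handle for this name, or NULL. */
static void *dbf_lookup(const char *name)
{
	struct dbf_entry *e;
	void *hit = NULL;

	pthread_mutex_lock(&dbf_list_mutex);
	for (e = dbf_list; e; e = e->next)
		if (strcmp(e->name, name) == 0) {
			hit = e->dbf_info;
			break;
		}
	pthread_mutex_unlock(&dbf_list_mutex);
	return hit;
}

/* Remember a freshly created handle so later callers reuse it instead of re-registering. */
static int dbf_remember(const char *name, void *info)
{
	struct dbf_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	strncpy(e->name, name, sizeof(e->name) - 1);
	e->dbf_info = info;
	pthread_mutex_lock(&dbf_list_mutex);
	e->next = dbf_list;
	dbf_list = e;
	pthread_mutex_unlock(&dbf_list_mutex);
	return 0;
}

The point of the cache is that qdio_free() no longer unregisters the per-device debug area (it only logs "dbf abandoned" and drops its pointer), so a later qdio_allocate() for the same device name finds the existing entry instead of registering a duplicate; everything is unregistered exactly once at module exit.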
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index dfac9bfefea3..f33ce8577619 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -75,7 +75,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
75 } 75 }
76} 76}
77 77
78void qdio_allocate_dbf(struct qdio_initialize *init_data, 78int qdio_allocate_dbf(struct qdio_initialize *init_data,
79 struct qdio_irq *irq_ptr); 79 struct qdio_irq *irq_ptr);
80void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, 80void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
81 struct ccw_device *cdev); 81 struct ccw_device *cdev);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 77466c4faabb..848e3b64ea6e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -409,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
409 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 409 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
410} 410}
411 411
412static inline void account_sbals(struct qdio_q *q, int count) 412static inline void account_sbals(struct qdio_q *q, unsigned int count)
413{ 413{
414 int pos = 0; 414 int pos;
415 415
416 q->q_stats.nr_sbal_total += count; 416 q->q_stats.nr_sbal_total += count;
417 if (count == QDIO_MAX_BUFFERS_MASK) { 417 if (count == QDIO_MAX_BUFFERS_MASK) {
418 q->q_stats.nr_sbals[7]++; 418 q->q_stats.nr_sbals[7]++;
419 return; 419 return;
420 } 420 }
421 while (count >>= 1) 421 pos = ilog2(count);
422 pos++;
423 q->q_stats.nr_sbals[pos]++; 422 q->q_stats.nr_sbals[pos]++;
424} 423}
425 424
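
The account_sbals() hunk replaces a shift-and-count loop with ilog2(), which yields the index of the most significant set bit in constant time. A self-contained sketch of the same bucketing, using __builtin_clz as a stand-in for the kernel's ilog2; as in the driver, count is assumed to be non-zero and the "all buffers" case is handled before this point:

#include <stdio.h>

static unsigned int nr_sbals[8];

static inline int ilog2_u32(unsigned int v)
{
	return 31 - __builtin_clz(v);   /* undefined for v == 0, just like ilog2() */
}

static void account_sbals(unsigned int count)
{
	/* old code: pos = 0; while (count >>= 1) pos++;  -- same bucket, no loop */
	nr_sbals[ilog2_u32(count)]++;
}

int main(void)
{
	account_sbals(1);    /* bucket 0 */
	account_sbals(37);   /* bucket 5 */
	account_sbals(100);  /* bucket 6 */
	for (int i = 0; i < 8; i++)
		printf("bucket %d: %u\n", i, nr_sbals[i]);
	return 0;
}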
@@ -1234,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
1234 return -ENODEV; 1233 return -ENODEV;
1235 1234
1236 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); 1235 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
1236 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
1237 mutex_lock(&irq_ptr->setup_mutex); 1237 mutex_lock(&irq_ptr->setup_mutex);
1238 1238
1239 if (irq_ptr->debug_area != NULL) { 1239 irq_ptr->debug_area = NULL;
1240 debug_unregister(irq_ptr->debug_area);
1241 irq_ptr->debug_area = NULL;
1242 }
1243 cdev->private->qdio_data = NULL; 1240 cdev->private->qdio_data = NULL;
1244 mutex_unlock(&irq_ptr->setup_mutex); 1241 mutex_unlock(&irq_ptr->setup_mutex);
1245 1242
@@ -1276,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
1276 goto out_err; 1273 goto out_err;
1277 1274
1278 mutex_init(&irq_ptr->setup_mutex); 1275 mutex_init(&irq_ptr->setup_mutex);
1279 qdio_allocate_dbf(init_data, irq_ptr); 1276 if (qdio_allocate_dbf(init_data, irq_ptr))
1277 goto out_rel;
1280 1278
1281 /* 1279 /*
1282 * Allocate a page for the chsc calls in qdio_establish. 1280 * Allocate a page for the chsc calls in qdio_establish.
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8eec1653c9cc..69ef4f8cfac8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -77,12 +77,12 @@ MODULE_ALIAS("z90crypt");
77 * Module parameter 77 * Module parameter
78 */ 78 */
79int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 79int ap_domain_index = -1; /* Adjunct Processor Domain Index */
80module_param_named(domain, ap_domain_index, int, 0000); 80module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
81MODULE_PARM_DESC(domain, "domain index for ap devices"); 81MODULE_PARM_DESC(domain, "domain index for ap devices");
82EXPORT_SYMBOL(ap_domain_index); 82EXPORT_SYMBOL(ap_domain_index);
83 83
84static int ap_thread_flag = 0; 84static int ap_thread_flag = 0;
85module_param_named(poll_thread, ap_thread_flag, int, 0000); 85module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
86MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 86MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
87 87
88static struct device *ap_root_device = NULL; 88static struct device *ap_root_device = NULL;
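
The ap_bus.c hunk swaps the literal permission 0000 for S_IRUSR|S_IRGRP, i.e. octal 0440, so the domain and poll_thread parameters become readable sysfs module parameters instead of being hidden. A tiny check of what those mode macros expand to (the macro values are the standard POSIX ones):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUSR = 0400, S_IRGRP = 0040 -> readable by owner and group, writable by nobody */
	printf("%04o\n", (unsigned int)(S_IRUSR | S_IRGRP));   /* prints 0440 */
	return 0;
}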
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5222ebe15705..0e18c5dcd91f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -356,7 +356,7 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
356 356
357 zops = __ops_lookup(name, variant); 357 zops = __ops_lookup(name, variant);
358 if (!zops) { 358 if (!zops) {
359 request_module(name); 359 request_module("%s", name);
360 zops = __ops_lookup(name, variant); 360 zops = __ops_lookup(name, variant);
361 } 361 }
362 if ((!zops) || (!try_module_get(zops->owner))) 362 if ((!zops) || (!try_module_get(zops->owner)))
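
The zcrypt_api.c hunk passes the module name through an explicit "%s" because request_module() takes a printf-style format string; a name that happened to contain a conversion specifier would otherwise be interpreted rather than treated as data. The same rule applies to any varargs formatter, as this deliberately wrong userspace example shows (printf stands in for request_module here):

#include <stdio.h>

int main(void)
{
	/* imagine this string arrived from a lookup table or from user input */
	const char *name = "zcrypt_msgtype%d";

	/* printf(name); */        /* wrong: "%d" would consume a nonexistent argument */
	printf("%s\n", name);      /* right: the name is data, not a format */
	return 0;
}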
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5858600bfe59..31184b35370f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
48#include <linux/bitmap.h> 48#include <linux/bitmap.h>
49#include <linux/atomic.h> 49#include <linux/atomic.h>
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/percpu.h>
51#include <asm/div64.h> 52#include <asm/div64.h>
52#include "hpsa_cmd.h" 53#include "hpsa_cmd.h"
53#include "hpsa.h" 54#include "hpsa.h"
@@ -193,7 +194,8 @@ static int number_of_controllers;
193static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 194static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
194static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 195static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
195static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 196static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
196static void start_io(struct ctlr_info *h); 197static void lock_and_start_io(struct ctlr_info *h);
198static void start_io(struct ctlr_info *h, unsigned long *flags);
197 199
198#ifdef CONFIG_COMPAT 200#ifdef CONFIG_COMPAT
199static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 201static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -695,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
695static inline u32 next_command(struct ctlr_info *h, u8 q) 697static inline u32 next_command(struct ctlr_info *h, u8 q)
696{ 698{
697 u32 a; 699 u32 a;
698 struct reply_pool *rq = &h->reply_queue[q]; 700 struct reply_queue_buffer *rq = &h->reply_queue[q];
699 unsigned long flags; 701 unsigned long flags;
700 702
701 if (h->transMethod & CFGTBL_Trans_io_accel1) 703 if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -844,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
844 spin_lock_irqsave(&h->lock, flags); 846 spin_lock_irqsave(&h->lock, flags);
845 addQ(&h->reqQ, c); 847 addQ(&h->reqQ, c);
846 h->Qdepth++; 848 h->Qdepth++;
849 start_io(h, &flags);
847 spin_unlock_irqrestore(&h->lock, flags); 850 spin_unlock_irqrestore(&h->lock, flags);
848 start_io(h);
849} 851}
850 852
851static inline void removeQ(struct CommandList *c) 853static inline void removeQ(struct CommandList *c)
@@ -1554,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1554 dev_warn(&h->pdev->dev, 1556 dev_warn(&h->pdev->dev,
1555 "%s: task complete with check condition.\n", 1557 "%s: task complete with check condition.\n",
1556 "HP SSD Smart Path"); 1558 "HP SSD Smart Path");
1559 cmd->result |= SAM_STAT_CHECK_CONDITION;
1557 if (c2->error_data.data_present != 1560 if (c2->error_data.data_present !=
1558 IOACCEL2_SENSE_DATA_PRESENT) 1561 IOACCEL2_SENSE_DATA_PRESENT) {
1562 memset(cmd->sense_buffer, 0,
1563 SCSI_SENSE_BUFFERSIZE);
1559 break; 1564 break;
1565 }
1560 /* copy the sense data */ 1566 /* copy the sense data */
1561 data_len = c2->error_data.sense_data_len; 1567 data_len = c2->error_data.sense_data_len;
1562 if (data_len > SCSI_SENSE_BUFFERSIZE) 1568 if (data_len > SCSI_SENSE_BUFFERSIZE)
@@ -1566,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1566 sizeof(c2->error_data.sense_data_buff); 1572 sizeof(c2->error_data.sense_data_buff);
1567 memcpy(cmd->sense_buffer, 1573 memcpy(cmd->sense_buffer,
1568 c2->error_data.sense_data_buff, data_len); 1574 c2->error_data.sense_data_buff, data_len);
1569 cmd->result |= SAM_STAT_CHECK_CONDITION;
1570 retry = 1; 1575 retry = 1;
1571 break; 1576 break;
1572 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 1577 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
@@ -1651,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1651 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1656 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1652 c2->error_data.serv_response == 1657 c2->error_data.serv_response ==
1653 IOACCEL2_SERV_RESPONSE_FAILURE) { 1658 IOACCEL2_SERV_RESPONSE_FAILURE) {
1654 if (c2->error_data.status ==
1655 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1656 dev_warn(&h->pdev->dev,
1657 "%s: Path is unavailable, retrying on standard path.\n",
1658 "HP SSD Smart Path");
1659 else
1660 dev_warn(&h->pdev->dev,
1661 "%s: Error 0x%02x, retrying on standard path.\n",
1662 "HP SSD Smart Path", c2->error_data.status);
1663
1664 dev->offload_enabled = 0; 1659 dev->offload_enabled = 0;
1665 h->drv_req_rescan = 1; /* schedule controller for a rescan */ 1660 h->drv_req_rescan = 1; /* schedule controller for a rescan */
1666 cmd->result = DID_SOFT_ERROR << 16; 1661 cmd->result = DID_SOFT_ERROR << 16;
@@ -1991,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1991 wait_for_completion(&wait); 1986 wait_for_completion(&wait);
1992} 1987}
1993 1988
1989static u32 lockup_detected(struct ctlr_info *h)
1990{
1991 int cpu;
1992 u32 rc, *lockup_detected;
1993
1994 cpu = get_cpu();
1995 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1996 rc = *lockup_detected;
1997 put_cpu();
1998 return rc;
1999}
2000
1994static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 2001static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1995 struct CommandList *c) 2002 struct CommandList *c)
1996{ 2003{
1997 unsigned long flags;
1998
1999 /* If controller lockup detected, fake a hardware error. */ 2004 /* If controller lockup detected, fake a hardware error. */
2000 spin_lock_irqsave(&h->lock, flags); 2005 if (unlikely(lockup_detected(h)))
2001 if (unlikely(h->lockup_detected)) {
2002 spin_unlock_irqrestore(&h->lock, flags);
2003 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 2006 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2004 } else { 2007 else
2005 spin_unlock_irqrestore(&h->lock, flags);
2006 hpsa_scsi_do_simple_cmd_core(h, c); 2008 hpsa_scsi_do_simple_cmd_core(h, c);
2007 }
2008} 2009}
2009 2010
2010#define MAX_DRIVER_CMD_RETRIES 25 2011#define MAX_DRIVER_CMD_RETRIES 25
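
With this hunk h->lockup_detected stops being a single u32 guarded by h->lock and becomes a per-cpu variable, so the hot paths above can poll it without taking the spinlock; get_cpu()/put_cpu() merely pin the reader to one CPU while it dereferences its local copy. A rough userspace analogue of the reader, with a plain array indexed by CPU standing in for alloc_percpu() (NCPUS and the helper are invented for the sketch):

#define _GNU_SOURCE
#include <sched.h>

#define NCPUS 8
static volatile unsigned int lockup_detected_pcpu[NCPUS];

static unsigned int lockup_detected(void)
{
	int cpu = sched_getcpu();            /* kernel: get_cpu() also disables preemption */

	if (cpu < 0 || cpu >= NCPUS)
		cpu = 0;
	return lockup_detected_pcpu[cpu];    /* kernel: per_cpu_ptr(h->lockup_detected, cpu) */
}

Any non-zero value on the local CPU means a lockup was flagged; the writer side (further down, in controller_lockup_detected()) stores the same value on every CPU so no reader can miss it.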
@@ -2429,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2429 buflen = 16; 2430 buflen = 16;
2430 buf = kzalloc(64, GFP_KERNEL); 2431 buf = kzalloc(64, GFP_KERNEL);
2431 if (!buf) 2432 if (!buf)
2432 return -1; 2433 return -ENOMEM;
2433 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2434 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2434 if (rc == 0) 2435 if (rc == 0)
2435 memcpy(device_id, &buf[8], buflen); 2436 memcpy(device_id, &buf[8], buflen);
@@ -2515,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
2515 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2516 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2516 2517
2517 /* Does controller have VPD for logical volume status? */ 2518 /* Does controller have VPD for logical volume status? */
2518 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) { 2519 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2519 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
2520 goto exit_failed; 2520 goto exit_failed;
2521 }
2522 2521
2523 /* Get the size of the VPD return buffer */ 2522 /* Get the size of the VPD return buffer */
2524 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2523 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2525 buf, HPSA_VPD_HEADER_SZ); 2524 buf, HPSA_VPD_HEADER_SZ);
2526 if (rc != 0) { 2525 if (rc != 0)
2527 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2528 goto exit_failed; 2526 goto exit_failed;
2529 }
2530 size = buf[3]; 2527 size = buf[3];
2531 2528
2532 /* Now get the whole VPD buffer */ 2529 /* Now get the whole VPD buffer */
2533 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2530 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2534 buf, size + HPSA_VPD_HEADER_SZ); 2531 buf, size + HPSA_VPD_HEADER_SZ);
2535 if (rc != 0) { 2532 if (rc != 0)
2536 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2537 goto exit_failed; 2533 goto exit_failed;
2538 }
2539 status = buf[4]; /* status byte */ 2534 status = buf[4]; /* status byte */
2540 2535
2541 kfree(buf); 2536 kfree(buf);
@@ -2548,11 +2543,11 @@ exit_failed:
2548/* Determine offline status of a volume. 2543/* Determine offline status of a volume.
2549 * Return either: 2544 * Return either:
2550 * 0 (not offline) 2545 * 0 (not offline)
2551 * -1 (offline for unknown reasons) 2546 * 0xff (offline for unknown reasons)
2552 * # (integer code indicating one of several NOT READY states 2547 * # (integer code indicating one of several NOT READY states
2553 * describing why a volume is to be kept offline) 2548 * describing why a volume is to be kept offline)
2554 */ 2549 */
2555static unsigned char hpsa_volume_offline(struct ctlr_info *h, 2550static int hpsa_volume_offline(struct ctlr_info *h,
2556 unsigned char scsi3addr[]) 2551 unsigned char scsi3addr[])
2557{ 2552{
2558 struct CommandList *c; 2553 struct CommandList *c;
@@ -2651,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2651 2646
2652 if (this_device->devtype == TYPE_DISK && 2647 if (this_device->devtype == TYPE_DISK &&
2653 is_logical_dev_addr_mode(scsi3addr)) { 2648 is_logical_dev_addr_mode(scsi3addr)) {
2649 int volume_offline;
2650
2654 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2651 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2655 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2652 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2656 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2653 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2657 this_device->volume_offline = 2654 volume_offline = hpsa_volume_offline(h, scsi3addr);
2658 hpsa_volume_offline(h, scsi3addr); 2655 if (volume_offline < 0 || volume_offline > 0xff)
2656 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2657 this_device->volume_offline = volume_offline & 0xff;
2659 } else { 2658 } else {
2660 this_device->raid_level = RAID_UNKNOWN; 2659 this_device->raid_level = RAID_UNKNOWN;
2661 this_device->offload_config = 0; 2660 this_device->offload_config = 0;
@@ -2861,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2861 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2860 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2862 responsesize; 2861 responsesize;
2863 2862
2864
2865 /* find ioaccel2 handle in list of physicals: */ 2863 /* find ioaccel2 handle in list of physicals: */
2866 for (i = 0; i < nphysicals; i++) { 2864 for (i = 0; i < nphysicals; i++) {
2865 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2866
2867 /* handle is in bytes 28-31 of each lun */ 2867 /* handle is in bytes 28-31 of each lun */
2868 if (memcmp(&((struct ReportExtendedLUNdata *) 2868 if (entry->ioaccel_handle != find)
2869 physicals)->LUN[i][20], &find, 4) != 0) {
2870 continue; /* didn't match */ 2869 continue; /* didn't match */
2871 }
2872 found = 1; 2870 found = 1;
2873 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) 2871 memcpy(scsi3addr, entry->lunid, 8);
2874 physicals)->LUN[i][0], 8);
2875 if (h->raid_offload_debug > 0) 2872 if (h->raid_offload_debug > 0)
2876 dev_info(&h->pdev->dev, 2873 dev_info(&h->pdev->dev,
2877 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2874 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2878 __func__, find, 2875 __func__, find,
2879 ((struct ReportExtendedLUNdata *) 2876 entry->ioaccel_handle, scsi3addr);
2880 physicals)->LUN[i][20],
2881 scsi3addr[0], scsi3addr[1], scsi3addr[2],
2882 scsi3addr[3], scsi3addr[4], scsi3addr[5],
2883 scsi3addr[6], scsi3addr[7]);
2884 break; /* found it */ 2877 break; /* found it */
2885 } 2878 }
2886 2879
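
hpsa_get_pdisk_of_ioaccel2() stops digging the handle out of bytes 20..23 of a raw u8 LUN[i][24] array and instead reads entry->ioaccel_handle and entry->lunid through the new ext_report_lun_entry layout (added in the hpsa_cmd.h hunk near the end of this patch). A compact before/after sketch of why the typed view is preferable, assuming the same 24-byte on-wire entry:

#include <stdint.h>
#include <string.h>

#pragma pack(1)                       /* layout must match the firmware's 24-byte entry */
struct ext_report_lun_entry {
	uint8_t  lunid[8];
	uint8_t  wwid[8];
	uint8_t  device_type;
	uint8_t  device_flags;
	uint8_t  lun_count;
	uint8_t  redundant_paths;
	uint32_t ioaccel_handle;      /* bytes 20-23 of the entry */
};
#pragma pack()

_Static_assert(sizeof(struct ext_report_lun_entry) == 24, "entry layout");

/* old style: the offset is a magic number scattered over the call sites */
static int match_by_offset(const uint8_t lun[24], uint32_t find)
{
	return memcmp(&lun[20], &find, 4) == 0;
}

/* new style: the field name documents the offset once, in the header */
static int match_by_field(const struct ext_report_lun_entry *e, uint32_t find)
{
	return e->ioaccel_handle == find;
}

It also lets the debug message print the address with the %8phN helper instead of eight hand-written %02x conversions.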
@@ -2965,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
2965 return RAID_CTLR_LUNID; 2958 return RAID_CTLR_LUNID;
2966 2959
2967 if (i < logicals_start) 2960 if (i < logicals_start)
2968 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 2961 return &physdev_list->LUN[i -
2962 (raid_ctlr_position == 0)].lunid[0];
2969 2963
2970 if (i < last_device) 2964 if (i < last_device)
2971 return &logdev_list->LUN[i - nphysicals - 2965 return &logdev_list->LUN[i - nphysicals -
@@ -3074,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3074 ndev_allocated++; 3068 ndev_allocated++;
3075 } 3069 }
3076 3070
3077 if (unlikely(is_scsi_rev_5(h))) 3071 if (is_scsi_rev_5(h))
3078 raid_ctlr_position = 0; 3072 raid_ctlr_position = 0;
3079 else 3073 else
3080 raid_ctlr_position = nphysicals + nlogicals; 3074 raid_ctlr_position = nphysicals + nlogicals;
@@ -3971,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3971 struct hpsa_scsi_dev_t *dev; 3965 struct hpsa_scsi_dev_t *dev;
3972 unsigned char scsi3addr[8]; 3966 unsigned char scsi3addr[8];
3973 struct CommandList *c; 3967 struct CommandList *c;
3974 unsigned long flags;
3975 int rc = 0; 3968 int rc = 0;
3976 3969
3977 /* Get the ptr to our adapter structure out of cmd->host. */ 3970 /* Get the ptr to our adapter structure out of cmd->host. */
@@ -3984,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3984 } 3977 }
3985 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3978 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3986 3979
3987 spin_lock_irqsave(&h->lock, flags); 3980 if (unlikely(lockup_detected(h))) {
3988 if (unlikely(h->lockup_detected)) {
3989 spin_unlock_irqrestore(&h->lock, flags);
3990 cmd->result = DID_ERROR << 16; 3981 cmd->result = DID_ERROR << 16;
3991 done(cmd); 3982 done(cmd);
3992 return 0; 3983 return 0;
3993 } 3984 }
3994 spin_unlock_irqrestore(&h->lock, flags);
3995 c = cmd_alloc(h); 3985 c = cmd_alloc(h);
3996 if (c == NULL) { /* trouble... */ 3986 if (c == NULL) { /* trouble... */
3997 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3987 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4103,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4103 * we can prevent new rescan threads from piling up on a 4093 * we can prevent new rescan threads from piling up on a
4104 * locked up controller. 4094 * locked up controller.
4105 */ 4095 */
4106 spin_lock_irqsave(&h->lock, flags); 4096 if (unlikely(lockup_detected(h))) {
4107 if (unlikely(h->lockup_detected)) {
4108 spin_unlock_irqrestore(&h->lock, flags);
4109 spin_lock_irqsave(&h->scan_lock, flags); 4097 spin_lock_irqsave(&h->scan_lock, flags);
4110 h->scan_finished = 1; 4098 h->scan_finished = 1;
4111 wake_up_all(&h->scan_wait_queue); 4099 wake_up_all(&h->scan_wait_queue);
4112 spin_unlock_irqrestore(&h->scan_lock, flags); 4100 spin_unlock_irqrestore(&h->scan_lock, flags);
4113 return 1; 4101 return 1;
4114 } 4102 }
4115 spin_unlock_irqrestore(&h->lock, flags);
4116 return 0; 4103 return 0;
4117} 4104}
4118 4105
@@ -4963,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4963 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4950 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4964 if (buff == NULL) 4951 if (buff == NULL)
4965 return -EFAULT; 4952 return -EFAULT;
4966 if (iocommand.Request.Type.Direction == XFER_WRITE) { 4953 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4967 /* Copy the data into the buffer we created */ 4954 /* Copy the data into the buffer we created */
4968 if (copy_from_user(buff, iocommand.buf, 4955 if (copy_from_user(buff, iocommand.buf,
4969 iocommand.buf_size)) { 4956 iocommand.buf_size)) {
@@ -5026,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5026 rc = -EFAULT; 5013 rc = -EFAULT;
5027 goto out; 5014 goto out;
5028 } 5015 }
5029 if (iocommand.Request.Type.Direction == XFER_READ && 5016 if ((iocommand.Request.Type.Direction & XFER_READ) &&
5030 iocommand.buf_size > 0) { 5017 iocommand.buf_size > 0) {
5031 /* Copy the data out of the buffer we created */ 5018 /* Copy the data out of the buffer we created */
5032 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 5019 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
@@ -5103,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5103 status = -ENOMEM; 5090 status = -ENOMEM;
5104 goto cleanup1; 5091 goto cleanup1;
5105 } 5092 }
5106 if (ioc->Request.Type.Direction == XFER_WRITE) { 5093 if (ioc->Request.Type.Direction & XFER_WRITE) {
5107 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 5094 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5108 status = -ENOMEM; 5095 status = -ENOMEM;
5109 goto cleanup1; 5096 goto cleanup1;
@@ -5155,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5155 status = -EFAULT; 5142 status = -EFAULT;
5156 goto cleanup0; 5143 goto cleanup0;
5157 } 5144 }
5158 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 5145 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5159 /* Copy the data out of the buffer we created */ 5146 /* Copy the data out of the buffer we created */
5160 BYTE __user *ptr = ioc->buf; 5147 BYTE __user *ptr = ioc->buf;
5161 for (i = 0; i < sg_used; i++) { 5148 for (i = 0; i < sg_used; i++) {
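
The passthru hunks above turn "Direction == XFER_WRITE" / "== XFER_READ" into bit tests, so a bidirectional request (XFER_READ | XFER_WRITE) now copies the user buffer in on submission and back out on completion instead of matching neither branch. A small truth-table check; the flag values are assumed to follow the usual one-bit-per-direction encoding from the cciss ioctl header:

#include <stdio.h>

#define XFER_NONE  0x00
#define XFER_WRITE 0x01     /* assumed values, for illustration only */
#define XFER_READ  0x02

static void check(unsigned int dir)
{
	printf("dir=0x%02x  copy-in:%d  copy-out:%d\n",
	       dir, !!(dir & XFER_WRITE), !!(dir & XFER_READ));
}

int main(void)
{
	check(XFER_WRITE);               /* copy-in only */
	check(XFER_READ);                /* copy-out only */
	check(XFER_READ | XFER_WRITE);   /* the old '==' tests matched neither branch here */
	return 0;
}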
@@ -5459,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
5459 5446
5460/* Takes cmds off the submission queue and sends them to the hardware, 5447/* Takes cmds off the submission queue and sends them to the hardware,
5461 * then puts them on the queue of cmds waiting for completion. 5448 * then puts them on the queue of cmds waiting for completion.
5449 * Assumes h->lock is held
5462 */ 5450 */
5463static void start_io(struct ctlr_info *h) 5451static void start_io(struct ctlr_info *h, unsigned long *flags)
5464{ 5452{
5465 struct CommandList *c; 5453 struct CommandList *c;
5466 unsigned long flags;
5467 5454
5468 spin_lock_irqsave(&h->lock, flags);
5469 while (!list_empty(&h->reqQ)) { 5455 while (!list_empty(&h->reqQ)) {
5470 c = list_entry(h->reqQ.next, struct CommandList, list); 5456 c = list_entry(h->reqQ.next, struct CommandList, list);
5471 /* can't do anything if fifo is full */ 5457 /* can't do anything if fifo is full */
@@ -5488,14 +5474,20 @@ static void start_io(struct ctlr_info *h)
5488 * condition. 5474 * condition.
5489 */ 5475 */
5490 h->commands_outstanding++; 5476 h->commands_outstanding++;
5491 if (h->commands_outstanding > h->max_outstanding)
5492 h->max_outstanding = h->commands_outstanding;
5493 5477
5494 /* Tell the controller execute command */ 5478 /* Tell the controller execute command */
5495 spin_unlock_irqrestore(&h->lock, flags); 5479 spin_unlock_irqrestore(&h->lock, *flags);
5496 h->access.submit_command(h, c); 5480 h->access.submit_command(h, c);
5497 spin_lock_irqsave(&h->lock, flags); 5481 spin_lock_irqsave(&h->lock, *flags);
5498 } 5482 }
5483}
5484
5485static void lock_and_start_io(struct ctlr_info *h)
5486{
5487 unsigned long flags;
5488
5489 spin_lock_irqsave(&h->lock, flags);
5490 start_io(h, &flags);
5499 spin_unlock_irqrestore(&h->lock, flags); 5491 spin_unlock_irqrestore(&h->lock, flags);
5500} 5492}
5501 5493
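
start_io() now runs with h->lock already held and only drops it around submit_command(), while the new lock_and_start_io() wrapper preserves the old calling convention; enqueue_cmd_and_start_io() can therefore queue a command and kick the hardware under a single lock acquisition. A skeletal userspace version of that caller-holds-lock pattern (pthread mutex in place of the spinlock; the queue and submit function are made up):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int pending[] = { 101, 102, 103 };
static int head, npending = 3;

static int queue_pop_locked(void)        /* caller must hold 'lock' */
{
	return head < npending ? pending[head++] : -1;
}

static void submit_to_hw(int cmd)        /* may be slow; never call it under the lock */
{
	(void)cmd;
}

/* Assumes the caller already holds 'lock'; returns with it held again. */
static void start_io_locked(void)
{
	int cmd;

	while ((cmd = queue_pop_locked()) >= 0) {
		pthread_mutex_unlock(&lock);
		submit_to_hw(cmd);
		pthread_mutex_lock(&lock);
	}
}

static void lock_and_start_io(void)
{
	pthread_mutex_lock(&lock);
	start_io_locked();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	lock_and_start_io();
	return 0;
}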
@@ -5563,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c)
5563 else if (c->cmd_type == CMD_IOCTL_PEND) 5555 else if (c->cmd_type == CMD_IOCTL_PEND)
5564 complete(c->waiting); 5556 complete(c->waiting);
5565 if (unlikely(io_may_be_stalled)) 5557 if (unlikely(io_may_be_stalled))
5566 start_io(h); 5558 lock_and_start_io(h);
5567} 5559}
5568 5560
5569static inline u32 hpsa_tag_contains_index(u32 tag) 5561static inline u32 hpsa_tag_contains_index(u32 tag)
@@ -5840,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5840 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5832 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5841 writel(use_doorbell, vaddr + SA5_DOORBELL); 5833 writel(use_doorbell, vaddr + SA5_DOORBELL);
5842 5834
5843 /* PMC hardware guys tell us we need a 5 second delay after 5835 /* PMC hardware guys tell us we need a 10 second delay after
5844 * doorbell reset and before any attempt to talk to the board 5836 * doorbell reset and before any attempt to talk to the board
5845 * at all to ensure that this actually works and doesn't fall 5837 * at all to ensure that this actually works and doesn't fall
5846 * over in some weird corner cases. 5838 * over in some weird corner cases.
5847 */ 5839 */
5848 msleep(5000); 5840 msleep(10000);
5849 } else { /* Try to do it the PCI power state way */ 5841 } else { /* Try to do it the PCI power state way */
5850 5842
5851 /* Quoting from the Open CISS Specification: "The Power 5843 /* Quoting from the Open CISS Specification: "The Power
@@ -6166,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
6166 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 6158 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6167 dev_info(&h->pdev->dev, "MSIX\n"); 6159 dev_info(&h->pdev->dev, "MSIX\n");
6168 h->msix_vector = MAX_REPLY_QUEUES; 6160 h->msix_vector = MAX_REPLY_QUEUES;
6161 if (h->msix_vector > num_online_cpus())
6162 h->msix_vector = num_online_cpus();
6169 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6163 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6170 h->msix_vector); 6164 h->msix_vector);
6171 if (err > 0) { 6165 if (err > 0) {
@@ -6615,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
6615 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6609 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6616} 6610}
6617 6611
6612static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6613{
6614 int i, cpu, rc;
6615
6616 cpu = cpumask_first(cpu_online_mask);
6617 for (i = 0; i < h->msix_vector; i++) {
6618 rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6619 cpu = cpumask_next(cpu, cpu_online_mask);
6620 }
6621}
6622
6618static int hpsa_request_irq(struct ctlr_info *h, 6623static int hpsa_request_irq(struct ctlr_info *h,
6619 irqreturn_t (*msixhandler)(int, void *), 6624 irqreturn_t (*msixhandler)(int, void *),
6620 irqreturn_t (*intxhandler)(int, void *)) 6625 irqreturn_t (*intxhandler)(int, void *))
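
hpsa_irq_affinity_hints() walks the online CPU mask and hands each MSI-X vector a different CPU as its affinity hint, so the reply-queue interrupts spread out instead of all landing on one CPU; the earlier hunk that caps h->msix_vector at num_online_cpus() keeps the walk from running past the end of the mask. The assignment is nothing more than a round-robin counter, as in this toy version (num_cpus and nvectors are placeholders):

#include <stdio.h>

int main(void)
{
	int num_cpus = 4, nvectors = 4;    /* nvectors is already capped at num_cpus */

	/* kernel: cpumask_first()/cpumask_next() over cpu_online_mask */
	for (int i = 0, cpu = 0; i < nvectors; i++, cpu = (cpu + 1) % num_cpus)
		printf("vector %d -> cpu %d   (irq_set_affinity_hint)\n", i, cpu);
	return 0;
}

The matching teardown in free_irqs() clears each hint with irq_set_affinity_hint(h->intr[i], NULL) before freeing the interrupt.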
@@ -6634,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
6634 rc = request_irq(h->intr[i], msixhandler, 6639 rc = request_irq(h->intr[i], msixhandler,
6635 0, h->devname, 6640 0, h->devname,
6636 &h->q[i]); 6641 &h->q[i]);
6642 hpsa_irq_affinity_hints(h);
6637 } else { 6643 } else {
6638 /* Use single reply pool */ 6644 /* Use single reply pool */
6639 if (h->msix_vector > 0 || h->msi_vector) { 6645 if (h->msix_vector > 0 || h->msi_vector) {
@@ -6685,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)
6685 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6691 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6686 /* Single reply queue, only one irq to free */ 6692 /* Single reply queue, only one irq to free */
6687 i = h->intr_mode; 6693 i = h->intr_mode;
6694 irq_set_affinity_hint(h->intr[i], NULL);
6688 free_irq(h->intr[i], &h->q[i]); 6695 free_irq(h->intr[i], &h->q[i]);
6689 return; 6696 return;
6690 } 6697 }
6691 6698
6692 for (i = 0; i < h->msix_vector; i++) 6699 for (i = 0; i < h->msix_vector; i++) {
6700 irq_set_affinity_hint(h->intr[i], NULL);
6693 free_irq(h->intr[i], &h->q[i]); 6701 free_irq(h->intr[i], &h->q[i]);
6702 }
6694} 6703}
6695 6704
6696static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6705static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
@@ -6707,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6707#endif /* CONFIG_PCI_MSI */ 6716#endif /* CONFIG_PCI_MSI */
6708} 6717}
6709 6718
6719static void hpsa_free_reply_queues(struct ctlr_info *h)
6720{
6721 int i;
6722
6723 for (i = 0; i < h->nreply_queues; i++) {
6724 if (!h->reply_queue[i].head)
6725 continue;
6726 pci_free_consistent(h->pdev, h->reply_queue_size,
6727 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6728 h->reply_queue[i].head = NULL;
6729 h->reply_queue[i].busaddr = 0;
6730 }
6731}
6732
6710static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6733static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6711{ 6734{
6712 hpsa_free_irqs_and_disable_msix(h); 6735 hpsa_free_irqs_and_disable_msix(h);
@@ -6714,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6714 hpsa_free_cmd_pool(h); 6737 hpsa_free_cmd_pool(h);
6715 kfree(h->ioaccel1_blockFetchTable); 6738 kfree(h->ioaccel1_blockFetchTable);
6716 kfree(h->blockFetchTable); 6739 kfree(h->blockFetchTable);
6717 pci_free_consistent(h->pdev, h->reply_pool_size, 6740 hpsa_free_reply_queues(h);
6718 h->reply_pool, h->reply_pool_dhandle);
6719 if (h->vaddr) 6741 if (h->vaddr)
6720 iounmap(h->vaddr); 6742 iounmap(h->vaddr);
6721 if (h->transtable) 6743 if (h->transtable)
@@ -6740,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6740 } 6762 }
6741} 6763}
6742 6764
6765static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6766{
6767 int i, cpu;
6768
6769 cpu = cpumask_first(cpu_online_mask);
6770 for (i = 0; i < num_online_cpus(); i++) {
6771 u32 *lockup_detected;
6772 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6773 *lockup_detected = value;
6774 cpu = cpumask_next(cpu, cpu_online_mask);
6775 }
6776 wmb(); /* be sure the per-cpu variables are out to memory */
6777}
6778
6743static void controller_lockup_detected(struct ctlr_info *h) 6779static void controller_lockup_detected(struct ctlr_info *h)
6744{ 6780{
6745 unsigned long flags; 6781 unsigned long flags;
6782 u32 lockup_detected;
6746 6783
6747 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6784 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6748 spin_lock_irqsave(&h->lock, flags); 6785 spin_lock_irqsave(&h->lock, flags);
6749 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6786 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6787 if (!lockup_detected) {
6788 /* no heartbeat, but controller gave us a zero. */
6789 dev_warn(&h->pdev->dev,
6790 "lockup detected but scratchpad register is zero\n");
6791 lockup_detected = 0xffffffff;
6792 }
6793 set_lockup_detected_for_all_cpus(h, lockup_detected);
6750 spin_unlock_irqrestore(&h->lock, flags); 6794 spin_unlock_irqrestore(&h->lock, flags);
6751 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6795 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6752 h->lockup_detected); 6796 lockup_detected);
6753 pci_disable_device(h->pdev); 6797 pci_disable_device(h->pdev);
6754 spin_lock_irqsave(&h->lock, flags); 6798 spin_lock_irqsave(&h->lock, flags);
6755 fail_all_cmds_on_list(h, &h->cmpQ); 6799 fail_all_cmds_on_list(h, &h->cmpQ);
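
controller_lockup_detected() now publishes the scratchpad value through set_lockup_detected_for_all_cpus(), and substitutes 0xffffffff when the register happens to read back zero, so "locked up" can never be mistaken for the all-clear value the readers test against. The writer half of the earlier per-cpu sketch, with the same array-per-CPU stand-in and __sync_synchronize() in place of wmb():

#include <stddef.h>

#define NCPUS 8
static volatile unsigned int lockup_detected_pcpu[NCPUS];   /* same stand-in as the reader sketch */

static void set_lockup_detected_for_all_cpus(unsigned int value)
{
	for (size_t cpu = 0; cpu < NCPUS; cpu++)
		lockup_detected_pcpu[cpu] = value;
	__sync_synchronize();            /* kernel: wmb(), make the stores visible */
}

static void controller_lockup_detected(unsigned int scratchpad)
{
	if (!scratchpad)                 /* no heartbeat, yet the register reads zero */
		scratchpad = 0xffffffff;
	set_lockup_detected_for_all_cpus(scratchpad);
}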
@@ -6884,7 +6928,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6884 struct ctlr_info *h = container_of(to_delayed_work(work), 6928 struct ctlr_info *h = container_of(to_delayed_work(work),
6885 struct ctlr_info, monitor_ctlr_work); 6929 struct ctlr_info, monitor_ctlr_work);
6886 detect_controller_lockup(h); 6930 detect_controller_lockup(h);
6887 if (h->lockup_detected) 6931 if (lockup_detected(h))
6888 return; 6932 return;
6889 6933
6890 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6934 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6934,7 +6978,6 @@ reinit_after_soft_reset:
6934 * the 5 lower bits of the address are used by the hardware. and by 6978 * the 5 lower bits of the address are used by the hardware. and by
6935 * the driver. See comments in hpsa.h for more info. 6979 * the driver. See comments in hpsa.h for more info.
6936 */ 6980 */
6937#define COMMANDLIST_ALIGNMENT 128
6938 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6981 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6939 h = kzalloc(sizeof(*h), GFP_KERNEL); 6982 h = kzalloc(sizeof(*h), GFP_KERNEL);
6940 if (!h) 6983 if (!h)
@@ -6949,6 +6992,13 @@ reinit_after_soft_reset:
6949 spin_lock_init(&h->offline_device_lock); 6992 spin_lock_init(&h->offline_device_lock);
6950 spin_lock_init(&h->scan_lock); 6993 spin_lock_init(&h->scan_lock);
6951 spin_lock_init(&h->passthru_count_lock); 6994 spin_lock_init(&h->passthru_count_lock);
6995
6996 /* Allocate and clear per-cpu variable lockup_detected */
6997 h->lockup_detected = alloc_percpu(u32);
6998 if (!h->lockup_detected)
6999 goto clean1;
7000 set_lockup_detected_for_all_cpus(h, 0);
7001
6952 rc = hpsa_pci_init(h); 7002 rc = hpsa_pci_init(h);
6953 if (rc != 0) 7003 if (rc != 0)
6954 goto clean1; 7004 goto clean1;
@@ -7072,6 +7122,8 @@ clean4:
7072 free_irqs(h); 7122 free_irqs(h);
7073clean2: 7123clean2:
7074clean1: 7124clean1:
7125 if (h->lockup_detected)
7126 free_percpu(h->lockup_detected);
7075 kfree(h); 7127 kfree(h);
7076 return rc; 7128 return rc;
7077} 7129}
@@ -7080,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
7080{ 7132{
7081 char *flush_buf; 7133 char *flush_buf;
7082 struct CommandList *c; 7134 struct CommandList *c;
7083 unsigned long flags;
7084 7135
7085 /* Don't bother trying to flush the cache if locked up */ 7136 /* Don't bother trying to flush the cache if locked up */
7086 spin_lock_irqsave(&h->lock, flags); 7137 if (unlikely(lockup_detected(h)))
7087 if (unlikely(h->lockup_detected)) {
7088 spin_unlock_irqrestore(&h->lock, flags);
7089 return; 7138 return;
7090 }
7091 spin_unlock_irqrestore(&h->lock, flags);
7092
7093 flush_buf = kzalloc(4, GFP_KERNEL); 7139 flush_buf = kzalloc(4, GFP_KERNEL);
7094 if (!flush_buf) 7140 if (!flush_buf)
7095 return; 7141 return;
@@ -7165,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
7165 pci_free_consistent(h->pdev, 7211 pci_free_consistent(h->pdev,
7166 h->nr_cmds * sizeof(struct ErrorInfo), 7212 h->nr_cmds * sizeof(struct ErrorInfo),
7167 h->errinfo_pool, h->errinfo_pool_dhandle); 7213 h->errinfo_pool, h->errinfo_pool_dhandle);
7168 pci_free_consistent(h->pdev, h->reply_pool_size, 7214 hpsa_free_reply_queues(h);
7169 h->reply_pool, h->reply_pool_dhandle);
7170 kfree(h->cmd_pool_bits); 7215 kfree(h->cmd_pool_bits);
7171 kfree(h->blockFetchTable); 7216 kfree(h->blockFetchTable);
7172 kfree(h->ioaccel1_blockFetchTable); 7217 kfree(h->ioaccel1_blockFetchTable);
@@ -7174,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
7174 kfree(h->hba_inquiry_data); 7219 kfree(h->hba_inquiry_data);
7175 pci_disable_device(pdev); 7220 pci_disable_device(pdev);
7176 pci_release_regions(pdev); 7221 pci_release_regions(pdev);
7222 free_percpu(h->lockup_detected);
7177 kfree(h); 7223 kfree(h);
7178} 7224}
7179 7225
@@ -7278,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7278 * 10 = 6 s/g entry or 24k 7324 * 10 = 6 s/g entry or 24k
7279 */ 7325 */
7280 7326
7327 /* If the controller supports either ioaccel method then
7328 * we can also use the RAID stack submit path that does not
7329 * perform the superfluous readl() after each command submission.
7330 */
7331 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7332 access = SA5_performant_access_no_read;
7333
7281 /* Controller spec: zero out this buffer. */ 7334 /* Controller spec: zero out this buffer. */
7282 memset(h->reply_pool, 0, h->reply_pool_size); 7335 for (i = 0; i < h->nreply_queues; i++)
7336 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7283 7337
7284 bft[7] = SG_ENTRIES_IN_CMD + 4; 7338 bft[7] = SG_ENTRIES_IN_CMD + 4;
7285 calc_bucket_map(bft, ARRAY_SIZE(bft), 7339 calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7295,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7295 7349
7296 for (i = 0; i < h->nreply_queues; i++) { 7350 for (i = 0; i < h->nreply_queues; i++) {
7297 writel(0, &h->transtable->RepQAddr[i].upper); 7351 writel(0, &h->transtable->RepQAddr[i].upper);
7298 writel(h->reply_pool_dhandle + 7352 writel(h->reply_queue[i].busaddr,
7299 (h->max_commands * sizeof(u64) * i),
7300 &h->transtable->RepQAddr[i].lower); 7353 &h->transtable->RepQAddr[i].lower);
7301 } 7354 }
7302 7355
@@ -7344,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7344 h->ioaccel1_blockFetchTable); 7397 h->ioaccel1_blockFetchTable);
7345 7398
7346 /* initialize all reply queue entries to unused */ 7399 /* initialize all reply queue entries to unused */
7347 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED, 7400 for (i = 0; i < h->nreply_queues; i++)
7348 h->reply_pool_size); 7401 memset(h->reply_queue[i].head,
7402 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7403 h->reply_queue_size);
7349 7404
7350 /* set all the constant fields in the accelerator command 7405 /* set all the constant fields in the accelerator command
7351 * frames once at init time to save CPU cycles later. 7406 * frames once at init time to save CPU cycles later.
@@ -7407,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7407 * because the 7 lower bits of the address are used by the 7462 * because the 7 lower bits of the address are used by the
7408 * hardware. 7463 * hardware.
7409 */ 7464 */
7410#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
7411 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 7465 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7412 IOACCEL1_COMMANDLIST_ALIGNMENT); 7466 IOACCEL1_COMMANDLIST_ALIGNMENT);
7413 h->ioaccel_cmd_pool = 7467 h->ioaccel_cmd_pool =
@@ -7445,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7445 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 7499 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7446 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 7500 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7447 7501
7448#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
7449 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 7502 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7450 IOACCEL2_COMMANDLIST_ALIGNMENT); 7503 IOACCEL2_COMMANDLIST_ALIGNMENT);
7451 h->ioaccel2_cmd_pool = 7504 h->ioaccel2_cmd_pool =
@@ -7503,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7503 } 7556 }
7504 } 7557 }
7505 7558
7506 /* TODO, check that this next line h->nreply_queues is correct */
7507 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; 7559 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7508 hpsa_get_max_perf_mode_cmds(h); 7560 hpsa_get_max_perf_mode_cmds(h);
7509 /* Performant mode ring buffer and supporting data structures */ 7561 /* Performant mode ring buffer and supporting data structures */
7510 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; 7562 h->reply_queue_size = h->max_commands * sizeof(u64);
7511 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
7512 &(h->reply_pool_dhandle));
7513 7563
7514 for (i = 0; i < h->nreply_queues; i++) { 7564 for (i = 0; i < h->nreply_queues; i++) {
7515 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; 7565 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7566 h->reply_queue_size,
7567 &(h->reply_queue[i].busaddr));
7568 if (!h->reply_queue[i].head)
7569 goto clean_up;
7516 h->reply_queue[i].size = h->max_commands; 7570 h->reply_queue[i].size = h->max_commands;
7517 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ 7571 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7518 h->reply_queue[i].current_entry = 0; 7572 h->reply_queue[i].current_entry = 0;
@@ -7521,18 +7575,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7521 /* Need a block fetch table for performant mode */ 7575 /* Need a block fetch table for performant mode */
7522 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 7576 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7523 sizeof(u32)), GFP_KERNEL); 7577 sizeof(u32)), GFP_KERNEL);
7524 7578 if (!h->blockFetchTable)
7525 if ((h->reply_pool == NULL)
7526 || (h->blockFetchTable == NULL))
7527 goto clean_up; 7579 goto clean_up;
7528 7580
7529 hpsa_enter_performant_mode(h, trans_support); 7581 hpsa_enter_performant_mode(h, trans_support);
7530 return; 7582 return;
7531 7583
7532clean_up: 7584clean_up:
7533 if (h->reply_pool) 7585 hpsa_free_reply_queues(h);
7534 pci_free_consistent(h->pdev, h->reply_pool_size,
7535 h->reply_pool, h->reply_pool_dhandle);
7536 kfree(h->blockFetchTable); 7586 kfree(h->blockFetchTable);
7537} 7587}
7538 7588
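
Taken together, the performant-mode hunks replace the single reply_pool (one large coherent allocation sliced into h->nreply_queues pieces) with one DMA buffer per reply_queue_buffer, each carrying its own busaddr that gets written into RepQAddr[i]; hpsa_free_reply_queues() then tears the buffers down one by one. A skeleton of the allocate-per-queue / unwind-on-partial-failure shape, with calloc/free standing in for pci_alloc_consistent/pci_free_consistent and made-up sizes:

#include <stdlib.h>

#define MAX_QUEUES 64

struct reply_queue_buffer {
	unsigned long long *head;
	unsigned long long busaddr;     /* DMA handle in the driver */
};

static struct reply_queue_buffer reply_queue[MAX_QUEUES];

static void free_reply_queues(int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		free(reply_queue[i].head);        /* driver: pci_free_consistent() */
		reply_queue[i].head = NULL;
		reply_queue[i].busaddr = 0;
	}
}

static int alloc_reply_queues(int nqueues, size_t qsize)
{
	for (int i = 0; i < nqueues; i++) {
		reply_queue[i].head = calloc(1, qsize);   /* driver: pci_alloc_consistent() */
		if (!reply_queue[i].head) {
			free_reply_queues(nqueues);       /* unwind whatever was allocated */
			return -1;
		}
	}
	return 0;
}

The bump of MAX_REPLY_QUEUES from 8 to 64 in the hpsa_cmd.h hunk below is what makes this per-queue bookkeeping worthwhile.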
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 1e3cf33a82cf..24472cec7de3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t {
57 57
58}; 58};
59 59
60struct reply_pool { 60struct reply_queue_buffer {
61 u64 *head; 61 u64 *head;
62 size_t size; 62 size_t size;
63 u8 wraparound; 63 u8 wraparound;
64 u32 current_entry; 64 u32 current_entry;
65 dma_addr_t busaddr;
65}; 66};
66 67
67#pragma pack(1) 68#pragma pack(1)
@@ -116,11 +117,8 @@ struct ctlr_info {
116 int nr_cmds; /* Number of commands allowed on this controller */ 117 int nr_cmds; /* Number of commands allowed on this controller */
117 struct CfgTable __iomem *cfgtable; 118 struct CfgTable __iomem *cfgtable;
118 int interrupts_enabled; 119 int interrupts_enabled;
119 int major;
120 int max_commands; 120 int max_commands;
121 int commands_outstanding; 121 int commands_outstanding;
122 int max_outstanding; /* Debug */
123 int usage_count; /* number of opens all all minor devices */
124# define PERF_MODE_INT 0 122# define PERF_MODE_INT 0
125# define DOORBELL_INT 1 123# define DOORBELL_INT 1
126# define SIMPLE_MODE_INT 2 124# define SIMPLE_MODE_INT 2
@@ -177,11 +175,9 @@ struct ctlr_info {
177 /* 175 /*
178 * Performant mode completion buffers 176 * Performant mode completion buffers
179 */ 177 */
180 u64 *reply_pool; 178 size_t reply_queue_size;
181 size_t reply_pool_size; 179 struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
182 struct reply_pool reply_queue[MAX_REPLY_QUEUES];
183 u8 nreply_queues; 180 u8 nreply_queues;
184 dma_addr_t reply_pool_dhandle;
185 u32 *blockFetchTable; 181 u32 *blockFetchTable;
186 u32 *ioaccel1_blockFetchTable; 182 u32 *ioaccel1_blockFetchTable;
187 u32 *ioaccel2_blockFetchTable; 183 u32 *ioaccel2_blockFetchTable;
@@ -196,7 +192,7 @@ struct ctlr_info {
196 u64 last_heartbeat_timestamp; 192 u64 last_heartbeat_timestamp;
197 u32 heartbeat_sample_interval; 193 u32 heartbeat_sample_interval;
198 atomic_t firmware_flash_in_progress; 194 atomic_t firmware_flash_in_progress;
199 u32 lockup_detected; 195 u32 *lockup_detected;
200 struct delayed_work monitor_ctlr_work; 196 struct delayed_work monitor_ctlr_work;
201 int remove_in_progress; 197 int remove_in_progress;
202 u32 fifo_recently_full; 198 u32 fifo_recently_full;
@@ -233,11 +229,9 @@ struct ctlr_info {
233#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) 229#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
234 230
235#define RESCAN_REQUIRED_EVENT_BITS \ 231#define RESCAN_REQUIRED_EVENT_BITS \
236 (CTLR_STATE_CHANGE_EVENT | \ 232 (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
237 CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
238 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ 233 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
239 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ 234 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
240 CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
241 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ 235 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
242 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) 236 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
243 spinlock_t offline_device_lock; 237 spinlock_t offline_device_lock;
@@ -346,22 +340,23 @@ struct offline_device_entry {
346static void SA5_submit_command(struct ctlr_info *h, 340static void SA5_submit_command(struct ctlr_info *h,
347 struct CommandList *c) 341 struct CommandList *c)
348{ 342{
349 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
350 c->Header.Tag.lower);
351 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 343 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
352 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 344 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
353} 345}
354 346
347static void SA5_submit_command_no_read(struct ctlr_info *h,
348 struct CommandList *c)
349{
350 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
351}
352
355static void SA5_submit_command_ioaccel2(struct ctlr_info *h, 353static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
356 struct CommandList *c) 354 struct CommandList *c)
357{ 355{
358 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
359 c->Header.Tag.lower);
360 if (c->cmd_type == CMD_IOACCEL2) 356 if (c->cmd_type == CMD_IOACCEL2)
361 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); 357 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
362 else 358 else
363 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 359 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
364 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
365} 360}
366 361
367/* 362/*
@@ -399,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
399 394
400static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) 395static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
401{ 396{
402 struct reply_pool *rq = &h->reply_queue[q]; 397 struct reply_queue_buffer *rq = &h->reply_queue[q];
403 unsigned long flags, register_value = FIFO_EMPTY; 398 unsigned long flags, register_value = FIFO_EMPTY;
404 399
405 /* msi auto clears the interrupt pending bit. */ 400 /* msi auto clears the interrupt pending bit. */
@@ -478,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h)
478{ 473{
479 unsigned long register_value = 474 unsigned long register_value =
480 readl(h->vaddr + SA5_INTR_STATUS); 475 readl(h->vaddr + SA5_INTR_STATUS);
481 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
482 return register_value & SA5_INTR_PENDING; 476 return register_value & SA5_INTR_PENDING;
483} 477}
484 478
@@ -515,7 +509,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
515static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) 509static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
516{ 510{
517 u64 register_value; 511 u64 register_value;
518 struct reply_pool *rq = &h->reply_queue[q]; 512 struct reply_queue_buffer *rq = &h->reply_queue[q];
519 unsigned long flags; 513 unsigned long flags;
520 514
521 BUG_ON(q >= h->nreply_queues); 515 BUG_ON(q >= h->nreply_queues);
@@ -573,6 +567,14 @@ static struct access_method SA5_performant_access = {
573 SA5_performant_completed, 567 SA5_performant_completed,
574}; 568};
575 569
570static struct access_method SA5_performant_access_no_read = {
571 SA5_submit_command_no_read,
572 SA5_performant_intr_mask,
573 SA5_fifo_full,
574 SA5_performant_intr_pending,
575 SA5_performant_completed,
576};
577
576struct board_type { 578struct board_type {
577 u32 board_id; 579 u32 board_id;
578 char *product_name; 580 char *product_name;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index b5cc7052339f..b5125dc31439 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -151,7 +151,7 @@
151#define HPSA_VPD_HEADER_SZ 4 151#define HPSA_VPD_HEADER_SZ 4
152 152
153/* Logical volume states */ 153/* Logical volume states */
154#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1 154#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
155#define HPSA_LV_OK 0x0 155#define HPSA_LV_OK 0x0
156#define HPSA_LV_UNDERGOING_ERASE 0x0F 156#define HPSA_LV_UNDERGOING_ERASE 0x0F
157#define HPSA_LV_UNDERGOING_RPI 0x12 157#define HPSA_LV_UNDERGOING_RPI 0x12
@@ -238,11 +238,21 @@ struct ReportLUNdata {
238 u8 LUN[HPSA_MAX_LUN][8]; 238 u8 LUN[HPSA_MAX_LUN][8];
239}; 239};
240 240
241struct ext_report_lun_entry {
242 u8 lunid[8];
243 u8 wwid[8];
244 u8 device_type;
245 u8 device_flags;
246 u8 lun_count; /* multi-lun device, how many luns */
247 u8 redundant_paths;
248 u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
249};
250
241struct ReportExtendedLUNdata { 251struct ReportExtendedLUNdata {
242 u8 LUNListLength[4]; 252 u8 LUNListLength[4];
243 u8 extended_response_flag; 253 u8 extended_response_flag;
244 u8 reserved[3]; 254 u8 reserved[3];
245 u8 LUN[HPSA_MAX_LUN][24]; 255 struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
246}; 256};
247 257
248struct SenseSubsystem_info { 258struct SenseSubsystem_info {
@@ -375,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */
375 * or a bus address. 385 * or a bus address.
376 */ 386 */
377 387
388#define COMMANDLIST_ALIGNMENT 128
378struct CommandList { 389struct CommandList {
379 struct CommandListHeader Header; 390 struct CommandListHeader Header;
380 struct RequestBlock Request; 391 struct RequestBlock Request;
@@ -389,21 +400,7 @@ struct CommandList {
389 struct list_head list; 400 struct list_head list;
390 struct completion *waiting; 401 struct completion *waiting;
391 void *scsi_cmd; 402 void *scsi_cmd;
392 403} __aligned(COMMANDLIST_ALIGNMENT);
393/* on 64 bit architectures, to get this to be 32-byte-aligned
394 * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
395 * we need PAD_32 bytes of padding (see below). This does that.
396 * If it happens that 64 bit and 32 bit systems need different
397 * padding, PAD_32 and PAD_64 can be set independently, and.
398 * the code below will do the right thing.
399 */
400#define IS_32_BIT ((8 - sizeof(long))/4)
401#define IS_64_BIT (!IS_32_BIT)
402#define PAD_32 (40)
403#define PAD_64 (12)
404#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
405 u8 pad[COMMANDLIST_PAD];
406};
407 404
408/* Max S/G elements in I/O accelerator command */ 405/* Max S/G elements in I/O accelerator command */
409#define IOACCEL1_MAXSGENTRIES 24 406#define IOACCEL1_MAXSGENTRIES 24
@@ -413,6 +410,7 @@ struct CommandList {
413 * Structure for I/O accelerator (mode 1) commands. 410 * Structure for I/O accelerator (mode 1) commands.
414 * Note that this structure must be 128-byte aligned in size. 411 * Note that this structure must be 128-byte aligned in size.
415 */ 412 */
413#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
416struct io_accel1_cmd { 414struct io_accel1_cmd {
417 u16 dev_handle; /* 0x00 - 0x01 */ 415 u16 dev_handle; /* 0x00 - 0x01 */
418 u8 reserved1; /* 0x02 */ 416 u8 reserved1; /* 0x02 */
@@ -440,12 +438,7 @@ struct io_accel1_cmd {
440 struct vals32 host_addr; /* 0x70 - 0x77 */ 438 struct vals32 host_addr; /* 0x70 - 0x77 */
441 u8 CISS_LUN[8]; /* 0x78 - 0x7F */ 439 u8 CISS_LUN[8]; /* 0x78 - 0x7F */
442 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; 440 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
443#define IOACCEL1_PAD_64 0 441} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
444#define IOACCEL1_PAD_32 0
445#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
446 IS_64_BIT * IOACCEL1_PAD_64)
447 u8 pad[IOACCEL1_PAD];
448};
449 442
450#define IOACCEL1_FUNCTION_SCSIIO 0x00 443#define IOACCEL1_FUNCTION_SCSIIO 0x00
451#define IOACCEL1_SGLOFFSET 32 444#define IOACCEL1_SGLOFFSET 32
@@ -510,14 +503,11 @@ struct io_accel2_scsi_response {
510 u8 sense_data_buff[32]; /* sense/response data buffer */ 503 u8 sense_data_buff[32]; /* sense/response data buffer */
511}; 504};
512 505
513#define IOACCEL2_64_PAD 76
514#define IOACCEL2_32_PAD 76
515#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
516 IS_64_BIT * IOACCEL2_64_PAD)
517/* 506/*
518 * Structure for I/O accelerator (mode 2 or m2) commands. 507 * Structure for I/O accelerator (mode 2 or m2) commands.
519 * Note that this structure must be 128-byte aligned in size. 508 * Note that this structure must be 128-byte aligned in size.
520 */ 509 */
510#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
521struct io_accel2_cmd { 511struct io_accel2_cmd {
522 u8 IU_type; /* IU Type */ 512 u8 IU_type; /* IU Type */
523 u8 direction; /* direction, memtype, and encryption */ 513 u8 direction; /* direction, memtype, and encryption */
@@ -544,8 +534,7 @@ struct io_accel2_cmd {
544 u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ 534 u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
545 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; 535 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
546 struct io_accel2_scsi_response error_data; 536 struct io_accel2_scsi_response error_data;
547 u8 pad[IOACCEL2_PAD]; 537} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
548};
549 538
550/* 539/*
551 * defines for Mode 2 command struct 540 * defines for Mode 2 command struct
@@ -636,7 +625,7 @@ struct TransTable_struct {
636 u32 RepQCount; 625 u32 RepQCount;
637 u32 RepQCtrAddrLow32; 626 u32 RepQCtrAddrLow32;
638 u32 RepQCtrAddrHigh32; 627 u32 RepQCtrAddrHigh32;
639#define MAX_REPLY_QUEUES 8 628#define MAX_REPLY_QUEUES 64
640 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; 629 struct vals32 RepQAddr[MAX_REPLY_QUEUES];
641}; 630};
642 631
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 94a3cafe7197..434e9037908e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -640,6 +640,7 @@ struct lpfc_hba {
640#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 640#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
641#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ 641#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
642#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ 642#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
643#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
643 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 644 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
644 struct lpfc_dmabuf slim2p; 645 struct lpfc_dmabuf slim2p;
645 646
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8d5b6ceec9c9..1d7a5c34ee8c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -919,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
919 phba->cfg_sriov_nr_virtfn = 0; 919 phba->cfg_sriov_nr_virtfn = 0;
920 } 920 }
921 921
922 if (opcode == LPFC_FW_DUMP)
923 phba->hba_flag |= HBA_FW_DUMP_OP;
924
922 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 925 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
923 926
924 if (status != 0) 927 if (status != 0) {
928 phba->hba_flag &= ~HBA_FW_DUMP_OP;
925 return status; 929 return status;
930 }
926 931
927 /* wait for the device to be quiesced before firmware reset */ 932 /* wait for the device to be quiesced before firmware reset */
928 msleep(100); 933 msleep(100);
@@ -2364,7 +2369,7 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2364 uint8_t wwpn[WWN_SZ]; 2369 uint8_t wwpn[WWN_SZ];
2365 int rc; 2370 int rc;
2366 2371
2367 if (!phba->cfg_EnableXLane) 2372 if (!phba->cfg_fof)
2368 return -EPERM; 2373 return -EPERM;
2369 2374
2370 /* count may include a LF at end of string */ 2375 /* count may include a LF at end of string */
@@ -2432,7 +2437,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
2432 uint8_t wwpn[WWN_SZ]; 2437 uint8_t wwpn[WWN_SZ];
2433 int rc; 2438 int rc;
2434 2439
2435 if (!phba->cfg_EnableXLane) 2440 if (!phba->cfg_fof)
2436 return -EPERM; 2441 return -EPERM;
2437 2442
2438 /* count may include a LF at end of string */ 2443 /* count may include a LF at end of string */
@@ -2499,7 +2504,7 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
2499 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2504 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2500 int val = 0; 2505 int val = 0;
2501 2506
2502 if (!phba->cfg_EnableXLane) 2507 if (!phba->cfg_fof)
2503 return -EPERM; 2508 return -EPERM;
2504 2509
2505 if (!isdigit(buf[0])) 2510 if (!isdigit(buf[0]))
@@ -2565,7 +2570,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2565 2570
2566 int rc = 0; 2571 int rc = 0;
2567 2572
2568 if (!phba->cfg_EnableXLane) 2573 if (!phba->cfg_fof)
2569 return -EPERM; 2574 return -EPERM;
2570 2575
2571 if (oas_state) { 2576 if (oas_state) {
@@ -2670,7 +2675,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
2670 uint64_t oas_lun; 2675 uint64_t oas_lun;
2671 int len = 0; 2676 int len = 0;
2672 2677
2673 if (!phba->cfg_EnableXLane) 2678 if (!phba->cfg_fof)
2674 return -EPERM; 2679 return -EPERM;
2675 2680
2676 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 2681 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -2716,7 +2721,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
2716 uint64_t scsi_lun; 2721 uint64_t scsi_lun;
2717 ssize_t rc; 2722 ssize_t rc;
2718 2723
2719 if (!phba->cfg_EnableXLane) 2724 if (!phba->cfg_fof)
2720 return -EPERM; 2725 return -EPERM;
2721 2726
2722 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 2727 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -4655,7 +4660,7 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
4655# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) 4660# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
4656# Value range is [0x0,0x7f]. Default value is 0 4661# Value range is [0x0,0x7f]. Default value is 0
4657*/ 4662*/
4658LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); 4663LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
4659 4664
4660/* 4665/*
4661# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 4666# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
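
The lpfc_sli4_pdev_reg_request() hunk above sets HBA_FW_DUMP_OP before taking the port offline and clears it again if the offline request fails, so a later reset path can tell a firmware dump is in progress. A stripped-down sketch of that set/roll-back-on-error pattern (plain C, invented names, not the driver's API):

    #include <stdio.h>

    #define FLAG_FW_DUMP_OP 0x10000u   /* stand-in for HBA_FW_DUMP_OP */

    static unsigned int hba_flag;

    /* Stand-in for lpfc_do_offline(); fails here to exercise the rollback. */
    static int do_offline(void) { return -1; }

    static int request_fw_dump(void)
    {
        int status;

        hba_flag |= FLAG_FW_DUMP_OP;          /* announce the dump first */

        status = do_offline();
        if (status != 0) {
            hba_flag &= ~FLAG_FW_DUMP_OP;     /* roll back on failure */
            return status;
        }
        return 0;
    }

    int main(void)
    {
        printf("status=%d, flag=0x%x\n", request_fw_dump(), hba_flag);
        return 0;
    }
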
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index ca2f4ea7cdef..5b5c825d9576 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index a94d4c9dfaa5..928ef609f363 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2010-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index adda0bf7a244..db5604f01a1a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -289,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
289void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 289void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
290void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 290void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
291void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 291void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
292void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
292void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); 293void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
293void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 294void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
294int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 295int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -310,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
310int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); 311int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
311int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, 312int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
312 uint64_t, lpfc_ctx_cmd); 313 uint64_t, lpfc_ctx_cmd);
314int
315lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
316 uint16_t, uint64_t, lpfc_ctx_cmd);
313 317
314void lpfc_mbox_timeout(unsigned long); 318void lpfc_mbox_timeout(unsigned long);
315void lpfc_mbox_timeout_handler(struct lpfc_hba *); 319void lpfc_mbox_timeout_handler(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 828c08e9389e..b0aedce3f54b 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -2314,7 +2314,7 @@ proc_cq:
2314 goto too_big; 2314 goto too_big;
2315 } 2315 }
2316 2316
2317 if (phba->cfg_EnableXLane) { 2317 if (phba->cfg_fof) {
2318 2318
2319 /* OAS CQ */ 2319 /* OAS CQ */
2320 qp = phba->sli4_hba.oas_cq; 2320 qp = phba->sli4_hba.oas_cq;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 624fe0b3cc0b..7a5d81a65be8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 294c072e9083..2a17e31265b8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5634,6 +5634,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5634 ndlp->active_rrqs_xri_bitmap = 5634 ndlp->active_rrqs_xri_bitmap =
5635 mempool_alloc(vport->phba->active_rrq_pool, 5635 mempool_alloc(vport->phba->active_rrq_pool,
5636 GFP_KERNEL); 5636 GFP_KERNEL);
5637 if (ndlp->active_rrqs_xri_bitmap)
5638 memset(ndlp->active_rrqs_xri_bitmap, 0,
5639 ndlp->phba->cfg_rrq_xri_bitmap_sz);
5637 } 5640 }
5638 5641
5639 5642
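
The lpfc_hbadisc.c hunk above zeroes the RRQ XRI bitmap right after it comes out of the mempool: pool allocators hand back recycled, uninitialised memory, so any buffer that must start out clear has to be memset() by the caller. The same idea in self-contained userspace form (size and names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITMAP_SZ 64   /* stand-in for phba->cfg_rrq_xri_bitmap_sz */

    int main(void)
    {
        /* malloc(), like a recycled pool buffer, guarantees nothing about
         * contents, so clear it before treating it as an empty bitmap. */
        unsigned char *bitmap = malloc(BITMAP_SZ);

        if (!bitmap)
            return 1;
        memset(bitmap, 0, BITMAP_SZ);
        printf("first byte after clearing: %u\n", bitmap[0]);
        free(bitmap);
        return 0;
    }
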
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3d9438ce59ab..236259252379 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index fd79f7de7666..f432ec180cf8 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 635eeb3d6987..06f9a5b79e66 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -820,57 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
820} 820}
821 821
822/** 822/**
823 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 823 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
824 * rspiocb which got deferred
825 *
824 * @phba: pointer to lpfc HBA data structure. 826 * @phba: pointer to lpfc HBA data structure.
825 * 827 *
826 * This routine will do uninitialization after the HBA is reset when bring 828 * This routine will cleanup completed slow path events after HBA is reset
827 * down the SLI Layer. 829 * when bringing down the SLI Layer.
830 *
828 * 831 *
829 * Return codes 832 * Return codes
830 * 0 - success. 833 * void.
831 * Any other value - error.
832 **/ 834 **/
833static int 835static void
834lpfc_hba_down_post_s3(struct lpfc_hba *phba) 836lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
837{
838 struct lpfc_iocbq *rspiocbq;
839 struct hbq_dmabuf *dmabuf;
840 struct lpfc_cq_event *cq_event;
841
842 spin_lock_irq(&phba->hbalock);
843 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
844 spin_unlock_irq(&phba->hbalock);
845
846 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
847 /* Get the response iocb from the head of work queue */
848 spin_lock_irq(&phba->hbalock);
849 list_remove_head(&phba->sli4_hba.sp_queue_event,
850 cq_event, struct lpfc_cq_event, list);
851 spin_unlock_irq(&phba->hbalock);
852
853 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
854 case CQE_CODE_COMPL_WQE:
855 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
856 cq_event);
857 lpfc_sli_release_iocbq(phba, rspiocbq);
858 break;
859 case CQE_CODE_RECEIVE:
860 case CQE_CODE_RECEIVE_V1:
861 dmabuf = container_of(cq_event, struct hbq_dmabuf,
862 cq_event);
863 lpfc_in_buf_free(phba, &dmabuf->dbuf);
864 }
865 }
866}
867
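
lpfc_sli4_free_sp_events() above recovers the owning iocb or HBQ buffer from each queued cq_event with container_of() before releasing it. For readers less familiar with that idiom, here is a tiny standalone example of the same pointer arithmetic (toy types, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cq_event { int code; };

    /* A larger object that embeds the event, much as lpfc_iocbq does. */
    struct iocb {
        int tag;
        struct cq_event ev;
    };

    int main(void)
    {
        struct iocb io = { .tag = 42, .ev = { .code = 1 } };
        struct cq_event *ev = &io.ev;

        /* Walk back from the embedded member to the containing struct. */
        struct iocb *owner = container_of(ev, struct iocb, ev);
        printf("owner tag = %d\n", owner->tag);
        return 0;
    }
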
868/**
869 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
870 * @phba: pointer to lpfc HBA data structure.
871 *
872 * This routine will cleanup posted ELS buffers after the HBA is reset
873 * when bringing down the SLI Layer.
874 *
875 *
876 * Return codes
877 * void.
878 **/
879static void
880lpfc_hba_free_post_buf(struct lpfc_hba *phba)
835{ 881{
836 struct lpfc_sli *psli = &phba->sli; 882 struct lpfc_sli *psli = &phba->sli;
837 struct lpfc_sli_ring *pring; 883 struct lpfc_sli_ring *pring;
838 struct lpfc_dmabuf *mp, *next_mp; 884 struct lpfc_dmabuf *mp, *next_mp;
839 LIST_HEAD(completions); 885 LIST_HEAD(buflist);
840 int i; 886 int count;
841 887
842 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 888 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
843 lpfc_sli_hbqbuf_free_all(phba); 889 lpfc_sli_hbqbuf_free_all(phba);
844 else { 890 else {
845 /* Cleanup preposted buffers on the ELS ring */ 891 /* Cleanup preposted buffers on the ELS ring */
846 pring = &psli->ring[LPFC_ELS_RING]; 892 pring = &psli->ring[LPFC_ELS_RING];
847 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 893 spin_lock_irq(&phba->hbalock);
894 list_splice_init(&pring->postbufq, &buflist);
895 spin_unlock_irq(&phba->hbalock);
896
897 count = 0;
898 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
848 list_del(&mp->list); 899 list_del(&mp->list);
849 pring->postbufq_cnt--; 900 count++;
850 lpfc_mbuf_free(phba, mp->virt, mp->phys); 901 lpfc_mbuf_free(phba, mp->virt, mp->phys);
851 kfree(mp); 902 kfree(mp);
852 } 903 }
904
905 spin_lock_irq(&phba->hbalock);
906 pring->postbufq_cnt -= count;
907 spin_unlock_irq(&phba->hbalock);
853 } 908 }
909}
910
911/**
912 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
913 * @phba: pointer to lpfc HBA data structure.
914 *
915 * This routine will cleanup the txcmplq after the HBA is reset when bringing
916 * down the SLI Layer.
917 *
918 * Return codes
919 * void
920 **/
921static void
922lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
923{
924 struct lpfc_sli *psli = &phba->sli;
925 struct lpfc_sli_ring *pring;
926 LIST_HEAD(completions);
927 int i;
854 928
855 spin_lock_irq(&phba->hbalock);
856 for (i = 0; i < psli->num_rings; i++) { 929 for (i = 0; i < psli->num_rings; i++) {
857 pring = &psli->ring[i]; 930 pring = &psli->ring[i];
858 931 if (phba->sli_rev >= LPFC_SLI_REV4)
932 spin_lock_irq(&pring->ring_lock);
933 else
934 spin_lock_irq(&phba->hbalock);
859 /* At this point in time the HBA is either reset or DOA. Either 935 /* At this point in time the HBA is either reset or DOA. Either
860 * way, nothing should be on txcmplq as it will NEVER complete. 936 * way, nothing should be on txcmplq as it will NEVER complete.
861 */ 937 */
862 list_splice_init(&pring->txcmplq, &completions); 938 list_splice_init(&pring->txcmplq, &completions);
863 spin_unlock_irq(&phba->hbalock); 939 pring->txcmplq_cnt = 0;
940
941 if (phba->sli_rev >= LPFC_SLI_REV4)
942 spin_unlock_irq(&pring->ring_lock);
943 else
944 spin_unlock_irq(&phba->hbalock);
864 945
865 /* Cancel all the IOCBs from the completions list */ 946 /* Cancel all the IOCBs from the completions list */
866 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 947 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
867 IOERR_SLI_ABORTED); 948 IOERR_SLI_ABORTED);
868
869 lpfc_sli_abort_iocb_ring(phba, pring); 949 lpfc_sli_abort_iocb_ring(phba, pring);
870 spin_lock_irq(&phba->hbalock);
871 } 950 }
872 spin_unlock_irq(&phba->hbalock); 951}
873 952
953/**
954 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
955 int i;
956 * @phba: pointer to lpfc HBA data structure.
957 *
958 * This routine will do uninitialization after the HBA is reset when bring
959 * down the SLI Layer.
960 *
961 * Return codes
962 * 0 - success.
963 * Any other value - error.
964 **/
965static int
966lpfc_hba_down_post_s3(struct lpfc_hba *phba)
967{
968 lpfc_hba_free_post_buf(phba);
969 lpfc_hba_clean_txcmplq(phba);
874 return 0; 970 return 0;
875} 971}
876 972
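
With the refactor above, the SLI-3 and SLI-4 post-reset paths no longer call into each other: both invoke the two new helpers, and the SLI-4 variant adds its own extra cleanup on top. A compact sketch of that shape (function and field names invented; the driver selects the variant through its per-revision API tables, which are not shown here):

    #include <stdio.h>

    struct hba { int sli_rev; };

    /* Helpers shared by both reset paths. */
    static void free_post_buf(struct hba *p)  { (void)p; puts("free posted ELS buffers"); }
    static void clean_txcmplq(struct hba *p)  { (void)p; puts("drain and cancel txcmplq"); }

    static int down_post_s3(struct hba *p)
    {
        free_post_buf(p);
        clean_txcmplq(p);
        return 0;
    }

    static int down_post_s4(struct hba *p)
    {
        free_post_buf(p);
        clean_txcmplq(p);
        puts("SLI-4 only: requeue aborted sgls/bufs, free deferred sp events");
        return 0;
    }

    int main(void)
    {
        struct hba h = { .sli_rev = 4 };
        int (*down_post)(struct hba *) =
            h.sli_rev >= 4 ? down_post_s4 : down_post_s3;

        return down_post(&h);
    }
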
@@ -890,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
890{ 986{
891 struct lpfc_scsi_buf *psb, *psb_next; 987 struct lpfc_scsi_buf *psb, *psb_next;
892 LIST_HEAD(aborts); 988 LIST_HEAD(aborts);
893 int ret;
894 unsigned long iflag = 0; 989 unsigned long iflag = 0;
895 struct lpfc_sglq *sglq_entry = NULL; 990 struct lpfc_sglq *sglq_entry = NULL;
896 991
897 ret = lpfc_hba_down_post_s3(phba); 992 lpfc_hba_free_post_buf(phba);
898 if (ret) 993 lpfc_hba_clean_txcmplq(phba);
899 return ret; 994
900 /* At this point in time the HBA is either reset or DOA. Either 995 /* At this point in time the HBA is either reset or DOA. Either
901 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 996 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
902 * on the lpfc_sgl_list so that it can either be freed if the 997 * on the lpfc_sgl_list so that it can either be freed if the
@@ -932,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
932 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1027 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
933 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1028 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
934 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1029 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1030
1031 lpfc_sli4_free_sp_events(phba);
935 return 0; 1032 return 0;
936} 1033}
937 1034
@@ -1250,7 +1347,6 @@ static void
1250lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1347lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1251{ 1348{
1252 uint32_t old_host_status = phba->work_hs; 1349 uint32_t old_host_status = phba->work_hs;
1253 struct lpfc_sli_ring *pring;
1254 struct lpfc_sli *psli = &phba->sli; 1350 struct lpfc_sli *psli = &phba->sli;
1255 1351
1256 /* If the pci channel is offline, ignore possible errors, 1352 /* If the pci channel is offline, ignore possible errors,
@@ -1279,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1279 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1375 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1280 * SCSI layer retry it after re-establishing link. 1376 * SCSI layer retry it after re-establishing link.
1281 */ 1377 */
1282 pring = &psli->ring[psli->fcp_ring]; 1378 lpfc_sli_abort_fcp_rings(phba);
1283 lpfc_sli_abort_iocb_ring(phba, pring);
1284 1379
1285 /* 1380 /*
1286 * There was a firmware error. Take the hba offline and then 1381 * There was a firmware error. Take the hba offline and then
@@ -1348,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1348{ 1443{
1349 struct lpfc_vport *vport = phba->pport; 1444 struct lpfc_vport *vport = phba->pport;
1350 struct lpfc_sli *psli = &phba->sli; 1445 struct lpfc_sli *psli = &phba->sli;
1351 struct lpfc_sli_ring *pring;
1352 uint32_t event_data; 1446 uint32_t event_data;
1353 unsigned long temperature; 1447 unsigned long temperature;
1354 struct temp_event temp_event_data; 1448 struct temp_event temp_event_data;
@@ -1400,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1400 * Error iocb (I/O) on txcmplq and let the SCSI layer 1494 * Error iocb (I/O) on txcmplq and let the SCSI layer
1401 * retry it after re-establishing link. 1495 * retry it after re-establishing link.
1402 */ 1496 */
1403 pring = &psli->ring[psli->fcp_ring]; 1497 lpfc_sli_abort_fcp_rings(phba);
1404 lpfc_sli_abort_iocb_ring(phba, pring);
1405 1498
1406 /* 1499 /*
1407 * There was a firmware error. Take the hba offline and then 1500 * There was a firmware error. Take the hba offline and then
@@ -1940,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1940 2033
1941 switch (dev_id) { 2034 switch (dev_id) {
1942 case PCI_DEVICE_ID_FIREFLY: 2035 case PCI_DEVICE_ID_FIREFLY:
1943 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 2036 m = (typeof(m)){"LP6000", "PCI",
2037 "Obsolete, Unsupported Fibre Channel Adapter"};
1944 break; 2038 break;
1945 case PCI_DEVICE_ID_SUPERFLY: 2039 case PCI_DEVICE_ID_SUPERFLY:
1946 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2040 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1947 m = (typeof(m)){"LP7000", "PCI", 2041 m = (typeof(m)){"LP7000", "PCI", ""};
1948 "Fibre Channel Adapter"};
1949 else 2042 else
1950 m = (typeof(m)){"LP7000E", "PCI", 2043 m = (typeof(m)){"LP7000E", "PCI", ""};
1951 "Fibre Channel Adapter"}; 2044 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
1952 break; 2045 break;
1953 case PCI_DEVICE_ID_DRAGONFLY: 2046 case PCI_DEVICE_ID_DRAGONFLY:
1954 m = (typeof(m)){"LP8000", "PCI", 2047 m = (typeof(m)){"LP8000", "PCI",
1955 "Fibre Channel Adapter"}; 2048 "Obsolete, Unsupported Fibre Channel Adapter"};
1956 break; 2049 break;
1957 case PCI_DEVICE_ID_CENTAUR: 2050 case PCI_DEVICE_ID_CENTAUR:
1958 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2051 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1959 m = (typeof(m)){"LP9002", "PCI", 2052 m = (typeof(m)){"LP9002", "PCI", ""};
1960 "Fibre Channel Adapter"};
1961 else 2053 else
1962 m = (typeof(m)){"LP9000", "PCI", 2054 m = (typeof(m)){"LP9000", "PCI", ""};
1963 "Fibre Channel Adapter"}; 2055 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
1964 break; 2056 break;
1965 case PCI_DEVICE_ID_RFLY: 2057 case PCI_DEVICE_ID_RFLY:
1966 m = (typeof(m)){"LP952", "PCI", 2058 m = (typeof(m)){"LP952", "PCI",
1967 "Fibre Channel Adapter"}; 2059 "Obsolete, Unsupported Fibre Channel Adapter"};
1968 break; 2060 break;
1969 case PCI_DEVICE_ID_PEGASUS: 2061 case PCI_DEVICE_ID_PEGASUS:
1970 m = (typeof(m)){"LP9802", "PCI-X", 2062 m = (typeof(m)){"LP9802", "PCI-X",
1971 "Fibre Channel Adapter"}; 2063 "Obsolete, Unsupported Fibre Channel Adapter"};
1972 break; 2064 break;
1973 case PCI_DEVICE_ID_THOR: 2065 case PCI_DEVICE_ID_THOR:
1974 m = (typeof(m)){"LP10000", "PCI-X", 2066 m = (typeof(m)){"LP10000", "PCI-X",
1975 "Fibre Channel Adapter"}; 2067 "Obsolete, Unsupported Fibre Channel Adapter"};
1976 break; 2068 break;
1977 case PCI_DEVICE_ID_VIPER: 2069 case PCI_DEVICE_ID_VIPER:
1978 m = (typeof(m)){"LPX1000", "PCI-X", 2070 m = (typeof(m)){"LPX1000", "PCI-X",
1979 "Fibre Channel Adapter"}; 2071 "Obsolete, Unsupported Fibre Channel Adapter"};
1980 break; 2072 break;
1981 case PCI_DEVICE_ID_PFLY: 2073 case PCI_DEVICE_ID_PFLY:
1982 m = (typeof(m)){"LP982", "PCI-X", 2074 m = (typeof(m)){"LP982", "PCI-X",
1983 "Fibre Channel Adapter"}; 2075 "Obsolete, Unsupported Fibre Channel Adapter"};
1984 break; 2076 break;
1985 case PCI_DEVICE_ID_TFLY: 2077 case PCI_DEVICE_ID_TFLY:
1986 m = (typeof(m)){"LP1050", "PCI-X", 2078 m = (typeof(m)){"LP1050", "PCI-X",
1987 "Fibre Channel Adapter"}; 2079 "Obsolete, Unsupported Fibre Channel Adapter"};
1988 break; 2080 break;
1989 case PCI_DEVICE_ID_HELIOS: 2081 case PCI_DEVICE_ID_HELIOS:
1990 m = (typeof(m)){"LP11000", "PCI-X2", 2082 m = (typeof(m)){"LP11000", "PCI-X2",
1991 "Fibre Channel Adapter"}; 2083 "Obsolete, Unsupported Fibre Channel Adapter"};
1992 break; 2084 break;
1993 case PCI_DEVICE_ID_HELIOS_SCSP: 2085 case PCI_DEVICE_ID_HELIOS_SCSP:
1994 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2086 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1995 "Fibre Channel Adapter"}; 2087 "Obsolete, Unsupported Fibre Channel Adapter"};
1996 break; 2088 break;
1997 case PCI_DEVICE_ID_HELIOS_DCSP: 2089 case PCI_DEVICE_ID_HELIOS_DCSP:
1998 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2090 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1999 "Fibre Channel Adapter"}; 2091 "Obsolete, Unsupported Fibre Channel Adapter"};
2000 break; 2092 break;
2001 case PCI_DEVICE_ID_NEPTUNE: 2093 case PCI_DEVICE_ID_NEPTUNE:
2002 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 2094 m = (typeof(m)){"LPe1000", "PCIe",
2095 "Obsolete, Unsupported Fibre Channel Adapter"};
2003 break; 2096 break;
2004 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2097 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2005 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 2098 m = (typeof(m)){"LPe1000-SP", "PCIe",
2099 "Obsolete, Unsupported Fibre Channel Adapter"};
2006 break; 2100 break;
2007 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2101 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2008 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 2102 m = (typeof(m)){"LPe1002-SP", "PCIe",
2103 "Obsolete, Unsupported Fibre Channel Adapter"};
2009 break; 2104 break;
2010 case PCI_DEVICE_ID_BMID: 2105 case PCI_DEVICE_ID_BMID:
2011 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2106 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2012 break; 2107 break;
2013 case PCI_DEVICE_ID_BSMB: 2108 case PCI_DEVICE_ID_BSMB:
2014 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 2109 m = (typeof(m)){"LP111", "PCI-X2",
2110 "Obsolete, Unsupported Fibre Channel Adapter"};
2015 break; 2111 break;
2016 case PCI_DEVICE_ID_ZEPHYR: 2112 case PCI_DEVICE_ID_ZEPHYR:
2017 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2113 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
@@ -2030,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2030 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2126 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2031 break; 2127 break;
2032 case PCI_DEVICE_ID_LP101: 2128 case PCI_DEVICE_ID_LP101:
2033 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 2129 m = (typeof(m)){"LP101", "PCI-X",
2130 "Obsolete, Unsupported Fibre Channel Adapter"};
2034 break; 2131 break;
2035 case PCI_DEVICE_ID_LP10000S: 2132 case PCI_DEVICE_ID_LP10000S:
2036 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 2133 m = (typeof(m)){"LP10000-S", "PCI",
2134 "Obsolete, Unsupported Fibre Channel Adapter"};
2037 break; 2135 break;
2038 case PCI_DEVICE_ID_LP11000S: 2136 case PCI_DEVICE_ID_LP11000S:
2039 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 2137 m = (typeof(m)){"LP11000-S", "PCI-X2",
2138 "Obsolete, Unsupported Fibre Channel Adapter"};
2040 break; 2139 break;
2041 case PCI_DEVICE_ID_LPE11000S: 2140 case PCI_DEVICE_ID_LPE11000S:
2042 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 2141 m = (typeof(m)){"LPe11000-S", "PCIe",
2142 "Obsolete, Unsupported Fibre Channel Adapter"};
2043 break; 2143 break;
2044 case PCI_DEVICE_ID_SAT: 2144 case PCI_DEVICE_ID_SAT:
2045 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2145 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
@@ -2060,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2060 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2160 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2061 break; 2161 break;
2062 case PCI_DEVICE_ID_HORNET: 2162 case PCI_DEVICE_ID_HORNET:
2063 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 2163 m = (typeof(m)){"LP21000", "PCIe",
2164 "Obsolete, Unsupported FCoE Adapter"};
2064 GE = 1; 2165 GE = 1;
2065 break; 2166 break;
2066 case PCI_DEVICE_ID_PROTEUS_VF: 2167 case PCI_DEVICE_ID_PROTEUS_VF:
2067 m = (typeof(m)){"LPev12000", "PCIe IOV", 2168 m = (typeof(m)){"LPev12000", "PCIe IOV",
2068 "Fibre Channel Adapter"}; 2169 "Obsolete, Unsupported Fibre Channel Adapter"};
2069 break; 2170 break;
2070 case PCI_DEVICE_ID_PROTEUS_PF: 2171 case PCI_DEVICE_ID_PROTEUS_PF:
2071 m = (typeof(m)){"LPev12000", "PCIe IOV", 2172 m = (typeof(m)){"LPev12000", "PCIe IOV",
2072 "Fibre Channel Adapter"}; 2173 "Obsolete, Unsupported Fibre Channel Adapter"};
2073 break; 2174 break;
2074 case PCI_DEVICE_ID_PROTEUS_S: 2175 case PCI_DEVICE_ID_PROTEUS_S:
2075 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2176 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2076 "Fibre Channel Adapter"}; 2177 "Obsolete, Unsupported Fibre Channel Adapter"};
2077 break; 2178 break;
2078 case PCI_DEVICE_ID_TIGERSHARK: 2179 case PCI_DEVICE_ID_TIGERSHARK:
2079 oneConnect = 1; 2180 oneConnect = 1;
@@ -2089,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2089 break; 2190 break;
2090 case PCI_DEVICE_ID_BALIUS: 2191 case PCI_DEVICE_ID_BALIUS:
2091 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2192 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2092 "Fibre Channel Adapter"}; 2193 "Obsolete, Unsupported Fibre Channel Adapter"};
2093 break; 2194 break;
2094 case PCI_DEVICE_ID_LANCER_FC: 2195 case PCI_DEVICE_ID_LANCER_FC:
2095 case PCI_DEVICE_ID_LANCER_FC_VF:
2096 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2196 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2097 break; 2197 break;
2198 case PCI_DEVICE_ID_LANCER_FC_VF:
2199 m = (typeof(m)){"LPe16000", "PCIe",
2200 "Obsolete, Unsupported Fibre Channel Adapter"};
2201 break;
2098 case PCI_DEVICE_ID_LANCER_FCOE: 2202 case PCI_DEVICE_ID_LANCER_FCOE:
2099 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2100 oneConnect = 1; 2203 oneConnect = 1;
2101 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2204 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2102 break; 2205 break;
2206 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2207 oneConnect = 1;
2208 m = (typeof(m)){"OCe15100", "PCIe",
2209 "Obsolete, Unsupported FCoE"};
2210 break;
2103 case PCI_DEVICE_ID_SKYHAWK: 2211 case PCI_DEVICE_ID_SKYHAWK:
2104 case PCI_DEVICE_ID_SKYHAWK_VF: 2212 case PCI_DEVICE_ID_SKYHAWK_VF:
2105 oneConnect = 1; 2213 oneConnect = 1;
@@ -4614,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4614 phba->link_state = LPFC_HBA_ERROR; 4722 phba->link_state = LPFC_HBA_ERROR;
4615 return; 4723 return;
4616 } 4724 }
4617 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4725 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4726 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4727 else
4728 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4618 lpfc_offline(phba); 4729 lpfc_offline(phba);
4619 lpfc_sli_brdrestart(phba); 4730 lpfc_sli_brdrestart(phba);
4620 lpfc_online(phba); 4731 lpfc_online(phba);
@@ -9663,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9663static void 9774static void
9664lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 9775lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9665{ 9776{
9666 struct lpfc_sli *psli = &phba->sli;
9667 struct lpfc_sli_ring *pring;
9668
9669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9670 "2723 PCI channel I/O abort preparing for recovery\n"); 9778 "2723 PCI channel I/O abort preparing for recovery\n");
9671 9779
@@ -9673,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9673 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9781 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9674 * and let the SCSI mid-layer to retry them to recover. 9782 * and let the SCSI mid-layer to retry them to recover.
9675 */ 9783 */
9676 pring = &psli->ring[psli->fcp_ring]; 9784 lpfc_sli_abort_fcp_rings(phba);
9677 lpfc_sli_abort_iocb_ring(phba, pring);
9678} 9785}
9679 9786
9680/** 9787/**
@@ -10417,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10417static void 10524static void
10418lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 10525lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10419{ 10526{
10420 struct lpfc_sli *psli = &phba->sli;
10421 struct lpfc_sli_ring *pring;
10422
10423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10424 "2828 PCI channel I/O abort preparing for recovery\n"); 10528 "2828 PCI channel I/O abort preparing for recovery\n");
10425 /* 10529 /*
10426 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10530 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
10427 * and let the SCSI mid-layer to retry them to recover. 10531 * and let the SCSI mid-layer to retry them to recover.
10428 */ 10532 */
10429 pring = &psli->ring[psli->fcp_ring]; 10533 lpfc_sli_abort_fcp_rings(phba);
10430 lpfc_sli_abort_iocb_ring(phba, pring);
10431} 10534}
10432 10535
10433/** 10536/**
@@ -10898,7 +11001,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10898 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11001 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10899 phba->cfg_fof = 1; 11002 phba->cfg_fof = 1;
10900 } else { 11003 } else {
10901 phba->cfg_EnableXLane = 0; 11004 phba->cfg_fof = 0;
10902 if (phba->device_data_mem_pool) 11005 if (phba->device_data_mem_pool)
10903 mempool_destroy(phba->device_data_mem_pool); 11006 mempool_destroy(phba->device_data_mem_pool);
10904 phba->device_data_mem_pool = NULL; 11007 phba->device_data_mem_pool = NULL;
@@ -10928,7 +11031,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
10928 if (rc) 11031 if (rc)
10929 return -ENOMEM; 11032 return -ENOMEM;
10930 11033
10931 if (phba->cfg_EnableXLane) { 11034 if (phba->cfg_fof) {
10932 11035
10933 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 11036 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
10934 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); 11037 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
@@ -10947,8 +11050,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
10947 return 0; 11050 return 0;
10948 11051
10949out_oas_wq: 11052out_oas_wq:
10950 if (phba->cfg_EnableXLane) 11053 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10951 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10952out_oas_cq: 11054out_oas_cq:
10953 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 11055 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
10954 return rc; 11056 return rc;
@@ -10982,7 +11084,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
10982 11084
10983 phba->sli4_hba.fof_eq = qdesc; 11085 phba->sli4_hba.fof_eq = qdesc;
10984 11086
10985 if (phba->cfg_EnableXLane) { 11087 if (phba->cfg_fof) {
10986 11088
10987 /* Create OAS CQ */ 11089 /* Create OAS CQ */
10988 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 11090 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ed419aad2b1f..3fa65338d3f5 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 462453ee0bda..2df11daad85b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -73,7 +73,7 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73{ 73{
74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75 75
76 if (vport->phba->cfg_EnableXLane) 76 if (vport->phba->cfg_fof)
77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 else 78 else
79 return (struct lpfc_rport_data *)sdev->hostdata; 79 return (struct lpfc_rport_data *)sdev->hostdata;
@@ -3462,7 +3462,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3462 * If the OAS driver feature is enabled and the lun is enabled for 3462 * If the OAS driver feature is enabled and the lun is enabled for
3463 * OAS, set the oas iocb related flags. 3463 * OAS, set the oas iocb related flags.
3464 */ 3464 */
3465 if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *) 3465 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3466 scsi_cmnd->device->hostdata)->oas_enabled) 3466 scsi_cmnd->device->hostdata)->oas_enabled)
3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; 3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
3468 return 0; 3468 return 0;
@@ -4314,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4314 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4314 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4315 4315
4316 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 4316 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4317 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4317 4318
4318 /* 4319 /*
4319 * There are three possibilities here - use scatter-gather segment, use 4320 * There are three possibilities here - use scatter-gather segment, use
@@ -4782,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4782 struct lpfc_scsi_buf *lpfc_cmd; 4783 struct lpfc_scsi_buf *lpfc_cmd;
4783 IOCB_t *cmd, *icmd; 4784 IOCB_t *cmd, *icmd;
4784 int ret = SUCCESS, status = 0; 4785 int ret = SUCCESS, status = 0;
4785 unsigned long flags; 4786 struct lpfc_sli_ring *pring_s4;
4787 int ring_number, ret_val;
4788 unsigned long flags, iflags;
4786 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4789 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4787 4790
4788 status = fc_block_scsi_eh(cmnd); 4791 status = fc_block_scsi_eh(cmnd);
@@ -4833,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4833 4836
4834 BUG_ON(iocb->context1 != lpfc_cmd); 4837 BUG_ON(iocb->context1 != lpfc_cmd);
4835 4838
4839 /* abort issued in recovery is still in progress */
4840 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4841 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4842 "3389 SCSI Layer I/O Abort Request is pending\n");
4843 spin_unlock_irqrestore(&phba->hbalock, flags);
4844 goto wait_for_cmpl;
4845 }
4846
4836 abtsiocb = __lpfc_sli_get_iocbq(phba); 4847 abtsiocb = __lpfc_sli_get_iocbq(phba);
4837 if (abtsiocb == NULL) { 4848 if (abtsiocb == NULL) {
4838 ret = FAILED; 4849 ret = FAILED;
@@ -4871,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4871 4882
4872 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4883 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4873 abtsiocb->vport = vport; 4884 abtsiocb->vport = vport;
4885 if (phba->sli_rev == LPFC_SLI_REV4) {
4886 ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
4887 pring_s4 = &phba->sli.ring[ring_number];
4888 /* Note: both hbalock and ring_lock must be set here */
4889 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
4890 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4891 abtsiocb, 0);
4892 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
4893 } else {
4894 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4895 abtsiocb, 0);
4896 }
4874 /* no longer need the lock after this point */ 4897 /* no longer need the lock after this point */
4875 spin_unlock_irqrestore(&phba->hbalock, flags); 4898 spin_unlock_irqrestore(&phba->hbalock, flags);
4876 4899
4877 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4900
4878 IOCB_ERROR) { 4901 if (ret_val == IOCB_ERROR) {
4879 lpfc_sli_release_iocbq(phba, abtsiocb); 4902 lpfc_sli_release_iocbq(phba, abtsiocb);
4880 ret = FAILED; 4903 ret = FAILED;
4881 goto out; 4904 goto out;
@@ -4885,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4885 lpfc_sli_handle_fast_ring_event(phba, 4908 lpfc_sli_handle_fast_ring_event(phba,
4886 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 4909 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4887 4910
4911wait_for_cmpl:
4888 lpfc_cmd->waitq = &waitq; 4912 lpfc_cmd->waitq = &waitq;
4889 /* Wait for abort to complete */ 4913 /* Wait for abort to complete */
4890 wait_event_timeout(waitq, 4914 wait_event_timeout(waitq,
4891 (lpfc_cmd->pCmd != cmnd), 4915 (lpfc_cmd->pCmd != cmnd),
4892 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 4916 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4917
4918 spin_lock_irqsave(shost->host_lock, flags);
4893 lpfc_cmd->waitq = NULL; 4919 lpfc_cmd->waitq = NULL;
4920 spin_unlock_irqrestore(shost->host_lock, flags);
4894 4921
4895 if (lpfc_cmd->pCmd == cmnd) { 4922 if (lpfc_cmd->pCmd == cmnd) {
4896 ret = FAILED; 4923 ret = FAILED;
@@ -5172,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5172 5199
5173 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5200 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5174 if (cnt) 5201 if (cnt)
5175 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 5202 lpfc_sli_abort_taskmgmt(vport,
5176 tgt_id, lun_id, context); 5203 &phba->sli.ring[phba->sli.fcp_ring],
5204 tgt_id, lun_id, context);
5177 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5205 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5178 while (time_after(later, jiffies) && cnt) { 5206 while (time_after(later, jiffies) && cnt) {
5179 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 5207 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
@@ -5491,7 +5519,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5491 if (!rport || fc_remote_port_chkready(rport)) 5519 if (!rport || fc_remote_port_chkready(rport))
5492 return -ENXIO; 5520 return -ENXIO;
5493 5521
5494 if (phba->cfg_EnableXLane) { 5522 if (phba->cfg_fof) {
5495 5523
5496 /* 5524 /*
5497 * Check to see if the device data structure for the lun 5525 * Check to see if the device data structure for the lun
@@ -5616,7 +5644,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
5616 struct lpfc_device_data *device_data = sdev->hostdata; 5644 struct lpfc_device_data *device_data = sdev->hostdata;
5617 5645
5618 atomic_dec(&phba->sdev_cnt); 5646 atomic_dec(&phba->sdev_cnt);
5619 if ((phba->cfg_EnableXLane) && (device_data)) { 5647 if ((phba->cfg_fof) && (device_data)) {
5620 spin_lock_irqsave(&phba->devicelock, flags); 5648 spin_lock_irqsave(&phba->devicelock, flags);
5621 device_data->available = false; 5649 device_data->available = false;
5622 if (!device_data->oas_enabled) 5650 if (!device_data->oas_enabled)
@@ -5655,7 +5683,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5655 int memory_flags; 5683 int memory_flags;
5656 5684
5657 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5685 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5658 !(phba->cfg_EnableXLane)) 5686 !(phba->cfg_fof))
5659 return NULL; 5687 return NULL;
5660 5688
5661 /* Attempt to create the device data to contain lun info */ 5689 /* Attempt to create the device data to contain lun info */
@@ -5693,7 +5721,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
5693{ 5721{
5694 5722
5695 if (unlikely(!phba) || !lun_info || 5723 if (unlikely(!phba) || !lun_info ||
5696 !(phba->cfg_EnableXLane)) 5724 !(phba->cfg_fof))
5697 return; 5725 return;
5698 5726
5699 if (!list_empty(&lun_info->listentry)) 5727 if (!list_empty(&lun_info->listentry))
@@ -5727,7 +5755,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5727 struct lpfc_device_data *lun_info; 5755 struct lpfc_device_data *lun_info;
5728 5756
5729 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 5757 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5730 !phba->cfg_EnableXLane) 5758 !phba->cfg_fof)
5731 return NULL; 5759 return NULL;
5732 5760
5733 /* Check to see if the lun is already enabled for OAS. */ 5761 /* Check to see if the lun is already enabled for OAS. */
@@ -5789,7 +5817,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5789 !starting_lun || !found_vport_wwpn || 5817 !starting_lun || !found_vport_wwpn ||
5790 !found_target_wwpn || !found_lun || !found_lun_status || 5818 !found_target_wwpn || !found_lun || !found_lun_status ||
5791 (*starting_lun == NO_MORE_OAS_LUN) || 5819 (*starting_lun == NO_MORE_OAS_LUN) ||
5792 !phba->cfg_EnableXLane) 5820 !phba->cfg_fof)
5793 return false; 5821 return false;
5794 5822
5795 lun = *starting_lun; 5823 lun = *starting_lun;
@@ -5873,7 +5901,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5873 unsigned long flags; 5901 unsigned long flags;
5874 5902
5875 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5903 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5876 !phba->cfg_EnableXLane) 5904 !phba->cfg_fof)
5877 return false; 5905 return false;
5878 5906
5879 spin_lock_irqsave(&phba->devicelock, flags); 5907 spin_lock_irqsave(&phba->devicelock, flags);
@@ -5930,7 +5958,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5930 unsigned long flags; 5958 unsigned long flags;
5931 5959
5932 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5960 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5933 !phba->cfg_EnableXLane) 5961 !phba->cfg_fof)
5934 return false; 5962 return false;
5935 5963
5936 spin_lock_irqsave(&phba->devicelock, flags); 5964 spin_lock_irqsave(&phba->devicelock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0120bfccf50b..0389ac1e7b83 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 393662c24df5..32ada0505576 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3532,14 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3532 /* Error everything on txq and txcmplq 3532 /* Error everything on txq and txcmplq
3533 * First do the txq. 3533 * First do the txq.
3534 */ 3534 */
3535 spin_lock_irq(&phba->hbalock); 3535 if (phba->sli_rev >= LPFC_SLI_REV4) {
3536 list_splice_init(&pring->txq, &completions); 3536 spin_lock_irq(&pring->ring_lock);
3537 list_splice_init(&pring->txq, &completions);
3538 pring->txq_cnt = 0;
3539 spin_unlock_irq(&pring->ring_lock);
3537 3540
3538 /* Next issue ABTS for everything on the txcmplq */ 3541 spin_lock_irq(&phba->hbalock);
3539 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3542 /* Next issue ABTS for everything on the txcmplq */
3540 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3543 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3544 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3545 spin_unlock_irq(&phba->hbalock);
3546 } else {
3547 spin_lock_irq(&phba->hbalock);
3548 list_splice_init(&pring->txq, &completions);
3549 pring->txq_cnt = 0;
3541 3550
3542 spin_unlock_irq(&phba->hbalock); 3551 /* Next issue ABTS for everything on the txcmplq */
3552 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3553 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3554 spin_unlock_irq(&phba->hbalock);
3555 }
3543 3556
3544 /* Cancel all the IOCBs from the completions list */ 3557 /* Cancel all the IOCBs from the completions list */
3545 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3558 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -3547,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3547} 3560}
3548 3561
3549/** 3562/**
3563 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3564 * @phba: Pointer to HBA context object.
3565 * @pring: Pointer to driver SLI ring object.
3566 *
3567 * This function aborts all iocbs in FCP rings and frees all the iocb
3568 * objects in txq. This function issues an abort iocb for all the iocb commands
3569 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3570 * the return of this function. The caller is not required to hold any locks.
3571 **/
3572void
3573lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3574{
3575 struct lpfc_sli *psli = &phba->sli;
3576 struct lpfc_sli_ring *pring;
3577 uint32_t i;
3578
3579 /* Look on all the FCP Rings for the iotag */
3580 if (phba->sli_rev >= LPFC_SLI_REV4) {
3581 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3582 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3583 lpfc_sli_abort_iocb_ring(phba, pring);
3584 }
3585 } else {
3586 pring = &psli->ring[psli->fcp_ring];
3587 lpfc_sli_abort_iocb_ring(phba, pring);
3588 }
3589}
3590
3591
3592/**
3550 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3593 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3551 * @phba: Pointer to HBA context object. 3594 * @phba: Pointer to HBA context object.
3552 * 3595 *
@@ -3563,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3563 LIST_HEAD(txcmplq); 3606 LIST_HEAD(txcmplq);
3564 struct lpfc_sli *psli = &phba->sli; 3607 struct lpfc_sli *psli = &phba->sli;
3565 struct lpfc_sli_ring *pring; 3608 struct lpfc_sli_ring *pring;
3566 3609 uint32_t i;
3567 /* Currently, only one fcp ring */
3568 pring = &psli->ring[psli->fcp_ring];
3569 3610
3570 spin_lock_irq(&phba->hbalock); 3611 spin_lock_irq(&phba->hbalock);
3571 /* Retrieve everything on txq */
3572 list_splice_init(&pring->txq, &txq);
3573
3574 /* Retrieve everything on the txcmplq */
3575 list_splice_init(&pring->txcmplq, &txcmplq);
3576
3577 /* Indicate the I/O queues are flushed */ 3612 /* Indicate the I/O queues are flushed */
3578 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3613 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3579 spin_unlock_irq(&phba->hbalock); 3614 spin_unlock_irq(&phba->hbalock);
3580 3615
3581 /* Flush the txq */ 3616 /* Look on all the FCP Rings for the iotag */
3582 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3617 if (phba->sli_rev >= LPFC_SLI_REV4) {
3583 IOERR_SLI_DOWN); 3618 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3619 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3620
3621 spin_lock_irq(&pring->ring_lock);
3622 /* Retrieve everything on txq */
3623 list_splice_init(&pring->txq, &txq);
3624 /* Retrieve everything on the txcmplq */
3625 list_splice_init(&pring->txcmplq, &txcmplq);
3626 pring->txq_cnt = 0;
3627 pring->txcmplq_cnt = 0;
3628 spin_unlock_irq(&pring->ring_lock);
3629
3630 /* Flush the txq */
3631 lpfc_sli_cancel_iocbs(phba, &txq,
3632 IOSTAT_LOCAL_REJECT,
3633 IOERR_SLI_DOWN);
3634 /* Flush the txcmpq */
3635 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3636 IOSTAT_LOCAL_REJECT,
3637 IOERR_SLI_DOWN);
3638 }
3639 } else {
3640 pring = &psli->ring[psli->fcp_ring];
3584 3641
3585 /* Flush the txcmpq */ 3642 spin_lock_irq(&phba->hbalock);
3586 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3643 /* Retrieve everything on txq */
3587 IOERR_SLI_DOWN); 3644 list_splice_init(&pring->txq, &txq);
3645 /* Retrieve everything on the txcmplq */
3646 list_splice_init(&pring->txcmplq, &txcmplq);
3647 pring->txq_cnt = 0;
3648 pring->txcmplq_cnt = 0;
3649 spin_unlock_irq(&phba->hbalock);
3650
3651 /* Flush the txq */
3652 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3653 IOERR_SLI_DOWN);
3654 /* Flush the txcmpq */
3655 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3656 IOERR_SLI_DOWN);
3657 }
3588} 3658}
3589 3659
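(The HBA_FCP_IOQ_FLUSH flag set under hbalock above is what the new lpfc_sli_abort_taskmgmt(), added later in this file, checks before walking the iotag table; a reduced sketch of that guard, taken from the hunk below with the rest of the function elided:)

	spin_lock_irq(&phba->hbalock);
	/* all I/O is already being flushed; nothing left to abort */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}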
3590/** 3660/**
@@ -3987,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3987{ 4057{
3988 struct lpfc_sli *psli = &phba->sli; 4058 struct lpfc_sli *psli = &phba->sli;
3989 uint16_t cfg_value; 4059 uint16_t cfg_value;
3990 int rc; 4060 int rc = 0;
3991 4061
3992 /* Reset HBA */ 4062 /* Reset HBA */
3993 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4063 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3994 "0295 Reset HBA Data: x%x x%x\n", 4064 "0295 Reset HBA Data: x%x x%x x%x\n",
3995 phba->pport->port_state, psli->sli_flag); 4065 phba->pport->port_state, psli->sli_flag,
4066 phba->hba_flag);
3996 4067
3997 /* perform board reset */ 4068 /* perform board reset */
3998 phba->fc_eventTag = 0; 4069 phba->fc_eventTag = 0;
@@ -4005,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
4005 phba->fcf.fcf_flag = 0; 4076 phba->fcf.fcf_flag = 0;
4006 spin_unlock_irq(&phba->hbalock); 4077 spin_unlock_irq(&phba->hbalock);
4007 4078
4079 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4080 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4081 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4082 return rc;
4083 }
4084
4008 /* Now physically reset the device */ 4085 /* Now physically reset the device */
4009 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4010 "0389 Performing PCI function reset!\n"); 4087 "0389 Performing PCI function reset!\n");
@@ -5002,7 +5079,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5002 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 5079 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
5003 } 5080 }
5004 5081
5005 if (phba->cfg_EnableXLane) 5082 if (phba->cfg_fof)
5006 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5083 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5007 5084
5008 if (phba->sli4_hba.hba_eq) { 5085 if (phba->sli4_hba.hba_eq) {
@@ -6722,7 +6799,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6722 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6799 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6723 MAILBOX_t *mb = &pmbox->u.mb; 6800 MAILBOX_t *mb = &pmbox->u.mb;
6724 struct lpfc_sli *psli = &phba->sli; 6801 struct lpfc_sli *psli = &phba->sli;
6725 struct lpfc_sli_ring *pring;
6726 6802
6727 /* If the mailbox completed, process the completion and return */ 6803 /* If the mailbox completed, process the completion and return */
6728 if (lpfc_sli4_process_missed_mbox_completions(phba)) 6804 if (lpfc_sli4_process_missed_mbox_completions(phba))
@@ -6764,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6764 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6840 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6765 spin_unlock_irq(&phba->hbalock); 6841 spin_unlock_irq(&phba->hbalock);
6766 6842
6767 pring = &psli->ring[psli->fcp_ring]; 6843 lpfc_sli_abort_fcp_rings(phba);
6768 lpfc_sli_abort_iocb_ring(phba, pring);
6769 6844
6770 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6771 "0345 Resetting board due to mailbox timeout\n"); 6846 "0345 Resetting board due to mailbox timeout\n");
@@ -8133,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8133 abort_tag = (uint32_t) iocbq->iotag; 8208 abort_tag = (uint32_t) iocbq->iotag;
8134 xritag = iocbq->sli4_xritag; 8209 xritag = iocbq->sli4_xritag;
8135 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8210 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
8211 wqe->generic.wqe_com.word10 = 0;
8136 /* words0-2 bpl convert bde */ 8212 /* words0-2 bpl convert bde */
8137 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8213 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8138 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8214 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8639,8 +8715,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8639 8715
8640 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8716 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8641 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8717 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8642 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8718 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
8643 LPFC_IO_OAS))) {
8644 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; 8719 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8645 } else { 8720 } else {
8646 wq = phba->sli4_hba.oas_wq; 8721 wq = phba->sli4_hba.oas_wq;
@@ -8735,7 +8810,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8735 8810
8736 if (phba->sli_rev == LPFC_SLI_REV4) { 8811 if (phba->sli_rev == LPFC_SLI_REV4) {
8737 if (piocb->iocb_flag & LPFC_IO_FCP) { 8812 if (piocb->iocb_flag & LPFC_IO_FCP) {
8738 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8813 if (!phba->cfg_fof || (!(piocb->iocb_flag &
8739 LPFC_IO_OAS))) { 8814 LPFC_IO_OAS))) {
8740 if (unlikely(!phba->sli4_hba.fcp_wq)) 8815 if (unlikely(!phba->sli4_hba.fcp_wq))
8741 return IOCB_ERROR; 8816 return IOCB_ERROR;
@@ -9170,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
9170 pring->sli.sli3.next_cmdidx = 0; 9245 pring->sli.sli3.next_cmdidx = 0;
9171 pring->sli.sli3.local_getidx = 0; 9246 pring->sli.sli3.local_getidx = 0;
9172 pring->sli.sli3.cmdidx = 0; 9247 pring->sli.sli3.cmdidx = 0;
9248 pring->flag = 0;
9173 INIT_LIST_HEAD(&pring->txq); 9249 INIT_LIST_HEAD(&pring->txq);
9174 INIT_LIST_HEAD(&pring->txcmplq); 9250 INIT_LIST_HEAD(&pring->txcmplq);
9175 INIT_LIST_HEAD(&pring->iocb_continueq); 9251 INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9805,43 +9881,6 @@ abort_iotag_exit:
9805} 9881}
9806 9882
9807/** 9883/**
9808 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9809 * @phba: Pointer to HBA context object.
9810 * @pring: Pointer to driver SLI ring object.
9811 *
9812 * This function aborts all iocbs in the given ring and frees all the iocb
9813 * objects in txq. This function issues abort iocbs unconditionally for all
9814 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
9815 * to complete before the return of this function. The caller is not required
9816 * to hold any locks.
9817 **/
9818static void
9819lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9820{
9821 LIST_HEAD(completions);
9822 struct lpfc_iocbq *iocb, *next_iocb;
9823
9824 if (pring->ringno == LPFC_ELS_RING)
9825 lpfc_fabric_abort_hba(phba);
9826
9827 spin_lock_irq(&phba->hbalock);
9828
9829 /* Take off all the iocbs on txq for cancelling */
9830 list_splice_init(&pring->txq, &completions);
9831 pring->txq_cnt = 0;
9832
9833 /* Next issue ABTS for everything on the txcmplq */
9834 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9835 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9836
9837 spin_unlock_irq(&phba->hbalock);
9838
9839 /* Cancel all the IOCBs from the completions list */
9840 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9841 IOERR_SLI_ABORTED);
9842}
9843
9844/**
9845 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9884 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9846 * @phba: pointer to lpfc HBA data structure. 9885 * @phba: pointer to lpfc HBA data structure.
9847 * 9886 *
@@ -9856,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9856 9895
9857 for (i = 0; i < psli->num_rings; i++) { 9896 for (i = 0; i < psli->num_rings; i++) {
9858 pring = &psli->ring[i]; 9897 pring = &psli->ring[i];
9859 lpfc_sli_iocb_ring_abort(phba, pring); 9898 lpfc_sli_abort_iocb_ring(phba, pring);
9860 } 9899 }
9861} 9900}
9862 9901
@@ -10081,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10081} 10120}
10082 10121
10083/** 10122/**
10123 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10124 * @vport: Pointer to virtual port.
10125 * @pring: Pointer to driver SLI ring object.
10126 * @tgt_id: SCSI ID of the target.
10127 * @lun_id: LUN ID of the scsi device.
10128 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10129 *
10130 * This function sends an abort command for every SCSI command
10131 * associated with the given virtual port pending on the ring
10132 * filtered by lpfc_sli_validate_fcp_iocb function.
 10133 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends an abort only to the
 10134 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 10135 * parameters.
 10136 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends an abort only to the
 10137 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
 10138 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends an abort to all
 10139 * FCP iocbs associated with the virtual port.
 10140 * This function returns the number of iocbs it aborted.
10141 * This function is called with no locks held right after a taskmgmt
10142 * command is sent.
10143 **/
10144int
10145lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10146 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10147{
10148 struct lpfc_hba *phba = vport->phba;
10149 struct lpfc_iocbq *abtsiocbq;
10150 struct lpfc_iocbq *iocbq;
10151 IOCB_t *icmd;
10152 int sum, i, ret_val;
10153 unsigned long iflags;
10154 struct lpfc_sli_ring *pring_s4;
10155 uint32_t ring_number;
10156
10157 spin_lock_irq(&phba->hbalock);
10158
10159 /* all I/Os are in process of being flushed */
10160 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10161 spin_unlock_irq(&phba->hbalock);
10162 return 0;
10163 }
10164 sum = 0;
10165
10166 for (i = 1; i <= phba->sli.last_iotag; i++) {
10167 iocbq = phba->sli.iocbq_lookup[i];
10168
10169 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10170 cmd) != 0)
10171 continue;
10172
10173 /*
10174 * If the iocbq is already being aborted, don't take a second
10175 * action, but do count it.
10176 */
10177 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10178 continue;
10179
10180 /* issue ABTS for this IOCB based on iotag */
10181 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10182 if (abtsiocbq == NULL)
10183 continue;
10184
10185 icmd = &iocbq->iocb;
10186 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10187 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10188 if (phba->sli_rev == LPFC_SLI_REV4)
10189 abtsiocbq->iocb.un.acxri.abortIoTag =
10190 iocbq->sli4_xritag;
10191 else
10192 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10193 abtsiocbq->iocb.ulpLe = 1;
10194 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10195 abtsiocbq->vport = vport;
10196
10197 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10198 abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
10199 if (iocbq->iocb_flag & LPFC_IO_FCP)
10200 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10201
10202 if (lpfc_is_link_up(phba))
10203 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10204 else
10205 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10206
10207 /* Setup callback routine and issue the command. */
10208 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10209
10210 /*
10211 * Indicate the IO is being aborted by the driver and set
10212 * the caller's flag into the aborted IO.
10213 */
10214 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10215
10216 if (phba->sli_rev == LPFC_SLI_REV4) {
10217 ring_number = MAX_SLI3_CONFIGURED_RINGS +
10218 iocbq->fcp_wqidx;
10219 pring_s4 = &phba->sli.ring[ring_number];
10220 /* Note: both hbalock and ring_lock must be set here */
10221 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10222 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10223 abtsiocbq, 0);
10224 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10225 } else {
10226 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10227 abtsiocbq, 0);
10228 }
10229
10230
10231 if (ret_val == IOCB_ERROR)
10232 __lpfc_sli_release_iocbq(phba, abtsiocbq);
10233 else
10234 sum++;
10235 }
10236 spin_unlock_irq(&phba->hbalock);
10237 return sum;
10238}
10239
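(A reduced, hypothetical sketch of the calling pattern described in the comment above: the task-management command is sent first, then this routine sweeps the lookup table and issues ABTS for every matching FCP iocb. Only lpfc_sli_abort_taskmgmt() and the LPFC_CTX_* selectors come from the driver; the wrapper name is a placeholder:)

	/* Hypothetical LUN-reset follow-up, called with no locks held. */
	static int example_post_lun_reset(struct lpfc_vport *vport,
					  struct lpfc_sli_ring *pring,
					  uint16_t tgt_id, uint64_t lun_id)
	{
		/* returns the number of iocbs an abort was issued for */
		return lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
					       LPFC_CTX_LUN);
	}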
10240/**
10084 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 10241 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
10085 * @phba: Pointer to HBA context object. 10242 * @phba: Pointer to HBA context object.
10086 * @cmdiocbq: Pointer to command iocb. 10243 * @cmdiocbq: Pointer to command iocb.
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6f04080f4ea8..edb48832c39b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 9b8cda866176..7f50aa04d66a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e32cbec70324..41675c1193e7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.45" 21#define LPFC_DRIVER_VERSION "10.2.8001.0."
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
30 30
31#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 31#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
32 LPFC_DRIVER_VERSION 32 LPFC_DRIVER_VERSION
33#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex. All rights reserved." 33#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 1e4479f3331a..9270d15ff1a4 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
564 u32 tmp; 564 u32 tmp;
565 565
566 tmp = mr32(MVS_GBL_CTL); 566 tmp = mr32(MVS_GBL_CTL);
567 tmp |= (IRQ_SAS_A | IRQ_SAS_B); 567 tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
568 mw32(MVS_GBL_INT_STAT, tmp); 568 mw32(MVS_GBL_INT_STAT, tmp);
569 writel(tmp, regs + 0x0C); 569 writel(tmp, regs + 0x0C);
570 writel(tmp, regs + 0x10); 570 writel(tmp, regs + 0x10);
@@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
580 580
581 tmp = mr32(MVS_GBL_CTL); 581 tmp = mr32(MVS_GBL_CTL);
582 582
583 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); 583 tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
584 mw32(MVS_GBL_INT_STAT, tmp); 584 mw32(MVS_GBL_INT_STAT, tmp);
585 writel(tmp, regs + 0x0C); 585 writel(tmp, regs + 0x0C);
586 writel(tmp, regs + 0x10); 586 writel(tmp, regs + 0x10);
@@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
596 if (!(mvi->flags & MVF_FLAG_SOC)) { 596 if (!(mvi->flags & MVF_FLAG_SOC)) {
597 stat = mr32(MVS_GBL_INT_STAT); 597 stat = mr32(MVS_GBL_INT_STAT);
598 598
599 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) 599 if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
600 return 0; 600 return 0;
601 } 601 }
602 return stat; 602 return stat;
@@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
606{ 606{
607 void __iomem *regs = mvi->regs; 607 void __iomem *regs = mvi->regs;
608 608
609 if (((stat & IRQ_SAS_A) && mvi->id == 0) || 609 if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
610 ((stat & IRQ_SAS_B) && mvi->id == 1)) { 610 ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
611 mw32_f(MVS_INT_STAT, CINT_DONE); 611 mw32_f(MVS_INT_STAT, CINT_DONE);
612 612
613 spin_lock(&mvi->lock); 613 spin_lock(&mvi->lock);
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 487aa6f97412..14e197497b46 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -150,35 +150,35 @@ enum chip_register_bits {
150 150
151enum pci_interrupt_cause { 151enum pci_interrupt_cause {
152 /* MAIN_IRQ_CAUSE (R10200) Bits*/ 152 /* MAIN_IRQ_CAUSE (R10200) Bits*/
153 IRQ_COM_IN_I2O_IOP0 = (1 << 0), 153 MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0),
154 IRQ_COM_IN_I2O_IOP1 = (1 << 1), 154 MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1),
155 IRQ_COM_IN_I2O_IOP2 = (1 << 2), 155 MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2),
156 IRQ_COM_IN_I2O_IOP3 = (1 << 3), 156 MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3),
157 IRQ_COM_OUT_I2O_HOS0 = (1 << 4), 157 MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
158 IRQ_COM_OUT_I2O_HOS1 = (1 << 5), 158 MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
159 IRQ_COM_OUT_I2O_HOS2 = (1 << 6), 159 MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
160 IRQ_COM_OUT_I2O_HOS3 = (1 << 7), 160 MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
161 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), 161 MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
162 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), 162 MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
163 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), 163 MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
164 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), 164 MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
165 IRQ_PCIF_DRBL0 = (1 << 12), 165 MVS_IRQ_PCIF_DRBL0 = (1 << 12),
166 IRQ_PCIF_DRBL1 = (1 << 13), 166 MVS_IRQ_PCIF_DRBL1 = (1 << 13),
167 IRQ_PCIF_DRBL2 = (1 << 14), 167 MVS_IRQ_PCIF_DRBL2 = (1 << 14),
168 IRQ_PCIF_DRBL3 = (1 << 15), 168 MVS_IRQ_PCIF_DRBL3 = (1 << 15),
169 IRQ_XOR_A = (1 << 16), 169 MVS_IRQ_XOR_A = (1 << 16),
170 IRQ_XOR_B = (1 << 17), 170 MVS_IRQ_XOR_B = (1 << 17),
171 IRQ_SAS_A = (1 << 18), 171 MVS_IRQ_SAS_A = (1 << 18),
172 IRQ_SAS_B = (1 << 19), 172 MVS_IRQ_SAS_B = (1 << 19),
173 IRQ_CPU_CNTRL = (1 << 20), 173 MVS_IRQ_CPU_CNTRL = (1 << 20),
174 IRQ_GPIO = (1 << 21), 174 MVS_IRQ_GPIO = (1 << 21),
175 IRQ_UART = (1 << 22), 175 MVS_IRQ_UART = (1 << 22),
176 IRQ_SPI = (1 << 23), 176 MVS_IRQ_SPI = (1 << 23),
177 IRQ_I2C = (1 << 24), 177 MVS_IRQ_I2C = (1 << 24),
178 IRQ_SGPIO = (1 << 25), 178 MVS_IRQ_SGPIO = (1 << 25),
179 IRQ_COM_ERR = (1 << 29), 179 MVS_IRQ_COM_ERR = (1 << 29),
180 IRQ_I2O_ERR = (1 << 30), 180 MVS_IRQ_I2O_ERR = (1 << 30),
181 IRQ_PCIE_ERR = (1 << 31), 181 MVS_IRQ_PCIE_ERR = (1 << 31),
182}; 182};
183 183
184union reg_phy_cfg { 184union reg_phy_cfg {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1fa010448666..de5d0ae19d83 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1648,16 +1648,16 @@ typedef struct {
1648 */ 1648 */
1649struct crc_context { 1649struct crc_context {
1650 uint32_t handle; /* System handle. */ 1650 uint32_t handle; /* System handle. */
1651 uint32_t ref_tag; 1651 __le32 ref_tag;
1652 uint16_t app_tag; 1652 __le16 app_tag;
1653 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ 1653 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
1654 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ 1654 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
1655 uint16_t guard_seed; /* Initial Guard Seed */ 1655 __le16 guard_seed; /* Initial Guard Seed */
1656 uint16_t prot_opts; /* Requested Data Protection Mode */ 1656 __le16 prot_opts; /* Requested Data Protection Mode */
1657 uint16_t blk_size; /* Data size in bytes */ 1657 __le16 blk_size; /* Data size in bytes */
1658 uint16_t runt_blk_guard; /* Guard value for runt block (tape 1658 uint16_t runt_blk_guard; /* Guard value for runt block (tape
1659 * only) */ 1659 * only) */
1660 uint32_t byte_count; /* Total byte count/ total data 1660 __le32 byte_count; /* Total byte count/ total data
1661 * transfer count */ 1661 * transfer count */
1662 union { 1662 union {
1663 struct { 1663 struct {
@@ -1671,10 +1671,10 @@ struct crc_context {
1671 uint32_t reserved_6; 1671 uint32_t reserved_6;
1672 } nobundling; 1672 } nobundling;
1673 struct { 1673 struct {
1674 uint32_t dif_byte_count; /* Total DIF byte 1674 __le32 dif_byte_count; /* Total DIF byte
1675 * count */ 1675 * count */
1676 uint16_t reserved_1; 1676 uint16_t reserved_1;
1677 uint16_t dseg_count; /* Data segment count */ 1677 __le16 dseg_count; /* Data segment count */
1678 uint32_t reserved_2; 1678 uint32_t reserved_2;
1679 uint32_t data_address[2]; 1679 uint32_t data_address[2];
1680 uint32_t data_length; 1680 uint32_t data_length;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8d85ed8d8917..4b188b0164e9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1996,7 +1996,7 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
 1996 * have been implemented by TCM, before AppTag is avail. 1996 * have been implemented by TCM, before AppTag is avail.
1997 * Look for modesense_handlers[] 1997 * Look for modesense_handlers[]
1998 */ 1998 */
1999 ctx->app_tag = __constant_cpu_to_le16(0); 1999 ctx->app_tag = 0;
2000 ctx->app_tag_mask[0] = 0x0; 2000 ctx->app_tag_mask[0] = 0x0;
2001 ctx->app_tag_mask[1] = 0x0; 2001 ctx->app_tag_mask[1] = 0x0;
2002 2002
@@ -2078,6 +2078,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2078 struct se_cmd *se_cmd = &cmd->se_cmd; 2078 struct se_cmd *se_cmd = &cmd->se_cmd;
2079 uint32_t h; 2079 uint32_t h;
2080 struct atio_from_isp *atio = &prm->cmd->atio; 2080 struct atio_from_isp *atio = &prm->cmd->atio;
2081 uint16_t t16;
2081 2082
2082 sgc = 0; 2083 sgc = 0;
2083 ha = vha->hw; 2084 ha = vha->hw;
@@ -2174,8 +2175,13 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2174 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2175 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2175 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2176 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2176 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2177 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2177 pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2178
2178 pkt->flags |= (atio->u.isp24.attr << 9); 2179 /* silence compile warning */
2180 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2181 pkt->ox_id = cpu_to_le16(t16);
2182
2183 t16 = (atio->u.isp24.attr << 9);
2184 pkt->flags |= cpu_to_le16(t16);
2179 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2185 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2180 2186
2181 /* Set transfer direction */ 2187 /* Set transfer direction */
@@ -2250,8 +2256,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2250 2256
2251 if (bundling && prm->prot_seg_cnt) { 2257 if (bundling && prm->prot_seg_cnt) {
2252 /* Walks dif segments */ 2258 /* Walks dif segments */
2253 pkt->add_flags |= 2259 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2254 __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);
2255 2260
2256 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 2261 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2257 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 2262 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 5c9f185a8ebd..e0a58fd13f66 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -316,7 +316,7 @@ struct fcp_hdr {
316 uint8_t seq_id; 316 uint8_t seq_id;
317 uint8_t df_ctl; 317 uint8_t df_ctl;
318 uint16_t seq_cnt; 318 uint16_t seq_cnt;
319 uint16_t ox_id; 319 __be16 ox_id;
320 uint16_t rx_id; 320 uint16_t rx_id;
321 uint32_t parameter; 321 uint32_t parameter;
322} __packed; 322} __packed;
@@ -441,7 +441,7 @@ struct ctio7_to_24xx {
441 union { 441 union {
442 struct { 442 struct {
443 uint16_t reserved1; 443 uint16_t reserved1;
444 uint16_t flags; 444 __le16 flags;
445 uint32_t residual; 445 uint32_t residual;
446 uint16_t ox_id; 446 uint16_t ox_id;
447 uint16_t scsi_status; 447 uint16_t scsi_status;
@@ -527,7 +527,7 @@ struct ctio_crc2_to_fw {
527 527
528 uint32_t handle; /* System handle. */ 528 uint32_t handle; /* System handle. */
529 uint16_t nport_handle; /* N_PORT handle. */ 529 uint16_t nport_handle; /* N_PORT handle. */
530 uint16_t timeout; /* Command timeout. */ 530 __le16 timeout; /* Command timeout. */
531 531
532 uint16_t dseg_count; /* Data segment count. */ 532 uint16_t dseg_count; /* Data segment count. */
533 uint8_t vp_index; 533 uint8_t vp_index;
@@ -538,15 +538,15 @@ struct ctio_crc2_to_fw {
538 uint8_t reserved1; 538 uint8_t reserved1;
539 uint32_t exchange_addr; /* rcv exchange address */ 539 uint32_t exchange_addr; /* rcv exchange address */
540 uint16_t reserved2; 540 uint16_t reserved2;
541 uint16_t flags; /* refer to CTIO7 flags values */ 541 __le16 flags; /* refer to CTIO7 flags values */
542 uint32_t residual; 542 uint32_t residual;
543 uint16_t ox_id; 543 __le16 ox_id;
544 uint16_t scsi_status; 544 uint16_t scsi_status;
545 uint32_t relative_offset; 545 __le32 relative_offset;
546 uint32_t reserved5; 546 uint32_t reserved5;
547 uint32_t transfer_length; /* total fc transfer length */ 547 __le32 transfer_length; /* total fc transfer length */
548 uint32_t reserved6; 548 uint32_t reserved6;
549 uint32_t crc_context_address[2];/* Data segment address. */ 549 __le32 crc_context_address[2];/* Data segment address. */
550 uint16_t crc_context_len; /* Data segment length. */ 550 uint16_t crc_context_len; /* Data segment length. */
551 uint16_t reserved_1; /* MUST be set to 0. */ 551 uint16_t reserved_1; /* MUST be set to 0. */
552} __packed; 552} __packed;
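(These __be16/__le16 annotations pair with the explicit conversions added in the qla_target.c hunk earlier in this patch: the FC frame header carries ox_id in big-endian order while the CTIO descriptor expects little-endian, so the value goes through CPU byte order in between. A minimal restatement of that pattern, using the field names from the structures above:)

	uint16_t t16;

	/* __be16 fcp_hdr.ox_id -> CPU order -> __le16 CTIO ox_id */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id = cpu_to_le16(t16);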
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index f6759dc0153b..c41ff148a2b4 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -368,7 +368,7 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
368 * otherwise we use the default. Also we use the default FIFO 368 * otherwise we use the default. Also we use the default FIFO
369 * thresholds for now. 369 * thresholds for now.
370 */ 370 */
371 *burst_code = chip_info ? chip_info->dma_burst_size : 16; 371 *burst_code = chip_info ? chip_info->dma_burst_size : 1;
372 *threshold = SSCR1_RxTresh(RX_THRESH_DFLT) 372 *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
373 | SSCR1_TxTresh(TX_THRESH_DFLT); 373 | SSCR1_TxTresh(TX_THRESH_DFLT);
374 374
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
index 2c617834dc46..c341ac11c5a3 100644
--- a/drivers/staging/android/timed_output.c
+++ b/drivers/staging/android/timed_output.c
@@ -97,7 +97,6 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)
97{ 97{
98 tdev->enable(tdev, 0); 98 tdev->enable(tdev, 0);
99 device_destroy(timed_output_class, MKDEV(0, tdev->index)); 99 device_destroy(timed_output_class, MKDEV(0, tdev->index));
100 dev_set_drvdata(tdev->dev, NULL);
101} 100}
102EXPORT_SYMBOL_GPL(timed_output_dev_unregister); 101EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
103 102
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 5d56428d508a..a2f6957e7ee9 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -651,6 +651,7 @@ config COMEDI_ADDI_APCI_1516
651 651
652config COMEDI_ADDI_APCI_1564 652config COMEDI_ADDI_APCI_1564
653 tristate "ADDI-DATA APCI_1564 support" 653 tristate "ADDI-DATA APCI_1564 support"
654 select COMEDI_ADDI_WATCHDOG
654 ---help--- 655 ---help---
655 Enable support for ADDI-DATA APCI_1564 cards 656 Enable support for ADDI-DATA APCI_1564 cards
656 657
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index b36feb080cba..fa38be0982f9 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -36,10 +36,11 @@ config IIO_SIMPLE_DUMMY_EVENTS
36 Add some dummy events to the simple dummy driver. 36 Add some dummy events to the simple dummy driver.
37 37
38config IIO_SIMPLE_DUMMY_BUFFER 38config IIO_SIMPLE_DUMMY_BUFFER
39 boolean "Buffered capture support" 39 boolean "Buffered capture support"
40 select IIO_KFIFO_BUF 40 select IIO_BUFFER
41 help 41 select IIO_KFIFO_BUF
42 Add buffered data capture to the simple dummy driver. 42 help
43 Add buffered data capture to the simple dummy driver.
43 44
44endif # IIO_SIMPLE_DUMMY 45endif # IIO_SIMPLE_DUMMY
45 46
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index dae8d1a9038e..52d7517b342e 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
846 LRADC_CTRL1); 846 LRADC_CTRL1);
847 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 847 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
848 848
849 /* Enable / disable the divider per requirement */
850 if (test_bit(chan, &lradc->is_divided))
851 mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
852 LRADC_CTRL2);
853 else
854 mxs_lradc_reg_clear(lradc,
855 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
856
849 /* Clean the slot's previous content, then set new one. */ 857 /* Clean the slot's previous content, then set new one. */
850 mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), 858 mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
851 LRADC_CTRL4); 859 LRADC_CTRL4);
@@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
961 if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer && 969 if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
962 val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) { 970 val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
963 /* divider by two disabled */ 971 /* divider by two disabled */
964 writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
965 lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
966 clear_bit(chan->channel, &lradc->is_divided); 972 clear_bit(chan->channel, &lradc->is_divided);
967 ret = 0; 973 ret = 0;
968 } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer && 974 } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
969 val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) { 975 val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
970 /* divider by two enabled */ 976 /* divider by two enabled */
971 writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
972 lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
973 set_bit(chan->channel, &lradc->is_divided); 977 set_bit(chan->channel, &lradc->is_divided);
974 ret = 0; 978 ret = 0;
975 } 979 }
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 9e0f2a9c73ae..ab338e3ddd05 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
667 chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] = 667 chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
668 chip->tsl2x7x_settings.prox_pulse_count; 668 chip->tsl2x7x_settings.prox_pulse_count;
669 chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] = 669 chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
670 chip->tsl2x7x_settings.prox_thres_low; 670 (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
671 chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
672 (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
671 chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] = 673 chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
672 chip->tsl2x7x_settings.prox_thres_high; 674 (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
675 chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
676 (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
673 677
674 /* and make sure we're not already on */ 678 /* and make sure we're not already on */
675 if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) { 679 if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index b5678328fc40..4ca61afdf622 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -173,6 +173,13 @@ static int imx_pd_register(struct drm_device *drm,
173 if (ret) 173 if (ret)
174 return ret; 174 return ret;
175 175
176 /* set the connector's dpms to OFF so that
177 * drm_helper_connector_dpms() won't return
178 * immediately since the current state is ON
179 * at this point.
180 */
181 imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
182
176 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); 183 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
177 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, 184 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
178 DRM_MODE_ENCODER_NONE); 185 DRM_MODE_ENCODER_NONE);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index ded31ea6bd39..cbf455d66f73 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -396,7 +396,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
396 } 396 }
397} 397}
398 398
399static struct vb2_ops iss_video_vb2ops = { 399static const struct vb2_ops iss_video_vb2ops = {
400 .queue_setup = iss_video_queue_setup, 400 .queue_setup = iss_video_queue_setup,
401 .buf_prepare = iss_video_buf_prepare, 401 .buf_prepare = iss_video_buf_prepare,
402 .buf_queue = iss_video_buf_queue, 402 .buf_queue = iss_video_buf_queue,
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 0acacab95a48..46f5abcbaeeb 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -298,7 +298,7 @@ int rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
298 RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__)); 298 RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
299 299
300 if (IS_8723A_A_CUT(pHalData->VersionID)) { 300 if (IS_8723A_A_CUT(pHalData->VersionID)) {
301 fw_name = "rtlwifi/rtl8723aufw.bin"; 301 fw_name = "rtlwifi/rtl8723aufw_A.bin";
302 RT_TRACE(_module_hal_init_c_, _drv_info_, 302 RT_TRACE(_module_hal_init_c_, _drv_info_,
303 ("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC " 303 ("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC "
304 "for RTL8723A A CUT\n")); 304 "for RTL8723A A CUT\n"));
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 4e32003a4437..1fb34386a4e5 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -29,7 +29,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
29MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); 29MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
30MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); 30MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
31MODULE_VERSION(DRIVERVERSION); 31MODULE_VERSION(DRIVERVERSION);
32MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); 32MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
33MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
34MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
33 35
34/* module param defaults */ 36/* module param defaults */
35static int rtw_chip_version = 0x00; 37static int rtw_chip_version = 0x00;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f95569dedc88..f44f1ba762c3 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1214,15 +1214,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
1214{ 1214{
1215 struct n_tty_data *ldata = tty->disc_data; 1215 struct n_tty_data *ldata = tty->disc_data;
1216 1216
1217 if (I_IGNPAR(tty)) 1217 if (I_INPCK(tty)) {
1218 return; 1218 if (I_IGNPAR(tty))
1219 if (I_PARMRK(tty)) { 1219 return;
1220 put_tty_queue('\377', ldata); 1220 if (I_PARMRK(tty)) {
1221 put_tty_queue('\0', ldata); 1221 put_tty_queue('\377', ldata);
1222 put_tty_queue(c, ldata); 1222 put_tty_queue('\0', ldata);
1223 } else if (I_INPCK(tty)) 1223 put_tty_queue(c, ldata);
1224 put_tty_queue('\0', ldata); 1224 } else
1225 else 1225 put_tty_queue('\0', ldata);
1226 } else
1226 put_tty_queue(c, ldata); 1227 put_tty_queue(c, ldata);
1227 if (waitqueue_active(&tty->read_wait)) 1228 if (waitqueue_active(&tty->read_wait))
1228 wake_up_interruptible(&tty->read_wait); 1229 wake_up_interruptible(&tty->read_wait);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 27f7ad6b74c1..7a91c6d1eb7d 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2357,7 +2357,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
2357 port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 2357 port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
2358 if (termios->c_iflag & INPCK) 2358 if (termios->c_iflag & INPCK)
2359 port->read_status_mask |= UART_LSR_FE | UART_LSR_PE; 2359 port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
2360 if (termios->c_iflag & (BRKINT | PARMRK)) 2360 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2361 port->read_status_mask |= UART_LSR_BI; 2361 port->read_status_mask |= UART_LSR_BI;
2362 2362
2363 /* 2363 /*
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index cfef801a49d4..4858b8a99d3b 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -144,8 +144,11 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
144 if (!(device->port.membase || device->port.iobase)) 144 if (!(device->port.membase || device->port.iobase))
145 return 0; 145 return 0;
146 146
147 if (!device->baud) 147 if (!device->baud) {
148 device->baud = probe_baud(&device->port); 148 device->baud = probe_baud(&device->port);
149 snprintf(device->options, sizeof(device->options), "%u",
150 device->baud);
151 }
149 152
150 init_port(device); 153 init_port(device);
151 154
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 501667e3e3f5..323376668b72 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
185 uart_update_timeout(port, termios->c_cflag, baud); 185 uart_update_timeout(port, termios->c_cflag, baud);
186 altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG); 186 altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
187 spin_unlock_irqrestore(&port->lock, flags); 187 spin_unlock_irqrestore(&port->lock, flags);
188
189 /*
190 * FIXME: port->read_status_mask and port->ignore_status_mask
191 * need to be initialized based on termios settings for
192 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
193 */
188} 194}
189 195
190static void altera_uart_rx_chars(struct altera_uart *pp) 196static void altera_uart_rx_chars(struct altera_uart *pp)
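(The FIXME added to altera_uart above, and the identical one added to mcf.c further down, point at the same convention the rest of this series applies to the other serial drivers: INPCK decides whether frame/parity errors are reported, IGNBRK/BRKINT/PARMRK decide whether breaks are reported, and IGNPAR/IGNBRK fill the ignore mask. A generic sketch of that convention; UART_STATUS_{OE,PE,FE,BRK} are placeholder names, not bits defined by either driver:)

	/* Conventional termios -> status-mask mapping (placeholder bits). */
	port->read_status_mask = UART_STATUS_OE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART_STATUS_PE | UART_STATUS_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART_STATUS_BRK;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART_STATUS_PE | UART_STATUS_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART_STATUS_BRK;
		/* if we ignore parity and breaks, ignore overruns too */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART_STATUS_OE;
	}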
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 01c9e72433e1..971af1e22d0f 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
420 uap->port.read_status_mask = UART01x_RSR_OE; 420 uap->port.read_status_mask = UART01x_RSR_OE;
421 if (termios->c_iflag & INPCK) 421 if (termios->c_iflag & INPCK)
422 uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE; 422 uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
423 if (termios->c_iflag & (BRKINT | PARMRK)) 423 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
424 uap->port.read_status_mask |= UART01x_RSR_BE; 424 uap->port.read_status_mask |= UART01x_RSR_BE;
425 425
426 /* 426 /*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 908a6e3142a2..0e26dcbd5ea4 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1744,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1744 port->read_status_mask = UART011_DR_OE | 255; 1744 port->read_status_mask = UART011_DR_OE | 255;
1745 if (termios->c_iflag & INPCK) 1745 if (termios->c_iflag & INPCK)
1746 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; 1746 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1747 if (termios->c_iflag & (BRKINT | PARMRK)) 1747 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1748 port->read_status_mask |= UART011_DR_BE; 1748 port->read_status_mask |= UART011_DR_BE;
1749 1749
1750 /* 1750 /*
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3fceae099c44..c4f750314100 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1932,7 +1932,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1932 port->read_status_mask = ATMEL_US_OVRE; 1932 port->read_status_mask = ATMEL_US_OVRE;
1933 if (termios->c_iflag & INPCK) 1933 if (termios->c_iflag & INPCK)
1934 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 1934 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
1935 if (termios->c_iflag & (BRKINT | PARMRK)) 1935 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1936 port->read_status_mask |= ATMEL_US_RXBRK; 1936 port->read_status_mask |= ATMEL_US_RXBRK;
1937 1937
1938 if (atmel_use_pdc_rx(port)) 1938 if (atmel_use_pdc_rx(port))
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a47421e4627c..231519022b73 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -567,7 +567,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
567 port->read_status_mask |= UART_FIFO_FRAMEERR_MASK; 567 port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
568 port->read_status_mask |= UART_FIFO_PARERR_MASK; 568 port->read_status_mask |= UART_FIFO_PARERR_MASK;
569 } 569 }
570 if (new->c_iflag & (BRKINT)) 570 if (new->c_iflag & (IGNBRK | BRKINT))
571 port->read_status_mask |= UART_FIFO_BRKDET_MASK; 571 port->read_status_mask |= UART_FIFO_BRKDET_MASK;
572 572
573 port->ignore_status_mask = 0; 573 port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 869ceba2ec57..ac86a20992e9 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
833 port->read_status_mask = OE; 833 port->read_status_mask = OE;
834 if (termios->c_iflag & INPCK) 834 if (termios->c_iflag & INPCK)
835 port->read_status_mask |= (FE | PE); 835 port->read_status_mask |= (FE | PE);
836 if (termios->c_iflag & (BRKINT | PARMRK)) 836 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
837 port->read_status_mask |= BI; 837 port->read_status_mask |= BI;
838 838
839 /* 839 /*
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 2f2b2e538a54..cdbbc788230a 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
625 dport->port.read_status_mask = DZ_OERR; 625 dport->port.read_status_mask = DZ_OERR;
626 if (termios->c_iflag & INPCK) 626 if (termios->c_iflag & INPCK)
627 dport->port.read_status_mask |= DZ_FERR | DZ_PERR; 627 dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
628 if (termios->c_iflag & (BRKINT | PARMRK)) 628 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
629 dport->port.read_status_mask |= DZ_BREAK; 629 dport->port.read_status_mask |= DZ_BREAK;
630 630
631 /* characters to ignore */ 631 /* characters to ignore */
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 5131b5ee6164..a514ee6f5406 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -25,7 +25,7 @@
25#include <asm/serial.h> 25#include <asm/serial.h>
26 26
27static struct console early_con = { 27static struct console early_con = {
28 .name = "earlycon", 28 .name = "uart", /* 8250 console switch requires this name */
29 .flags = CON_PRINTBUFFER | CON_BOOT, 29 .flags = CON_PRINTBUFFER | CON_BOOT,
30 .index = -1, 30 .index = -1,
31}; 31};
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index b373f6416e8c..3b0ee9afd76f 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
407 if (new->c_iflag & INPCK) 407 if (new->c_iflag & INPCK)
408 port->read_status_mask |= 408 port->read_status_mask |=
409 UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR; 409 UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
410 if (new->c_iflag & (BRKINT | PARMRK)) 410 if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
411 port->read_status_mask |= SW_UARTn_RXDATAX_BERR; 411 port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
412 412
413 port->ignore_status_mask = 0; 413 port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c5eb897de9de..49385c86cfba 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -902,7 +902,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
902 sport->port.read_status_mask = 0; 902 sport->port.read_status_mask = 0;
903 if (termios->c_iflag & INPCK) 903 if (termios->c_iflag & INPCK)
904 sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE); 904 sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
905 if (termios->c_iflag & (BRKINT | PARMRK)) 905 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
906 sport->port.read_status_mask |= UARTSR1_FE; 906 sport->port.read_status_mask |= UARTSR1_FE;
907 907
908 /* characters to ignore */ 908 /* characters to ignore */
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 1d9420548e16..1efd4c36ba0c 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -850,7 +850,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
850 up->port.read_status_mask = Rx_OVR; 850 up->port.read_status_mask = Rx_OVR;
851 if (iflag & INPCK) 851 if (iflag & INPCK)
852 up->port.read_status_mask |= CRC_ERR | PAR_ERR; 852 up->port.read_status_mask |= CRC_ERR | PAR_ERR;
853 if (iflag & (BRKINT | PARMRK)) 853 if (iflag & (IGNBRK | BRKINT | PARMRK))
854 up->port.read_status_mask |= BRK_ABRT; 854 up->port.read_status_mask |= BRK_ABRT;
855 855
856 up->port.ignore_status_mask = 0; 856 up->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 9cd9b4eba9fc..68f2c53e0b54 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -737,7 +737,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
737 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 737 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
738 if (termios->c_iflag & INPCK) 738 if (termios->c_iflag & INPCK)
739 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; 739 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
740 if (termios->c_iflag & (BRKINT | PARMRK)) 740 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
741 up->port.read_status_mask |= UART_LSR_BI; 741 up->port.read_status_mask |= UART_LSR_BI;
742 742
743 /* 743 /*
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 2a99d0c61b9e..ba285cd45b59 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -835,7 +835,7 @@ static void max310x_set_termios(struct uart_port *port,
835 if (termios->c_iflag & INPCK) 835 if (termios->c_iflag & INPCK)
836 port->read_status_mask |= MAX310X_LSR_RXPAR_BIT | 836 port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
837 MAX310X_LSR_FRERR_BIT; 837 MAX310X_LSR_FRERR_BIT;
838 if (termios->c_iflag & (BRKINT | PARMRK)) 838 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
839 port->read_status_mask |= MAX310X_LSR_RXBRK_BIT; 839 port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
840 840
841 /* Set status ignore mask */ 841 /* Set status ignore mask */
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 0edfaf8cd269..a6f085717f94 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
248 mr1 |= MCFUART_MR1_PARITYNONE; 248 mr1 |= MCFUART_MR1_PARITYNONE;
249 } 249 }
250 250
251 /*
252 * FIXME: port->read_status_mask and port->ignore_status_mask
253 * need to be initialized based on termios settings for
254 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
255 */
256
251 if (termios->c_cflag & CSTOPB) 257 if (termios->c_cflag & CSTOPB)
252 mr2 |= MCFUART_MR2_STOP2; 258 mr2 |= MCFUART_MR2_STOP2;
253 else 259 else
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 52c930fac210..445799dc9846 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -977,7 +977,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
977 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 977 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
978 if (termios->c_iflag & INPCK) 978 if (termios->c_iflag & INPCK)
979 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; 979 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
980 if (termios->c_iflag & (BRKINT | PARMRK)) 980 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
981 up->port.read_status_mask |= UART_LSR_BI; 981 up->port.read_status_mask |= UART_LSR_BI;
982 982
983 /* Characters to ignore */ 983 /* Characters to ignore */
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index e30a3ca3cea3..759c6a6fa74a 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
1458 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE 1458 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
1459 | SDMA_DESC_CMDSTAT_FR; 1459 | SDMA_DESC_CMDSTAT_FR;
1460 1460
1461 if (termios->c_iflag & (BRKINT | PARMRK)) 1461 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1462 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR; 1462 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1463 1463
1464 /* Characters/events to ignore */ 1464 /* Characters/events to ignore */
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 778e376f197e..c41aca4dfc43 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -582,7 +582,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
582 port->read_status_mask = 0; 582 port->read_status_mask = 0;
583 if (termios->c_iflag & INPCK) 583 if (termios->c_iflag & INPCK)
584 port->read_status_mask |= UART_SR_PAR_FRAME_ERR; 584 port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
585 if (termios->c_iflag & (BRKINT | PARMRK)) 585 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
586 port->read_status_mask |= UART_SR_RX_BREAK; 586 port->read_status_mask |= UART_SR_RX_BREAK;
587 587
588 uart_update_timeout(port, termios->c_cflag, baud); 588 uart_update_timeout(port, termios->c_cflag, baud);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4b5b3c2fe328..86de4477d98a 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -604,7 +604,7 @@ static void mxs_auart_settermios(struct uart_port *u,
604 604
605 if (termios->c_iflag & INPCK) 605 if (termios->c_iflag & INPCK)
606 u->read_status_mask |= AUART_STAT_PERR; 606 u->read_status_mask |= AUART_STAT_PERR;
607 if (termios->c_iflag & (BRKINT | PARMRK)) 607 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
608 u->read_status_mask |= AUART_STAT_BERR; 608 u->read_status_mask |= AUART_STAT_BERR;
609 609
610 /* 610 /*
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 0a4dd70d29eb..7a6745601d4e 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
419 } 419 }
420 420
421 port->read_status_mask = 0; 421 port->read_status_mask = 0;
422 if (termios->c_iflag & (BRKINT | PARMRK)) 422 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
423 port->read_status_mask |= SR_BE; 423 port->read_status_mask |= SR_BE;
424 if (termios->c_iflag & INPCK) 424 if (termios->c_iflag & INPCK)
425 port->read_status_mask |= SR_PE | SR_FE; 425 port->read_status_mask |= SR_PE | SR_FE;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index e9d420ff3931..8193635103ee 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1092,7 +1092,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
1092 uap->port.read_status_mask = Rx_OVR; 1092 uap->port.read_status_mask = Rx_OVR;
1093 if (iflag & INPCK) 1093 if (iflag & INPCK)
1094 uap->port.read_status_mask |= CRC_ERR | PAR_ERR; 1094 uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
1095 if (iflag & (BRKINT | PARMRK)) 1095 if (iflag & (IGNBRK | BRKINT | PARMRK))
1096 uap->port.read_status_mask |= BRK_ABRT; 1096 uap->port.read_status_mask |= BRK_ABRT;
1097 1097
1098 uap->port.ignore_status_mask = 0; 1098 uap->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index de6c05c63683..2ba24a45c97f 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
477 sport->port.read_status_mask |= 477 sport->port.read_status_mask |=
478 FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) | 478 FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
479 FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR); 479 FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
480 if (termios->c_iflag & (BRKINT | PARMRK)) 480 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
481 sport->port.read_status_mask |= 481 sport->port.read_status_mask |=
482 ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK); 482 ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
483 483
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 9e7ee39f8b2a..c638c53cd2b6 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
492 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 492 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
493 if (termios->c_iflag & INPCK) 493 if (termios->c_iflag & INPCK)
494 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; 494 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
495 if (termios->c_iflag & (BRKINT | PARMRK)) 495 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
496 up->port.read_status_mask |= UART_LSR_BI; 496 up->port.read_status_mask |= UART_LSR_BI;
497 497
498 /* 498 /*
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 329337711bb0..c1d3ebdf3b97 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -66,7 +66,7 @@ static void dbg(const char *fmt, ...)
66 char buff[256]; 66 char buff[256];
67 67
68 va_start(va, fmt); 68 va_start(va, fmt);
69 vscnprintf(buff, sizeof(buf), fmt, va); 69 vscnprintf(buff, sizeof(buff), fmt, va);
70 va_end(va); 70 va_end(va);
71 71
72 printascii(buff); 72 printascii(buff);
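
The samsung.c hunk fixes a classic sizeof mistake: the length passed to vscnprintf() named a different object than the destination buffer, silently truncating the formatted output to that object's size. A small, self-contained illustration of the pitfall; the dbg() signature with a buf parameter is invented here to show the identifier clash, it is not the driver's:

    #include <stdarg.h>
    #include <stdio.h>

    static void dbg(const char *buf, const char *fmt, ...)
    {
        char buff[256];
        va_list va;

        va_start(va, fmt);
        /* Bug: sizeof(buf) is the size of a pointer (8 on 64-bit), not 256. */
        vsnprintf(buff, sizeof(buf), fmt, va);
        va_end(va);
        printf("truncated: \"%s\"\n", buff);

        va_start(va, fmt);
        vsnprintf(buff, sizeof(buff), fmt, va);   /* fixed version */
        va_end(va);
        printf("full:      \"%s\"\n", buff);
    }

    int main(void)
    {
        dbg("unused", "a fairly long debug message: %d", 42);
        return 0;
    }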
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index a7cdec2962dd..771f361c47ea 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
596 if (termios->c_iflag & INPCK) 596 if (termios->c_iflag & INPCK)
597 uport->read_status_mask |= M_DUART_FRM_ERR | 597 uport->read_status_mask |= M_DUART_FRM_ERR |
598 M_DUART_PARITY_ERR; 598 M_DUART_PARITY_ERR;
599 if (termios->c_iflag & (BRKINT | PARMRK)) 599 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
600 uport->read_status_mask |= M_DUART_RCVD_BRK; 600 uport->read_status_mask |= M_DUART_RCVD_BRK;
601 601
602 uport->ignore_status_mask = 0; 602 uport->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 5443b46345ed..e84b6a3bdd18 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -665,7 +665,7 @@ static void sccnxp_set_termios(struct uart_port *port,
665 port->read_status_mask = SR_OVR; 665 port->read_status_mask = SR_OVR;
666 if (termios->c_iflag & INPCK) 666 if (termios->c_iflag & INPCK)
667 port->read_status_mask |= SR_PE | SR_FE; 667 port->read_status_mask |= SR_PE | SR_FE;
668 if (termios->c_iflag & (BRKINT | PARMRK)) 668 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
669 port->read_status_mask |= SR_BRK; 669 port->read_status_mask |= SR_BRK;
670 670
671 /* Set status ignore mask */ 671 /* Set status ignore mask */
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index e1caa99e3d3b..5c79bdab985d 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
437 port->read_status_mask = URLS_URROE; 437 port->read_status_mask = URLS_URROE;
438 if (termios->c_iflag & INPCK) 438 if (termios->c_iflag & INPCK)
439 port->read_status_mask |= (URLS_URFE | URLS_URPE); 439 port->read_status_mask |= (URLS_URFE | URLS_URPE);
440 if (termios->c_iflag & (BRKINT | PARMRK)) 440 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
441 port->read_status_mask |= URLS_URBI; 441 port->read_status_mask |= URLS_URBI;
442 442
443 /* 443 /*
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 60f49b9d7e39..ea8546092c7e 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -697,7 +697,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
697 TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS; 697 TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
698 if (termios->c_iflag & INPCK) 698 if (termios->c_iflag & INPCK)
699 up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER; 699 up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
700 if (termios->c_iflag & (BRKINT | PARMRK)) 700 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
701 up->port.read_status_mask |= TXX9_SIDISR_UBRK; 701 up->port.read_status_mask |= TXX9_SIDISR_UBRK;
702 702
703 /* 703 /*
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 1f2be48c92ce..9b4d71cff00d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -896,7 +896,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
896 if (termios->c_iflag & INPCK) 896 if (termios->c_iflag & INPCK)
897 port->read_status_mask |= uint_en->sirfsoc_frm_err_en; 897 port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
898 } 898 }
899 if (termios->c_iflag & (BRKINT | PARMRK)) 899 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
900 port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en; 900 port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
901 if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { 901 if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
902 if (termios->c_iflag & IGNPAR) 902 if (termios->c_iflag & IGNPAR)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index c7f61ac27132..f48b1cc07eea 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
547 ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE; 547 ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
548 if (termios->c_iflag & INPCK) 548 if (termios->c_iflag & INPCK)
549 ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE; 549 ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
550 if (termios->c_iflag & (BRKINT | PARMRK)) 550 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
551 ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE; 551 ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
552 552
553 /* 553 /*
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5faa8e905e98..80a58eca785b 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -719,7 +719,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
719 if (iflag & INPCK) 719 if (iflag & INPCK)
720 up->port.read_status_mask |= (SAB82532_ISR0_PERR | 720 up->port.read_status_mask |= (SAB82532_ISR0_PERR |
721 SAB82532_ISR0_FERR); 721 SAB82532_ISR0_FERR);
722 if (iflag & (BRKINT | PARMRK)) 722 if (iflag & (IGNBRK | BRKINT | PARMRK))
723 up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8); 723 up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
724 724
725 /* 725 /*
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 9a0f24f83720..5326ae195e5f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
834 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 834 up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
835 if (iflag & INPCK) 835 if (iflag & INPCK)
836 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; 836 up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
837 if (iflag & (BRKINT | PARMRK)) 837 if (iflag & (IGNBRK | BRKINT | PARMRK))
838 up->port.read_status_mask |= UART_LSR_BI; 838 up->port.read_status_mask |= UART_LSR_BI;
839 839
840 /* 840 /*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index a2c40ed287d2..a85db8b87156 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -915,7 +915,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
915 up->port.read_status_mask = Rx_OVR; 915 up->port.read_status_mask = Rx_OVR;
916 if (iflag & INPCK) 916 if (iflag & INPCK)
917 up->port.read_status_mask |= CRC_ERR | PAR_ERR; 917 up->port.read_status_mask |= CRC_ERR | PAR_ERR;
918 if (iflag & (BRKINT | PARMRK)) 918 if (iflag & (IGNBRK | BRKINT | PARMRK))
919 up->port.read_status_mask |= BRK_ABRT; 919 up->port.read_status_mask |= BRK_ABRT;
920 920
921 up->port.ignore_status_mask = 0; 921 up->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index d569ca58bab6..1c52074c38df 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -936,7 +936,7 @@ static void qe_uart_set_termios(struct uart_port *port,
936 port->read_status_mask = BD_SC_EMPTY | BD_SC_OV; 936 port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
937 if (termios->c_iflag & INPCK) 937 if (termios->c_iflag & INPCK)
938 port->read_status_mask |= BD_SC_FR | BD_SC_PR; 938 port->read_status_mask |= BD_SC_FR | BD_SC_PR;
939 if (termios->c_iflag & (BRKINT | PARMRK)) 939 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
940 port->read_status_mask |= BD_SC_BR; 940 port->read_status_mask |= BD_SC_BR;
941 941
942 /* 942 /*
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index a63c14bc9a24..db0c8a4ab03e 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
559 port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR; 559 port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
560 if (c_iflag & INPCK) 560 if (c_iflag & INPCK)
561 port->read_status_mask |= UART_LSR_FE | UART_LSR_PE; 561 port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
562 if (c_iflag & (BRKINT | PARMRK)) 562 if (c_iflag & (IGNBRK | BRKINT | PARMRK))
563 port->read_status_mask |= UART_LSR_BI; 563 port->read_status_mask |= UART_LSR_BI;
564 564
565 port->ignore_status_mask = 0; 565 port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 6a169877109b..2b65bb7ffb8a 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
923 uport->read_status_mask = Rx_OVR; 923 uport->read_status_mask = Rx_OVR;
924 if (termios->c_iflag & INPCK) 924 if (termios->c_iflag & INPCK)
925 uport->read_status_mask |= FRM_ERR | PAR_ERR; 925 uport->read_status_mask |= FRM_ERR | PAR_ERR;
926 if (termios->c_iflag & (BRKINT | PARMRK)) 926 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
927 uport->read_status_mask |= Rx_BRK; 927 uport->read_status_mask |= Rx_BRK;
928 928
929 uport->ignore_status_mask = 0; 929 uport->ignore_status_mask = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5e0f6ff2e2f5..b33b00b386de 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3226,8 +3226,7 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt
3226 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3226 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3227 con_back = &registered_con_driver[i]; 3227 con_back = &registered_con_driver[i];
3228 3228
3229 if (con_back->con && 3229 if (con_back->con && con_back->con != csw) {
3230 !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
3231 defcsw = con_back->con; 3230 defcsw = con_back->con;
3232 retval = 0; 3231 retval = 0;
3233 break; 3232 break;
@@ -3332,6 +3331,7 @@ static int vt_unbind(struct con_driver *con)
3332{ 3331{
3333 const struct consw *csw = NULL; 3332 const struct consw *csw = NULL;
3334 int i, more = 1, first = -1, last = -1, deflt = 0; 3333 int i, more = 1, first = -1, last = -1, deflt = 0;
3334 int ret;
3335 3335
3336 if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) || 3336 if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
3337 con_is_graphics(con->con, con->first, con->last)) 3337 con_is_graphics(con->con, con->first, con->last))
@@ -3357,8 +3357,10 @@ static int vt_unbind(struct con_driver *con)
3357 3357
3358 if (first != -1) { 3358 if (first != -1) {
3359 console_lock(); 3359 console_lock();
3360 do_unbind_con_driver(csw, first, last, deflt); 3360 ret = do_unbind_con_driver(csw, first, last, deflt);
3361 console_unlock(); 3361 console_unlock();
3362 if (ret != 0)
3363 return ret;
3362 } 3364 }
3363 3365
3364 first = -1; 3366 first = -1;
@@ -3645,17 +3647,20 @@ err:
3645 */ 3647 */
3646int do_unregister_con_driver(const struct consw *csw) 3648int do_unregister_con_driver(const struct consw *csw)
3647{ 3649{
3648 int i, retval = -ENODEV; 3650 int i;
3649 3651
3650 /* cannot unregister a bound driver */ 3652 /* cannot unregister a bound driver */
3651 if (con_is_bound(csw)) 3653 if (con_is_bound(csw))
3652 goto err; 3654 return -EBUSY;
3655
3656 if (csw == conswitchp)
3657 return -EINVAL;
3653 3658
3654 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3659 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3655 struct con_driver *con_driver = &registered_con_driver[i]; 3660 struct con_driver *con_driver = &registered_con_driver[i];
3656 3661
3657 if (con_driver->con == csw && 3662 if (con_driver->con == csw &&
3658 con_driver->flag & CON_DRIVER_FLAG_MODULE) { 3663 con_driver->flag & CON_DRIVER_FLAG_INIT) {
3659 vtconsole_deinit_device(con_driver); 3664 vtconsole_deinit_device(con_driver);
3660 device_destroy(vtconsole_class, 3665 device_destroy(vtconsole_class,
3661 MKDEV(0, con_driver->node)); 3666 MKDEV(0, con_driver->node));
@@ -3666,12 +3671,11 @@ int do_unregister_con_driver(const struct consw *csw)
3666 con_driver->flag = 0; 3671 con_driver->flag = 0;
3667 con_driver->first = 0; 3672 con_driver->first = 0;
3668 con_driver->last = 0; 3673 con_driver->last = 0;
3669 retval = 0; 3674 return 0;
3670 break;
3671 } 3675 }
3672 } 3676 }
3673err: 3677
3674 return retval; 3678 return -ENODEV;
3675} 3679}
3676EXPORT_SYMBOL_GPL(do_unregister_con_driver); 3680EXPORT_SYMBOL_GPL(do_unregister_con_driver);
3677 3681
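
The vt.c changes replace the goto/err pattern in do_unregister_con_driver() with early returns and distinct error codes (-EBUSY while the driver is still bound, -EINVAL for the boot console, -ENODEV if it was never registered), and vt_unbind() now propagates the result of do_unbind_con_driver() instead of discarding it. Reduced to its control flow, the refactored function looks roughly like this (a sketch of the shape, teardown details elided):

    int do_unregister_con_driver(const struct consw *csw)
    {
        int i;

        if (con_is_bound(csw))      /* cannot unregister a bound driver */
            return -EBUSY;
        if (csw == conswitchp)      /* cannot unregister the boot console */
            return -EINVAL;

        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
            struct con_driver *con_driver = &registered_con_driver[i];

            if (con_driver->con == csw &&
                con_driver->flag & CON_DRIVER_FLAG_INIT) {
                /* tear down the sysfs node and clear the slot ... */
                return 0;
            }
        }
        return -ENODEV;
    }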
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e371f5af11f5..a673e5b6a2e0 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
655 655
656 if (mem->addr & ~PAGE_MASK) 656 if (mem->addr & ~PAGE_MASK)
657 return -ENODEV; 657 return -ENODEV;
658 if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size)) 658 if (vma->vm_end - vma->vm_start > mem->size)
659 return -EINVAL; 659 return -EINVAL;
660 660
661 vma->vm_ops = &uio_physical_vm_ops; 661 vma->vm_ops = &uio_physical_vm_ops;
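
The uio change tightens the mmap length check: a mapping is rejected whenever the VMA is larger than the exported region itself, rather than larger than the region rounded up to a whole page. As a standalone predicate (uio_mmap_len_ok is a made-up name mirroring the fields used in uio_mmap_physical()):

    #include <stdbool.h>
    #include <stddef.h>

    /* Reject mappings longer than the exported memory region. */
    static bool uio_mmap_len_ok(unsigned long vm_start, unsigned long vm_end,
                                size_t size)
    {
        return (vm_end - vm_start) <= size;
    }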
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 879b66e13370..21b99b4b4082 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1526,18 +1526,6 @@ static int hub_configure(struct usb_hub *hub,
1526 dev_dbg(hub_dev, "%umA bus power budget for each child\n", 1526 dev_dbg(hub_dev, "%umA bus power budget for each child\n",
1527 hub->mA_per_port); 1527 hub->mA_per_port);
1528 1528
1529 /* Update the HCD's internal representation of this hub before khubd
1530 * starts getting port status changes for devices under the hub.
1531 */
1532 if (hcd->driver->update_hub_device) {
1533 ret = hcd->driver->update_hub_device(hcd, hdev,
1534 &hub->tt, GFP_KERNEL);
1535 if (ret < 0) {
1536 message = "can't update HCD hub info";
1537 goto fail;
1538 }
1539 }
1540
1541 ret = hub_hub_status(hub, &hubstatus, &hubchange); 1529 ret = hub_hub_status(hub, &hubstatus, &hubchange);
1542 if (ret < 0) { 1530 if (ret < 0) {
1543 message = "can't get hub status"; 1531 message = "can't get hub status";
@@ -1589,10 +1577,28 @@ static int hub_configure(struct usb_hub *hub,
1589 } 1577 }
1590 } 1578 }
1591 hdev->maxchild = i; 1579 hdev->maxchild = i;
1580 for (i = 0; i < hdev->maxchild; i++) {
1581 struct usb_port *port_dev = hub->ports[i];
1582
1583 pm_runtime_put(&port_dev->dev);
1584 }
1585
1592 mutex_unlock(&usb_port_peer_mutex); 1586 mutex_unlock(&usb_port_peer_mutex);
1593 if (ret < 0) 1587 if (ret < 0)
1594 goto fail; 1588 goto fail;
1595 1589
1590 /* Update the HCD's internal representation of this hub before khubd
1591 * starts getting port status changes for devices under the hub.
1592 */
1593 if (hcd->driver->update_hub_device) {
1594 ret = hcd->driver->update_hub_device(hcd, hdev,
1595 &hub->tt, GFP_KERNEL);
1596 if (ret < 0) {
1597 message = "can't update HCD hub info";
1598 goto fail;
1599 }
1600 }
1601
1596 usb_hub_adjust_deviceremovable(hdev, hub->descriptor); 1602 usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
1597 1603
1598 hub_activate(hub, HUB_INIT); 1604 hub_activate(hub, HUB_INIT);
@@ -3458,7 +3464,8 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3458 struct usb_device *udev = port_dev->child; 3464 struct usb_device *udev = port_dev->child;
3459 3465
3460 if (udev && udev->can_submit) { 3466 if (udev && udev->can_submit) {
3461 dev_warn(&port_dev->dev, "not suspended yet\n"); 3467 dev_warn(&port_dev->dev, "device %s not suspended yet\n",
3468 dev_name(&udev->dev));
3462 if (PMSG_IS_AUTO(msg)) 3469 if (PMSG_IS_AUTO(msg))
3463 return -EBUSY; 3470 return -EBUSY;
3464 } 3471 }

diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 0a7cdc0ef0a9..326308e53961 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -84,6 +84,7 @@ struct usb_hub {
84 * @dev: generic device interface 84 * @dev: generic device interface
85 * @port_owner: port's owner 85 * @port_owner: port's owner
86 * @peer: related usb2 and usb3 ports (share the same connector) 86 * @peer: related usb2 and usb3 ports (share the same connector)
87 * @req: default pm qos request for hubs without port power control
87 * @connect_type: port's connect type 88 * @connect_type: port's connect type
88 * @location: opaque representation of platform connector location 89 * @location: opaque representation of platform connector location
89 * @status_lock: synchronize port_event() vs usb_port_{suspend|resume} 90 * @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
@@ -95,6 +96,7 @@ struct usb_port {
95 struct device dev; 96 struct device dev;
96 struct usb_dev_state *port_owner; 97 struct usb_dev_state *port_owner;
97 struct usb_port *peer; 98 struct usb_port *peer;
99 struct dev_pm_qos_request *req;
98 enum usb_port_connect_type connect_type; 100 enum usb_port_connect_type connect_type;
99 usb_port_location_t location; 101 usb_port_location_t location;
100 struct mutex status_lock; 102 struct mutex status_lock;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 62036faf56c0..fe1b6d0967e3 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -21,6 +21,8 @@
21 21
22#include "hub.h" 22#include "hub.h"
23 23
24static int usb_port_block_power_off;
25
24static const struct attribute_group *port_dev_group[]; 26static const struct attribute_group *port_dev_group[];
25 27
26static ssize_t connect_type_show(struct device *dev, 28static ssize_t connect_type_show(struct device *dev,
@@ -66,6 +68,7 @@ static void usb_port_device_release(struct device *dev)
66{ 68{
67 struct usb_port *port_dev = to_usb_port(dev); 69 struct usb_port *port_dev = to_usb_port(dev);
68 70
71 kfree(port_dev->req);
69 kfree(port_dev); 72 kfree(port_dev);
70} 73}
71 74
@@ -142,6 +145,9 @@ static int usb_port_runtime_suspend(struct device *dev)
142 == PM_QOS_FLAGS_ALL) 145 == PM_QOS_FLAGS_ALL)
143 return -EAGAIN; 146 return -EAGAIN;
144 147
148 if (usb_port_block_power_off)
149 return -EBUSY;
150
145 usb_autopm_get_interface(intf); 151 usb_autopm_get_interface(intf);
146 retval = usb_hub_set_port_power(hdev, hub, port1, false); 152 retval = usb_hub_set_port_power(hdev, hub, port1, false);
147 usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION); 153 usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
@@ -190,11 +196,19 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
190 if (left->peer || right->peer) { 196 if (left->peer || right->peer) {
191 struct usb_port *lpeer = left->peer; 197 struct usb_port *lpeer = left->peer;
192 struct usb_port *rpeer = right->peer; 198 struct usb_port *rpeer = right->peer;
193 199 char *method;
194 WARN(1, "failed to peer %s and %s (%s -> %p) (%s -> %p)\n", 200
195 dev_name(&left->dev), dev_name(&right->dev), 201 if (left->location && left->location == right->location)
196 dev_name(&left->dev), lpeer, 202 method = "location";
197 dev_name(&right->dev), rpeer); 203 else
204 method = "default";
205
206 pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
207 dev_name(&left->dev), dev_name(&right->dev), method,
208 dev_name(&left->dev),
209 lpeer ? dev_name(&lpeer->dev) : "none",
210 dev_name(&right->dev),
211 rpeer ? dev_name(&rpeer->dev) : "none");
198 return -EBUSY; 212 return -EBUSY;
199 } 213 }
200 214
@@ -251,6 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
251 dev_warn(&left->dev, "failed to peer to %s (%d)\n", 265 dev_warn(&left->dev, "failed to peer to %s (%d)\n",
252 dev_name(&right->dev), rc); 266 dev_name(&right->dev), rc);
253 pr_warn_once("usb: port power management may be unreliable\n"); 267 pr_warn_once("usb: port power management may be unreliable\n");
268 usb_port_block_power_off = 1;
254 } 269 }
255} 270}
256 271
@@ -386,9 +401,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
386 int retval; 401 int retval;
387 402
388 port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL); 403 port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
389 if (!port_dev) { 404 if (!port_dev)
390 retval = -ENOMEM; 405 return -ENOMEM;
391 goto exit; 406
407 port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
408 if (!port_dev->req) {
409 kfree(port_dev);
410 return -ENOMEM;
392 } 411 }
393 412
394 hub->ports[port1 - 1] = port_dev; 413 hub->ports[port1 - 1] = port_dev;
@@ -404,31 +423,53 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
404 port1); 423 port1);
405 mutex_init(&port_dev->status_lock); 424 mutex_init(&port_dev->status_lock);
406 retval = device_register(&port_dev->dev); 425 retval = device_register(&port_dev->dev);
407 if (retval) 426 if (retval) {
408 goto error_register; 427 put_device(&port_dev->dev);
428 return retval;
429 }
430
431 /* Set default policy of port-poweroff disabled. */
432 retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
433 DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
434 if (retval < 0) {
435 device_unregister(&port_dev->dev);
436 return retval;
437 }
409 438
410 find_and_link_peer(hub, port1); 439 find_and_link_peer(hub, port1);
411 440
441 /*
442 * Enable runtime pm and hold a refernce that hub_configure()
443 * will drop once the PM_QOS_NO_POWER_OFF flag state has been set
444 * and the hub has been fully registered (hdev->maxchild set).
445 */
412 pm_runtime_set_active(&port_dev->dev); 446 pm_runtime_set_active(&port_dev->dev);
447 pm_runtime_get_noresume(&port_dev->dev);
448 pm_runtime_enable(&port_dev->dev);
449 device_enable_async_suspend(&port_dev->dev);
413 450
414 /* 451 /*
415 * Do not enable port runtime pm if the hub does not support 452 * Keep hidden the ability to enable port-poweroff if the hub
416 * power switching. Also, userspace must have final say of 453 * does not support power switching.
417 * whether a port is permitted to power-off. Do not enable
418 * runtime pm if we fail to expose pm_qos_no_power_off.
419 */ 454 */
420 if (hub_is_port_power_switchable(hub) 455 if (!hub_is_port_power_switchable(hub))
421 && dev_pm_qos_expose_flags(&port_dev->dev, 456 return 0;
422 PM_QOS_FLAG_NO_POWER_OFF) == 0)
423 pm_runtime_enable(&port_dev->dev);
424 457
425 device_enable_async_suspend(&port_dev->dev); 458 /* Attempt to let userspace take over the policy. */
426 return 0; 459 retval = dev_pm_qos_expose_flags(&port_dev->dev,
460 PM_QOS_FLAG_NO_POWER_OFF);
461 if (retval < 0) {
462 dev_warn(&port_dev->dev, "failed to expose pm_qos_no_poweroff\n");
463 return 0;
464 }
427 465
428error_register: 466 /* Userspace owns the policy, drop the kernel 'no_poweroff' request. */
429 put_device(&port_dev->dev); 467 retval = dev_pm_qos_remove_request(port_dev->req);
430exit: 468 if (retval >= 0) {
431 return retval; 469 kfree(port_dev->req);
470 port_dev->req = NULL;
471 }
472 return 0;
432} 473}
433 474
434void usb_hub_remove_port_device(struct usb_hub *hub, int port1) 475void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
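
Taken together, the hub.c and port.c changes rework how port power-off policy is established during hub setup: each port now starts with a kernel-owned DEV_PM_QOS "no power off" request, runtime PM is enabled unconditionally with an extra reference that hub_configure() drops once the hub is fully registered, and only power-switchable hubs then try to hand the policy over to userspace via dev_pm_qos_expose_flags(). Condensed to the calls visible in the hunk (port_power_policy_setup is a hypothetical helper name, the real code lives at the tail of usb_hub_create_port_device()):

    static int port_power_policy_setup(struct usb_hub *hub,
                                       struct usb_port *port_dev)
    {
        int retval;

        /* Default policy: port power-off disabled, owned by the kernel. */
        retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
                                        DEV_PM_QOS_FLAGS,
                                        PM_QOS_FLAG_NO_POWER_OFF);
        if (retval < 0)
            return retval;

        pm_runtime_set_active(&port_dev->dev);
        pm_runtime_get_noresume(&port_dev->dev);  /* dropped in hub_configure() */
        pm_runtime_enable(&port_dev->dev);
        device_enable_async_suspend(&port_dev->dev);

        if (!hub_is_port_power_switchable(hub))
            return 0;                             /* keep power-off hidden */

        /* Hand the policy to userspace; on success drop the kernel request. */
        if (dev_pm_qos_expose_flags(&port_dev->dev,
                                    PM_QOS_FLAG_NO_POWER_OFF) == 0 &&
            dev_pm_qos_remove_request(port_dev->req) >= 0) {
            kfree(port_dev->req);
            port_dev->req = NULL;
        }
        return 0;
    }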
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 4a6d3dd68572..2f3acebb577a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
656 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), 656 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
657 }, 657 },
658 }, 658 },
659 {
660 /* HASEE E200 */
661 .matches = {
662 DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
663 DMI_MATCH(DMI_BOARD_NAME, "E210"),
664 DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
665 },
666 },
659 { } 667 { }
660}; 668};
661 669
@@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
665{ 673{
666 int try_handoff = 1, tried_handoff = 0; 674 int try_handoff = 1, tried_handoff = 0;
667 675
668 /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying 676 /*
669 * the handoff on its unused controller. Skip it. */ 677 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
670 if (pdev->vendor == 0x8086 && pdev->device == 0x283a) { 678 * the handoff on its unused controller. Skip it.
679 *
680 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
681 */
682 if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
683 pdev->device == 0x27cc)) {
671 if (dmi_check_system(ehci_dmi_nohandoff_table)) 684 if (dmi_check_system(ehci_dmi_nohandoff_table))
672 try_handoff = 0; 685 try_handoff = 0;
673 } 686 }
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 6231ce6aa0c3..2b998c60faf2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) { 287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
288 struct xhci_command *command; 288 struct xhci_command *command;
289 command = xhci_alloc_command(xhci, false, false, 289 command = xhci_alloc_command(xhci, false, false,
290 GFP_NOIO); 290 GFP_NOWAIT);
291 if (!command) { 291 if (!command) {
292 spin_unlock_irqrestore(&xhci->lock, flags); 292 spin_unlock_irqrestore(&xhci->lock, flags);
293 xhci_free_command(xhci, cmd); 293 xhci_free_command(xhci, cmd);
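
The xhci_stop_device() hunk swaps GFP_NOIO for GFP_NOWAIT because the allocation happens with xhci->lock held (taken via spin_lock_irqsave(), as the failure path above shows); GFP_NOIO may still enter reclaim and sleep, which is not allowed there, whereas GFP_NOWAIT fails immediately instead of blocking. The general pattern, as a generic sketch with made-up obj/lock names:

    spin_lock_irqsave(&lock, flags);
    obj = kmalloc(sizeof(*obj), GFP_NOWAIT);   /* GFP_KERNEL/GFP_NOIO could sleep */
    if (!obj)
        ret = -ENOMEM;
    spin_unlock_irqrestore(&lock, flags);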
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 51a6da256772..829f446064ea 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -7,7 +7,7 @@
7#include <linux/moduleparam.h> 7#include <linux/moduleparam.h>
8#include <linux/scatterlist.h> 8#include <linux/scatterlist.h>
9#include <linux/mutex.h> 9#include <linux/mutex.h>
10 10#include <linux/timer.h>
11#include <linux/usb.h> 11#include <linux/usb.h>
12 12
13#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */ 13#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
@@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
484 return sg; 484 return sg;
485} 485}
486 486
487static void sg_timeout(unsigned long _req)
488{
489 struct usb_sg_request *req = (struct usb_sg_request *) _req;
490
491 req->status = -ETIMEDOUT;
492 usb_sg_cancel(req);
493}
494
487static int perform_sglist( 495static int perform_sglist(
488 struct usbtest_dev *tdev, 496 struct usbtest_dev *tdev,
489 unsigned iterations, 497 unsigned iterations,
@@ -495,6 +503,9 @@ static int perform_sglist(
495{ 503{
496 struct usb_device *udev = testdev_to_usbdev(tdev); 504 struct usb_device *udev = testdev_to_usbdev(tdev);
497 int retval = 0; 505 int retval = 0;
506 struct timer_list sg_timer;
507
508 setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
498 509
499 while (retval == 0 && iterations-- > 0) { 510 while (retval == 0 && iterations-- > 0) {
500 retval = usb_sg_init(req, udev, pipe, 511 retval = usb_sg_init(req, udev, pipe,
@@ -505,7 +516,10 @@ static int perform_sglist(
505 516
506 if (retval) 517 if (retval)
507 break; 518 break;
519 mod_timer(&sg_timer, jiffies +
520 msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
508 usb_sg_wait(req); 521 usb_sg_wait(req);
522 del_timer_sync(&sg_timer);
509 retval = req->status; 523 retval = req->status;
510 524
511 /* FIXME check resulting data pattern */ 525 /* FIXME check resulting data pattern */
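
perform_sglist() previously relied on usb_sg_wait() returning on its own; the hunk bounds each iteration with an on-stack timer that marks the request -ETIMEDOUT and cancels it if SIMPLE_IO_TIMEOUT elapses. The shape of the pattern, using only the calls from the hunk (error paths and the usb_sg_init() arguments elided):

    struct timer_list sg_timer;

    setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long)req);

    while (retval == 0 && iterations-- > 0) {
        retval = usb_sg_init(req, udev, pipe, ...);
        if (retval)
            break;
        mod_timer(&sg_timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
        usb_sg_wait(req);          /* sg_timeout() fires if this stalls */
        del_timer_sync(&sg_timer); /* ensure the callback has finished */
        retval = req->status;
    }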
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index b63860f7beab..40bec8d64b0a 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -77,3 +77,4 @@ const struct consw dummy_con = {
77 .con_set_palette = DUMMY, 77 .con_set_palette = DUMMY,
78 .con_scrolldelta = DUMMY, 78 .con_scrolldelta = DUMMY,
79}; 79};
80EXPORT_SYMBOL_GPL(dummy_con);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f267284b423b..6e6aa704fe84 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1441,5 +1441,6 @@ const struct consw vga_con = {
1441 .con_build_attr = vgacon_build_attr, 1441 .con_build_attr = vgacon_build_attr,
1442 .con_invert_region = vgacon_invert_region, 1442 .con_invert_region = vgacon_invert_region,
1443}; 1443};
1444EXPORT_SYMBOL(vga_con);
1444 1445
1445MODULE_LICENSE("GPL"); 1446MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 7d44d669d5b6..43a0a52fc527 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
93 93
94#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
95
96static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
97{
98 u32 bpp = info->var.bits_per_pixel;
99
100 return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
101}
102
103 /* 94 /*
104 * Set a single color register. The values supplied are already 95 * Set a single color register. The values supplied are already
105 * rounded down to the hardware's capabilities (according to the 96 * rounded down to the hardware's capabilities (according to the
@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
129 mask <<= info->var.transp.offset; 120 mask <<= info->var.transp.offset;
130 value |= mask; 121 value |= mask;
131 } 122 }
132 pal[regno] = offb_cmap_byteswap(info, value); 123 pal[regno] = value;
133 return 0; 124 return 0;
134 } 125 }
135 126
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 67b067a3e2ab..a5df5e89d456 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -66,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
66 66
67 udelay(100); 67 udelay(100);
68 } 68 }
69 return !!(reg_val & MXC_W1_CONTROL_PST); 69 return !(reg_val & MXC_W1_CONTROL_PST);
70} 70}
71 71
72/* 72/*
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c845527b503a..76dd54122f76 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1280,14 +1280,17 @@ config WATCHDOG_RTAS
1280 1280
1281# S390 Architecture 1281# S390 Architecture
1282 1282
1283config ZVM_WATCHDOG 1283config DIAG288_WATCHDOG
1284 tristate "z/VM Watchdog Timer" 1284 tristate "System z diag288 Watchdog"
1285 depends on S390 1285 depends on S390
1286 select WATCHDOG_CORE
1286 help 1287 help
1287 IBM s/390 and zSeries machines running under z/VM 5.1 or later 1288 IBM s/390 and zSeries machines running under z/VM 5.1 or later
1288 provide a virtual watchdog timer to their guest that causes a 1289 provide a virtual watchdog timer to their guest that causes a
1289 user-defined Control Program command to be executed after a 1290 user-defined Control Program command to be executed after a
1290 timeout. 1291 timeout.
1292 LPAR provides a very similar interface. This driver handles
1293 both.
1291 1294
1292 To compile this driver as a module, choose M here. The module 1295 To compile this driver as a module, choose M here. The module
1293 will be called vmwatchdog. 1296 will be called vmwatchdog.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7b8a91ed20e7..468c3204c3b1 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -153,6 +153,7 @@ obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
153obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o 153obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
154 154
155# S390 Architecture 155# S390 Architecture
156obj-$(CONFIG_DIAG288_WATCHDOG) += diag288_wdt.o
156 157
157# SUPERH (sh + sh64) Architecture 158# SUPERH (sh + sh64) Architecture
158obj-$(CONFIG_SH_WDT) += shwdt.o 159obj-$(CONFIG_SH_WDT) += shwdt.o
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
new file mode 100644
index 000000000000..429494b6c822
--- /dev/null
+++ b/drivers/watchdog/diag288_wdt.c
@@ -0,0 +1,316 @@
1/*
2 * Watchdog driver for z/VM and LPAR using the diag 288 interface.
3 *
4 * Under z/VM, expiration of the watchdog will send a "system restart" command
5 * to CP.
6 *
7 * The command can be altered using the module parameter "cmd". This is
8 * not recommended because it's only supported on z/VM but not with LPAR.
9 *
10 * On LPAR, the watchdog will always trigger a system restart. The module
11 * parameter cmd is meaningless here.
12 *
13 *
14 * Copyright IBM Corp. 2004, 2013
15 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
16 * Philipp Hachtmann (phacht@de.ibm.com)
17 *
18 */
19
20#define KMSG_COMPONENT "diag288_wdt"
21#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/slab.h>
28#include <linux/miscdevice.h>
29#include <linux/watchdog.h>
30#include <linux/suspend.h>
31#include <asm/ebcdic.h>
32#include <linux/io.h>
33#include <linux/uaccess.h>
34
35#define MAX_CMDLEN 240
36#define DEFAULT_CMD "SYSTEM RESTART"
37
38#define MIN_INTERVAL 15     /* Minimal time supported by diag288 */
39#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */
40
41#define WDT_DEFAULT_TIMEOUT 30
42
43/* Function codes - init, change, cancel */
44#define WDT_FUNC_INIT 0
45#define WDT_FUNC_CHANGE 1
46#define WDT_FUNC_CANCEL 2
47#define WDT_FUNC_CONCEAL 0x80000000
48
49/* Action codes for LPAR watchdog */
50#define LPARWDT_RESTART 0
51
52static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
53static bool conceal_on;
54static bool nowayout_info = WATCHDOG_NOWAYOUT;
55
56MODULE_LICENSE("GPL");
57MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
58MODULE_AUTHOR("Philipp Hachtmann <phacht@de.ibm.com>");
59
60MODULE_DESCRIPTION("System z diag288 Watchdog Timer");
61
62module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
63MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");
64
65module_param_named(conceal, conceal_on, bool, 0644);
66MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");
67
68module_param_named(nowayout, nowayout_info, bool, 0444);
69MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
70
71MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
72MODULE_ALIAS("vmwatchdog");
73
74static int __diag288(unsigned int func, unsigned int timeout,
75 unsigned long action, unsigned int len)
76{
77 register unsigned long __func asm("2") = func;
78 register unsigned long __timeout asm("3") = timeout;
79 register unsigned long __action asm("4") = action;
80 register unsigned long __len asm("5") = len;
81 int err;
82
83 err = -EINVAL;
84 asm volatile(
85 " diag %1, %3, 0x288\n"
86 "0: la %0, 0\n"
87 "1:\n"
88 EX_TABLE(0b, 1b)
89 : "+d" (err) : "d"(__func), "d"(__timeout),
90 "d"(__action), "d"(__len) : "1", "cc");
91 return err;
92}
93
94static int __diag288_vm(unsigned int func, unsigned int timeout,
95 char *cmd, size_t len)
96{
97 return __diag288(func, timeout, virt_to_phys(cmd), len);
98}
99
100static int __diag288_lpar(unsigned int func, unsigned int timeout,
101 unsigned long action)
102{
103 return __diag288(func, timeout, action, 0);
104}
105
106static int wdt_start(struct watchdog_device *dev)
107{
108 char *ebc_cmd;
109 size_t len;
110 int ret;
111 unsigned int func;
112
113 ret = -ENODEV;
114
115 if (MACHINE_IS_VM) {
116 ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
117 if (!ebc_cmd)
118 return -ENOMEM;
119 len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
120 ASCEBC(ebc_cmd, MAX_CMDLEN);
121 EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
122
123 func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
124 : WDT_FUNC_INIT;
125 ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
126 WARN_ON(ret != 0);
127 kfree(ebc_cmd);
128 }
129
130 if (MACHINE_IS_LPAR) {
131 ret = __diag288_lpar(WDT_FUNC_INIT,
132 dev->timeout, LPARWDT_RESTART);
133 }
134
135 if (ret) {
136 pr_err("The watchdog cannot be activated\n");
137 return ret;
138 }
139 pr_info("The watchdog was activated\n");
140 return 0;
141}
142
143static int wdt_stop(struct watchdog_device *dev)
144{
145 int ret;
146
147 ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
148 pr_info("The watchdog was deactivated\n");
149 return ret;
150}
151
152static int wdt_ping(struct watchdog_device *dev)
153{
154 char *ebc_cmd;
155 size_t len;
156 int ret;
157 unsigned int func;
158
159 ret = -ENODEV;
160
161 if (MACHINE_IS_VM) {
162 ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
163 if (!ebc_cmd)
164 return -ENOMEM;
165 len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
166 ASCEBC(ebc_cmd, MAX_CMDLEN);
167 EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
168
169 /*
170 * It seems to be ok for z/VM to use the init function to
171 * retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must
172 * be used when the watchdog is running.
173 */
174 func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
175 : WDT_FUNC_INIT;
176
177 ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
178 WARN_ON(ret != 0);
179 kfree(ebc_cmd);
180 }
181
182 if (MACHINE_IS_LPAR)
183 ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
184
185 if (ret)
186 pr_err("The watchdog timer cannot be started or reset\n");
187 return ret;
188}
189
190static int wdt_set_timeout(struct watchdog_device *dev, unsigned int new_to)
191{
192 dev->timeout = new_to;
193 return wdt_ping(dev);
194}
195
196static struct watchdog_ops wdt_ops = {
197 .owner = THIS_MODULE,
198 .start = wdt_start,
199 .stop = wdt_stop,
200 .ping = wdt_ping,
201 .set_timeout = wdt_set_timeout,
202};
203
204static struct watchdog_info wdt_info = {
205 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
206 .firmware_version = 0,
207 .identity = "z Watchdog",
208};
209
210static struct watchdog_device wdt_dev = {
211 .parent = NULL,
212 .info = &wdt_info,
213 .ops = &wdt_ops,
214 .bootstatus = 0,
215 .timeout = WDT_DEFAULT_TIMEOUT,
216 .min_timeout = MIN_INTERVAL,
217 .max_timeout = MAX_INTERVAL,
218};
219
220/*
221 * It makes no sense to go into suspend while the watchdog is running.
222 * Depending on the memory size, the watchdog might trigger while we
223 * are still saving the memory.
224 * We reuse the open flag to ensure that suspend and watchdog open are
225 * exclusive operations.
226 */
227static int wdt_suspend(void)
228{
229 if (test_and_set_bit(WDOG_DEV_OPEN, &wdt_dev.status)) {
230 pr_err("Linux cannot be suspended while the watchdog is in use\n");
231 return notifier_from_errno(-EBUSY);
232 }
233 if (test_bit(WDOG_ACTIVE, &wdt_dev.status)) {
234 clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
235 pr_err("Linux cannot be suspended while the watchdog is in use\n");
236 return notifier_from_errno(-EBUSY);
237 }
238 return NOTIFY_DONE;
239}
240
241static int wdt_resume(void)
242{
243 clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
244 return NOTIFY_DONE;
245}
246
247static int wdt_power_event(struct notifier_block *this, unsigned long event,
248 void *ptr)
249{
250 switch (event) {
251 case PM_POST_HIBERNATION:
252 case PM_POST_SUSPEND:
253 return wdt_resume();
254 case PM_HIBERNATION_PREPARE:
255 case PM_SUSPEND_PREPARE:
256 return wdt_suspend();
257 default:
258 return NOTIFY_DONE;
259 }
260}
261
262static struct notifier_block wdt_power_notifier = {
263 .notifier_call = wdt_power_event,
264};
265
266static int __init diag288_init(void)
267{
268 int ret;
269 char ebc_begin[] = {
270 194, 197, 199, 201, 213
271 };
272
273 watchdog_set_nowayout(&wdt_dev, nowayout_info);
274
275 if (MACHINE_IS_VM) {
276 pr_info("The watchdog device driver detected a z/VM environment\n");
277 if (__diag288_vm(WDT_FUNC_INIT, 15,
278 ebc_begin, sizeof(ebc_begin)) != 0) {
279 pr_err("The watchdog cannot be initialized\n");
280 return -EINVAL;
281 }
282 } else if (MACHINE_IS_LPAR) {
283 pr_info("The watchdog device driver detected an LPAR environment\n");
284 if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
285 pr_err("The watchdog cannot be initialized\n");
286 return -EINVAL;
287 }
288 } else {
289 pr_err("Linux runs in an environment that does not support the diag288 watchdog\n");
290 return -ENODEV;
291 }
292
293 if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
294 pr_err("The watchdog cannot be deactivated\n");
295 return -EINVAL;
296 }
297
298 ret = register_pm_notifier(&wdt_power_notifier);
299 if (ret)
300 return ret;
301
302 ret = watchdog_register_device(&wdt_dev);
303 if (ret)
304 unregister_pm_notifier(&wdt_power_notifier);
305
306 return ret;
307}
308
309static void __exit diag288_exit(void)
310{
311 watchdog_unregister_device(&wdt_dev);
312 unregister_pm_notifier(&wdt_power_notifier);
313}
314
315module_init(diag288_init);
316module_exit(diag288_exit);
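
Since the new driver registers through the common watchdog core, it is driven from userspace through the standard /dev/watchdog character device rather than any z-specific interface. A minimal example using only the generic watchdog ioctls; nothing here is diag288-specific beyond the 15..3600 second timeout range and the fact that the driver advertises WDIOF_MAGICCLOSE:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
        int fd = open("/dev/watchdog", O_WRONLY);
        int timeout = 30;               /* must lie within 15..3600 for diag288 */

        if (fd < 0) {
            perror("open /dev/watchdog");
            return 1;
        }
        if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
            printf("timeout set to %d seconds\n", timeout);

        for (int i = 0; i < 3; i++) {   /* keep the machine alive for a while */
            ioctl(fd, WDIOC_KEEPALIVE, 0);
            sleep(10);
        }

        /* Magic close: writing 'V' before close() stops the watchdog,
         * unless the nowayout module parameter is set. */
        if (write(fd, "V", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }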
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6d325bda76da..5d4de88fe5b8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1168,7 +1168,8 @@ int gnttab_resume(void)
1168 1168
1169int gnttab_suspend(void) 1169int gnttab_suspend(void)
1170{ 1170{
1171 gnttab_interface->unmap_frames(); 1171 if (!xen_feature(XENFEAT_auto_translated_physmap))
1172 gnttab_interface->unmap_frames();
1172 return 0; 1173 return 0;
1173} 1174}
1174 1175
diff --git a/fs/aio.c b/fs/aio.c
index 56b28607c32d..4f078c054b41 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
477} 477}
478EXPORT_SYMBOL(kiocb_set_cancel_fn); 478EXPORT_SYMBOL(kiocb_set_cancel_fn);
479 479
480static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) 480static int kiocb_cancel(struct kiocb *kiocb)
481{ 481{
482 kiocb_cancel_fn *old, *cancel; 482 kiocb_cancel_fn *old, *cancel;
483 483
@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
538 struct kiocb, ki_list); 538 struct kiocb, ki_list);
539 539
540 list_del_init(&req->ki_list); 540 list_del_init(&req->ki_list);
541 kiocb_cancel(ctx, req); 541 kiocb_cancel(req);
542 } 542 }
543 543
544 spin_unlock_irq(&ctx->ctx_lock); 544 spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ err:
727 * when the processes owning a context have all exited to encourage 727 * when the processes owning a context have all exited to encourage
728 * the rapid destruction of the kioctx. 728 * the rapid destruction of the kioctx.
729 */ 729 */
730static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, 730static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
731 struct completion *requests_done) 731 struct completion *requests_done)
732{ 732{
733 if (!atomic_xchg(&ctx->dead, 1)) { 733 struct kioctx_table *table;
734 struct kioctx_table *table;
735 734
736 spin_lock(&mm->ioctx_lock); 735 if (atomic_xchg(&ctx->dead, 1))
737 rcu_read_lock(); 736 return -EINVAL;
738 table = rcu_dereference(mm->ioctx_table);
739 737
740 WARN_ON(ctx != table->table[ctx->id]);
741 table->table[ctx->id] = NULL;
742 rcu_read_unlock();
743 spin_unlock(&mm->ioctx_lock);
744 738
745 /* percpu_ref_kill() will do the necessary call_rcu() */ 739 spin_lock(&mm->ioctx_lock);
746 wake_up_all(&ctx->wait); 740 rcu_read_lock();
741 table = rcu_dereference(mm->ioctx_table);
747 742
748 /* 743 WARN_ON(ctx != table->table[ctx->id]);
749 * It'd be more correct to do this in free_ioctx(), after all 744 table->table[ctx->id] = NULL;
750 * the outstanding kiocbs have finished - but by then io_destroy 745 rcu_read_unlock();
751 * has already returned, so io_setup() could potentially return 746 spin_unlock(&mm->ioctx_lock);
752 * -EAGAIN with no ioctxs actually in use (as far as userspace
753 * could tell).
754 */
755 aio_nr_sub(ctx->max_reqs);
756 747
757 if (ctx->mmap_size) 748 /* percpu_ref_kill() will do the necessary call_rcu() */
758 vm_munmap(ctx->mmap_base, ctx->mmap_size); 749 wake_up_all(&ctx->wait);
759 750
760 ctx->requests_done = requests_done; 751 /*
761 percpu_ref_kill(&ctx->users); 752 * It'd be more correct to do this in free_ioctx(), after all
762 } else { 753 * the outstanding kiocbs have finished - but by then io_destroy
763 if (requests_done) 754 * has already returned, so io_setup() could potentially return
764 complete(requests_done); 755 * -EAGAIN with no ioctxs actually in use (as far as userspace
765 } 756 * could tell).
757 */
758 aio_nr_sub(ctx->max_reqs);
759
760 if (ctx->mmap_size)
761 vm_munmap(ctx->mmap_base, ctx->mmap_size);
762
763 ctx->requests_done = requests_done;
764 percpu_ref_kill(&ctx->users);
765 return 0;
766} 766}
767 767
768/* wait_on_sync_kiocb: 768/* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1219 if (likely(NULL != ioctx)) { 1219 if (likely(NULL != ioctx)) {
1220 struct completion requests_done = 1220 struct completion requests_done =
1221 COMPLETION_INITIALIZER_ONSTACK(requests_done); 1221 COMPLETION_INITIALIZER_ONSTACK(requests_done);
1222 int ret;
1222 1223
1223 /* Pass requests_done to kill_ioctx() where it can be set 1224 /* Pass requests_done to kill_ioctx() where it can be set
1224 * in a thread-safe way. If we try to set it here then we have 1225 * in a thread-safe way. If we try to set it here then we have
1225 * a race condition if two io_destroy() called simultaneously. 1226 * a race condition if two io_destroy() called simultaneously.
1226 */ 1227 */
1227 kill_ioctx(current->mm, ioctx, &requests_done); 1228 ret = kill_ioctx(current->mm, ioctx, &requests_done);
1228 percpu_ref_put(&ioctx->users); 1229 percpu_ref_put(&ioctx->users);
1229 1230
1230 /* Wait until all IO for the context are done. Otherwise kernel 1231 /* Wait until all IO for the context are done. Otherwise kernel
1231 * keep using user-space buffers even if user thinks the context 1232 * keep using user-space buffers even if user thinks the context
1232 * is destroyed. 1233 * is destroyed.
1233 */ 1234 */
1234 wait_for_completion(&requests_done); 1235 if (!ret)
1236 wait_for_completion(&requests_done);
1235 1237
1236 return 0; 1238 return ret;
1237 } 1239 }
1238 pr_debug("EINVAL: io_destroy: invalid context id\n"); 1240 pr_debug("EINVAL: io_destroy: invalid context id\n");
1239 return -EINVAL; 1241 return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1595 1597
1596 kiocb = lookup_kiocb(ctx, iocb, key); 1598 kiocb = lookup_kiocb(ctx, iocb, key);
1597 if (kiocb) 1599 if (kiocb)
1598 ret = kiocb_cancel(ctx, kiocb); 1600 ret = kiocb_cancel(kiocb);
1599 else 1601 else
1600 ret = -EINVAL; 1602 ret = -EINVAL;
1601 1603
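
The aio changes give kill_ioctx() a return value: it now fails with -EINVAL if the context was already marked dead (a second, racing io_destroy()), and io_destroy() only waits for outstanding requests when its own kill actually succeeded. The resulting caller logic is essentially this sketch of the new sys_io_destroy() path:

    ret = kill_ioctx(current->mm, ioctx, &requests_done);
    percpu_ref_put(&ioctx->users);

    /*
     * Only the caller whose kill_ioctx() succeeded owns requests_done;
     * a racing io_destroy() gets -EINVAL back and must not wait on it.
     */
    if (!ret)
        wait_for_completion(&requests_done);
    return ret;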
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b7e2c1c1ef36..be91397f4e92 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,11 +1259,19 @@ struct btrfs_block_group_cache {
1259 spinlock_t lock; 1259 spinlock_t lock;
1260 u64 pinned; 1260 u64 pinned;
1261 u64 reserved; 1261 u64 reserved;
1262 u64 delalloc_bytes;
1262 u64 bytes_super; 1263 u64 bytes_super;
1263 u64 flags; 1264 u64 flags;
1264 u64 sectorsize; 1265 u64 sectorsize;
1265 u64 cache_generation; 1266 u64 cache_generation;
1266 1267
1268 /*
1269 * It is just used for the delayed data space allocation because
1270 * only the data space allocation and the relative metadata update
1271 * can be done cross the transaction.
1272 */
1273 struct rw_semaphore data_rwsem;
1274
1267 /* for raid56, this is a full stripe, without parity */ 1275 /* for raid56, this is a full stripe, without parity */
1268 unsigned long full_stripe_len; 1276 unsigned long full_stripe_len;
1269 1277
@@ -3316,7 +3324,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
3316 struct btrfs_key *ins); 3324 struct btrfs_key *ins);
3317int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3325int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
3318 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3326 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
3319 struct btrfs_key *ins, int is_data); 3327 struct btrfs_key *ins, int is_data, int delalloc);
3320int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3328int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3321 struct extent_buffer *buf, int full_backref, int no_quota); 3329 struct extent_buffer *buf, int full_backref, int no_quota);
3322int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3330int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3330,7 +3338,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
3330 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3338 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
3331 u64 owner, u64 offset, int no_quota); 3339 u64 owner, u64 offset, int no_quota);
3332 3340
3333int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); 3341int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
3342 int delalloc);
3334int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 3343int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
3335 u64 start, u64 len); 3344 u64 start, u64 len);
3336void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 3345void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
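
The new data_rwsem in struct btrfs_block_group_cache, together with the delalloc flag threaded through btrfs_reserve_extent()/btrfs_free_reserved_extent() and btrfs_update_reserved_bytes(), lets a delalloc writer hold a block group in read mode across the data allocation and the related metadata update, while space-cache writeout skips any group whose delalloc_bytes is non-zero. The pairing is easiest to see with the helpers added in the extent-tree.c hunk below; as a sketch of how an allocation brackets a candidate group:

    btrfs_grab_block_group(block_group, delalloc);    /* get ref + down_read(&data_rwsem) */

    /* ... try to carve num_bytes out of this group and update reserved bytes ... */

    btrfs_release_block_group(block_group, delalloc); /* up_read(&data_rwsem) + put ref */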
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index fafb3e53ecde..99c253918208 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -105,7 +105,8 @@ static int find_next_key(struct btrfs_path *path, int level,
105static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 105static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106 int dump_block_groups); 106 int dump_block_groups);
107static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 107static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108 u64 num_bytes, int reserve); 108 u64 num_bytes, int reserve,
109 int delalloc);
109static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 110static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
110 u64 num_bytes); 111 u64 num_bytes);
111int btrfs_pin_extent(struct btrfs_root *root, 112int btrfs_pin_extent(struct btrfs_root *root,
@@ -3260,7 +3261,8 @@ again:
3260 3261
3261 spin_lock(&block_group->lock); 3262 spin_lock(&block_group->lock);
3262 if (block_group->cached != BTRFS_CACHE_FINISHED || 3263 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3263 !btrfs_test_opt(root, SPACE_CACHE)) { 3264 !btrfs_test_opt(root, SPACE_CACHE) ||
3265 block_group->delalloc_bytes) {
3264 /* 3266 /*
3265 * don't bother trying to write stuff out _if_ 3267 * don't bother trying to write stuff out _if_
3266 * a) we're not cached, 3268 * a) we're not cached,
@@ -5613,6 +5615,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
5613 * @cache: The cache we are manipulating 5615 * @cache: The cache we are manipulating
5614 * @num_bytes: The number of bytes in question 5616 * @num_bytes: The number of bytes in question
5615 * @reserve: One of the reservation enums 5617 * @reserve: One of the reservation enums
5618 * @delalloc: The blocks are allocated for the delalloc write
5616 * 5619 *
5617 * This is called by the allocator when it reserves space, or by somebody who is 5620 * This is called by the allocator when it reserves space, or by somebody who is
5618 * freeing space that was never actually used on disk. For example if you 5621 * freeing space that was never actually used on disk. For example if you
@@ -5631,7 +5634,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
5631 * succeeds. 5634 * succeeds.
5632 */ 5635 */
5633static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 5636static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5634 u64 num_bytes, int reserve) 5637 u64 num_bytes, int reserve, int delalloc)
5635{ 5638{
5636 struct btrfs_space_info *space_info = cache->space_info; 5639 struct btrfs_space_info *space_info = cache->space_info;
5637 int ret = 0; 5640 int ret = 0;
@@ -5650,12 +5653,18 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5650 num_bytes, 0); 5653 num_bytes, 0);
5651 space_info->bytes_may_use -= num_bytes; 5654 space_info->bytes_may_use -= num_bytes;
5652 } 5655 }
5656
5657 if (delalloc)
5658 cache->delalloc_bytes += num_bytes;
5653 } 5659 }
5654 } else { 5660 } else {
5655 if (cache->ro) 5661 if (cache->ro)
5656 space_info->bytes_readonly += num_bytes; 5662 space_info->bytes_readonly += num_bytes;
5657 cache->reserved -= num_bytes; 5663 cache->reserved -= num_bytes;
5658 space_info->bytes_reserved -= num_bytes; 5664 space_info->bytes_reserved -= num_bytes;
5665
5666 if (delalloc)
5667 cache->delalloc_bytes -= num_bytes;
5659 } 5668 }
5660 spin_unlock(&cache->lock); 5669 spin_unlock(&cache->lock);
5661 spin_unlock(&space_info->lock); 5670 spin_unlock(&space_info->lock);
@@ -6206,7 +6215,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6206 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 6215 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6207 6216
6208 btrfs_add_free_space(cache, buf->start, buf->len); 6217 btrfs_add_free_space(cache, buf->start, buf->len);
6209 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE); 6218 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6210 trace_btrfs_reserved_extent_free(root, buf->start, buf->len); 6219 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6211 pin = 0; 6220 pin = 0;
6212 } 6221 }
@@ -6365,6 +6374,70 @@ enum btrfs_loop_type {
6365 LOOP_NO_EMPTY_SIZE = 3, 6374 LOOP_NO_EMPTY_SIZE = 3,
6366}; 6375};
6367 6376
6377static inline void
6378btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6379 int delalloc)
6380{
6381 if (delalloc)
6382 down_read(&cache->data_rwsem);
6383}
6384
6385static inline void
6386btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6387 int delalloc)
6388{
6389 btrfs_get_block_group(cache);
6390 if (delalloc)
6391 down_read(&cache->data_rwsem);
6392}
6393
6394static struct btrfs_block_group_cache *
6395btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6396 struct btrfs_free_cluster *cluster,
6397 int delalloc)
6398{
6399 struct btrfs_block_group_cache *used_bg;
6400 bool locked = false;
6401again:
6402 spin_lock(&cluster->refill_lock);
6403 if (locked) {
6404 if (used_bg == cluster->block_group)
6405 return used_bg;
6406
6407 up_read(&used_bg->data_rwsem);
6408 btrfs_put_block_group(used_bg);
6409 }
6410
6411 used_bg = cluster->block_group;
6412 if (!used_bg)
6413 return NULL;
6414
6415 if (used_bg == block_group)
6416 return used_bg;
6417
6418 btrfs_get_block_group(used_bg);
6419
6420 if (!delalloc)
6421 return used_bg;
6422
6423 if (down_read_trylock(&used_bg->data_rwsem))
6424 return used_bg;
6425
6426 spin_unlock(&cluster->refill_lock);
6427 down_read(&used_bg->data_rwsem);
6428 locked = true;
6429 goto again;
6430}
6431
6432static inline void
6433btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6434 int delalloc)
6435{
6436 if (delalloc)
6437 up_read(&cache->data_rwsem);
6438 btrfs_put_block_group(cache);
6439}
6440
6368/* 6441/*
6369 * walks the btree of allocated extents and find a hole of a given size. 6442 * walks the btree of allocated extents and find a hole of a given size.
6370 * The key ins is changed to record the hole: 6443 * The key ins is changed to record the hole:
@@ -6379,7 +6452,7 @@ enum btrfs_loop_type {
6379static noinline int find_free_extent(struct btrfs_root *orig_root, 6452static noinline int find_free_extent(struct btrfs_root *orig_root,
6380 u64 num_bytes, u64 empty_size, 6453 u64 num_bytes, u64 empty_size,
6381 u64 hint_byte, struct btrfs_key *ins, 6454 u64 hint_byte, struct btrfs_key *ins,
6382 u64 flags) 6455 u64 flags, int delalloc)
6383{ 6456{
6384 int ret = 0; 6457 int ret = 0;
6385 struct btrfs_root *root = orig_root->fs_info->extent_root; 6458 struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -6467,6 +6540,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
6467 up_read(&space_info->groups_sem); 6540 up_read(&space_info->groups_sem);
6468 } else { 6541 } else {
6469 index = get_block_group_index(block_group); 6542 index = get_block_group_index(block_group);
6543 btrfs_lock_block_group(block_group, delalloc);
6470 goto have_block_group; 6544 goto have_block_group;
6471 } 6545 }
6472 } else if (block_group) { 6546 } else if (block_group) {
@@ -6481,7 +6555,7 @@ search:
6481 u64 offset; 6555 u64 offset;
6482 int cached; 6556 int cached;
6483 6557
6484 btrfs_get_block_group(block_group); 6558 btrfs_grab_block_group(block_group, delalloc);
6485 search_start = block_group->key.objectid; 6559 search_start = block_group->key.objectid;
6486 6560
6487 /* 6561 /*
@@ -6529,16 +6603,16 @@ have_block_group:
6529 * the refill lock keeps out other 6603 * the refill lock keeps out other
6530 * people trying to start a new cluster 6604 * people trying to start a new cluster
6531 */ 6605 */
6532 spin_lock(&last_ptr->refill_lock); 6606 used_block_group = btrfs_lock_cluster(block_group,
6533 used_block_group = last_ptr->block_group; 6607 last_ptr,
6534 if (used_block_group != block_group && 6608 delalloc);
6535 (!used_block_group || 6609 if (!used_block_group)
6536 used_block_group->ro ||
6537 !block_group_bits(used_block_group, flags)))
6538 goto refill_cluster; 6610 goto refill_cluster;
6539 6611
6540 if (used_block_group != block_group) 6612 if (used_block_group != block_group &&
6541 btrfs_get_block_group(used_block_group); 6613 (used_block_group->ro ||
6614 !block_group_bits(used_block_group, flags)))
6615 goto release_cluster;
6542 6616
6543 offset = btrfs_alloc_from_cluster(used_block_group, 6617 offset = btrfs_alloc_from_cluster(used_block_group,
6544 last_ptr, 6618 last_ptr,
@@ -6552,16 +6626,15 @@ have_block_group:
6552 used_block_group, 6626 used_block_group,
6553 search_start, num_bytes); 6627 search_start, num_bytes);
6554 if (used_block_group != block_group) { 6628 if (used_block_group != block_group) {
6555 btrfs_put_block_group(block_group); 6629 btrfs_release_block_group(block_group,
6630 delalloc);
6556 block_group = used_block_group; 6631 block_group = used_block_group;
6557 } 6632 }
6558 goto checks; 6633 goto checks;
6559 } 6634 }
6560 6635
6561 WARN_ON(last_ptr->block_group != used_block_group); 6636 WARN_ON(last_ptr->block_group != used_block_group);
6562 if (used_block_group != block_group) 6637release_cluster:
6563 btrfs_put_block_group(used_block_group);
6564refill_cluster:
6565 /* If we are on LOOP_NO_EMPTY_SIZE, we can't 6638 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6566 * set up a new cluster, so let's just skip it 6639 * set up a new cluster, so let's just skip it
6567 * and let the allocator find whatever block 6640 * and let the allocator find whatever block
@@ -6578,8 +6651,10 @@ refill_cluster:
6578 * succeeding in the unclustered 6651 * succeeding in the unclustered
6579 * allocation. */ 6652 * allocation. */
6580 if (loop >= LOOP_NO_EMPTY_SIZE && 6653 if (loop >= LOOP_NO_EMPTY_SIZE &&
6581 last_ptr->block_group != block_group) { 6654 used_block_group != block_group) {
6582 spin_unlock(&last_ptr->refill_lock); 6655 spin_unlock(&last_ptr->refill_lock);
6656 btrfs_release_block_group(used_block_group,
6657 delalloc);
6583 goto unclustered_alloc; 6658 goto unclustered_alloc;
6584 } 6659 }
6585 6660
@@ -6589,6 +6664,10 @@ refill_cluster:
6589 */ 6664 */
6590 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6665 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6591 6666
6667 if (used_block_group != block_group)
6668 btrfs_release_block_group(used_block_group,
6669 delalloc);
6670refill_cluster:
6592 if (loop >= LOOP_NO_EMPTY_SIZE) { 6671 if (loop >= LOOP_NO_EMPTY_SIZE) {
6593 spin_unlock(&last_ptr->refill_lock); 6672 spin_unlock(&last_ptr->refill_lock);
6594 goto unclustered_alloc; 6673 goto unclustered_alloc;
@@ -6696,7 +6775,7 @@ checks:
6696 BUG_ON(offset > search_start); 6775 BUG_ON(offset > search_start);
6697 6776
6698 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 6777 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6699 alloc_type); 6778 alloc_type, delalloc);
6700 if (ret == -EAGAIN) { 6779 if (ret == -EAGAIN) {
6701 btrfs_add_free_space(block_group, offset, num_bytes); 6780 btrfs_add_free_space(block_group, offset, num_bytes);
6702 goto loop; 6781 goto loop;
@@ -6708,13 +6787,13 @@ checks:
6708 6787
6709 trace_btrfs_reserve_extent(orig_root, block_group, 6788 trace_btrfs_reserve_extent(orig_root, block_group,
6710 search_start, num_bytes); 6789 search_start, num_bytes);
6711 btrfs_put_block_group(block_group); 6790 btrfs_release_block_group(block_group, delalloc);
6712 break; 6791 break;
6713loop: 6792loop:
6714 failed_cluster_refill = false; 6793 failed_cluster_refill = false;
6715 failed_alloc = false; 6794 failed_alloc = false;
6716 BUG_ON(index != get_block_group_index(block_group)); 6795 BUG_ON(index != get_block_group_index(block_group));
6717 btrfs_put_block_group(block_group); 6796 btrfs_release_block_group(block_group, delalloc);
6718 } 6797 }
6719 up_read(&space_info->groups_sem); 6798 up_read(&space_info->groups_sem);
6720 6799
@@ -6827,7 +6906,7 @@ again:
6827int btrfs_reserve_extent(struct btrfs_root *root, 6906int btrfs_reserve_extent(struct btrfs_root *root,
6828 u64 num_bytes, u64 min_alloc_size, 6907 u64 num_bytes, u64 min_alloc_size,
6829 u64 empty_size, u64 hint_byte, 6908 u64 empty_size, u64 hint_byte,
6830 struct btrfs_key *ins, int is_data) 6909 struct btrfs_key *ins, int is_data, int delalloc)
6831{ 6910{
6832 bool final_tried = false; 6911 bool final_tried = false;
6833 u64 flags; 6912 u64 flags;
@@ -6837,7 +6916,7 @@ int btrfs_reserve_extent(struct btrfs_root *root,
6837again: 6916again:
6838 WARN_ON(num_bytes < root->sectorsize); 6917 WARN_ON(num_bytes < root->sectorsize);
6839 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, 6918 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6840 flags); 6919 flags, delalloc);
6841 6920
6842 if (ret == -ENOSPC) { 6921 if (ret == -ENOSPC) {
6843 if (!final_tried && ins->offset) { 6922 if (!final_tried && ins->offset) {
@@ -6862,7 +6941,8 @@ again:
6862} 6941}
6863 6942
6864static int __btrfs_free_reserved_extent(struct btrfs_root *root, 6943static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6865 u64 start, u64 len, int pin) 6944 u64 start, u64 len,
6945 int pin, int delalloc)
6866{ 6946{
6867 struct btrfs_block_group_cache *cache; 6947 struct btrfs_block_group_cache *cache;
6868 int ret = 0; 6948 int ret = 0;
@@ -6881,7 +6961,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6881 pin_down_extent(root, cache, start, len, 1); 6961 pin_down_extent(root, cache, start, len, 1);
6882 else { 6962 else {
6883 btrfs_add_free_space(cache, start, len); 6963 btrfs_add_free_space(cache, start, len);
6884 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE); 6964 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6885 } 6965 }
6886 btrfs_put_block_group(cache); 6966 btrfs_put_block_group(cache);
6887 6967
@@ -6891,15 +6971,15 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6891} 6971}
6892 6972
6893int btrfs_free_reserved_extent(struct btrfs_root *root, 6973int btrfs_free_reserved_extent(struct btrfs_root *root,
6894 u64 start, u64 len) 6974 u64 start, u64 len, int delalloc)
6895{ 6975{
6896 return __btrfs_free_reserved_extent(root, start, len, 0); 6976 return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6897} 6977}
6898 6978
6899int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 6979int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6900 u64 start, u64 len) 6980 u64 start, u64 len)
6901{ 6981{
6902 return __btrfs_free_reserved_extent(root, start, len, 1); 6982 return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6903} 6983}
6904 6984
6905static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 6985static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
@@ -7114,7 +7194,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7114 return -EINVAL; 7194 return -EINVAL;
7115 7195
7116 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 7196 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7117 RESERVE_ALLOC_NO_ACCOUNT); 7197 RESERVE_ALLOC_NO_ACCOUNT, 0);
7118 BUG_ON(ret); /* logic error */ 7198 BUG_ON(ret); /* logic error */
7119 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 7199 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7120 0, owner, offset, ins, 1); 7200 0, owner, offset, ins, 1);
@@ -7256,7 +7336,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
7256 return ERR_CAST(block_rsv); 7336 return ERR_CAST(block_rsv);
7257 7337
7258 ret = btrfs_reserve_extent(root, blocksize, blocksize, 7338 ret = btrfs_reserve_extent(root, blocksize, blocksize,
7259 empty_size, hint, &ins, 0); 7339 empty_size, hint, &ins, 0, 0);
7260 if (ret) { 7340 if (ret) {
7261 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 7341 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7262 return ERR_PTR(ret); 7342 return ERR_PTR(ret);
@@ -8659,6 +8739,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8659 start); 8739 start);
8660 atomic_set(&cache->count, 1); 8740 atomic_set(&cache->count, 1);
8661 spin_lock_init(&cache->lock); 8741 spin_lock_init(&cache->lock);
8742 init_rwsem(&cache->data_rwsem);
8662 INIT_LIST_HEAD(&cache->list); 8743 INIT_LIST_HEAD(&cache->list);
8663 INIT_LIST_HEAD(&cache->cluster_list); 8744 INIT_LIST_HEAD(&cache->cluster_list);
8664 INIT_LIST_HEAD(&cache->new_bg_list); 8745 INIT_LIST_HEAD(&cache->new_bg_list);
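The extent-tree.c hunks above add a per-block-group data_rwsem and thread a delalloc flag through find_free_extent()/btrfs_reserve_extent(), so delalloc (data) allocations hold the semaphore shared while the free-space-cache writer holds it exclusive. As a rough illustration of the ordering the new helpers encode, here is a user-space sketch with hypothetical stand-in types (not the real btrfs structures), using pthreads in place of the kernel rwsem:

/* bg stands in for btrfs_block_group_cache; data_lock for cache->data_rwsem. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct bg {
	atomic_int refs;
	pthread_rwlock_t data_lock;
};

/* mirrors btrfs_grab_block_group(): take a reference first, then the shared lock */
void bg_grab(struct bg *bg, int delalloc)
{
	atomic_fetch_add(&bg->refs, 1);
	if (delalloc)
		pthread_rwlock_rdlock(&bg->data_lock);
}

/* mirrors btrfs_release_block_group(): drop the lock before the reference */
void bg_release(struct bg *bg, int delalloc)
{
	if (delalloc)
		pthread_rwlock_unlock(&bg->data_lock);
	if (atomic_fetch_sub(&bg->refs, 1) == 1)
		free(bg);
}

/* The cache writer (see the free-space-cache.c hunk further down) takes the
 * same lock exclusively, e.g. pthread_rwlock_wrlock(&bg->data_lock), so an
 * in-flight delalloc allocation and a cache write-out never overlap. */

The non-delalloc callers (metadata allocations, preallocation, log replay) pass delalloc == 0 and skip the semaphore entirely, which is why the flag has to be carried all the way down from btrfs_reserve_extent().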
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f25a9092b946..a389820d158b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2354,7 +2354,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2354{ 2354{
2355 int uptodate = (err == 0); 2355 int uptodate = (err == 0);
2356 struct extent_io_tree *tree; 2356 struct extent_io_tree *tree;
2357 int ret; 2357 int ret = 0;
2358 2358
2359 tree = &BTRFS_I(page->mapping->host)->io_tree; 2359 tree = &BTRFS_I(page->mapping->host)->io_tree;
2360 2360
@@ -5068,6 +5068,43 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5068 } 5068 }
5069} 5069}
5070 5070
5071int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5072 unsigned long start,
5073 unsigned long len)
5074{
5075 size_t cur;
5076 size_t offset;
5077 struct page *page;
5078 char *kaddr;
5079 char __user *dst = (char __user *)dstv;
5080 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5081 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5082 int ret = 0;
5083
5084 WARN_ON(start > eb->len);
5085 WARN_ON(start + len > eb->start + eb->len);
5086
5087 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5088
5089 while (len > 0) {
5090 page = extent_buffer_page(eb, i);
5091
5092 cur = min(len, (PAGE_CACHE_SIZE - offset));
5093 kaddr = page_address(page);
5094 if (copy_to_user(dst, kaddr + offset, cur)) {
5095 ret = -EFAULT;
5096 break;
5097 }
5098
5099 dst += cur;
5100 len -= cur;
5101 offset = 0;
5102 i++;
5103 }
5104
5105 return ret;
5106}
5107
5071int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, 5108int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5072 unsigned long min_len, char **map, 5109 unsigned long min_len, char **map,
5073 unsigned long *map_start, 5110 unsigned long *map_start,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 8b63f2d46518..ccc264e7bde1 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -158,7 +158,6 @@ struct extent_buffer {
158 * to unlock 158 * to unlock
159 */ 159 */
160 wait_queue_head_t read_lock_wq; 160 wait_queue_head_t read_lock_wq;
161 wait_queue_head_t lock_wq;
162 struct page *pages[INLINE_EXTENT_BUFFER_PAGES]; 161 struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
163#ifdef CONFIG_BTRFS_DEBUG 162#ifdef CONFIG_BTRFS_DEBUG
164 struct list_head leak_list; 163 struct list_head leak_list;
@@ -304,6 +303,9 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
304void read_extent_buffer(struct extent_buffer *eb, void *dst, 303void read_extent_buffer(struct extent_buffer *eb, void *dst,
305 unsigned long start, 304 unsigned long start,
306 unsigned long len); 305 unsigned long len);
306int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
307 unsigned long start,
308 unsigned long len);
307void write_extent_buffer(struct extent_buffer *eb, const void *src, 309void write_extent_buffer(struct extent_buffer *eb, const void *src,
308 unsigned long start, unsigned long len); 310 unsigned long start, unsigned long len);
309void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 311void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 1874aee69c86..225302b39afb 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -75,6 +75,8 @@ void free_extent_map(struct extent_map *em)
75 if (atomic_dec_and_test(&em->refs)) { 75 if (atomic_dec_and_test(&em->refs)) {
76 WARN_ON(extent_map_in_tree(em)); 76 WARN_ON(extent_map_in_tree(em));
77 WARN_ON(!list_empty(&em->list)); 77 WARN_ON(!list_empty(&em->list));
78 if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
79 kfree(em->bdev);
78 kmem_cache_free(extent_map_cache, em); 80 kmem_cache_free(extent_map_cache, em);
79 } 81 }
80} 82}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index e7fd8a56a140..b2991fd8583e 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -15,6 +15,7 @@
15#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */ 15#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
16#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */ 16#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
17#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */ 17#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
18#define EXTENT_FLAG_FS_MAPPING 6 /* filesystem extent mapping type */
18 19
19struct extent_map { 20struct extent_map {
20 struct rb_node rb_node; 21 struct rb_node rb_node;
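The new EXTENT_FLAG_FS_MAPPING bit marks extent maps that describe a chunk mapping, i.e. whose em->bdev really points at a struct map_lookup. With the flag set, free_extent_map() (extent_map.c hunk above) frees that mapping on the last reference, which is why the manual kfree(map) calls disappear from volumes.c further down. A toy user-space model of the ownership rule, with hypothetical names:

#include <stdlib.h>

struct ex_map_lookup { int num_stripes; };   /* stands in for map_lookup */

struct ex_extent_map {
	int refs;
	int fs_mapping;                      /* models EXTENT_FLAG_FS_MAPPING */
	struct ex_map_lookup *chunk_map;     /* models em->bdev for chunk maps */
};

void ex_free_extent_map(struct ex_extent_map *em)
{
	if (--em->refs)
		return;
	if (em->fs_mapping)
		free(em->chunk_map);  /* previously each error path had to kfree() it */
	free(em);
}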
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 372b05ff1943..2b0a627cb5f9 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -274,18 +274,32 @@ struct io_ctl {
274}; 274};
275 275
276static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, 276static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
277 struct btrfs_root *root) 277 struct btrfs_root *root, int write)
278{ 278{
279 int num_pages;
280 int check_crcs = 0;
281
282 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
283 PAGE_CACHE_SHIFT;
284
285 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
286 check_crcs = 1;
287
288 /* Make sure we can fit our crcs into the first page */
289 if (write && check_crcs &&
290 (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
291 return -ENOSPC;
292
279 memset(io_ctl, 0, sizeof(struct io_ctl)); 293 memset(io_ctl, 0, sizeof(struct io_ctl));
280 io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 294
281 PAGE_CACHE_SHIFT; 295 io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
282 io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
283 GFP_NOFS);
284 if (!io_ctl->pages) 296 if (!io_ctl->pages)
285 return -ENOMEM; 297 return -ENOMEM;
298
299 io_ctl->num_pages = num_pages;
286 io_ctl->root = root; 300 io_ctl->root = root;
287 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) 301 io_ctl->check_crcs = check_crcs;
288 io_ctl->check_crcs = 1; 302
289 return 0; 303 return 0;
290} 304}
291 305
@@ -666,6 +680,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
666 generation = btrfs_free_space_generation(leaf, header); 680 generation = btrfs_free_space_generation(leaf, header);
667 btrfs_release_path(path); 681 btrfs_release_path(path);
668 682
683 if (!BTRFS_I(inode)->generation) {
684 btrfs_info(root->fs_info,
685 "The free space cache file (%llu) is invalid. skip it\n",
686 offset);
687 return 0;
688 }
689
669 if (BTRFS_I(inode)->generation != generation) { 690 if (BTRFS_I(inode)->generation != generation) {
670 btrfs_err(root->fs_info, 691 btrfs_err(root->fs_info,
671 "free space inode generation (%llu) " 692 "free space inode generation (%llu) "
@@ -677,7 +698,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
677 if (!num_entries) 698 if (!num_entries)
678 return 0; 699 return 0;
679 700
680 ret = io_ctl_init(&io_ctl, inode, root); 701 ret = io_ctl_init(&io_ctl, inode, root, 0);
681 if (ret) 702 if (ret)
682 return ret; 703 return ret;
683 704
@@ -957,19 +978,18 @@ fail:
957} 978}
958 979
959static noinline_for_stack int 980static noinline_for_stack int
960add_ioctl_entries(struct btrfs_root *root, 981write_pinned_extent_entries(struct btrfs_root *root,
961 struct inode *inode, 982 struct btrfs_block_group_cache *block_group,
962 struct btrfs_block_group_cache *block_group, 983 struct io_ctl *io_ctl,
963 struct io_ctl *io_ctl, 984 int *entries)
964 struct extent_state **cached_state,
965 struct list_head *bitmap_list,
966 int *entries)
967{ 985{
968 u64 start, extent_start, extent_end, len; 986 u64 start, extent_start, extent_end, len;
969 struct list_head *pos, *n;
970 struct extent_io_tree *unpin = NULL; 987 struct extent_io_tree *unpin = NULL;
971 int ret; 988 int ret;
972 989
990 if (!block_group)
991 return 0;
992
973 /* 993 /*
974 * We want to add any pinned extents to our free space cache 994 * We want to add any pinned extents to our free space cache
975 * so we don't leak the space 995 * so we don't leak the space
@@ -979,23 +999,19 @@ add_ioctl_entries(struct btrfs_root *root,
979 */ 999 */
980 unpin = root->fs_info->pinned_extents; 1000 unpin = root->fs_info->pinned_extents;
981 1001
982 if (block_group) 1002 start = block_group->key.objectid;
983 start = block_group->key.objectid;
984 1003
985 while (block_group && (start < block_group->key.objectid + 1004 while (start < block_group->key.objectid + block_group->key.offset) {
986 block_group->key.offset)) {
987 ret = find_first_extent_bit(unpin, start, 1005 ret = find_first_extent_bit(unpin, start,
988 &extent_start, &extent_end, 1006 &extent_start, &extent_end,
989 EXTENT_DIRTY, NULL); 1007 EXTENT_DIRTY, NULL);
990 if (ret) { 1008 if (ret)
991 ret = 0; 1009 return 0;
992 break;
993 }
994 1010
995 /* This pinned extent is out of our range */ 1011 /* This pinned extent is out of our range */
996 if (extent_start >= block_group->key.objectid + 1012 if (extent_start >= block_group->key.objectid +
997 block_group->key.offset) 1013 block_group->key.offset)
998 break; 1014 return 0;
999 1015
1000 extent_start = max(extent_start, start); 1016 extent_start = max(extent_start, start);
1001 extent_end = min(block_group->key.objectid + 1017 extent_end = min(block_group->key.objectid +
@@ -1005,11 +1021,20 @@ add_ioctl_entries(struct btrfs_root *root,
1005 *entries += 1; 1021 *entries += 1;
1006 ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL); 1022 ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
1007 if (ret) 1023 if (ret)
1008 goto out_nospc; 1024 return -ENOSPC;
1009 1025
1010 start = extent_end; 1026 start = extent_end;
1011 } 1027 }
1012 1028
1029 return 0;
1030}
1031
1032static noinline_for_stack int
1033write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
1034{
1035 struct list_head *pos, *n;
1036 int ret;
1037
1013 /* Write out the bitmaps */ 1038 /* Write out the bitmaps */
1014 list_for_each_safe(pos, n, bitmap_list) { 1039 list_for_each_safe(pos, n, bitmap_list) {
1015 struct btrfs_free_space *entry = 1040 struct btrfs_free_space *entry =
@@ -1017,36 +1042,24 @@ add_ioctl_entries(struct btrfs_root *root,
1017 1042
1018 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); 1043 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
1019 if (ret) 1044 if (ret)
1020 goto out_nospc; 1045 return -ENOSPC;
1021 list_del_init(&entry->list); 1046 list_del_init(&entry->list);
1022 } 1047 }
1023 1048
1024 /* Zero out the rest of the pages just to make sure */ 1049 return 0;
1025 io_ctl_zero_remaining_pages(io_ctl); 1050}
1026
1027 ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
1028 0, i_size_read(inode), cached_state);
1029 io_ctl_drop_pages(io_ctl);
1030 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
1031 i_size_read(inode) - 1, cached_state, GFP_NOFS);
1032 1051
1033 if (ret) 1052static int flush_dirty_cache(struct inode *inode)
1034 goto fail; 1053{
1054 int ret;
1035 1055
1036 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 1056 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
1037 if (ret) { 1057 if (ret)
1038 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 1058 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1039 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, 1059 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
1040 GFP_NOFS); 1060 GFP_NOFS);
1041 goto fail;
1042 }
1043 return 0;
1044 1061
1045fail: 1062 return ret;
1046 return -1;
1047
1048out_nospc:
1049 return -ENOSPC;
1050} 1063}
1051 1064
1052static void noinline_for_stack 1065static void noinline_for_stack
@@ -1056,6 +1069,7 @@ cleanup_write_cache_enospc(struct inode *inode,
1056 struct list_head *bitmap_list) 1069 struct list_head *bitmap_list)
1057{ 1070{
1058 struct list_head *pos, *n; 1071 struct list_head *pos, *n;
1072
1059 list_for_each_safe(pos, n, bitmap_list) { 1073 list_for_each_safe(pos, n, bitmap_list) {
1060 struct btrfs_free_space *entry = 1074 struct btrfs_free_space *entry =
1061 list_entry(pos, struct btrfs_free_space, list); 1075 list_entry(pos, struct btrfs_free_space, list);
@@ -1088,64 +1102,104 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1088{ 1102{
1089 struct extent_state *cached_state = NULL; 1103 struct extent_state *cached_state = NULL;
1090 struct io_ctl io_ctl; 1104 struct io_ctl io_ctl;
1091 struct list_head bitmap_list; 1105 LIST_HEAD(bitmap_list);
1092 int entries = 0; 1106 int entries = 0;
1093 int bitmaps = 0; 1107 int bitmaps = 0;
1094 int ret; 1108 int ret;
1095 int err = -1;
1096
1097 INIT_LIST_HEAD(&bitmap_list);
1098 1109
1099 if (!i_size_read(inode)) 1110 if (!i_size_read(inode))
1100 return -1; 1111 return -1;
1101 1112
1102 ret = io_ctl_init(&io_ctl, inode, root); 1113 ret = io_ctl_init(&io_ctl, inode, root, 1);
1103 if (ret) 1114 if (ret)
1104 return -1; 1115 return -1;
1105 1116
1117 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1118 down_write(&block_group->data_rwsem);
1119 spin_lock(&block_group->lock);
1120 if (block_group->delalloc_bytes) {
1121 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1122 spin_unlock(&block_group->lock);
1123 up_write(&block_group->data_rwsem);
1124 BTRFS_I(inode)->generation = 0;
1125 ret = 0;
1126 goto out;
1127 }
1128 spin_unlock(&block_group->lock);
1129 }
1130
1106 /* Lock all pages first so we can lock the extent safely. */ 1131 /* Lock all pages first so we can lock the extent safely. */
1107 io_ctl_prepare_pages(&io_ctl, inode, 0); 1132 io_ctl_prepare_pages(&io_ctl, inode, 0);
1108 1133
1109 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1134 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1110 0, &cached_state); 1135 0, &cached_state);
1111 1136
1112
1113 /* Make sure we can fit our crcs into the first page */
1114 if (io_ctl.check_crcs &&
1115 (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
1116 goto out_nospc;
1117
1118 io_ctl_set_generation(&io_ctl, trans->transid); 1137 io_ctl_set_generation(&io_ctl, trans->transid);
1119 1138
1139 /* Write out the extent entries in the free space cache */
1120 ret = write_cache_extent_entries(&io_ctl, ctl, 1140 ret = write_cache_extent_entries(&io_ctl, ctl,
1121 block_group, &entries, &bitmaps, 1141 block_group, &entries, &bitmaps,
1122 &bitmap_list); 1142 &bitmap_list);
1123 if (ret) 1143 if (ret)
1124 goto out_nospc; 1144 goto out_nospc;
1125 1145
1126 ret = add_ioctl_entries(root, inode, block_group, &io_ctl, 1146 /*
1127 &cached_state, &bitmap_list, &entries); 1147 * Some spaces that are freed in the current transaction are pinned,
 1148 * and they will be added into the free space cache after the transaction
 1149 * is committed, so we shouldn't lose them.
1150 */
1151 ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
1152 if (ret)
1153 goto out_nospc;
1128 1154
1129 if (ret == -ENOSPC) 1155 /* At last, we write out all the bitmaps. */
1156 ret = write_bitmap_entries(&io_ctl, &bitmap_list);
1157 if (ret)
1130 goto out_nospc; 1158 goto out_nospc;
1131 else if (ret) 1159
1160 /* Zero out the rest of the pages just to make sure */
1161 io_ctl_zero_remaining_pages(&io_ctl);
1162
1163 /* Everything is written out, now we dirty the pages in the file. */
1164 ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
1165 0, i_size_read(inode), &cached_state);
1166 if (ret)
1167 goto out_nospc;
1168
1169 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1170 up_write(&block_group->data_rwsem);
1171 /*
1172 * Release the pages and unlock the extent, we will flush
1173 * them out later
1174 */
1175 io_ctl_drop_pages(&io_ctl);
1176
1177 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
1178 i_size_read(inode) - 1, &cached_state, GFP_NOFS);
1179
1180 /* Flush the dirty pages in the cache file. */
1181 ret = flush_dirty_cache(inode);
1182 if (ret)
1132 goto out; 1183 goto out;
1133 1184
1134 err = update_cache_item(trans, root, inode, path, offset, 1185 /* Update the cache item to tell everyone this cache file is valid. */
1186 ret = update_cache_item(trans, root, inode, path, offset,
1135 entries, bitmaps); 1187 entries, bitmaps);
1136
1137out: 1188out:
1138 io_ctl_free(&io_ctl); 1189 io_ctl_free(&io_ctl);
1139 if (err) { 1190 if (ret) {
1140 invalidate_inode_pages2(inode->i_mapping); 1191 invalidate_inode_pages2(inode->i_mapping);
1141 BTRFS_I(inode)->generation = 0; 1192 BTRFS_I(inode)->generation = 0;
1142 } 1193 }
1143 btrfs_update_inode(trans, root, inode); 1194 btrfs_update_inode(trans, root, inode);
1144 return err; 1195 return ret;
1145 1196
1146out_nospc: 1197out_nospc:
1147
1148 cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list); 1198 cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
1199
1200 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1201 up_write(&block_group->data_rwsem);
1202
1149 goto out; 1203 goto out;
1150} 1204}
1151 1205
@@ -1165,6 +1219,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
1165 spin_unlock(&block_group->lock); 1219 spin_unlock(&block_group->lock);
1166 return 0; 1220 return 0;
1167 } 1221 }
1222
1223 if (block_group->delalloc_bytes) {
1224 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1225 spin_unlock(&block_group->lock);
1226 return 0;
1227 }
1168 spin_unlock(&block_group->lock); 1228 spin_unlock(&block_group->lock);
1169 1229
1170 inode = lookup_free_space_inode(root, block_group, path); 1230 inode = lookup_free_space_inode(root, block_group, path);
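io_ctl_init() now takes a write flag and refuses the write-out up front when the per-page crc array cannot fit into the first page, instead of discovering that in __btrfs_write_out_cache() after the pages are already locked (the check removed in the hunk above). A small stand-alone illustration of where that limit sits, assuming 4 KiB pages and check_crcs set (any inode other than the free-ino cache):

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL   /* assumption: PAGE_CACHE_SIZE == 4 KiB */

/* mirrors the "num_pages * sizeof(u32) >= PAGE_CACHE_SIZE" test in io_ctl_init() */
int crcs_fit_first_page(unsigned long cache_file_size)
{
	unsigned long num_pages =
		(cache_file_size + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

	return num_pages * sizeof(uint32_t) < EX_PAGE_SIZE;
}

int main(void)
{
	/* 1023 pages of crcs (4092 bytes) still fit in page 0; 1024 do not */
	printf("%d %d\n",
	       crcs_fit_first_page(1023 * EX_PAGE_SIZE),
	       crcs_fit_first_page(1024 * EX_PAGE_SIZE));
	return 0;
}

Under those assumptions a cache file of 1024 pages (4 MiB) or more now fails io_ctl_init() with -ENOSPC before any page is touched.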
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8925f66a1411..3668048e16f8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -693,7 +693,7 @@ retry:
693 ret = btrfs_reserve_extent(root, 693 ret = btrfs_reserve_extent(root,
694 async_extent->compressed_size, 694 async_extent->compressed_size,
695 async_extent->compressed_size, 695 async_extent->compressed_size,
696 0, alloc_hint, &ins, 1); 696 0, alloc_hint, &ins, 1, 1);
697 if (ret) { 697 if (ret) {
698 int i; 698 int i;
699 699
@@ -794,7 +794,7 @@ retry:
794out: 794out:
795 return ret; 795 return ret;
796out_free_reserve: 796out_free_reserve:
797 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 797 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
798out_free: 798out_free:
799 extent_clear_unlock_delalloc(inode, async_extent->start, 799 extent_clear_unlock_delalloc(inode, async_extent->start,
800 async_extent->start + 800 async_extent->start +
@@ -917,7 +917,7 @@ static noinline int cow_file_range(struct inode *inode,
917 cur_alloc_size = disk_num_bytes; 917 cur_alloc_size = disk_num_bytes;
918 ret = btrfs_reserve_extent(root, cur_alloc_size, 918 ret = btrfs_reserve_extent(root, cur_alloc_size,
919 root->sectorsize, 0, alloc_hint, 919 root->sectorsize, 0, alloc_hint,
920 &ins, 1); 920 &ins, 1, 1);
921 if (ret < 0) 921 if (ret < 0)
922 goto out_unlock; 922 goto out_unlock;
923 923
@@ -995,7 +995,7 @@ out:
995 return ret; 995 return ret;
996 996
997out_reserve: 997out_reserve:
998 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 998 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
999out_unlock: 999out_unlock:
1000 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1000 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1001 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 1001 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
@@ -2599,6 +2599,21 @@ out_kfree:
2599 return NULL; 2599 return NULL;
2600} 2600}
2601 2601
2602static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2603 u64 start, u64 len)
2604{
2605 struct btrfs_block_group_cache *cache;
2606
2607 cache = btrfs_lookup_block_group(root->fs_info, start);
2608 ASSERT(cache);
2609
2610 spin_lock(&cache->lock);
2611 cache->delalloc_bytes -= len;
2612 spin_unlock(&cache->lock);
2613
2614 btrfs_put_block_group(cache);
2615}
2616
2602/* as ordered data IO finishes, this gets called so we can finish 2617/* as ordered data IO finishes, this gets called so we can finish
2603 * an ordered extent if the range of bytes in the file it covers are 2618 * an ordered extent if the range of bytes in the file it covers are
2604 * fully written. 2619 * fully written.
@@ -2698,6 +2713,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2698 logical_len, logical_len, 2713 logical_len, logical_len,
2699 compress_type, 0, 0, 2714 compress_type, 0, 0,
2700 BTRFS_FILE_EXTENT_REG); 2715 BTRFS_FILE_EXTENT_REG);
2716 if (!ret)
2717 btrfs_release_delalloc_bytes(root,
2718 ordered_extent->start,
2719 ordered_extent->disk_len);
2701 } 2720 }
2702 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2721 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2703 ordered_extent->file_offset, ordered_extent->len, 2722 ordered_extent->file_offset, ordered_extent->len,
@@ -2750,7 +2769,7 @@ out:
2750 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2769 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2751 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 2770 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2752 btrfs_free_reserved_extent(root, ordered_extent->start, 2771 btrfs_free_reserved_extent(root, ordered_extent->start,
2753 ordered_extent->disk_len); 2772 ordered_extent->disk_len, 1);
2754 } 2773 }
2755 2774
2756 2775
@@ -6535,21 +6554,21 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6535 6554
6536 alloc_hint = get_extent_allocation_hint(inode, start, len); 6555 alloc_hint = get_extent_allocation_hint(inode, start, len);
6537 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 6556 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
6538 alloc_hint, &ins, 1); 6557 alloc_hint, &ins, 1, 1);
6539 if (ret) 6558 if (ret)
6540 return ERR_PTR(ret); 6559 return ERR_PTR(ret);
6541 6560
6542 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, 6561 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
6543 ins.offset, ins.offset, ins.offset, 0); 6562 ins.offset, ins.offset, ins.offset, 0);
6544 if (IS_ERR(em)) { 6563 if (IS_ERR(em)) {
6545 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 6564 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
6546 return em; 6565 return em;
6547 } 6566 }
6548 6567
6549 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 6568 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6550 ins.offset, ins.offset, 0); 6569 ins.offset, ins.offset, 0);
6551 if (ret) { 6570 if (ret) {
6552 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 6571 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
6553 free_extent_map(em); 6572 free_extent_map(em);
6554 return ERR_PTR(ret); 6573 return ERR_PTR(ret);
6555 } 6574 }
@@ -7437,7 +7456,7 @@ free_ordered:
7437 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 7456 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7438 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 7457 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7439 btrfs_free_reserved_extent(root, ordered->start, 7458 btrfs_free_reserved_extent(root, ordered->start,
7440 ordered->disk_len); 7459 ordered->disk_len, 1);
7441 btrfs_put_ordered_extent(ordered); 7460 btrfs_put_ordered_extent(ordered);
7442 btrfs_put_ordered_extent(ordered); 7461 btrfs_put_ordered_extent(ordered);
7443 } 7462 }
@@ -8808,7 +8827,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8808 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); 8827 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
8809 cur_bytes = max(cur_bytes, min_size); 8828 cur_bytes = max(cur_bytes, min_size);
8810 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, 8829 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
8811 *alloc_hint, &ins, 1); 8830 *alloc_hint, &ins, 1, 0);
8812 if (ret) { 8831 if (ret) {
8813 if (own_trans) 8832 if (own_trans)
8814 btrfs_end_transaction(trans, root); 8833 btrfs_end_transaction(trans, root);
@@ -8822,7 +8841,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8822 BTRFS_FILE_EXTENT_PREALLOC); 8841 BTRFS_FILE_EXTENT_PREALLOC);
8823 if (ret) { 8842 if (ret) {
8824 btrfs_free_reserved_extent(root, ins.objectid, 8843 btrfs_free_reserved_extent(root, ins.objectid,
8825 ins.offset); 8844 ins.offset, 0);
8826 btrfs_abort_transaction(trans, root, ret); 8845 btrfs_abort_transaction(trans, root, ret);
8827 if (own_trans) 8846 if (own_trans)
8828 btrfs_end_transaction(trans, root); 8847 btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 82c18ba12e3f..0d321c23069a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1957,7 +1957,8 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1957 struct btrfs_path *path, 1957 struct btrfs_path *path,
1958 struct btrfs_key *key, 1958 struct btrfs_key *key,
1959 struct btrfs_ioctl_search_key *sk, 1959 struct btrfs_ioctl_search_key *sk,
1960 char *buf, 1960 size_t *buf_size,
1961 char __user *ubuf,
1961 unsigned long *sk_offset, 1962 unsigned long *sk_offset,
1962 int *num_found) 1963 int *num_found)
1963{ 1964{
@@ -1989,13 +1990,25 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1989 if (!key_in_sk(key, sk)) 1990 if (!key_in_sk(key, sk))
1990 continue; 1991 continue;
1991 1992
1992 if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE) 1993 if (sizeof(sh) + item_len > *buf_size) {
1994 if (*num_found) {
1995 ret = 1;
1996 goto out;
1997 }
1998
1999 /*
2000 * return one empty item back for v1, which does not
2001 * handle -EOVERFLOW
2002 */
2003
2004 *buf_size = sizeof(sh) + item_len;
1993 item_len = 0; 2005 item_len = 0;
2006 ret = -EOVERFLOW;
2007 }
1994 2008
1995 if (sizeof(sh) + item_len + *sk_offset > 2009 if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
1996 BTRFS_SEARCH_ARGS_BUFSIZE) {
1997 ret = 1; 2010 ret = 1;
1998 goto overflow; 2011 goto out;
1999 } 2012 }
2000 2013
2001 sh.objectid = key->objectid; 2014 sh.objectid = key->objectid;
@@ -2005,20 +2018,33 @@ static noinline int copy_to_sk(struct btrfs_root *root,
2005 sh.transid = found_transid; 2018 sh.transid = found_transid;
2006 2019
2007 /* copy search result header */ 2020 /* copy search result header */
2008 memcpy(buf + *sk_offset, &sh, sizeof(sh)); 2021 if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
2022 ret = -EFAULT;
2023 goto out;
2024 }
2025
2009 *sk_offset += sizeof(sh); 2026 *sk_offset += sizeof(sh);
2010 2027
2011 if (item_len) { 2028 if (item_len) {
2012 char *p = buf + *sk_offset; 2029 char __user *up = ubuf + *sk_offset;
2013 /* copy the item */ 2030 /* copy the item */
2014 read_extent_buffer(leaf, p, 2031 if (read_extent_buffer_to_user(leaf, up,
2015 item_off, item_len); 2032 item_off, item_len)) {
2033 ret = -EFAULT;
2034 goto out;
2035 }
2036
2016 *sk_offset += item_len; 2037 *sk_offset += item_len;
2017 } 2038 }
2018 (*num_found)++; 2039 (*num_found)++;
2019 2040
2020 if (*num_found >= sk->nr_items) 2041 if (ret) /* -EOVERFLOW from above */
2021 break; 2042 goto out;
2043
2044 if (*num_found >= sk->nr_items) {
2045 ret = 1;
2046 goto out;
2047 }
2022 } 2048 }
2023advance_key: 2049advance_key:
2024 ret = 0; 2050 ret = 0;
@@ -2033,22 +2059,37 @@ advance_key:
2033 key->objectid++; 2059 key->objectid++;
2034 } else 2060 } else
2035 ret = 1; 2061 ret = 1;
2036overflow: 2062out:
2063 /*
2064 * 0: all items from this leaf copied, continue with next
 2065 * 1: * more items can be copied, but the unused buffer is too small
 2066 * * all items were found
 2067 * Either way, it will stop the loop which iterates to the next
 2068 * leaf
 2069 * -EOVERFLOW: item was too large for the buffer
2070 * -EFAULT: could not copy extent buffer back to userspace
2071 */
2037 return ret; 2072 return ret;
2038} 2073}
2039 2074
2040static noinline int search_ioctl(struct inode *inode, 2075static noinline int search_ioctl(struct inode *inode,
2041 struct btrfs_ioctl_search_args *args) 2076 struct btrfs_ioctl_search_key *sk,
2077 size_t *buf_size,
2078 char __user *ubuf)
2042{ 2079{
2043 struct btrfs_root *root; 2080 struct btrfs_root *root;
2044 struct btrfs_key key; 2081 struct btrfs_key key;
2045 struct btrfs_path *path; 2082 struct btrfs_path *path;
2046 struct btrfs_ioctl_search_key *sk = &args->key;
2047 struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info; 2083 struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
2048 int ret; 2084 int ret;
2049 int num_found = 0; 2085 int num_found = 0;
2050 unsigned long sk_offset = 0; 2086 unsigned long sk_offset = 0;
2051 2087
2088 if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2089 *buf_size = sizeof(struct btrfs_ioctl_search_header);
2090 return -EOVERFLOW;
2091 }
2092
2052 path = btrfs_alloc_path(); 2093 path = btrfs_alloc_path();
2053 if (!path) 2094 if (!path)
2054 return -ENOMEM; 2095 return -ENOMEM;
@@ -2082,14 +2123,15 @@ static noinline int search_ioctl(struct inode *inode,
2082 ret = 0; 2123 ret = 0;
2083 goto err; 2124 goto err;
2084 } 2125 }
2085 ret = copy_to_sk(root, path, &key, sk, args->buf, 2126 ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf,
2086 &sk_offset, &num_found); 2127 &sk_offset, &num_found);
2087 btrfs_release_path(path); 2128 btrfs_release_path(path);
2088 if (ret || num_found >= sk->nr_items) 2129 if (ret)
2089 break; 2130 break;
2090 2131
2091 } 2132 }
2092 ret = 0; 2133 if (ret > 0)
2134 ret = 0;
2093err: 2135err:
2094 sk->nr_items = num_found; 2136 sk->nr_items = num_found;
2095 btrfs_free_path(path); 2137 btrfs_free_path(path);
@@ -2099,22 +2141,73 @@ err:
2099static noinline int btrfs_ioctl_tree_search(struct file *file, 2141static noinline int btrfs_ioctl_tree_search(struct file *file,
2100 void __user *argp) 2142 void __user *argp)
2101{ 2143{
2102 struct btrfs_ioctl_search_args *args; 2144 struct btrfs_ioctl_search_args __user *uargs;
2103 struct inode *inode; 2145 struct btrfs_ioctl_search_key sk;
2104 int ret; 2146 struct inode *inode;
2147 int ret;
2148 size_t buf_size;
2105 2149
2106 if (!capable(CAP_SYS_ADMIN)) 2150 if (!capable(CAP_SYS_ADMIN))
2107 return -EPERM; 2151 return -EPERM;
2108 2152
2109 args = memdup_user(argp, sizeof(*args)); 2153 uargs = (struct btrfs_ioctl_search_args __user *)argp;
2110 if (IS_ERR(args)) 2154
2111 return PTR_ERR(args); 2155 if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2156 return -EFAULT;
2157
2158 buf_size = sizeof(uargs->buf);
2112 2159
2113 inode = file_inode(file); 2160 inode = file_inode(file);
2114 ret = search_ioctl(inode, args); 2161 ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
2115 if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) 2162
2163 /*
 2164 * In the original implementation an overflow is handled by returning a
2165 * search header with a len of zero, so reset ret.
2166 */
2167 if (ret == -EOVERFLOW)
2168 ret = 0;
2169
2170 if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
2116 ret = -EFAULT; 2171 ret = -EFAULT;
2117 kfree(args); 2172 return ret;
2173}
2174
2175static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
2176 void __user *argp)
2177{
2178 struct btrfs_ioctl_search_args_v2 __user *uarg;
2179 struct btrfs_ioctl_search_args_v2 args;
2180 struct inode *inode;
2181 int ret;
2182 size_t buf_size;
2183 const size_t buf_limit = 16 * 1024 * 1024;
2184
2185 if (!capable(CAP_SYS_ADMIN))
2186 return -EPERM;
2187
2188 /* copy search header and buffer size */
2189 uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
2190 if (copy_from_user(&args, uarg, sizeof(args)))
2191 return -EFAULT;
2192
2193 buf_size = args.buf_size;
2194
2195 if (buf_size < sizeof(struct btrfs_ioctl_search_header))
2196 return -EOVERFLOW;
2197
2198 /* limit result size to 16MB */
2199 if (buf_size > buf_limit)
2200 buf_size = buf_limit;
2201
2202 inode = file_inode(file);
2203 ret = search_ioctl(inode, &args.key, &buf_size,
2204 (char *)(&uarg->buf[0]));
2205 if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2206 ret = -EFAULT;
2207 else if (ret == -EOVERFLOW &&
2208 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
2209 ret = -EFAULT;
2210
2118 return ret; 2211 return ret;
2119} 2212}
2120 2213
@@ -5198,6 +5291,8 @@ long btrfs_ioctl(struct file *file, unsigned int
5198 return btrfs_ioctl_trans_end(file); 5291 return btrfs_ioctl_trans_end(file);
5199 case BTRFS_IOC_TREE_SEARCH: 5292 case BTRFS_IOC_TREE_SEARCH:
5200 return btrfs_ioctl_tree_search(file, argp); 5293 return btrfs_ioctl_tree_search(file, argp);
5294 case BTRFS_IOC_TREE_SEARCH_V2:
5295 return btrfs_ioctl_tree_search_v2(file, argp);
5201 case BTRFS_IOC_INO_LOOKUP: 5296 case BTRFS_IOC_INO_LOOKUP:
5202 return btrfs_ioctl_ino_lookup(file, argp); 5297 return btrfs_ioctl_ino_lookup(file, argp);
5203 case BTRFS_IOC_INO_PATHS: 5298 case BTRFS_IOC_INO_PATHS:
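The ioctl.c changes above drop the fixed BTRFS_SEARCH_ARGS_BUFSIZE bounce buffer, copy results straight to user memory, and add BTRFS_IOC_TREE_SEARCH_V2, where the caller supplies the buffer size (capped at 16 MiB) and -EOVERFLOW reports the space needed for an item that did not fit. A sketch of a user-space caller, assuming a uapi <linux/btrfs.h> new enough to carry the v2 struct and ioctl definition from this series; the wide-open search key is just an example:

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	size_t buf_size = 64 * 1024;   /* caller-chosen result buffer */
	struct btrfs_ioctl_search_args_v2 *args;
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path on a btrfs mount>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	args = calloc(1, sizeof(*args) + buf_size);
	if (!args) {
		close(fd);
		return 1;
	}
	args->buf_size = buf_size;
	/* tree_id == 0 searches the tree the fd belongs to; match everything */
	args->key.max_objectid = UINT64_MAX;
	args->key.max_offset   = UINT64_MAX;
	args->key.max_transid  = UINT64_MAX;
	args->key.max_type     = UINT32_MAX;
	args->key.nr_items     = UINT32_MAX;

	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
	if (ret == 0)
		printf("%u items copied into the buffer\n", args->key.nr_items);
	else if (errno == EOVERFLOW)
		fprintf(stderr, "buffer too small, need %llu bytes\n",
			(unsigned long long)args->buf_size);
	else
		perror("BTRFS_IOC_TREE_SEARCH_V2");

	free(args);
	close(fd);
	return ret ? 1 : 0;
}

On success the kernel rewrites key.nr_items with the number of items returned; on -EOVERFLOW it rewrites buf_size with the size of the search header plus the one item that did not fit, so the caller can retry with a larger buffer.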
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 01277b8f2373..5665d2149249 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -33,14 +33,14 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
33 */ 33 */
34void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) 34void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
35{ 35{
36 if (eb->lock_nested) { 36 /*
37 read_lock(&eb->lock); 37 * no lock is required. The lock owner may change if
38 if (eb->lock_nested && current->pid == eb->lock_owner) { 38 * we have a read lock, but it won't change to or away
39 read_unlock(&eb->lock); 39 * from us. If we have the write lock, we are the owner
40 return; 40 * and it'll never change.
41 } 41 */
42 read_unlock(&eb->lock); 42 if (eb->lock_nested && current->pid == eb->lock_owner)
43 } 43 return;
44 if (rw == BTRFS_WRITE_LOCK) { 44 if (rw == BTRFS_WRITE_LOCK) {
45 if (atomic_read(&eb->blocking_writers) == 0) { 45 if (atomic_read(&eb->blocking_writers) == 0) {
46 WARN_ON(atomic_read(&eb->spinning_writers) != 1); 46 WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -65,14 +65,15 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
65 */ 65 */
66void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) 66void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
67{ 67{
68 if (eb->lock_nested) { 68 /*
69 read_lock(&eb->lock); 69 * no lock is required. The lock owner may change if
70 if (eb->lock_nested && current->pid == eb->lock_owner) { 70 * we have a read lock, but it won't change to or away
71 read_unlock(&eb->lock); 71 * from us. If we have the write lock, we are the owner
72 return; 72 * and it'll never change.
73 } 73 */
74 read_unlock(&eb->lock); 74 if (eb->lock_nested && current->pid == eb->lock_owner)
75 } 75 return;
76
76 if (rw == BTRFS_WRITE_LOCK_BLOCKING) { 77 if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
77 BUG_ON(atomic_read(&eb->blocking_writers) != 1); 78 BUG_ON(atomic_read(&eb->blocking_writers) != 1);
78 write_lock(&eb->lock); 79 write_lock(&eb->lock);
@@ -99,6 +100,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
99void btrfs_tree_read_lock(struct extent_buffer *eb) 100void btrfs_tree_read_lock(struct extent_buffer *eb)
100{ 101{
101again: 102again:
103 BUG_ON(!atomic_read(&eb->blocking_writers) &&
104 current->pid == eb->lock_owner);
105
102 read_lock(&eb->lock); 106 read_lock(&eb->lock);
103 if (atomic_read(&eb->blocking_writers) && 107 if (atomic_read(&eb->blocking_writers) &&
104 current->pid == eb->lock_owner) { 108 current->pid == eb->lock_owner) {
@@ -132,7 +136,9 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
132 if (atomic_read(&eb->blocking_writers)) 136 if (atomic_read(&eb->blocking_writers))
133 return 0; 137 return 0;
134 138
135 read_lock(&eb->lock); 139 if (!read_trylock(&eb->lock))
140 return 0;
141
136 if (atomic_read(&eb->blocking_writers)) { 142 if (atomic_read(&eb->blocking_writers)) {
137 read_unlock(&eb->lock); 143 read_unlock(&eb->lock);
138 return 0; 144 return 0;
@@ -151,7 +157,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
151 if (atomic_read(&eb->blocking_writers) || 157 if (atomic_read(&eb->blocking_writers) ||
152 atomic_read(&eb->blocking_readers)) 158 atomic_read(&eb->blocking_readers))
153 return 0; 159 return 0;
154 write_lock(&eb->lock); 160
161 if (!write_trylock(&eb->lock))
162 return 0;
163
155 if (atomic_read(&eb->blocking_writers) || 164 if (atomic_read(&eb->blocking_writers) ||
156 atomic_read(&eb->blocking_readers)) { 165 atomic_read(&eb->blocking_readers)) {
157 write_unlock(&eb->lock); 166 write_unlock(&eb->lock);
@@ -168,14 +177,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
168 */ 177 */
169void btrfs_tree_read_unlock(struct extent_buffer *eb) 178void btrfs_tree_read_unlock(struct extent_buffer *eb)
170{ 179{
171 if (eb->lock_nested) { 180 /*
172 read_lock(&eb->lock); 181 * if we're nested, we have the write lock. No new locking
173 if (eb->lock_nested && current->pid == eb->lock_owner) { 182 * is needed as long as we are the lock owner.
174 eb->lock_nested = 0; 183 * The write unlock will do a barrier for us, and the lock_nested
175 read_unlock(&eb->lock); 184 * field only matters to the lock owner.
176 return; 185 */
177 } 186 if (eb->lock_nested && current->pid == eb->lock_owner) {
178 read_unlock(&eb->lock); 187 eb->lock_nested = 0;
188 return;
179 } 189 }
180 btrfs_assert_tree_read_locked(eb); 190 btrfs_assert_tree_read_locked(eb);
181 WARN_ON(atomic_read(&eb->spinning_readers) == 0); 191 WARN_ON(atomic_read(&eb->spinning_readers) == 0);
@@ -189,14 +199,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
189 */ 199 */
190void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) 200void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
191{ 201{
192 if (eb->lock_nested) { 202 /*
193 read_lock(&eb->lock); 203 * if we're nested, we have the write lock. No new locking
194 if (eb->lock_nested && current->pid == eb->lock_owner) { 204 * is needed as long as we are the lock owner.
195 eb->lock_nested = 0; 205 * The write unlock will do a barrier for us, and the lock_nested
196 read_unlock(&eb->lock); 206 * field only matters to the lock owner.
197 return; 207 */
198 } 208 if (eb->lock_nested && current->pid == eb->lock_owner) {
199 read_unlock(&eb->lock); 209 eb->lock_nested = 0;
210 return;
200 } 211 }
201 btrfs_assert_tree_read_locked(eb); 212 btrfs_assert_tree_read_locked(eb);
202 WARN_ON(atomic_read(&eb->blocking_readers) == 0); 213 WARN_ON(atomic_read(&eb->blocking_readers) == 0);
@@ -244,6 +255,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
244 BUG_ON(blockers > 1); 255 BUG_ON(blockers > 1);
245 256
246 btrfs_assert_tree_locked(eb); 257 btrfs_assert_tree_locked(eb);
258 eb->lock_owner = 0;
247 atomic_dec(&eb->write_locks); 259 atomic_dec(&eb->write_locks);
248 260
249 if (blockers) { 261 if (blockers) {
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index cf5aead95a7f..98cb6b2630f9 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1798,8 +1798,10 @@ static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
1798 return -ENOMEM; 1798 return -ENOMEM;
1799 1799
1800 tmp = ulist_alloc(GFP_NOFS); 1800 tmp = ulist_alloc(GFP_NOFS);
1801 if (!tmp) 1801 if (!tmp) {
1802 ulist_free(qgroups);
1802 return -ENOMEM; 1803 return -ENOMEM;
1804 }
1803 1805
1804 btrfs_get_tree_mod_seq(fs_info, &elem); 1806 btrfs_get_tree_mod_seq(fs_info, &elem);
1805 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq, 1807 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 30947f923620..09230cf3a244 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -428,8 +428,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
428 continue; 428 continue;
429 } 429 }
430 if (!dev->bdev) { 430 if (!dev->bdev) {
431 /* cannot read ahead on missing device */ 431 /*
432 continue; 432 * cannot read ahead on missing device, but for RAID5/6,
 433 * REQ_GET_READ_MIRRORS returns 1. So don't skip the missing
 434 * device in that case.
435 */
436 if (nzones > 1)
437 continue;
433 } 438 }
434 if (dev_replace_is_ongoing && 439 if (dev_replace_is_ongoing &&
435 dev == fs_info->dev_replace.tgtdev) { 440 dev == fs_info->dev_replace.tgtdev) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ac80188eec88..b6d198f5181e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2725,11 +2725,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2725 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 2725 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2726 length = btrfs_dev_extent_length(l, dev_extent); 2726 length = btrfs_dev_extent_length(l, dev_extent);
2727 2727
2728 if (found_key.offset + length <= start) { 2728 if (found_key.offset + length <= start)
2729 key.offset = found_key.offset + length; 2729 goto skip;
2730 btrfs_release_path(path);
2731 continue;
2732 }
2733 2730
2734 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); 2731 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2735 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); 2732 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2740,10 +2737,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2740 * the chunk from going away while we scrub it 2737 * the chunk from going away while we scrub it
2741 */ 2738 */
2742 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2739 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2743 if (!cache) { 2740
2744 ret = -ENOENT; 2741 /* some chunks are removed but not committed to disk yet,
2745 break; 2742 * continue scrubbing */
2746 } 2743 if (!cache)
2744 goto skip;
2745
2747 dev_replace->cursor_right = found_key.offset + length; 2746 dev_replace->cursor_right = found_key.offset + length;
2748 dev_replace->cursor_left = found_key.offset; 2747 dev_replace->cursor_left = found_key.offset;
2749 dev_replace->item_needs_writeback = 1; 2748 dev_replace->item_needs_writeback = 1;
@@ -2802,7 +2801,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2802 2801
2803 dev_replace->cursor_left = dev_replace->cursor_right; 2802 dev_replace->cursor_left = dev_replace->cursor_right;
2804 dev_replace->item_needs_writeback = 1; 2803 dev_replace->item_needs_writeback = 1;
2805 2804skip:
2806 key.offset = found_key.offset + length; 2805 key.offset = found_key.offset + length;
2807 btrfs_release_path(path); 2806 btrfs_release_path(path);
2808 } 2807 }
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index a5dcacb5df9c..9626252ee6b4 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -135,7 +135,7 @@ restart:
135 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { 135 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
136 struct extent_buffer *eb; 136 struct extent_buffer *eb;
137 137
138 eb = radix_tree_deref_slot(slot); 138 eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
139 if (!eb) 139 if (!eb)
140 continue; 140 continue;
141 /* Shouldn't happen but that kind of thinking creates CVE's */ 141 /* Shouldn't happen but that kind of thinking creates CVE's */
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index fa691b754aaf..ec3dcb202357 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -415,6 +415,8 @@ int btrfs_test_qgroups(void)
415 ret = -ENOMEM; 415 ret = -ENOMEM;
416 goto out; 416 goto out;
417 } 417 }
418 btrfs_set_header_level(root->node, 0);
419 btrfs_set_header_nritems(root->node, 0);
418 root->alloc_bytenr += 8192; 420 root->alloc_bytenr += 8192;
419 421
420 tmp_root = btrfs_alloc_dummy_root(); 422 tmp_root = btrfs_alloc_dummy_root();
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9630f10f8e1e..511839c04f11 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1284,11 +1284,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1284 goto fail; 1284 goto fail;
1285 } 1285 }
1286 1286
1287 pending->error = btrfs_qgroup_inherit(trans, fs_info, 1287 ret = btrfs_qgroup_inherit(trans, fs_info,
1288 root->root_key.objectid, 1288 root->root_key.objectid,
1289 objectid, pending->inherit); 1289 objectid, pending->inherit);
1290 if (pending->error) 1290 if (ret) {
1291 goto no_free_objectid; 1291 btrfs_abort_transaction(trans, root, ret);
1292 goto fail;
1293 }
1292 1294
1293 /* see comments in should_cow_block() */ 1295 /* see comments in should_cow_block() */
1294 set_bit(BTRFS_ROOT_FORCE_COW, &root->state); 1296 set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ffeed6d6326f..c83b24251e53 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2543,9 +2543,6 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
2543 remove_extent_mapping(em_tree, em); 2543 remove_extent_mapping(em_tree, em);
2544 write_unlock(&em_tree->lock); 2544 write_unlock(&em_tree->lock);
2545 2545
2546 kfree(map);
2547 em->bdev = NULL;
2548
2549 /* once for the tree */ 2546 /* once for the tree */
2550 free_extent_map(em); 2547 free_extent_map(em);
2551 /* once for us */ 2548 /* once for us */
@@ -4301,9 +4298,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4301 4298
4302 em = alloc_extent_map(); 4299 em = alloc_extent_map();
4303 if (!em) { 4300 if (!em) {
4301 kfree(map);
4304 ret = -ENOMEM; 4302 ret = -ENOMEM;
4305 goto error; 4303 goto error;
4306 } 4304 }
4305 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4307 em->bdev = (struct block_device *)map; 4306 em->bdev = (struct block_device *)map;
4308 em->start = start; 4307 em->start = start;
4309 em->len = num_bytes; 4308 em->len = num_bytes;
@@ -4346,7 +4345,6 @@ error_del_extent:
4346 /* One for the tree reference */ 4345 /* One for the tree reference */
4347 free_extent_map(em); 4346 free_extent_map(em);
4348error: 4347error:
4349 kfree(map);
4350 kfree(devices_info); 4348 kfree(devices_info);
4351 return ret; 4349 return ret;
4352} 4350}
@@ -4558,7 +4556,6 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4558 write_unlock(&tree->map_tree.lock); 4556 write_unlock(&tree->map_tree.lock);
4559 if (!em) 4557 if (!em)
4560 break; 4558 break;
4561 kfree(em->bdev);
4562 /* once for us */ 4559 /* once for us */
4563 free_extent_map(em); 4560 free_extent_map(em);
4564 /* once for the tree */ 4561 /* once for the tree */
@@ -5362,6 +5359,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5362 return 0; 5359 return 0;
5363} 5360}
5364 5361
5362static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
5363{
5364 if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
5365 bio_endio_nodec(bio, err);
5366 else
5367 bio_endio(bio, err);
5368 kfree(bbio);
5369}
5370
5365static void btrfs_end_bio(struct bio *bio, int err) 5371static void btrfs_end_bio(struct bio *bio, int err)
5366{ 5372{
5367 struct btrfs_bio *bbio = bio->bi_private; 5373 struct btrfs_bio *bbio = bio->bi_private;
@@ -5402,12 +5408,6 @@ static void btrfs_end_bio(struct bio *bio, int err)
5402 bio = bbio->orig_bio; 5408 bio = bbio->orig_bio;
5403 } 5409 }
5404 5410
5405 /*
5406 * We have original bio now. So increment bi_remaining to
5407 * account for it in endio
5408 */
5409 atomic_inc(&bio->bi_remaining);
5410
5411 bio->bi_private = bbio->private; 5411 bio->bi_private = bbio->private;
5412 bio->bi_end_io = bbio->end_io; 5412 bio->bi_end_io = bbio->end_io;
5413 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5413 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5424,9 +5424,8 @@ static void btrfs_end_bio(struct bio *bio, int err)
5424 set_bit(BIO_UPTODATE, &bio->bi_flags); 5424 set_bit(BIO_UPTODATE, &bio->bi_flags);
5425 err = 0; 5425 err = 0;
5426 } 5426 }
5427 kfree(bbio);
5428 5427
5429 bio_endio(bio, err); 5428 btrfs_end_bbio(bbio, bio, err);
5430 } else if (!is_orig_bio) { 5429 } else if (!is_orig_bio) {
5431 bio_put(bio); 5430 bio_put(bio);
5432 } 5431 }
@@ -5589,12 +5588,15 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5589{ 5588{
5590 atomic_inc(&bbio->error); 5589 atomic_inc(&bbio->error);
5591 if (atomic_dec_and_test(&bbio->stripes_pending)) { 5590 if (atomic_dec_and_test(&bbio->stripes_pending)) {
 5591 /* Should be the original bio. */
5592 WARN_ON(bio != bbio->orig_bio);
5593
5592 bio->bi_private = bbio->private; 5594 bio->bi_private = bbio->private;
5593 bio->bi_end_io = bbio->end_io; 5595 bio->bi_end_io = bbio->end_io;
5594 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5596 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5595 bio->bi_iter.bi_sector = logical >> 9; 5597 bio->bi_iter.bi_sector = logical >> 9;
5596 kfree(bbio); 5598
5597 bio_endio(bio, -EIO); 5599 btrfs_end_bbio(bbio, bio, -EIO);
5598 } 5600 }
5599} 5601}
5600 5602
@@ -5681,6 +5683,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5681 BUG_ON(!bio); /* -ENOMEM */ 5683 BUG_ON(!bio); /* -ENOMEM */
5682 } else { 5684 } else {
5683 bio = first_bio; 5685 bio = first_bio;
5686 bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
5684 } 5687 }
5685 5688
5686 submit_stripe_bio(root, bbio, bio, 5689 submit_stripe_bio(root, bbio, bio,
@@ -5822,6 +5825,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5822 return -ENOMEM; 5825 return -ENOMEM;
5823 } 5826 }
5824 5827
5828 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5825 em->bdev = (struct block_device *)map; 5829 em->bdev = (struct block_device *)map;
5826 em->start = logical; 5830 em->start = logical;
5827 em->len = length; 5831 em->len = length;
@@ -5846,7 +5850,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5846 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid, 5850 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5847 uuid, NULL); 5851 uuid, NULL);
5848 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { 5852 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5849 kfree(map);
5850 free_extent_map(em); 5853 free_extent_map(em);
5851 return -EIO; 5854 return -EIO;
5852 } 5855 }
@@ -5854,7 +5857,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5854 map->stripes[i].dev = 5857 map->stripes[i].dev =
5855 add_missing_dev(root, devid, uuid); 5858 add_missing_dev(root, devid, uuid);
5856 if (!map->stripes[i].dev) { 5859 if (!map->stripes[i].dev) {
5857 kfree(map);
5858 free_extent_map(em); 5860 free_extent_map(em);
5859 return -EIO; 5861 return -EIO;
5860 } 5862 }
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 1a15bbeb65e2..2aaa00c47816 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -190,11 +190,14 @@ struct btrfs_bio_stripe {
190struct btrfs_bio; 190struct btrfs_bio;
191typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err); 191typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
192 192
193#define BTRFS_BIO_ORIG_BIO_SUBMITTED 0x1
194
193struct btrfs_bio { 195struct btrfs_bio {
194 atomic_t stripes_pending; 196 atomic_t stripes_pending;
195 struct btrfs_fs_info *fs_info; 197 struct btrfs_fs_info *fs_info;
196 bio_end_io_t *end_io; 198 bio_end_io_t *end_io;
197 struct bio *orig_bio; 199 struct bio *orig_bio;
200 unsigned long flags;
198 void *private; 201 void *private;
199 atomic_t error; 202 atomic_t error;
200 int max_errors; 203 int max_errors;
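
The volumes.c and volumes.h hunks above add a flags word to struct btrfs_bio and a BTRFS_BIO_ORIG_BIO_SUBMITTED bit, set when the original bio is reused for a stripe, so the new btrfs_end_bbio() helper can pick the right completion call and free the tracking structure in one place. A standalone C sketch of that dispatch idea follows; the types and end_io helpers are illustrative stand-ins, not btrfs APIs.

/*
 * Standalone sketch (not btrfs code): a submit-time flag decides which
 * completion variant the shared helper calls, and the helper frees the
 * tracking structure exactly once.  All names are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define ORIG_BIO_SUBMITTED 0x1

struct fake_bio  { const char *name; };

struct fake_bbio {
	unsigned long flags;
	struct fake_bio *orig_bio;
};

static void end_io_nodec(struct fake_bio *bio, int err)
{
	printf("end_io_nodec(%s, %d)\n", bio->name, err);
}

static void end_io(struct fake_bio *bio, int err)
{
	printf("end_io(%s, %d)\n", bio->name, err);
}

/* single completion path, in the spirit of btrfs_end_bbio() above */
static void end_bbio(struct fake_bbio *bbio, struct fake_bio *bio, int err)
{
	if (bbio->flags & ORIG_BIO_SUBMITTED)
		end_io_nodec(bio, err);
	else
		end_io(bio, err);
	free(bbio);
}

int main(void)
{
	struct fake_bio orig = { "orig" };
	struct fake_bbio *bbio = calloc(1, sizeof(*bbio));

	bbio->orig_bio = &orig;
	bbio->flags |= ORIG_BIO_SUBMITTED;	/* set where the original bio is reused */
	end_bbio(bbio, bbio->orig_bio, 0);
	return 0;
}
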
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 21887d63dad5..469f2e8657e8 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -104,12 +104,6 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
104 umode_t new_mode = inode->i_mode, old_mode = inode->i_mode; 104 umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
105 struct dentry *dentry; 105 struct dentry *dentry;
106 106
107 if (acl) {
108 ret = posix_acl_valid(acl);
109 if (ret < 0)
110 goto out;
111 }
112
113 switch (type) { 107 switch (type) {
114 case ACL_TYPE_ACCESS: 108 case ACL_TYPE_ACCESS:
115 name = POSIX_ACL_XATTR_ACCESS; 109 name = POSIX_ACL_XATTR_ACCESS;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4f3f69079f36..90b3954d48ed 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -211,18 +211,15 @@ static int readpage_nounlock(struct file *filp, struct page *page)
211 SetPageError(page); 211 SetPageError(page);
212 ceph_fscache_readpage_cancel(inode, page); 212 ceph_fscache_readpage_cancel(inode, page);
213 goto out; 213 goto out;
214 } else {
215 if (err < PAGE_CACHE_SIZE) {
216 /* zero fill remainder of page */
217 zero_user_segment(page, err, PAGE_CACHE_SIZE);
218 } else {
219 flush_dcache_page(page);
220 }
221 } 214 }
222 SetPageUptodate(page); 215 if (err < PAGE_CACHE_SIZE)
216 /* zero fill remainder of page */
217 zero_user_segment(page, err, PAGE_CACHE_SIZE);
218 else
219 flush_dcache_page(page);
223 220
224 if (err >= 0) 221 SetPageUptodate(page);
225 ceph_readpage_to_fscache(inode, page); 222 ceph_readpage_to_fscache(inode, page);
226 223
227out: 224out:
228 return err < 0 ? err : 0; 225 return err < 0 ? err : 0;
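
The addr.c hunk above reorders readpage_nounlock() so that a short read zero-fills the rest of the page before the page is marked up to date. A minimal standalone sketch of that handling, with a made-up page size and a pretend backend in place of the OSD read:

/*
 * Standalone sketch (not ceph code): when a read returns fewer bytes than a
 * page, the tail is zeroed before the page is published as up to date.
 * PAGE_SZ and read_some() are illustrative stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* pretend backend that hands back a short read */
static int read_some(char *buf, int len)
{
	int n = len < 100 ? len : 100;

	memset(buf, 'A', n);
	return n;
}

int main(void)
{
	char page[PAGE_SZ];
	int err = read_some(page, PAGE_SZ);

	if (err < 0)
		return 1;			/* real code jumps to its error path here */
	if (err < PAGE_SZ)			/* zero fill remainder of page */
		memset(page + err, 0, PAGE_SZ - err);

	/* only now is the whole page defined, so it may be marked up to date */
	printf("read %d bytes, last byte = %d\n", err, page[PAGE_SZ - 1]);
	return 0;
}
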
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c561b628ebce..1fde164b74b5 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -221,8 +221,8 @@ int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
221 return 0; 221 return 0;
222} 222}
223 223
224static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc, 224struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
225 struct ceph_cap_reservation *ctx) 225 struct ceph_cap_reservation *ctx)
226{ 226{
227 struct ceph_cap *cap = NULL; 227 struct ceph_cap *cap = NULL;
228 228
@@ -508,15 +508,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
508 * it is < 0. (This is so we can atomically add the cap and add an 508 * it is < 0. (This is so we can atomically add the cap and add an
509 * open file reference to it.) 509 * open file reference to it.)
510 */ 510 */
511int ceph_add_cap(struct inode *inode, 511void ceph_add_cap(struct inode *inode,
512 struct ceph_mds_session *session, u64 cap_id, 512 struct ceph_mds_session *session, u64 cap_id,
513 int fmode, unsigned issued, unsigned wanted, 513 int fmode, unsigned issued, unsigned wanted,
514 unsigned seq, unsigned mseq, u64 realmino, int flags, 514 unsigned seq, unsigned mseq, u64 realmino, int flags,
515 struct ceph_cap_reservation *caps_reservation) 515 struct ceph_cap **new_cap)
516{ 516{
517 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 517 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
518 struct ceph_inode_info *ci = ceph_inode(inode); 518 struct ceph_inode_info *ci = ceph_inode(inode);
519 struct ceph_cap *new_cap = NULL;
520 struct ceph_cap *cap; 519 struct ceph_cap *cap;
521 int mds = session->s_mds; 520 int mds = session->s_mds;
522 int actual_wanted; 521 int actual_wanted;
@@ -531,20 +530,10 @@ int ceph_add_cap(struct inode *inode,
531 if (fmode >= 0) 530 if (fmode >= 0)
532 wanted |= ceph_caps_for_mode(fmode); 531 wanted |= ceph_caps_for_mode(fmode);
533 532
534retry:
535 spin_lock(&ci->i_ceph_lock);
536 cap = __get_cap_for_mds(ci, mds); 533 cap = __get_cap_for_mds(ci, mds);
537 if (!cap) { 534 if (!cap) {
538 if (new_cap) { 535 cap = *new_cap;
539 cap = new_cap; 536 *new_cap = NULL;
540 new_cap = NULL;
541 } else {
542 spin_unlock(&ci->i_ceph_lock);
543 new_cap = get_cap(mdsc, caps_reservation);
544 if (new_cap == NULL)
545 return -ENOMEM;
546 goto retry;
547 }
548 537
549 cap->issued = 0; 538 cap->issued = 0;
550 cap->implemented = 0; 539 cap->implemented = 0;
@@ -562,9 +551,6 @@ retry:
562 session->s_nr_caps++; 551 session->s_nr_caps++;
563 spin_unlock(&session->s_cap_lock); 552 spin_unlock(&session->s_cap_lock);
564 } else { 553 } else {
565 if (new_cap)
566 ceph_put_cap(mdsc, new_cap);
567
568 /* 554 /*
569 * auth mds of the inode changed. we received the cap export 555 * auth mds of the inode changed. we received the cap export
570 * message, but still haven't received the cap import message. 556 * message, but still haven't received the cap import message.
@@ -626,7 +612,6 @@ retry:
626 ci->i_auth_cap = cap; 612 ci->i_auth_cap = cap;
627 cap->mds_wanted = wanted; 613 cap->mds_wanted = wanted;
628 } 614 }
629 ci->i_cap_exporting_issued = 0;
630 } else { 615 } else {
631 WARN_ON(ci->i_auth_cap == cap); 616 WARN_ON(ci->i_auth_cap == cap);
632 } 617 }
@@ -648,9 +633,6 @@ retry:
648 633
649 if (fmode >= 0) 634 if (fmode >= 0)
650 __ceph_get_fmode(ci, fmode); 635 __ceph_get_fmode(ci, fmode);
651 spin_unlock(&ci->i_ceph_lock);
652 wake_up_all(&ci->i_cap_wq);
653 return 0;
654} 636}
655 637
656/* 638/*
@@ -685,7 +667,7 @@ static int __cap_is_valid(struct ceph_cap *cap)
685 */ 667 */
686int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) 668int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
687{ 669{
688 int have = ci->i_snap_caps | ci->i_cap_exporting_issued; 670 int have = ci->i_snap_caps;
689 struct ceph_cap *cap; 671 struct ceph_cap *cap;
690 struct rb_node *p; 672 struct rb_node *p;
691 673
@@ -900,7 +882,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
900 */ 882 */
901static int __ceph_is_any_caps(struct ceph_inode_info *ci) 883static int __ceph_is_any_caps(struct ceph_inode_info *ci)
902{ 884{
903 return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_issued; 885 return !RB_EMPTY_ROOT(&ci->i_caps);
904} 886}
905 887
906int ceph_is_any_caps(struct inode *inode) 888int ceph_is_any_caps(struct inode *inode)
@@ -2397,32 +2379,30 @@ static void invalidate_aliases(struct inode *inode)
2397 * actually be a revocation if it specifies a smaller cap set.) 2379 * actually be a revocation if it specifies a smaller cap set.)
2398 * 2380 *
2399 * caller holds s_mutex and i_ceph_lock, we drop both. 2381 * caller holds s_mutex and i_ceph_lock, we drop both.
2400 *
2401 * return value:
2402 * 0 - ok
2403 * 1 - check_caps on auth cap only (writeback)
2404 * 2 - check_caps (ack revoke)
2405 */ 2382 */
2406static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, 2383static void handle_cap_grant(struct ceph_mds_client *mdsc,
2384 struct inode *inode, struct ceph_mds_caps *grant,
2385 void *snaptrace, int snaptrace_len,
2386 struct ceph_buffer *xattr_buf,
2407 struct ceph_mds_session *session, 2387 struct ceph_mds_session *session,
2408 struct ceph_cap *cap, 2388 struct ceph_cap *cap, int issued)
2409 struct ceph_buffer *xattr_buf) 2389 __releases(ci->i_ceph_lock)
2410 __releases(ci->i_ceph_lock)
2411{ 2390{
2412 struct ceph_inode_info *ci = ceph_inode(inode); 2391 struct ceph_inode_info *ci = ceph_inode(inode);
2413 int mds = session->s_mds; 2392 int mds = session->s_mds;
2414 int seq = le32_to_cpu(grant->seq); 2393 int seq = le32_to_cpu(grant->seq);
2415 int newcaps = le32_to_cpu(grant->caps); 2394 int newcaps = le32_to_cpu(grant->caps);
2416 int issued, implemented, used, wanted, dirty; 2395 int used, wanted, dirty;
2417 u64 size = le64_to_cpu(grant->size); 2396 u64 size = le64_to_cpu(grant->size);
2418 u64 max_size = le64_to_cpu(grant->max_size); 2397 u64 max_size = le64_to_cpu(grant->max_size);
2419 struct timespec mtime, atime, ctime; 2398 struct timespec mtime, atime, ctime;
2420 int check_caps = 0; 2399 int check_caps = 0;
2421 int wake = 0; 2400 bool wake = 0;
2422 int writeback = 0; 2401 bool writeback = 0;
2423 int queue_invalidate = 0; 2402 bool queue_trunc = 0;
2424 int deleted_inode = 0; 2403 bool queue_invalidate = 0;
2425 int queue_revalidate = 0; 2404 bool queue_revalidate = 0;
2405 bool deleted_inode = 0;
2426 2406
2427 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 2407 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2428 inode, cap, mds, seq, ceph_cap_string(newcaps)); 2408 inode, cap, mds, seq, ceph_cap_string(newcaps));
@@ -2466,16 +2446,13 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2466 } 2446 }
2467 2447
2468 /* side effects now are allowed */ 2448 /* side effects now are allowed */
2469
2470 issued = __ceph_caps_issued(ci, &implemented);
2471 issued |= implemented | __ceph_caps_dirty(ci);
2472
2473 cap->cap_gen = session->s_cap_gen; 2449 cap->cap_gen = session->s_cap_gen;
2474 cap->seq = seq; 2450 cap->seq = seq;
2475 2451
2476 __check_cap_issue(ci, cap, newcaps); 2452 __check_cap_issue(ci, cap, newcaps);
2477 2453
2478 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 2454 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2455 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2479 inode->i_mode = le32_to_cpu(grant->mode); 2456 inode->i_mode = le32_to_cpu(grant->mode);
2480 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid)); 2457 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2481 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid)); 2458 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
@@ -2484,7 +2461,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2484 from_kgid(&init_user_ns, inode->i_gid)); 2461 from_kgid(&init_user_ns, inode->i_gid));
2485 } 2462 }
2486 2463
2487 if ((issued & CEPH_CAP_LINK_EXCL) == 0) { 2464 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2465 (issued & CEPH_CAP_LINK_EXCL) == 0) {
2488 set_nlink(inode, le32_to_cpu(grant->nlink)); 2466 set_nlink(inode, le32_to_cpu(grant->nlink));
2489 if (inode->i_nlink == 0 && 2467 if (inode->i_nlink == 0 &&
2490 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL))) 2468 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
@@ -2511,30 +2489,35 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2511 if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1) 2489 if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
2512 queue_revalidate = 1; 2490 queue_revalidate = 1;
2513 2491
2514 /* size/ctime/mtime/atime? */ 2492 if (newcaps & CEPH_CAP_ANY_RD) {
2515 ceph_fill_file_size(inode, issued, 2493 /* ctime/mtime/atime? */
2516 le32_to_cpu(grant->truncate_seq), 2494 ceph_decode_timespec(&mtime, &grant->mtime);
2517 le64_to_cpu(grant->truncate_size), size); 2495 ceph_decode_timespec(&atime, &grant->atime);
2518 ceph_decode_timespec(&mtime, &grant->mtime); 2496 ceph_decode_timespec(&ctime, &grant->ctime);
2519 ceph_decode_timespec(&atime, &grant->atime); 2497 ceph_fill_file_time(inode, issued,
2520 ceph_decode_timespec(&ctime, &grant->ctime); 2498 le32_to_cpu(grant->time_warp_seq),
2521 ceph_fill_file_time(inode, issued, 2499 &ctime, &mtime, &atime);
2522 le32_to_cpu(grant->time_warp_seq), &ctime, &mtime, 2500 }
2523 &atime); 2501
2524 2502 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2525 2503 /* file layout may have changed */
2526 /* file layout may have changed */ 2504 ci->i_layout = grant->layout;
2527 ci->i_layout = grant->layout; 2505 /* size/truncate_seq? */
2528 2506 queue_trunc = ceph_fill_file_size(inode, issued,
2529 /* max size increase? */ 2507 le32_to_cpu(grant->truncate_seq),
2530 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { 2508 le64_to_cpu(grant->truncate_size),
2531 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size); 2509 size);
2532 ci->i_max_size = max_size; 2510 /* max size increase? */
2533 if (max_size >= ci->i_wanted_max_size) { 2511 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2534 ci->i_wanted_max_size = 0; /* reset */ 2512 dout("max_size %lld -> %llu\n",
2535 ci->i_requested_max_size = 0; 2513 ci->i_max_size, max_size);
2514 ci->i_max_size = max_size;
2515 if (max_size >= ci->i_wanted_max_size) {
2516 ci->i_wanted_max_size = 0; /* reset */
2517 ci->i_requested_max_size = 0;
2518 }
2519 wake = 1;
2536 } 2520 }
2537 wake = 1;
2538 } 2521 }
2539 2522
2540 /* check cap bits */ 2523 /* check cap bits */
@@ -2595,6 +2578,23 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2595 2578
2596 spin_unlock(&ci->i_ceph_lock); 2579 spin_unlock(&ci->i_ceph_lock);
2597 2580
2581 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
2582 down_write(&mdsc->snap_rwsem);
2583 ceph_update_snap_trace(mdsc, snaptrace,
2584 snaptrace + snaptrace_len, false);
2585 downgrade_write(&mdsc->snap_rwsem);
2586 kick_flushing_inode_caps(mdsc, session, inode);
2587 up_read(&mdsc->snap_rwsem);
2588 if (newcaps & ~issued)
2589 wake = 1;
2590 }
2591
2592 if (queue_trunc) {
2593 ceph_queue_vmtruncate(inode);
2594 ceph_queue_revalidate(inode);
2595 } else if (queue_revalidate)
2596 ceph_queue_revalidate(inode);
2597
2598 if (writeback) 2598 if (writeback)
2599 /* 2599 /*
2600 * queue inode for writeback: we can't actually call 2600 * queue inode for writeback: we can't actually call
@@ -2606,8 +2606,6 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2606 ceph_queue_invalidate(inode); 2606 ceph_queue_invalidate(inode);
2607 if (deleted_inode) 2607 if (deleted_inode)
2608 invalidate_aliases(inode); 2608 invalidate_aliases(inode);
2609 if (queue_revalidate)
2610 ceph_queue_revalidate(inode);
2611 if (wake) 2609 if (wake)
2612 wake_up_all(&ci->i_cap_wq); 2610 wake_up_all(&ci->i_cap_wq);
2613 2611
@@ -2784,7 +2782,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2784{ 2782{
2785 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 2783 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2786 struct ceph_mds_session *tsession = NULL; 2784 struct ceph_mds_session *tsession = NULL;
2787 struct ceph_cap *cap, *tcap; 2785 struct ceph_cap *cap, *tcap, *new_cap = NULL;
2788 struct ceph_inode_info *ci = ceph_inode(inode); 2786 struct ceph_inode_info *ci = ceph_inode(inode);
2789 u64 t_cap_id; 2787 u64 t_cap_id;
2790 unsigned mseq = le32_to_cpu(ex->migrate_seq); 2788 unsigned mseq = le32_to_cpu(ex->migrate_seq);
@@ -2807,7 +2805,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2807retry: 2805retry:
2808 spin_lock(&ci->i_ceph_lock); 2806 spin_lock(&ci->i_ceph_lock);
2809 cap = __get_cap_for_mds(ci, mds); 2807 cap = __get_cap_for_mds(ci, mds);
2810 if (!cap) 2808 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
2811 goto out_unlock; 2809 goto out_unlock;
2812 2810
2813 if (target < 0) { 2811 if (target < 0) {
@@ -2846,15 +2844,14 @@ retry:
2846 } 2844 }
2847 __ceph_remove_cap(cap, false); 2845 __ceph_remove_cap(cap, false);
2848 goto out_unlock; 2846 goto out_unlock;
2849 } 2847 } else if (tsession) {
2850
2851 if (tsession) {
2852 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
2853 spin_unlock(&ci->i_ceph_lock);
 2854 /* add placeholder for the export target */ 2848 /* add placeholder for the export target */
2849 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
2855 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0, 2850 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
2856 t_seq - 1, t_mseq, (u64)-1, flag, NULL); 2851 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
2857 goto retry; 2852
2853 __ceph_remove_cap(cap, false);
2854 goto out_unlock;
2858 } 2855 }
2859 2856
2860 spin_unlock(&ci->i_ceph_lock); 2857 spin_unlock(&ci->i_ceph_lock);
@@ -2873,6 +2870,7 @@ retry:
2873 SINGLE_DEPTH_NESTING); 2870 SINGLE_DEPTH_NESTING);
2874 } 2871 }
2875 ceph_add_cap_releases(mdsc, tsession); 2872 ceph_add_cap_releases(mdsc, tsession);
2873 new_cap = ceph_get_cap(mdsc, NULL);
2876 } else { 2874 } else {
2877 WARN_ON(1); 2875 WARN_ON(1);
2878 tsession = NULL; 2876 tsession = NULL;
@@ -2887,24 +2885,27 @@ out_unlock:
2887 mutex_unlock(&tsession->s_mutex); 2885 mutex_unlock(&tsession->s_mutex);
2888 ceph_put_mds_session(tsession); 2886 ceph_put_mds_session(tsession);
2889 } 2887 }
2888 if (new_cap)
2889 ceph_put_cap(mdsc, new_cap);
2890} 2890}
2891 2891
2892/* 2892/*
2893 * Handle cap IMPORT. If there are temp bits from an older EXPORT, 2893 * Handle cap IMPORT.
2894 * clean them up.
2895 * 2894 *
2896 * caller holds s_mutex. 2895 * caller holds s_mutex. acquires i_ceph_lock
2897 */ 2896 */
2898static void handle_cap_import(struct ceph_mds_client *mdsc, 2897static void handle_cap_import(struct ceph_mds_client *mdsc,
2899 struct inode *inode, struct ceph_mds_caps *im, 2898 struct inode *inode, struct ceph_mds_caps *im,
2900 struct ceph_mds_cap_peer *ph, 2899 struct ceph_mds_cap_peer *ph,
2901 struct ceph_mds_session *session, 2900 struct ceph_mds_session *session,
2902 void *snaptrace, int snaptrace_len) 2901 struct ceph_cap **target_cap, int *old_issued)
2902 __acquires(ci->i_ceph_lock)
2903{ 2903{
2904 struct ceph_inode_info *ci = ceph_inode(inode); 2904 struct ceph_inode_info *ci = ceph_inode(inode);
2905 struct ceph_cap *cap; 2905 struct ceph_cap *cap, *ocap, *new_cap = NULL;
2906 int mds = session->s_mds; 2906 int mds = session->s_mds;
2907 unsigned issued = le32_to_cpu(im->caps); 2907 int issued;
2908 unsigned caps = le32_to_cpu(im->caps);
2908 unsigned wanted = le32_to_cpu(im->wanted); 2909 unsigned wanted = le32_to_cpu(im->wanted);
2909 unsigned seq = le32_to_cpu(im->seq); 2910 unsigned seq = le32_to_cpu(im->seq);
2910 unsigned mseq = le32_to_cpu(im->migrate_seq); 2911 unsigned mseq = le32_to_cpu(im->migrate_seq);
@@ -2924,40 +2925,52 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2924 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n", 2925 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
2925 inode, ci, mds, mseq, peer); 2926 inode, ci, mds, mseq, peer);
2926 2927
2928retry:
2927 spin_lock(&ci->i_ceph_lock); 2929 spin_lock(&ci->i_ceph_lock);
2928 cap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL; 2930 cap = __get_cap_for_mds(ci, mds);
2929 if (cap && cap->cap_id == p_cap_id) { 2931 if (!cap) {
2932 if (!new_cap) {
2933 spin_unlock(&ci->i_ceph_lock);
2934 new_cap = ceph_get_cap(mdsc, NULL);
2935 goto retry;
2936 }
2937 cap = new_cap;
2938 } else {
2939 if (new_cap) {
2940 ceph_put_cap(mdsc, new_cap);
2941 new_cap = NULL;
2942 }
2943 }
2944
2945 __ceph_caps_issued(ci, &issued);
2946 issued |= __ceph_caps_dirty(ci);
2947
2948 ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
2949 realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
2950
2951 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
2952 if (ocap && ocap->cap_id == p_cap_id) {
2930 dout(" remove export cap %p mds%d flags %d\n", 2953 dout(" remove export cap %p mds%d flags %d\n",
2931 cap, peer, ph->flags); 2954 ocap, peer, ph->flags);
2932 if ((ph->flags & CEPH_CAP_FLAG_AUTH) && 2955 if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
2933 (cap->seq != le32_to_cpu(ph->seq) || 2956 (ocap->seq != le32_to_cpu(ph->seq) ||
2934 cap->mseq != le32_to_cpu(ph->mseq))) { 2957 ocap->mseq != le32_to_cpu(ph->mseq))) {
2935 pr_err("handle_cap_import: mismatched seq/mseq: " 2958 pr_err("handle_cap_import: mismatched seq/mseq: "
2936 "ino (%llx.%llx) mds%d seq %d mseq %d " 2959 "ino (%llx.%llx) mds%d seq %d mseq %d "
2937 "importer mds%d has peer seq %d mseq %d\n", 2960 "importer mds%d has peer seq %d mseq %d\n",
2938 ceph_vinop(inode), peer, cap->seq, 2961 ceph_vinop(inode), peer, ocap->seq,
2939 cap->mseq, mds, le32_to_cpu(ph->seq), 2962 ocap->mseq, mds, le32_to_cpu(ph->seq),
2940 le32_to_cpu(ph->mseq)); 2963 le32_to_cpu(ph->mseq));
2941 } 2964 }
2942 ci->i_cap_exporting_issued = cap->issued; 2965 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
2943 __ceph_remove_cap(cap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
2944 } 2966 }
2945 2967
2946 /* make sure we re-request max_size, if necessary */ 2968 /* make sure we re-request max_size, if necessary */
2947 ci->i_wanted_max_size = 0; 2969 ci->i_wanted_max_size = 0;
2948 ci->i_requested_max_size = 0; 2970 ci->i_requested_max_size = 0;
2949 spin_unlock(&ci->i_ceph_lock);
2950
2951 down_write(&mdsc->snap_rwsem);
2952 ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2953 false);
2954 downgrade_write(&mdsc->snap_rwsem);
2955 ceph_add_cap(inode, session, cap_id, -1,
2956 issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2957 NULL /* no caps context */);
2958 kick_flushing_inode_caps(mdsc, session, inode);
2959 up_read(&mdsc->snap_rwsem);
2960 2971
2972 *old_issued = issued;
2973 *target_cap = cap;
2961} 2974}
2962 2975
2963/* 2976/*
@@ -2977,7 +2990,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2977 struct ceph_mds_caps *h; 2990 struct ceph_mds_caps *h;
2978 struct ceph_mds_cap_peer *peer = NULL; 2991 struct ceph_mds_cap_peer *peer = NULL;
2979 int mds = session->s_mds; 2992 int mds = session->s_mds;
2980 int op; 2993 int op, issued;
2981 u32 seq, mseq; 2994 u32 seq, mseq;
2982 struct ceph_vino vino; 2995 struct ceph_vino vino;
2983 u64 cap_id; 2996 u64 cap_id;
@@ -3069,7 +3082,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3069 3082
3070 case CEPH_CAP_OP_IMPORT: 3083 case CEPH_CAP_OP_IMPORT:
3071 handle_cap_import(mdsc, inode, h, peer, session, 3084 handle_cap_import(mdsc, inode, h, peer, session,
3072 snaptrace, snaptrace_len); 3085 &cap, &issued);
3086 handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len,
3087 msg->middle, session, cap, issued);
3088 goto done_unlocked;
3073 } 3089 }
3074 3090
3075 /* the rest require a cap */ 3091 /* the rest require a cap */
@@ -3086,8 +3102,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3086 switch (op) { 3102 switch (op) {
3087 case CEPH_CAP_OP_REVOKE: 3103 case CEPH_CAP_OP_REVOKE:
3088 case CEPH_CAP_OP_GRANT: 3104 case CEPH_CAP_OP_GRANT:
3089 case CEPH_CAP_OP_IMPORT: 3105 __ceph_caps_issued(ci, &issued);
3090 handle_cap_grant(inode, h, session, cap, msg->middle); 3106 issued |= __ceph_caps_dirty(ci);
3107 handle_cap_grant(mdsc, inode, h, NULL, 0, msg->middle,
3108 session, cap, issued);
3091 goto done_unlocked; 3109 goto done_unlocked;
3092 3110
3093 case CEPH_CAP_OP_FLUSH_ACK: 3111 case CEPH_CAP_OP_FLUSH_ACK:
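
Much of the caps.c churn above comes from one change: ceph_add_cap() no longer drops i_ceph_lock to allocate, so callers preallocate a cap with ceph_get_cap() and pass it in, then release whatever went unused. A standalone sketch of that allocate-outside-the-lock pattern, using a pthread mutex and made-up names rather than the real ceph structures:

/*
 * Standalone sketch (not ceph code) of the preallocation pattern: allocate
 * while no lock is held, consume the object inside the critical section only
 * if it is needed, and free whatever is left over afterwards.  The mutex and
 * all names below are illustrative stand-ins for i_ceph_lock and the cap
 * helpers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cap { int mds; };

static pthread_mutex_t ci_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cap *existing;		/* cap already attached to the "inode", if any */

/* consumes *new_cap only when no cap exists yet, like the new ceph_add_cap() */
static void add_cap(int mds, struct cap **new_cap)
{
	if (!existing) {
		existing = *new_cap;	/* take ownership of the preallocated cap */
		*new_cap = NULL;
	}
	existing->mds = mds;
}

int main(void)
{
	/* preallocate outside the lock, like ceph_get_cap() */
	struct cap *new_cap = malloc(sizeof(*new_cap));

	if (!new_cap)
		return 1;

	pthread_mutex_lock(&ci_lock);
	add_cap(1, &new_cap);		/* never sleeps or allocates under the lock */
	pthread_mutex_unlock(&ci_lock);

	free(new_cap);			/* NULL if it was consumed; free(NULL) is a no-op */
	printf("cap attached to mds%d\n", existing->mds);
	free(existing);
	return 0;
}
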
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 00d6af6a32ec..8d7d782f4382 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -169,7 +169,7 @@ static struct dentry *__get_parent(struct super_block *sb,
169 return dentry; 169 return dentry;
170} 170}
171 171
172struct dentry *ceph_get_parent(struct dentry *child) 172static struct dentry *ceph_get_parent(struct dentry *child)
173{ 173{
174 /* don't re-export snaps */ 174 /* don't re-export snaps */
175 if (ceph_snap(child->d_inode) != CEPH_NOSNAP) 175 if (ceph_snap(child->d_inode) != CEPH_NOSNAP)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e4fff9ff1c27..04c89c266cec 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -10,6 +10,7 @@
10#include <linux/writeback.h> 10#include <linux/writeback.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/posix_acl.h> 12#include <linux/posix_acl.h>
13#include <linux/random.h>
13 14
14#include "super.h" 15#include "super.h"
15#include "mds_client.h" 16#include "mds_client.h"
@@ -179,9 +180,8 @@ struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
179 * specified, copy the frag delegation info to the caller if 180 * specified, copy the frag delegation info to the caller if
180 * it is present. 181 * it is present.
181 */ 182 */
182u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 183static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
183 struct ceph_inode_frag *pfrag, 184 struct ceph_inode_frag *pfrag, int *found)
184 int *found)
185{ 185{
186 u32 t = ceph_frag_make(0, 0); 186 u32 t = ceph_frag_make(0, 0);
187 struct ceph_inode_frag *frag; 187 struct ceph_inode_frag *frag;
@@ -191,7 +191,6 @@ u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
191 if (found) 191 if (found)
192 *found = 0; 192 *found = 0;
193 193
194 mutex_lock(&ci->i_fragtree_mutex);
195 while (1) { 194 while (1) {
196 WARN_ON(!ceph_frag_contains_value(t, v)); 195 WARN_ON(!ceph_frag_contains_value(t, v));
197 frag = __ceph_find_frag(ci, t); 196 frag = __ceph_find_frag(ci, t);
@@ -220,10 +219,19 @@ u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
220 } 219 }
221 dout("choose_frag(%x) = %x\n", v, t); 220 dout("choose_frag(%x) = %x\n", v, t);
222 221
223 mutex_unlock(&ci->i_fragtree_mutex);
224 return t; 222 return t;
225} 223}
226 224
225u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
226 struct ceph_inode_frag *pfrag, int *found)
227{
228 u32 ret;
229 mutex_lock(&ci->i_fragtree_mutex);
230 ret = __ceph_choose_frag(ci, v, pfrag, found);
231 mutex_unlock(&ci->i_fragtree_mutex);
232 return ret;
233}
234
227/* 235/*
228 * Process dirfrag (delegation) info from the mds. Include leaf 236 * Process dirfrag (delegation) info from the mds. Include leaf
229 * fragment in tree ONLY if ndist > 0. Otherwise, only 237 * fragment in tree ONLY if ndist > 0. Otherwise, only
@@ -237,11 +245,17 @@ static int ceph_fill_dirfrag(struct inode *inode,
237 u32 id = le32_to_cpu(dirinfo->frag); 245 u32 id = le32_to_cpu(dirinfo->frag);
238 int mds = le32_to_cpu(dirinfo->auth); 246 int mds = le32_to_cpu(dirinfo->auth);
239 int ndist = le32_to_cpu(dirinfo->ndist); 247 int ndist = le32_to_cpu(dirinfo->ndist);
248 int diri_auth = -1;
240 int i; 249 int i;
241 int err = 0; 250 int err = 0;
242 251
252 spin_lock(&ci->i_ceph_lock);
253 if (ci->i_auth_cap)
254 diri_auth = ci->i_auth_cap->mds;
255 spin_unlock(&ci->i_ceph_lock);
256
243 mutex_lock(&ci->i_fragtree_mutex); 257 mutex_lock(&ci->i_fragtree_mutex);
244 if (ndist == 0) { 258 if (ndist == 0 && mds == diri_auth) {
245 /* no delegation info needed. */ 259 /* no delegation info needed. */
246 frag = __ceph_find_frag(ci, id); 260 frag = __ceph_find_frag(ci, id);
247 if (!frag) 261 if (!frag)
@@ -286,6 +300,75 @@ out:
286 return err; 300 return err;
287} 301}
288 302
303static int ceph_fill_fragtree(struct inode *inode,
304 struct ceph_frag_tree_head *fragtree,
305 struct ceph_mds_reply_dirfrag *dirinfo)
306{
307 struct ceph_inode_info *ci = ceph_inode(inode);
308 struct ceph_inode_frag *frag;
309 struct rb_node *rb_node;
310 int i;
311 u32 id, nsplits;
312 bool update = false;
313
314 mutex_lock(&ci->i_fragtree_mutex);
315 nsplits = le32_to_cpu(fragtree->nsplits);
316 if (nsplits) {
317 i = prandom_u32() % nsplits;
318 id = le32_to_cpu(fragtree->splits[i].frag);
319 if (!__ceph_find_frag(ci, id))
320 update = true;
321 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
322 rb_node = rb_first(&ci->i_fragtree);
323 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
324 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
325 update = true;
326 }
327 if (!update && dirinfo) {
328 id = le32_to_cpu(dirinfo->frag);
329 if (id != __ceph_choose_frag(ci, id, NULL, NULL))
330 update = true;
331 }
332 if (!update)
333 goto out_unlock;
334
335 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
336 rb_node = rb_first(&ci->i_fragtree);
337 for (i = 0; i < nsplits; i++) {
338 id = le32_to_cpu(fragtree->splits[i].frag);
339 frag = NULL;
340 while (rb_node) {
341 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
342 if (ceph_frag_compare(frag->frag, id) >= 0) {
343 if (frag->frag != id)
344 frag = NULL;
345 else
346 rb_node = rb_next(rb_node);
347 break;
348 }
349 rb_node = rb_next(rb_node);
350 rb_erase(&frag->node, &ci->i_fragtree);
351 kfree(frag);
352 frag = NULL;
353 }
354 if (!frag) {
355 frag = __get_or_create_frag(ci, id);
356 if (IS_ERR(frag))
357 continue;
358 }
359 frag->split_by = le32_to_cpu(fragtree->splits[i].by);
360 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
361 }
362 while (rb_node) {
363 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
364 rb_node = rb_next(rb_node);
365 rb_erase(&frag->node, &ci->i_fragtree);
366 kfree(frag);
367 }
368out_unlock:
369 mutex_unlock(&ci->i_fragtree_mutex);
370 return 0;
371}
289 372
290/* 373/*
291 * initialize a newly allocated inode. 374 * initialize a newly allocated inode.
@@ -341,7 +424,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
341 INIT_LIST_HEAD(&ci->i_cap_snaps); 424 INIT_LIST_HEAD(&ci->i_cap_snaps);
342 ci->i_head_snapc = NULL; 425 ci->i_head_snapc = NULL;
343 ci->i_snap_caps = 0; 426 ci->i_snap_caps = 0;
344 ci->i_cap_exporting_issued = 0;
345 427
346 for (i = 0; i < CEPH_FILE_MODE_NUM; i++) 428 for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
347 ci->i_nr_by_mode[i] = 0; 429 ci->i_nr_by_mode[i] = 0;
@@ -407,7 +489,7 @@ void ceph_destroy_inode(struct inode *inode)
407 489
408 /* 490 /*
409 * we may still have a snap_realm reference if there are stray 491 * we may still have a snap_realm reference if there are stray
410 * caps in i_cap_exporting_issued or i_snap_caps. 492 * caps in i_snap_caps.
411 */ 493 */
412 if (ci->i_snap_realm) { 494 if (ci->i_snap_realm) {
413 struct ceph_mds_client *mdsc = 495 struct ceph_mds_client *mdsc =
@@ -582,22 +664,26 @@ static int fill_inode(struct inode *inode,
582 unsigned long ttl_from, int cap_fmode, 664 unsigned long ttl_from, int cap_fmode,
583 struct ceph_cap_reservation *caps_reservation) 665 struct ceph_cap_reservation *caps_reservation)
584{ 666{
667 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
585 struct ceph_mds_reply_inode *info = iinfo->in; 668 struct ceph_mds_reply_inode *info = iinfo->in;
586 struct ceph_inode_info *ci = ceph_inode(inode); 669 struct ceph_inode_info *ci = ceph_inode(inode);
587 int i; 670 int issued = 0, implemented, new_issued;
588 int issued = 0, implemented;
589 struct timespec mtime, atime, ctime; 671 struct timespec mtime, atime, ctime;
590 u32 nsplits;
591 struct ceph_inode_frag *frag;
592 struct rb_node *rb_node;
593 struct ceph_buffer *xattr_blob = NULL; 672 struct ceph_buffer *xattr_blob = NULL;
673 struct ceph_cap *new_cap = NULL;
594 int err = 0; 674 int err = 0;
595 int queue_trunc = 0; 675 bool wake = false;
676 bool queue_trunc = false;
677 bool new_version = false;
596 678
597 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 679 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
598 inode, ceph_vinop(inode), le64_to_cpu(info->version), 680 inode, ceph_vinop(inode), le64_to_cpu(info->version),
599 ci->i_version); 681 ci->i_version);
600 682
683 /* prealloc new cap struct */
684 if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
685 new_cap = ceph_get_cap(mdsc, caps_reservation);
686
601 /* 687 /*
602 * prealloc xattr data, if it looks like we'll need it. only 688 * prealloc xattr data, if it looks like we'll need it. only
603 * if len > 4 (meaning there are actually xattrs; the first 4 689 * if len > 4 (meaning there are actually xattrs; the first 4
@@ -623,19 +709,23 @@ static int fill_inode(struct inode *inode,
623 * 3 2 skip 709 * 3 2 skip
624 * 3 3 update 710 * 3 3 update
625 */ 711 */
626 if (le64_to_cpu(info->version) > 0 && 712 if (ci->i_version == 0 ||
627 (ci->i_version & ~1) >= le64_to_cpu(info->version)) 713 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
628 goto no_change; 714 le64_to_cpu(info->version) > (ci->i_version & ~1)))
629 715 new_version = true;
716
630 issued = __ceph_caps_issued(ci, &implemented); 717 issued = __ceph_caps_issued(ci, &implemented);
631 issued |= implemented | __ceph_caps_dirty(ci); 718 issued |= implemented | __ceph_caps_dirty(ci);
719 new_issued = ~issued & le32_to_cpu(info->cap.caps);
632 720
633 /* update inode */ 721 /* update inode */
634 ci->i_version = le64_to_cpu(info->version); 722 ci->i_version = le64_to_cpu(info->version);
635 inode->i_version++; 723 inode->i_version++;
636 inode->i_rdev = le32_to_cpu(info->rdev); 724 inode->i_rdev = le32_to_cpu(info->rdev);
725 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
637 726
638 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 727 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
728 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
639 inode->i_mode = le32_to_cpu(info->mode); 729 inode->i_mode = le32_to_cpu(info->mode);
640 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid)); 730 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
641 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid)); 731 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
@@ -644,23 +734,35 @@ static int fill_inode(struct inode *inode,
644 from_kgid(&init_user_ns, inode->i_gid)); 734 from_kgid(&init_user_ns, inode->i_gid));
645 } 735 }
646 736
647 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 737 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
738 (issued & CEPH_CAP_LINK_EXCL) == 0)
648 set_nlink(inode, le32_to_cpu(info->nlink)); 739 set_nlink(inode, le32_to_cpu(info->nlink));
649 740
650 /* be careful with mtime, atime, size */ 741 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
651 ceph_decode_timespec(&atime, &info->atime); 742 /* be careful with mtime, atime, size */
652 ceph_decode_timespec(&mtime, &info->mtime); 743 ceph_decode_timespec(&atime, &info->atime);
653 ceph_decode_timespec(&ctime, &info->ctime); 744 ceph_decode_timespec(&mtime, &info->mtime);
654 queue_trunc = ceph_fill_file_size(inode, issued, 745 ceph_decode_timespec(&ctime, &info->ctime);
655 le32_to_cpu(info->truncate_seq), 746 ceph_fill_file_time(inode, issued,
656 le64_to_cpu(info->truncate_size), 747 le32_to_cpu(info->time_warp_seq),
657 le64_to_cpu(info->size)); 748 &ctime, &mtime, &atime);
658 ceph_fill_file_time(inode, issued, 749 }
659 le32_to_cpu(info->time_warp_seq), 750
660 &ctime, &mtime, &atime); 751 if (new_version ||
661 752 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
662 ci->i_layout = info->layout; 753 ci->i_layout = info->layout;
663 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 754 queue_trunc = ceph_fill_file_size(inode, issued,
755 le32_to_cpu(info->truncate_seq),
756 le64_to_cpu(info->truncate_size),
757 le64_to_cpu(info->size));
758 /* only update max_size on auth cap */
759 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
760 ci->i_max_size != le64_to_cpu(info->max_size)) {
761 dout("max_size %lld -> %llu\n", ci->i_max_size,
762 le64_to_cpu(info->max_size));
763 ci->i_max_size = le64_to_cpu(info->max_size);
764 }
765 }
664 766
665 /* xattrs */ 767 /* xattrs */
666 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 768 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
@@ -745,58 +847,6 @@ static int fill_inode(struct inode *inode,
745 dout(" marking %p complete (empty)\n", inode); 847 dout(" marking %p complete (empty)\n", inode);
746 __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count)); 848 __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
747 } 849 }
748no_change:
749 /* only update max_size on auth cap */
750 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
751 ci->i_max_size != le64_to_cpu(info->max_size)) {
752 dout("max_size %lld -> %llu\n", ci->i_max_size,
753 le64_to_cpu(info->max_size));
754 ci->i_max_size = le64_to_cpu(info->max_size);
755 }
756
757 spin_unlock(&ci->i_ceph_lock);
758
759 /* queue truncate if we saw i_size decrease */
760 if (queue_trunc)
761 ceph_queue_vmtruncate(inode);
762
763 /* populate frag tree */
764 /* FIXME: move me up, if/when version reflects fragtree changes */
765 nsplits = le32_to_cpu(info->fragtree.nsplits);
766 mutex_lock(&ci->i_fragtree_mutex);
767 rb_node = rb_first(&ci->i_fragtree);
768 for (i = 0; i < nsplits; i++) {
769 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
770 frag = NULL;
771 while (rb_node) {
772 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
773 if (ceph_frag_compare(frag->frag, id) >= 0) {
774 if (frag->frag != id)
775 frag = NULL;
776 else
777 rb_node = rb_next(rb_node);
778 break;
779 }
780 rb_node = rb_next(rb_node);
781 rb_erase(&frag->node, &ci->i_fragtree);
782 kfree(frag);
783 frag = NULL;
784 }
785 if (!frag) {
786 frag = __get_or_create_frag(ci, id);
787 if (IS_ERR(frag))
788 continue;
789 }
790 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
791 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
792 }
793 while (rb_node) {
794 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
795 rb_node = rb_next(rb_node);
796 rb_erase(&frag->node, &ci->i_fragtree);
797 kfree(frag);
798 }
799 mutex_unlock(&ci->i_fragtree_mutex);
800 850
801 /* were we issued a capability? */ 851 /* were we issued a capability? */
802 if (info->cap.caps) { 852 if (info->cap.caps) {
@@ -809,30 +859,41 @@ no_change:
809 le32_to_cpu(info->cap.seq), 859 le32_to_cpu(info->cap.seq),
810 le32_to_cpu(info->cap.mseq), 860 le32_to_cpu(info->cap.mseq),
811 le64_to_cpu(info->cap.realm), 861 le64_to_cpu(info->cap.realm),
812 info->cap.flags, 862 info->cap.flags, &new_cap);
813 caps_reservation); 863 wake = true;
814 } else { 864 } else {
815 spin_lock(&ci->i_ceph_lock);
816 dout(" %p got snap_caps %s\n", inode, 865 dout(" %p got snap_caps %s\n", inode,
817 ceph_cap_string(le32_to_cpu(info->cap.caps))); 866 ceph_cap_string(le32_to_cpu(info->cap.caps)));
818 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 867 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
819 if (cap_fmode >= 0) 868 if (cap_fmode >= 0)
820 __ceph_get_fmode(ci, cap_fmode); 869 __ceph_get_fmode(ci, cap_fmode);
821 spin_unlock(&ci->i_ceph_lock);
822 } 870 }
823 } else if (cap_fmode >= 0) { 871 } else if (cap_fmode >= 0) {
824 pr_warn("mds issued no caps on %llx.%llx\n", 872 pr_warn("mds issued no caps on %llx.%llx\n",
825 ceph_vinop(inode)); 873 ceph_vinop(inode));
826 __ceph_get_fmode(ci, cap_fmode); 874 __ceph_get_fmode(ci, cap_fmode);
827 } 875 }
876 spin_unlock(&ci->i_ceph_lock);
877
878 if (wake)
879 wake_up_all(&ci->i_cap_wq);
880
881 /* queue truncate if we saw i_size decrease */
882 if (queue_trunc)
883 ceph_queue_vmtruncate(inode);
884
885 /* populate frag tree */
886 if (S_ISDIR(inode->i_mode))
887 ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
828 888
829 /* update delegation info? */ 889 /* update delegation info? */
830 if (dirinfo) 890 if (dirinfo)
831 ceph_fill_dirfrag(inode, dirinfo); 891 ceph_fill_dirfrag(inode, dirinfo);
832 892
833 err = 0; 893 err = 0;
834
835out: 894out:
895 if (new_cap)
896 ceph_put_cap(mdsc, new_cap);
836 if (xattr_blob) 897 if (xattr_blob)
837 ceph_buffer_put(xattr_blob); 898 ceph_buffer_put(xattr_blob);
838 return err; 899 return err;
@@ -1485,7 +1546,7 @@ static void ceph_invalidate_work(struct work_struct *work)
1485 orig_gen = ci->i_rdcache_gen; 1546 orig_gen = ci->i_rdcache_gen;
1486 spin_unlock(&ci->i_ceph_lock); 1547 spin_unlock(&ci->i_ceph_lock);
1487 1548
1488 truncate_inode_pages(inode->i_mapping, 0); 1549 truncate_pagecache(inode, 0);
1489 1550
1490 spin_lock(&ci->i_ceph_lock); 1551 spin_lock(&ci->i_ceph_lock);
1491 if (orig_gen == ci->i_rdcache_gen && 1552 if (orig_gen == ci->i_rdcache_gen &&
@@ -1588,7 +1649,7 @@ retry:
1588 ci->i_truncate_pending, to); 1649 ci->i_truncate_pending, to);
1589 spin_unlock(&ci->i_ceph_lock); 1650 spin_unlock(&ci->i_ceph_lock);
1590 1651
1591 truncate_inode_pages(inode->i_mapping, to); 1652 truncate_pagecache(inode, to);
1592 1653
1593 spin_lock(&ci->i_ceph_lock); 1654 spin_lock(&ci->i_ceph_lock);
1594 if (to == ci->i_truncate_size) { 1655 if (to == ci->i_truncate_size) {
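
The reworked fill_inode() above gates each group of inode fields on either a newer inode version or on cap bits that were newly granted (new_issued = ~issued & caps), instead of one version check covering everything. A small standalone sketch of that gating logic; the bit values and messages are illustrative, not the real CEPH_CAP_* encoding:

/*
 * Standalone sketch (not ceph code) of the cap-gated update: compute which
 * cap bits are newly granted and refresh only the fields they cover, unless
 * the whole inode is known to be newer.
 */
#include <stdbool.h>
#include <stdio.h>

#define CAP_AUTH_SHARED 0x01
#define CAP_LINK_SHARED 0x02
#define CAP_FILE_RD     0x04

int main(void)
{
	unsigned issued  = CAP_AUTH_SHARED;			/* caps already held */
	unsigned granted = CAP_AUTH_SHARED | CAP_FILE_RD;	/* caps in the MDS reply */
	bool new_version = false;				/* reply not newer than i_version */

	unsigned new_issued = ~issued & granted;		/* bits we did not have before */

	if (new_version || (new_issued & CAP_AUTH_SHARED))
		printf("update mode/uid/gid\n");		/* skipped: AUTH_SHARED not new */
	if (new_version || (new_issued & CAP_LINK_SHARED))
		printf("update nlink\n");			/* skipped: LINK_SHARED not granted */
	if (new_version || (new_issued & CAP_FILE_RD))
		printf("update size/mtime/layout\n");		/* taken: FILE_RD is newly granted */
	return 0;
}
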
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 9a33b98cb000..92a2548278fc 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1558,6 +1558,8 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1558 init_completion(&req->r_safe_completion); 1558 init_completion(&req->r_safe_completion);
1559 INIT_LIST_HEAD(&req->r_unsafe_item); 1559 INIT_LIST_HEAD(&req->r_unsafe_item);
1560 1560
1561 req->r_stamp = CURRENT_TIME;
1562
1561 req->r_op = op; 1563 req->r_op = op;
1562 req->r_direct_mode = mode; 1564 req->r_direct_mode = mode;
1563 return req; 1565 return req;
@@ -1783,7 +1785,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1783 } 1785 }
1784 1786
1785 len = sizeof(*head) + 1787 len = sizeof(*head) +
1786 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)); 1788 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1789 sizeof(struct timespec);
1787 1790
1788 /* calculate (max) length for cap releases */ 1791 /* calculate (max) length for cap releases */
1789 len += sizeof(struct ceph_mds_request_release) * 1792 len += sizeof(struct ceph_mds_request_release) *
@@ -1800,6 +1803,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1800 goto out_free2; 1803 goto out_free2;
1801 } 1804 }
1802 1805
1806 msg->hdr.version = 2;
1803 msg->hdr.tid = cpu_to_le64(req->r_tid); 1807 msg->hdr.tid = cpu_to_le64(req->r_tid);
1804 1808
1805 head = msg->front.iov_base; 1809 head = msg->front.iov_base;
@@ -1836,6 +1840,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1836 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); 1840 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1837 head->num_releases = cpu_to_le16(releases); 1841 head->num_releases = cpu_to_le16(releases);
1838 1842
1843 /* time stamp */
1844 ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
1845
1839 BUG_ON(p > end); 1846 BUG_ON(p > end);
1840 msg->front.iov_len = p - msg->front.iov_base; 1847 msg->front.iov_len = p - msg->front.iov_base;
1841 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1848 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index e90cfccf93bd..e00737cf523c 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -194,6 +194,7 @@ struct ceph_mds_request {
194 int r_fmode; /* file mode, if expecting cap */ 194 int r_fmode; /* file mode, if expecting cap */
195 kuid_t r_uid; 195 kuid_t r_uid;
196 kgid_t r_gid; 196 kgid_t r_gid;
197 struct timespec r_stamp;
197 198
198 /* for choosing which mds to send this request to */ 199 /* for choosing which mds to send this request to */
199 int r_direct_mode; 200 int r_direct_mode;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ead05cc1f447..12b20744e386 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -292,7 +292,6 @@ struct ceph_inode_info {
292 struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 or 292 struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 or
293 dirty|flushing caps */ 293 dirty|flushing caps */
294 unsigned i_snap_caps; /* cap bits for snapped files */ 294 unsigned i_snap_caps; /* cap bits for snapped files */
295 unsigned i_cap_exporting_issued;
296 295
297 int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */ 296 int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
298 297
@@ -775,11 +774,13 @@ static inline void ceph_forget_all_cached_acls(struct inode *inode)
775extern const char *ceph_cap_string(int c); 774extern const char *ceph_cap_string(int c);
776extern void ceph_handle_caps(struct ceph_mds_session *session, 775extern void ceph_handle_caps(struct ceph_mds_session *session,
777 struct ceph_msg *msg); 776 struct ceph_msg *msg);
778extern int ceph_add_cap(struct inode *inode, 777extern struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
779 struct ceph_mds_session *session, u64 cap_id, 778 struct ceph_cap_reservation *ctx);
780 int fmode, unsigned issued, unsigned wanted, 779extern void ceph_add_cap(struct inode *inode,
781 unsigned cap, unsigned seq, u64 realmino, int flags, 780 struct ceph_mds_session *session, u64 cap_id,
782 struct ceph_cap_reservation *caps_reservation); 781 int fmode, unsigned issued, unsigned wanted,
782 unsigned cap, unsigned seq, u64 realmino, int flags,
783 struct ceph_cap **new_cap);
783extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release); 784extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
784extern void ceph_put_cap(struct ceph_mds_client *mdsc, 785extern void ceph_put_cap(struct ceph_mds_client *mdsc,
785 struct ceph_cap *cap); 786 struct ceph_cap *cap);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 1e5b45359509..d08e079ea5d3 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -617,6 +617,11 @@ static void retry_failed_sctp_send(struct connection *recv_con,
617 int nodeid = sn_send_failed->ssf_info.sinfo_ppid; 617 int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
618 618
619 log_print("Retry sending %d bytes to node id %d", len, nodeid); 619 log_print("Retry sending %d bytes to node id %d", len, nodeid);
620
621 if (!nodeid) {
622 log_print("Shouldn't resend data via listening connection.");
623 return;
624 }
620 625
621 con = nodeid2con(nodeid, 0); 626 con = nodeid2con(nodeid, 0);
622 if (!con) { 627 if (!con) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index b73e0621ce9e..b10b48c2a7af 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
910void eventpoll_release_file(struct file *file) 910void eventpoll_release_file(struct file *file)
911{ 911{
912 struct eventpoll *ep; 912 struct eventpoll *ep;
913 struct epitem *epi; 913 struct epitem *epi, *next;
914 914
915 /* 915 /*
916 * We don't want to get "file->f_lock" because it is not 916 * We don't want to get "file->f_lock" because it is not
@@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
926 * Besides, ep_remove() acquires the lock, so we can't hold it here. 926 * Besides, ep_remove() acquires the lock, so we can't hold it here.
927 */ 927 */
928 mutex_lock(&epmutex); 928 mutex_lock(&epmutex);
929 list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) { 929 list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
930 ep = epi->ep; 930 ep = epi->ep;
931 mutex_lock_nested(&ep->mtx, 0); 931 mutex_lock_nested(&ep->mtx, 0);
932 ep_remove(ep, epi); 932 ep_remove(ep, epi);
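
The eventpoll hunk above switches to list_for_each_entry_safe() because ep_remove() unlinks the entry the loop is standing on; the _safe variant caches the next element before the body runs. A standalone sketch of the same idea with a plain singly linked list in place of the kernel's list_head machinery:

/*
 * Standalone sketch (not kernel code): freeing the current node inside the
 * loop body is only safe if the next pointer was saved first, which is what
 * the _safe iterator's extra cursor does.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int val;
	struct item *next;
};

int main(void)
{
	struct item *head = NULL, *it, *next;
	int i;

	/* build a small list: 2 -> 1 -> 0 */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		it->val = i;
		it->next = head;
		head = it;
	}

	/* "safe" traversal: fetch ->next before the body invalidates 'it' */
	for (it = head; it; it = next) {
		next = it->next;
		printf("removing %d\n", it->val);
		free(it);		/* like ep_remove(): the current entry goes away */
	}
	return 0;
}
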
diff --git a/fs/locks.c b/fs/locks.c
index da57c9b7e844..717fbc404e6b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -431,7 +431,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl)
431 if (assign_type(fl, type) != 0) 431 if (assign_type(fl, type) != 0)
432 return -EINVAL; 432 return -EINVAL;
433 433
434 fl->fl_owner = (fl_owner_t)filp; 434 fl->fl_owner = (fl_owner_t)current->files;
435 fl->fl_pid = current->tgid; 435 fl->fl_pid = current->tgid;
436 436
437 fl->fl_file = filp; 437 fl->fl_file = filp;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c0d45cec9958..2204e1fe5725 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -41,6 +41,7 @@
41#include <linux/ratelimit.h> 41#include <linux/ratelimit.h>
42#include <linux/sunrpc/svcauth_gss.h> 42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h> 43#include <linux/sunrpc/addr.h>
44#include <linux/hash.h>
44#include "xdr4.h" 45#include "xdr4.h"
45#include "xdr4cb.h" 46#include "xdr4cb.h"
46#include "vfs.h" 47#include "vfs.h"
@@ -364,6 +365,79 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
364 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); 365 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
365} 366}
366 367
368/*
369 * When we recall a delegation, we should be careful not to hand it
370 * out again straight away.
371 * To ensure this we keep a pair of bloom filters ('new' and 'old')
372 * in which the filehandles of recalled delegations are "stored".
 373 * If a filehandle appears in either filter, a delegation is blocked.
374 * When a delegation is recalled, the filehandle is stored in the "new"
375 * filter.
376 * Every 30 seconds we swap the filters and clear the "new" one,
377 * unless both are empty of course.
378 *
379 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
380 * low 3 bytes as hash-table indices.
381 *
382 * 'state_lock', which is always held when block_delegations() is called,
383 * is used to manage concurrent access. Testing does not need the lock
384 * except when swapping the two filters.
385 */
386static struct bloom_pair {
387 int entries, old_entries;
388 time_t swap_time;
389 int new; /* index into 'set' */
390 DECLARE_BITMAP(set[2], 256);
391} blocked_delegations;
392
393static int delegation_blocked(struct knfsd_fh *fh)
394{
395 u32 hash;
396 struct bloom_pair *bd = &blocked_delegations;
397
398 if (bd->entries == 0)
399 return 0;
400 if (seconds_since_boot() - bd->swap_time > 30) {
401 spin_lock(&state_lock);
402 if (seconds_since_boot() - bd->swap_time > 30) {
403 bd->entries -= bd->old_entries;
404 bd->old_entries = bd->entries;
405 memset(bd->set[bd->new], 0,
406 sizeof(bd->set[0]));
407 bd->new = 1-bd->new;
408 bd->swap_time = seconds_since_boot();
409 }
410 spin_unlock(&state_lock);
411 }
412 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
413 if (test_bit(hash&255, bd->set[0]) &&
414 test_bit((hash>>8)&255, bd->set[0]) &&
415 test_bit((hash>>16)&255, bd->set[0]))
416 return 1;
417
418 if (test_bit(hash&255, bd->set[1]) &&
419 test_bit((hash>>8)&255, bd->set[1]) &&
420 test_bit((hash>>16)&255, bd->set[1]))
421 return 1;
422
423 return 0;
424}
425
426static void block_delegations(struct knfsd_fh *fh)
427{
428 u32 hash;
429 struct bloom_pair *bd = &blocked_delegations;
430
431 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
432
433 __set_bit(hash&255, bd->set[bd->new]);
434 __set_bit((hash>>8)&255, bd->set[bd->new]);
435 __set_bit((hash>>16)&255, bd->set[bd->new]);
436 if (bd->entries == 0)
437 bd->swap_time = seconds_since_boot();
438 bd->entries += 1;
439}
440
367static struct nfs4_delegation * 441static struct nfs4_delegation *
368alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh) 442alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
369{ 443{
@@ -372,6 +446,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
372 dprintk("NFSD alloc_init_deleg\n"); 446 dprintk("NFSD alloc_init_deleg\n");
373 if (num_delegations > max_delegations) 447 if (num_delegations > max_delegations)
374 return NULL; 448 return NULL;
449 if (delegation_blocked(&current_fh->fh_handle))
450 return NULL;
375 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); 451 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
376 if (dp == NULL) 452 if (dp == NULL)
377 return dp; 453 return dp;
@@ -2770,6 +2846,8 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2770 /* Only place dl_time is set; protected by i_lock: */ 2846 /* Only place dl_time is set; protected by i_lock: */
2771 dp->dl_time = get_seconds(); 2847 dp->dl_time = get_seconds();
2772 2848
2849 block_delegations(&dp->dl_fh);
2850
2773 nfsd4_cb_recall(dp); 2851 nfsd4_cb_recall(dp);
2774} 2852}
2775 2853
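
A standalone sketch of the paired bloom filter described in the comment above: each filehandle sets three bits, chosen from the low three bytes of a 32-bit hash, in the "new" filter; lookups test both filters; a periodic rotation retires the older entries. toy_hash(), rotate() and the bit helpers are simplified stand-ins for arch_fast_hash(), the 30-second swap and the kernel bitmap API.

/*
 * Standalone sketch (not nfsd code) of the blocked-delegations bloom pair.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t set[2][256 / 8];		/* two 256-bit filters */
static int new_idx;			/* which filter receives new entries */

static void set_bit256(uint8_t *f, uint32_t b)
{
	f[(b & 255) / 8] |= 1u << (b % 8);
}

static int test_bit256(const uint8_t *f, uint32_t b)
{
	return f[(b & 255) / 8] & (1u << (b % 8));
}

static uint32_t toy_hash(const char *s)	/* FNV-1a, standing in for arch_fast_hash() */
{
	uint32_t h = 2166136261u;

	while (*s)
		h = (h ^ (uint8_t)*s++) * 16777619u;
	return h;
}

static void block_fh(const char *fh)
{
	uint32_t h = toy_hash(fh);

	set_bit256(set[new_idx], h & 255);
	set_bit256(set[new_idx], (h >> 8) & 255);
	set_bit256(set[new_idx], (h >> 16) & 255);
}

static int fh_blocked(const char *fh)
{
	uint32_t h = toy_hash(fh);
	int i;

	for (i = 0; i < 2; i++)		/* a hit in either filter blocks the delegation */
		if (test_bit256(set[i], h & 255) &&
		    test_bit256(set[i], (h >> 8) & 255) &&
		    test_bit256(set[i], (h >> 16) & 255))
			return 1;	/* bloom filters may false-positive, never false-negative */
	return 0;
}

static void rotate(void)		/* what the 30-second check triggers: swap, clear "new" */
{
	new_idx = 1 - new_idx;
	memset(set[new_idx], 0, sizeof(set[0]));
}

int main(void)
{
	block_fh("fh-1");
	printf("fh-1: %d  fh-2: %d\n", fh_blocked("fh-1"), fh_blocked("fh-2"));
	rotate();
	printf("after one rotation  fh-1: %d\n", fh_blocked("fh-1"));	/* still in the old filter */
	rotate();
	printf("after two rotations fh-1: %d\n", fh_blocked("fh-1"));	/* finally expired */
	return 0;
}
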
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2d305a121f37..83baf2bfe9e9 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2687,6 +2687,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
2687 nfserr = nfserr_toosmall; 2687 nfserr = nfserr_toosmall;
2688 goto fail; 2688 goto fail;
2689 case nfserr_noent: 2689 case nfserr_noent:
2690 xdr_truncate_encode(xdr, start_offset);
2690 goto skip_entry; 2691 goto skip_entry;
2691 default: 2692 default:
2692 /* 2693 /*
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 6eb1d3cb5104..9b9b6f29bbf3 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -53,7 +53,7 @@ struct acpi_power_register {
53 u8 bit_offset; 53 u8 bit_offset;
54 u8 access_size; 54 u8 access_size;
55 u64 address; 55 u64 address;
56} __attribute__ ((packed)); 56} __packed;
57 57
58struct acpi_processor_cx { 58struct acpi_processor_cx {
59 u8 valid; 59 u8 valid;
@@ -83,7 +83,7 @@ struct acpi_psd_package {
83 u64 domain; 83 u64 domain;
84 u64 coord_type; 84 u64 coord_type;
85 u64 num_processors; 85 u64 num_processors;
86} __attribute__ ((packed)); 86} __packed;
87 87
88struct acpi_pct_register { 88struct acpi_pct_register {
89 u8 descriptor; 89 u8 descriptor;
@@ -93,7 +93,7 @@ struct acpi_pct_register {
93 u8 bit_offset; 93 u8 bit_offset;
94 u8 reserved; 94 u8 reserved;
95 u64 address; 95 u64 address;
96} __attribute__ ((packed)); 96} __packed;
97 97
98struct acpi_processor_px { 98struct acpi_processor_px {
99 u64 core_frequency; /* megahertz */ 99 u64 core_frequency; /* megahertz */
@@ -124,7 +124,7 @@ struct acpi_tsd_package {
124 u64 domain; 124 u64 domain;
125 u64 coord_type; 125 u64 coord_type;
126 u64 num_processors; 126 u64 num_processors;
127} __attribute__ ((packed)); 127} __packed;
128 128
129struct acpi_ptc_register { 129struct acpi_ptc_register {
130 u8 descriptor; 130 u8 descriptor;
@@ -134,7 +134,7 @@ struct acpi_ptc_register {
134 u8 bit_offset; 134 u8 bit_offset;
135 u8 reserved; 135 u8 reserved;
136 u64 address; 136 u64 address;
137} __attribute__ ((packed)); 137} __packed;
138 138
139struct acpi_processor_tx_tss { 139struct acpi_processor_tx_tss {
140 u64 freqpercentage; /* */ 140 u64 freqpercentage; /* */
diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
index cfdc884405b7..2baba9996094 100644
--- a/include/drm/i915_powerwell.h
+++ b/include/drm/i915_powerwell.h
@@ -30,7 +30,7 @@
30#define _I915_POWERWELL_H_ 30#define _I915_POWERWELL_H_
31 31
32/* For use by hda_i915 driver */ 32/* For use by hda_i915 driver */
33extern void i915_request_power_well(void); 33extern int i915_request_power_well(void);
34extern void i915_release_power_well(void); 34extern int i915_release_power_well(void);
35 35
36#endif /* _I915_POWERWELL_H_ */ 36#endif /* _I915_POWERWELL_H_ */
diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clk/ti-dra7-atl.h
new file mode 100644
index 000000000000..42dd4164f6f4
--- /dev/null
+++ b/include/dt-bindings/clk/ti-dra7-atl.h
@@ -0,0 +1,40 @@
1/*
2 * This header provides constants for DRA7 ATL (Audio Tracking Logic)
3 *
4 * The constants defined in this header are used in dts files
5 *
6 * Copyright (C) 2013 Texas Instruments, Inc.
7 *
8 * Peter Ujfalusi <peter.ujfalusi@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
21#define _DT_BINDINGS_CLK_DRA7_ATL_H
22
23#define DRA7_ATL_WS_MCASP1_FSR 0
24#define DRA7_ATL_WS_MCASP1_FSX 1
25#define DRA7_ATL_WS_MCASP2_FSR 2
26#define DRA7_ATL_WS_MCASP2_FSX 3
27#define DRA7_ATL_WS_MCASP3_FSX 4
28#define DRA7_ATL_WS_MCASP4_FSX 5
29#define DRA7_ATL_WS_MCASP5_FSX 6
30#define DRA7_ATL_WS_MCASP6_FSX 7
31#define DRA7_ATL_WS_MCASP7_FSX 8
32#define DRA7_ATL_WS_MCASP8_FSX 9
33#define DRA7_ATL_WS_MCASP8_AHCLKX 10
34#define DRA7_ATL_WS_XREF_CLK3 11
35#define DRA7_ATL_WS_XREF_CLK0 12
36#define DRA7_ATL_WS_XREF_CLK1 13
37#define DRA7_ATL_WS_XREF_CLK2 14
38#define DRA7_ATL_WS_OSC1_X1 15
39
40#endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a002cf191427..eb726b9c5762 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
42 unsigned int nr_ctx; 42 unsigned int nr_ctx;
43 struct blk_mq_ctx **ctxs; 43 struct blk_mq_ctx **ctxs;
44 44
45 unsigned int wait_index; 45 atomic_t wait_index;
46 46
47 struct blk_mq_tags *tags; 47 struct blk_mq_tags *tags;
48 48
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 31e11051f1ba..713f8b62b435 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -920,7 +920,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
920 sector_t offset) 920 sector_t offset)
921{ 921{
922 if (!q->limits.chunk_sectors) 922 if (!q->limits.chunk_sectors)
923 return q->limits.max_hw_sectors; 923 return q->limits.max_sectors;
924 924
925 return q->limits.chunk_sectors - 925 return q->limits.chunk_sectors -
926 (offset & (q->limits.chunk_sectors - 1)); 926 (offset & (q->limits.chunk_sectors - 1));
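
The blkdev.h change swaps the fallback from max_hw_sectors to max_sectors; when chunk_sectors is set, the helper instead caps an I/O so it never crosses a chunk boundary. A small stand-alone illustration of that boundary arithmetic (values made up; chunk_sectors is assumed to be a power of two, as the mask trick requires):

#include <stdio.h>

/* Same arithmetic as blk_max_size_offset(): with a power-of-two chunk size,
 * "offset & (chunk - 1)" is the offset within the current chunk. */
static unsigned int max_size_at(unsigned int chunk_sectors,
				unsigned long long offset)
{
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

int main(void)
{
	/* 128-sector chunks: an I/O starting 8 sectors into a chunk may span
	 * at most 120 sectors before hitting the boundary. */
	printf("%u\n", max_size_at(128, 8));	/* -> 120 */
	printf("%u\n", max_size_at(128, 128));	/* -> 128 (starts on a boundary) */
	return 0;
}
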
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 5f6db18d72e8..3c97d5e9b951 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -625,6 +625,8 @@ int ceph_flags_to_mode(int flags);
625 CEPH_CAP_LINK_EXCL | \ 625 CEPH_CAP_LINK_EXCL | \
626 CEPH_CAP_XATTR_EXCL | \ 626 CEPH_CAP_XATTR_EXCL | \
627 CEPH_CAP_FILE_EXCL) 627 CEPH_CAP_FILE_EXCL)
628#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \
629 CEPH_CAP_FILE_SHARED)
628#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ 630#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
629 CEPH_CAP_FILE_EXCL) 631 CEPH_CAP_FILE_EXCL)
630#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) 632#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index a486f390dfbe..deb47e45ac7c 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -40,9 +40,9 @@ struct ceph_mon_request {
40}; 40};
41 41
42/* 42/*
43 * ceph_mon_generic_request is being used for the statfs and poolop requests 43 * ceph_mon_generic_request is being used for the statfs, poolop and
44 * which are bening done a bit differently because we need to get data back 44 * mon_get_version requests which are being done a bit differently
45 * to the caller 45 * because we need to get data back to the caller
46 */ 46 */
47struct ceph_mon_generic_request { 47struct ceph_mon_generic_request {
48 struct kref kref; 48 struct kref kref;
@@ -104,10 +104,15 @@ extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
104extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); 104extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
105 105
106extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); 106extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
107extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
108 unsigned long timeout);
107 109
108extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, 110extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
109 struct ceph_statfs *buf); 111 struct ceph_statfs *buf);
110 112
113extern int ceph_monc_do_get_version(struct ceph_mon_client *monc,
114 const char *what, u64 *newest);
115
111extern int ceph_monc_open_session(struct ceph_mon_client *monc); 116extern int ceph_monc_open_session(struct ceph_mon_client *monc);
112 117
113extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); 118extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
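
A hedged sketch of how a kernel caller might combine the two declarations added above, ceph_monc_do_get_version() and ceph_monc_wait_osdmap(); the wrapper name and error handling are illustrative, not part of the patch:

#include <linux/ceph/mon_client.h>

/* Sketch only: ask the monitors for the newest osdmap epoch, then wait
 * (with a timeout) until the local client has caught up to it. */
static int wait_for_latest_osdmap(struct ceph_mon_client *monc,
				  unsigned long timeout)
{
	u64 newest = 0;
	int ret;

	ret = ceph_monc_do_get_version(monc, "osdmap", &newest);
	if (ret)
		return ret;

	ceph_monc_request_next_osdmap(monc);
	return ceph_monc_wait_osdmap(monc, (u32)newest, timeout);
}
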
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 4a21a872dbbd..e8d8a35034a5 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -41,6 +41,8 @@
41 * @idlest_reg: register containing the DPLL idle status bitfield 41 * @idlest_reg: register containing the DPLL idle status bitfield
42 * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg 42 * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
43 * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg 43 * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
44 * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg
 44 * @dcc_mask: mask of the DPLL DCC correction bitfield in @mult_div1_reg
 45 * @dcc_rate: rate at or above which the DCC @dcc_mask must be set
44 * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg 46 * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
45 * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg 47 * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
46 * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg 48 * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
@@ -86,6 +88,8 @@ struct dpll_data {
86 u32 idlest_mask; 88 u32 idlest_mask;
87 u32 dco_mask; 89 u32 dco_mask;
88 u32 sddiv_mask; 90 u32 sddiv_mask;
91 u32 dcc_mask;
92 unsigned long dcc_rate;
89 u32 lpmode_mask; 93 u32 lpmode_mask;
90 u32 m4xen_mask; 94 u32 m4xen_mask;
91 u8 auto_recal_bit; 95 u8 auto_recal_bit;
@@ -94,7 +98,26 @@ struct dpll_data {
94 u8 flags; 98 u8 flags;
95}; 99};
96 100
97struct clk_hw_omap_ops; 101struct clk_hw_omap;
102
103/**
104 * struct clk_hw_omap_ops - OMAP clk ops
105 * @find_idlest: find idlest register information for a clock
106 * @find_companion: find companion clock register information for a clock,
107 * basically converts CM_ICLKEN* <-> CM_FCLKEN*
108 * @allow_idle: enables autoidle hardware functionality for a clock
109 * @deny_idle: prevent autoidle hardware functionality for a clock
110 */
111struct clk_hw_omap_ops {
112 void (*find_idlest)(struct clk_hw_omap *oclk,
113 void __iomem **idlest_reg,
114 u8 *idlest_bit, u8 *idlest_val);
115 void (*find_companion)(struct clk_hw_omap *oclk,
116 void __iomem **other_reg,
117 u8 *other_bit);
118 void (*allow_idle)(struct clk_hw_omap *oclk);
119 void (*deny_idle)(struct clk_hw_omap *oclk);
120};
98 121
99/** 122/**
100 * struct clk_hw_omap - OMAP struct clk 123 * struct clk_hw_omap - OMAP struct clk
@@ -259,6 +282,12 @@ int omap2_dflt_clk_enable(struct clk_hw *hw);
259void omap2_dflt_clk_disable(struct clk_hw *hw); 282void omap2_dflt_clk_disable(struct clk_hw *hw);
260int omap2_dflt_clk_is_enabled(struct clk_hw *hw); 283int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
261void omap3_clk_lock_dpll5(void); 284void omap3_clk_lock_dpll5(void);
285unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
286 unsigned long parent_rate);
287int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
288 unsigned long parent_rate);
289void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
290void omap2xxx_clkt_vps_init(void);
262 291
263void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); 292void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
264void ti_dt_clocks_register(struct ti_dt_clk *oclks); 293void ti_dt_clocks_register(struct ti_dt_clk *oclks);
@@ -278,6 +307,8 @@ int omap5xxx_dt_clk_init(void);
278int dra7xx_dt_clk_init(void); 307int dra7xx_dt_clk_init(void);
279int am33xx_dt_clk_init(void); 308int am33xx_dt_clk_init(void);
280int am43xx_dt_clk_init(void); 309int am43xx_dt_clk_init(void);
310int omap2420_dt_clk_init(void);
311int omap2430_dt_clk_init(void);
281 312
282#ifdef CONFIG_OF 313#ifdef CONFIG_OF
283void of_ti_clk_allow_autoidle_all(void); 314void of_ti_clk_allow_autoidle_all(void);
@@ -287,6 +318,8 @@ static inline void of_ti_clk_allow_autoidle_all(void) { }
287static inline void of_ti_clk_deny_autoidle_all(void) { } 318static inline void of_ti_clk_deny_autoidle_all(void) { }
288#endif 319#endif
289 320
321extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
322extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
290extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; 323extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
291extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; 324extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
292extern const struct clk_hw_omap_ops clkhwops_wait; 325extern const struct clk_hw_omap_ops clkhwops_wait;
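
struct clk_hw_omap_ops, documented above, is a per-SoC callback table; a provider fills in only the hooks it needs and exports the table like the clkhwops_* symbols listed at the end of the header. A purely hypothetical instance to show the shape (the "foo" name and the callback bodies are invented):

/* Illustrative only: an ops table for a made-up "foo" clock family. */
static void foo_clk_find_idlest(struct clk_hw_omap *oclk,
				void __iomem **idlest_reg,
				u8 *idlest_bit, u8 *idlest_val)
{
	/* point *idlest_reg / *idlest_bit at the module's idle-status field */
}

static void foo_clk_allow_idle(struct clk_hw_omap *oclk)
{
	/* set the module's autoidle bit */
}

const struct clk_hw_omap_ops clkhwops_foo = {
	.find_idlest	= foo_clk_find_idlest,
	.allow_idle	= foo_clk_allow_idle,
	/* .find_companion and .deny_idle left NULL when not needed */
};
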
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4ff262e2bf37..e2a6bd7fb133 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
133extern int elv_register_queue(struct request_queue *q); 133extern int elv_register_queue(struct request_queue *q);
134extern void elv_unregister_queue(struct request_queue *q); 134extern void elv_unregister_queue(struct request_queue *q);
135extern int elv_may_queue(struct request_queue *, int); 135extern int elv_may_queue(struct request_queue *, int);
136extern void elv_abort_queue(struct request_queue *);
137extern void elv_completed_request(struct request_queue *, struct request *); 136extern void elv_completed_request(struct request_queue *, struct request *);
138extern int elv_set_request(struct request_queue *q, struct request *rq, 137extern int elv_set_request(struct request_queue *q, struct request *rq,
139 struct bio *bio, gfp_t gfp_mask); 138 struct bio *bio, gfp_t gfp_mask);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 338e6f758c6d..e11d60cc867b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1921 1921
1922static inline int break_deleg(struct inode *inode, unsigned int mode) 1922static inline int break_deleg(struct inode *inode, unsigned int mode)
1923{ 1923{
1924 /*
1925 * Since this check is lockless, we must ensure that any refcounts
1926 * taken are done before checking inode->i_flock. Otherwise, we could
1927 * end up racing with tasks trying to set a new lease on this file.
1928 */
1929 smp_mb();
1924 if (inode->i_flock) 1930 if (inode->i_flock)
1925 return __break_lease(inode, mode, FL_DELEG); 1931 return __break_lease(inode, mode, FL_DELEG);
1926 return 0; 1932 return 0;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index e5a589435e2b..d99800cbdcf3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -117,6 +117,7 @@ enum {
117#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) 117#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) 118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) 119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
120#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
120#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) 121#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
121#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 122#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
122#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 123#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index abe3de1db932..66f9a04ec270 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3305,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3305 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 3305 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
3306 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 3306 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
3307 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 3307 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
3308 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
3309 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
3310 BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
3311 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3312 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3313 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3314 BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
3308 3315
3309 return (features & feature) == feature; 3316 return (features & feature) == feature;
3310} 3317}
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a50173ca1d72..2bf403195c09 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Definitions for the NVM Express interface 2 * Definitions for the NVM Express interface
3 * Copyright (c) 2011-2013, Intel Corporation. 3 * Copyright (c) 2011-2014, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */ 13 */
18 14
19#ifndef _LINUX_NVME_H 15#ifndef _LINUX_NVME_H
@@ -66,8 +62,8 @@ enum {
66 62
67#define NVME_VS(major, minor) (major << 16 | minor) 63#define NVME_VS(major, minor) (major << 16 | minor)
68 64
69extern unsigned char io_timeout; 65extern unsigned char nvme_io_timeout;
70#define NVME_IO_TIMEOUT (io_timeout * HZ) 66#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
71 67
72/* 68/*
73 * Represents an NVM Express device. Each nvme_dev is a PCI function. 69 * Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -94,7 +90,7 @@ struct nvme_dev {
94 struct miscdevice miscdev; 90 struct miscdevice miscdev;
95 work_func_t reset_workfn; 91 work_func_t reset_workfn;
96 struct work_struct reset_work; 92 struct work_struct reset_work;
97 struct notifier_block nb; 93 struct work_struct cpu_work;
98 char name[12]; 94 char name[12];
99 char serial[20]; 95 char serial[20];
100 char model[40]; 96 char model[40];
@@ -103,6 +99,7 @@ struct nvme_dev {
103 u32 stripe_size; 99 u32 stripe_size;
104 u16 oncs; 100 u16 oncs;
105 u16 abort_limit; 101 u16 abort_limit;
102 u8 vwc;
106 u8 initialized; 103 u8 initialized;
107}; 104};
108 105
@@ -159,7 +156,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
159void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 156void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
160 struct nvme_iod *iod); 157 struct nvme_iod *iod);
161int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); 158int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
162int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
163int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, 159int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
164 u32 *result); 160 u32 *result);
165int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, 161int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h
new file mode 100644
index 000000000000..7b8c353f7dc8
--- /dev/null
+++ b/include/linux/platform_data/shtc1.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2014 Sensirion AG, Switzerland
3 * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef __SHTC1_H_
17#define __SHTC1_H_
18
19struct shtc1_platform_data {
20 bool blocking_io;
21 bool high_precision;
22};
23#endif /* __SHTC1_H_ */
diff --git a/include/linux/profile.h b/include/linux/profile.h
index aaad3861beb8..b537a25ffa17 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
44int profile_init(void); 44int profile_init(void);
45int profile_setup(char *str); 45int profile_setup(char *str);
46void profile_tick(int type); 46void profile_tick(int type);
47int setup_profiling_timer(unsigned int multiplier);
47 48
48/* 49/*
49 * Add multiple profiler hits to a given address: 50 * Add multiple profiler hits to a given address:
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index a2d9d81038d1..14ec18d5e18b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
395{ 395{
396} 396}
397 397
398static inline int regulator_can_change_voltage(struct regulator *regulator)
399{
400 return 0;
401}
402
398static inline int regulator_set_voltage(struct regulator *regulator, 403static inline int regulator_set_voltage(struct regulator *regulator,
399 int min_uV, int max_uV) 404 int min_uV, int max_uV)
400{ 405{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5b5cd3189c98..ec89301ada41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -338,17 +338,18 @@ enum {
338 338
339 SKB_GSO_GRE = 1 << 6, 339 SKB_GSO_GRE = 1 << 6,
340 340
341 SKB_GSO_IPIP = 1 << 7, 341 SKB_GSO_GRE_CSUM = 1 << 7,
342 342
343 SKB_GSO_SIT = 1 << 8, 343 SKB_GSO_IPIP = 1 << 8,
344 344
345 SKB_GSO_UDP_TUNNEL = 1 << 9, 345 SKB_GSO_SIT = 1 << 9,
346 346
347 SKB_GSO_MPLS = 1 << 10, 347 SKB_GSO_UDP_TUNNEL = 1 << 10,
348 348
349 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, 349 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
350 350
351 SKB_GSO_GRE_CSUM = 1 << 12, 351 SKB_GSO_MPLS = 1 << 12,
352
352}; 353};
353 354
354#if BITS_PER_LONG > 32 355#if BITS_PER_LONG > 32
@@ -1853,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1853 return pskb_may_pull(skb, skb_network_offset(skb) + len); 1854 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1854} 1855}
1855 1856
1857static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
1858{
1859 /* Only continue with checksum unnecessary if device indicated
1860 * it is valid across encapsulation (skb->encapsulation was set).
1861 */
1862 if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
1863 skb->ip_summed = CHECKSUM_NONE;
1864
1865 skb->encapsulation = 0;
1866 skb->csum_valid = 0;
1867}
1868
1856/* 1869/*
1857 * CPUs often take a performance hit when accessing unaligned memory 1870 * CPUs often take a performance hit when accessing unaligned memory
1858 * locations. The actual performance hit varies, it can be small if the 1871 * locations. The actual performance hit varies, it can be small if the
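
skb_pop_rcv_encapsulation(), added above, is meant for tunnel receive paths: once the outer header is stripped, a CHECKSUM_UNNECESSARY that the device validated only over the outer headers must not be carried over to the inner packet. A heavily hedged fragment of how a caller might use it; toy_tunnel_rcv() and TOY_HLEN are hypothetical, only the helper itself comes from this patch:

static int toy_tunnel_rcv(struct sk_buff *skb)
{
	__skb_pull(skb, TOY_HLEN);	/* strip the outer (encapsulating) header */
	skb_pop_rcv_encapsulation(skb);	/* outer-only checksum state no longer applies */
	/* ... hand the inner packet up the stack (details omitted) ... */
	return 0;
}
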
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index f76994b9396c..519064e0c943 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
327extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); 327extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
328extern int hibernate(void); 328extern int hibernate(void);
329extern bool system_entering_hibernation(void); 329extern bool system_entering_hibernation(void);
330extern bool hibernation_available(void);
330asmlinkage int swsusp_save(void); 331asmlinkage int swsusp_save(void);
331extern struct pbe *restore_pblist; 332extern struct pbe *restore_pblist;
332#else /* CONFIG_HIBERNATION */ 333#else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
339static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} 340static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
340static inline int hibernate(void) { return -ENOSYS; } 341static inline int hibernate(void) { return -ENOSYS; }
341static inline bool system_entering_hibernation(void) { return false; } 342static inline bool system_entering_hibernation(void) { return false; }
343static inline bool hibernation_available(void) { return false; }
342#endif /* CONFIG_HIBERNATION */ 344#endif /* CONFIG_HIBERNATION */
343 345
344/* Hibernation and suspend events */ 346/* Hibernation and suspend events */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bca25dc53f9d..8fab6fa0dbfb 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -432,6 +432,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
432void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); 432void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
433 433
434void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); 434void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
435void vb2_discard_done(struct vb2_queue *q);
435int vb2_wait_for_all_buffers(struct vb2_queue *q); 436int vb2_wait_for_all_buffers(struct vb2_queue *q);
436 437
437int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); 438int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
diff --git a/include/net/udp.h b/include/net/udp.h
index 2ecfc6e15609..68a1fefe3dfe 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -111,7 +111,9 @@ struct sk_buff;
111 */ 111 */
112static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) 112static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
113{ 113{
114 return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov); 114 return (UDP_SKB_CB(skb)->cscov == skb->len ?
115 __skb_checksum_complete(skb) :
116 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
115} 117}
116 118
117static inline int udp_lib_checksum_complete(struct sk_buff *skb) 119static inline int udp_lib_checksum_complete(struct sk_buff *skb)
diff --git a/include/sound/core.h b/include/sound/core.h
index eedda2cdfe57..1df3f2fe5350 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -116,6 +116,8 @@ struct snd_card {
116 int user_ctl_count; /* count of all user controls */ 116 int user_ctl_count; /* count of all user controls */
117 struct list_head controls; /* all controls for this card */ 117 struct list_head controls; /* all controls for this card */
118 struct list_head ctl_files; /* active control files */ 118 struct list_head ctl_files; /* active control files */
119 struct mutex user_ctl_lock; /* protects user controls against
120 concurrent access */
119 121
120 struct snd_info_entry *proc_root; /* root for soundcard specific files */ 122 struct snd_info_entry *proc_root; /* root for soundcard specific files */
121 struct snd_info_entry *proc_id; /* the card id */ 123 struct snd_info_entry *proc_id; /* the card id */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b4d6697085fe..d854fb31c000 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -932,7 +932,7 @@ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
932 struct timespec *tv) 932 struct timespec *tv)
933{ 933{
934 if (runtime->tstamp_type == SNDRV_PCM_TSTAMP_TYPE_MONOTONIC) 934 if (runtime->tstamp_type == SNDRV_PCM_TSTAMP_TYPE_MONOTONIC)
935 do_posix_clock_monotonic_gettime(tv); 935 ktime_get_ts(tv);
936 else 936 else
937 getnstimeofday(tv); 937 getnstimeofday(tv);
938} 938}
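
do_posix_clock_monotonic_gettime() was a thin wrapper that read CLOCK_MONOTONIC into a timespec, and ktime_get_ts() fills the same structure with monotonic time, so the pcm.h change is a rename with no behavioural difference. The selection logic is equivalent to this stand-alone user-space sketch (clock IDs only; not the ALSA code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Pick the clock the way snd_pcm_gettime() does: monotonic when the stream
 * asked for monotonic timestamps, wall-clock time otherwise. */
static void get_tstamp(bool monotonic, struct timespec *tv)
{
	clock_gettime(monotonic ? CLOCK_MONOTONIC : CLOCK_REALTIME, tv);
}

int main(void)
{
	struct timespec ts;

	get_tstamp(true, &ts);
	printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	get_tstamp(false, &ts);
	printf("realtime:  %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
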
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 7554fd381a56..6f9c38ce45c7 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -306,6 +306,14 @@ struct btrfs_ioctl_search_args {
306 char buf[BTRFS_SEARCH_ARGS_BUFSIZE]; 306 char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
307}; 307};
308 308
309struct btrfs_ioctl_search_args_v2 {
310 struct btrfs_ioctl_search_key key; /* in/out - search parameters */
311 __u64 buf_size; /* in - size of buffer
312 * out - on EOVERFLOW: needed size
313 * to store item */
314 __u64 buf[0]; /* out - found items */
315};
316
309struct btrfs_ioctl_clone_range_args { 317struct btrfs_ioctl_clone_range_args {
310 __s64 src_fd; 318 __s64 src_fd;
311 __u64 src_offset, src_length; 319 __u64 src_offset, src_length;
@@ -558,6 +566,8 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
558 struct btrfs_ioctl_defrag_range_args) 566 struct btrfs_ioctl_defrag_range_args)
559#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \ 567#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
560 struct btrfs_ioctl_search_args) 568 struct btrfs_ioctl_search_args)
569#define BTRFS_IOC_TREE_SEARCH_V2 _IOWR(BTRFS_IOCTL_MAGIC, 17, \
570 struct btrfs_ioctl_search_args_v2)
561#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ 571#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \
562 struct btrfs_ioctl_ino_lookup_args) 572 struct btrfs_ioctl_ino_lookup_args)
563#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64) 573#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64)
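
BTRFS_IOC_TREE_SEARCH_V2 keeps the v1 search key but lets user space supply a buffer of its own choosing; on EOVERFLOW the kernel writes the size it would have needed back into buf_size. A hedged user-space sketch of a call (error handling trimmed, helper name invented):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch only: run a tree search with a caller-chosen result buffer. */
static int tree_search_v2(int fd, const struct btrfs_ioctl_search_key *key,
			  size_t buf_size)
{
	struct btrfs_ioctl_search_args_v2 *args;
	int ret;

	args = malloc(sizeof(*args) + buf_size);
	if (!args)
		return -1;

	args->key = *key;		/* in/out search parameters */
	args->buf_size = buf_size;	/* kernel reports needed size on EOVERFLOW */

	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
	/* ... on success, walk args->buf for args->key.nr_items items ... */
	free(args);
	return ret;
}
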
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 096fe1c6f83d..29a7d8619d8d 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Definitions for the NVM Express interface 2 * Definitions for the NVM Express interface
3 * Copyright (c) 2011-2013, Intel Corporation. 3 * Copyright (c) 2011-2014, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */ 13 */
18 14
19#ifndef _UAPI_LINUX_NVME_H 15#ifndef _UAPI_LINUX_NVME_H
@@ -31,7 +27,12 @@ struct nvme_id_power_state {
31 __u8 read_lat; 27 __u8 read_lat;
32 __u8 write_tput; 28 __u8 write_tput;
33 __u8 write_lat; 29 __u8 write_lat;
34 __u8 rsvd16[16]; 30 __le16 idle_power;
31 __u8 idle_scale;
32 __u8 rsvd19;
33 __le16 active_power;
34 __u8 active_work_scale;
35 __u8 rsvd23[9];
35}; 36};
36 37
37enum { 38enum {
@@ -49,7 +50,9 @@ struct nvme_id_ctrl {
49 __u8 ieee[3]; 50 __u8 ieee[3];
50 __u8 mic; 51 __u8 mic;
51 __u8 mdts; 52 __u8 mdts;
52 __u8 rsvd78[178]; 53 __u16 cntlid;
54 __u32 ver;
55 __u8 rsvd84[172];
53 __le16 oacs; 56 __le16 oacs;
54 __u8 acl; 57 __u8 acl;
55 __u8 aerl; 58 __u8 aerl;
@@ -57,7 +60,11 @@ struct nvme_id_ctrl {
57 __u8 lpa; 60 __u8 lpa;
58 __u8 elpe; 61 __u8 elpe;
59 __u8 npss; 62 __u8 npss;
60 __u8 rsvd264[248]; 63 __u8 avscc;
64 __u8 apsta;
65 __le16 wctemp;
66 __le16 cctemp;
67 __u8 rsvd270[242];
61 __u8 sqes; 68 __u8 sqes;
62 __u8 cqes; 69 __u8 cqes;
63 __u8 rsvd514[2]; 70 __u8 rsvd514[2];
@@ -68,7 +75,12 @@ struct nvme_id_ctrl {
68 __u8 vwc; 75 __u8 vwc;
69 __le16 awun; 76 __le16 awun;
70 __le16 awupf; 77 __le16 awupf;
71 __u8 rsvd530[1518]; 78 __u8 nvscc;
79 __u8 rsvd531;
80 __le16 acwu;
81 __u8 rsvd534[2];
82 __le32 sgls;
83 __u8 rsvd540[1508];
72 struct nvme_id_power_state psd[32]; 84 struct nvme_id_power_state psd[32];
73 __u8 vs[1024]; 85 __u8 vs[1024];
74}; 86};
@@ -77,6 +89,7 @@ enum {
77 NVME_CTRL_ONCS_COMPARE = 1 << 0, 89 NVME_CTRL_ONCS_COMPARE = 1 << 0,
78 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, 90 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
79 NVME_CTRL_ONCS_DSM = 1 << 2, 91 NVME_CTRL_ONCS_DSM = 1 << 2,
92 NVME_CTRL_VWC_PRESENT = 1 << 0,
80}; 93};
81 94
82struct nvme_lbaf { 95struct nvme_lbaf {
@@ -95,7 +108,15 @@ struct nvme_id_ns {
95 __u8 mc; 108 __u8 mc;
96 __u8 dpc; 109 __u8 dpc;
97 __u8 dps; 110 __u8 dps;
98 __u8 rsvd30[98]; 111 __u8 nmic;
112 __u8 rescap;
113 __u8 fpi;
114 __u8 rsvd33;
115 __le16 nawun;
116 __le16 nawupf;
117 __le16 nacwu;
118 __u8 rsvd40[80];
119 __u8 eui64[8];
99 struct nvme_lbaf lbaf[16]; 120 struct nvme_lbaf lbaf[16];
100 __u8 rsvd192[192]; 121 __u8 rsvd192[192];
101 __u8 vs[3712]; 122 __u8 vs[3712];
@@ -126,7 +147,10 @@ struct nvme_smart_log {
126 __u8 unsafe_shutdowns[16]; 147 __u8 unsafe_shutdowns[16];
127 __u8 media_errors[16]; 148 __u8 media_errors[16];
128 __u8 num_err_log_entries[16]; 149 __u8 num_err_log_entries[16];
129 __u8 rsvd192[320]; 150 __le32 warning_temp_time;
151 __le32 critical_comp_time;
152 __le16 temp_sensor[8];
153 __u8 rsvd216[296];
130}; 154};
131 155
132enum { 156enum {
@@ -282,6 +306,10 @@ enum {
282 NVME_FEAT_WRITE_ATOMIC = 0x0a, 306 NVME_FEAT_WRITE_ATOMIC = 0x0a,
283 NVME_FEAT_ASYNC_EVENT = 0x0b, 307 NVME_FEAT_ASYNC_EVENT = 0x0b,
284 NVME_FEAT_SW_PROGRESS = 0x0c, 308 NVME_FEAT_SW_PROGRESS = 0x0c,
309 NVME_LOG_ERROR = 0x01,
310 NVME_LOG_SMART = 0x02,
311 NVME_LOG_FW_SLOT = 0x03,
312 NVME_LOG_RESERVATION = 0x80,
285 NVME_FWACT_REPL = (0 << 3), 313 NVME_FWACT_REPL = (0 << 3),
286 NVME_FWACT_REPL_ACTV = (1 << 3), 314 NVME_FWACT_REPL_ACTV = (1 << 3),
287 NVME_FWACT_ACTV = (2 << 3), 315 NVME_FWACT_ACTV = (2 << 3),
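
Two of the Identify Controller additions above are consumed directly by the driver rather than by user space: the vwc byte (bit 0, mirrored by the new NVME_CTRL_VWC_PRESENT flag) reports a volatile write cache, and the NVME_LOG_* constants name the standard log pages. A hedged driver-side fragment of the typical vwc check; the queue flags shown are an assumption about how the driver wires this up, not part of this header:

/* Sketch only: advertise a flushable write cache when Identify Controller
 * reports one (dev->vwc is a copy of the controller's vwc byte). */
if (dev->vwc & NVME_CTRL_VWC_PRESENT)
	blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
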
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 5759810e1c1b..21eed488783f 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
80struct snd_compr_avail { 80struct snd_compr_avail {
81 __u64 avail; 81 __u64 avail;
82 struct snd_compr_tstamp tstamp; 82 struct snd_compr_tstamp tstamp;
83}; 83} __attribute__((packed));
84 84
85enum snd_compr_direction { 85enum snd_compr_direction {
86 SND_COMPRESS_PLAYBACK = 0, 86 SND_COMPRESS_PLAYBACK = 0,
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index 14193d596d78..ab29b6a22669 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
31{ 31{
32 return (waiter != NULL); 32 return (waiter != NULL);
33} 33}
34
35static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
36{
37 debug_rt_mutex_print_deadlock(w);
38}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6..fc605941b9b8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
83 owner = *p; 83 owner = *p;
84 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); 84 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
85} 85}
86
87/*
88 * Safe fastpath aware unlock:
89 * 1) Clear the waiters bit
90 * 2) Drop lock->wait_lock
91 * 3) Try to unlock the lock with cmpxchg
92 */
93static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
94 __releases(lock->wait_lock)
95{
96 struct task_struct *owner = rt_mutex_owner(lock);
97
98 clear_rt_mutex_waiters(lock);
99 raw_spin_unlock(&lock->wait_lock);
100 /*
101 * If a new waiter comes in between the unlock and the cmpxchg
102 * we have two situations:
103 *
104 * unlock(wait_lock);
105 * lock(wait_lock);
106 * cmpxchg(p, owner, 0) == owner
107 * mark_rt_mutex_waiters(lock);
108 * acquire(lock);
109 * or:
110 *
111 * unlock(wait_lock);
112 * lock(wait_lock);
113 * mark_rt_mutex_waiters(lock);
114 *
115 * cmpxchg(p, owner, 0) != owner
116 * enqueue_waiter();
117 * unlock(wait_lock);
118 * lock(wait_lock);
119 * wake waiter();
120 * unlock(wait_lock);
121 * lock(wait_lock);
122 * acquire(lock);
123 */
124 return rt_mutex_cmpxchg(lock, owner, NULL);
125}
126
86#else 127#else
87# define rt_mutex_cmpxchg(l,c,n) (0) 128# define rt_mutex_cmpxchg(l,c,n) (0)
88static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) 129static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
90 lock->owner = (struct task_struct *) 131 lock->owner = (struct task_struct *)
91 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); 132 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
92} 133}
134
135/*
136 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
137 */
138static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
139 __releases(lock->wait_lock)
140{
141 lock->owner = NULL;
142 raw_spin_unlock(&lock->wait_lock);
143 return true;
144}
93#endif 145#endif
94 146
95static inline int 147static inline int
@@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
260 */ 312 */
261int max_lock_depth = 1024; 313int max_lock_depth = 1024;
262 314
315static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
316{
317 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
318}
319
263/* 320/*
264 * Adjust the priority chain. Also used for deadlock detection. 321 * Adjust the priority chain. Also used for deadlock detection.
265 * Decreases task's usage by one - may thus free the task. 322 * Decreases task's usage by one - may thus free the task.
266 * 323 *
267 * @task: the task owning the mutex (owner) for which a chain walk is probably 324 * @task: the task owning the mutex (owner) for which a chain walk is
268 * needed 325 * probably needed
269 * @deadlock_detect: do we have to carry out deadlock detection? 326 * @deadlock_detect: do we have to carry out deadlock detection?
270 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck 327 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
271 * things for a task that has just got its priority adjusted, and 328 * things for a task that has just got its priority adjusted, and
272 * is waiting on a mutex) 329 * is waiting on a mutex)
330 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
331 * we dropped its pi_lock. Is never dereferenced, only used for
332 * comparison to detect lock chain changes.
273 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated 333 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
274 * its priority to the mutex owner (can be NULL in the case 334 * its priority to the mutex owner (can be NULL in the case
275 * depicted above or if the top waiter is gone away and we are 335 * depicted above or if the top waiter is gone away and we are
276 * actually deboosting the owner) 336 * actually deboosting the owner)
277 * @top_task: the current top waiter 337 * @top_task: the current top waiter
278 * 338 *
279 * Returns 0 or -EDEADLK. 339 * Returns 0 or -EDEADLK.
280 */ 340 */
281static int rt_mutex_adjust_prio_chain(struct task_struct *task, 341static int rt_mutex_adjust_prio_chain(struct task_struct *task,
282 int deadlock_detect, 342 int deadlock_detect,
283 struct rt_mutex *orig_lock, 343 struct rt_mutex *orig_lock,
344 struct rt_mutex *next_lock,
284 struct rt_mutex_waiter *orig_waiter, 345 struct rt_mutex_waiter *orig_waiter,
285 struct task_struct *top_task) 346 struct task_struct *top_task)
286{ 347{
@@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
314 } 375 }
315 put_task_struct(task); 376 put_task_struct(task);
316 377
317 return deadlock_detect ? -EDEADLK : 0; 378 return -EDEADLK;
318 } 379 }
319 retry: 380 retry:
320 /* 381 /*
@@ -339,6 +400,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
339 goto out_unlock_pi; 400 goto out_unlock_pi;
340 401
341 /* 402 /*
403 * We dropped all locks after taking a refcount on @task, so
404 * the task might have moved on in the lock chain or even left
405 * the chain completely and blocks now on an unrelated lock or
406 * on @orig_lock.
407 *
408 * We stored the lock on which @task was blocked in @next_lock,
409 * so we can detect the chain change.
410 */
411 if (next_lock != waiter->lock)
412 goto out_unlock_pi;
413
414 /*
342 * Drop out, when the task has no waiters. Note, 415 * Drop out, when the task has no waiters. Note,
343 * top_waiter can be NULL, when we are in the deboosting 416 * top_waiter can be NULL, when we are in the deboosting
344 * mode! 417 * mode!
@@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
377 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 450 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
378 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); 451 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
379 raw_spin_unlock(&lock->wait_lock); 452 raw_spin_unlock(&lock->wait_lock);
380 ret = deadlock_detect ? -EDEADLK : 0; 453 ret = -EDEADLK;
381 goto out_unlock_pi; 454 goto out_unlock_pi;
382 } 455 }
383 456
@@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
422 __rt_mutex_adjust_prio(task); 495 __rt_mutex_adjust_prio(task);
423 } 496 }
424 497
498 /*
499 * Check whether the task which owns the current lock is pi
500 * blocked itself. If yes we store a pointer to the lock for
501 * the lock chain change detection above. After we dropped
502 * task->pi_lock next_lock cannot be dereferenced anymore.
503 */
504 next_lock = task_blocked_on_lock(task);
505
425 raw_spin_unlock_irqrestore(&task->pi_lock, flags); 506 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
426 507
427 top_waiter = rt_mutex_top_waiter(lock); 508 top_waiter = rt_mutex_top_waiter(lock);
428 raw_spin_unlock(&lock->wait_lock); 509 raw_spin_unlock(&lock->wait_lock);
429 510
511 /*
512 * We reached the end of the lock chain. Stop right here. No
513 * point to go back just to figure that out.
514 */
515 if (!next_lock)
516 goto out_put_task;
517
430 if (!detect_deadlock && waiter != top_waiter) 518 if (!detect_deadlock && waiter != top_waiter)
431 goto out_put_task; 519 goto out_put_task;
432 520
@@ -536,8 +624,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
536{ 624{
537 struct task_struct *owner = rt_mutex_owner(lock); 625 struct task_struct *owner = rt_mutex_owner(lock);
538 struct rt_mutex_waiter *top_waiter = waiter; 626 struct rt_mutex_waiter *top_waiter = waiter;
539 unsigned long flags; 627 struct rt_mutex *next_lock;
540 int chain_walk = 0, res; 628 int chain_walk = 0, res;
629 unsigned long flags;
541 630
542 /* 631 /*
543 * Early deadlock detection. We really don't want the task to 632 * Early deadlock detection. We really don't want the task to
@@ -548,7 +637,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
548 * which is wrong, as the other waiter is not in a deadlock 637 * which is wrong, as the other waiter is not in a deadlock
549 * situation. 638 * situation.
550 */ 639 */
551 if (detect_deadlock && owner == task) 640 if (owner == task)
552 return -EDEADLK; 641 return -EDEADLK;
553 642
554 raw_spin_lock_irqsave(&task->pi_lock, flags); 643 raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +658,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
569 if (!owner) 658 if (!owner)
570 return 0; 659 return 0;
571 660
661 raw_spin_lock_irqsave(&owner->pi_lock, flags);
572 if (waiter == rt_mutex_top_waiter(lock)) { 662 if (waiter == rt_mutex_top_waiter(lock)) {
573 raw_spin_lock_irqsave(&owner->pi_lock, flags);
574 rt_mutex_dequeue_pi(owner, top_waiter); 663 rt_mutex_dequeue_pi(owner, top_waiter);
575 rt_mutex_enqueue_pi(owner, waiter); 664 rt_mutex_enqueue_pi(owner, waiter);
576 665
577 __rt_mutex_adjust_prio(owner); 666 __rt_mutex_adjust_prio(owner);
578 if (owner->pi_blocked_on) 667 if (owner->pi_blocked_on)
579 chain_walk = 1; 668 chain_walk = 1;
580 raw_spin_unlock_irqrestore(&owner->pi_lock, flags); 669 } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
581 }
582 else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
583 chain_walk = 1; 670 chain_walk = 1;
671 }
584 672
585 if (!chain_walk) 673 /* Store the lock on which owner is blocked or NULL */
674 next_lock = task_blocked_on_lock(owner);
675
676 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
677 /*
678 * Even if full deadlock detection is on, if the owner is not
679 * blocked itself, we can avoid finding this out in the chain
680 * walk.
681 */
682 if (!chain_walk || !next_lock)
586 return 0; 683 return 0;
587 684
588 /* 685 /*
@@ -594,8 +691,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
594 691
595 raw_spin_unlock(&lock->wait_lock); 692 raw_spin_unlock(&lock->wait_lock);
596 693
597 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, 694 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
598 task); 695 next_lock, waiter, task);
599 696
600 raw_spin_lock(&lock->wait_lock); 697 raw_spin_lock(&lock->wait_lock);
601 698
@@ -605,7 +702,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
605/* 702/*
606 * Wake up the next waiter on the lock. 703 * Wake up the next waiter on the lock.
607 * 704 *
608 * Remove the top waiter from the current tasks waiter list and wake it up. 705 * Remove the top waiter from the current tasks pi waiter list and
706 * wake it up.
609 * 707 *
610 * Called with lock->wait_lock held. 708 * Called with lock->wait_lock held.
611 */ 709 */
@@ -626,10 +724,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
626 */ 724 */
627 rt_mutex_dequeue_pi(current, waiter); 725 rt_mutex_dequeue_pi(current, waiter);
628 726
629 rt_mutex_set_owner(lock, NULL); 727 /*
728 * As we are waking up the top waiter, and the waiter stays
729 * queued on the lock until it gets the lock, this lock
730 * obviously has waiters. Just set the bit here and this has
731 * the added benefit of forcing all new tasks into the
732 * slow path making sure no task of lower priority than
733 * the top waiter can steal this lock.
734 */
735 lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
630 736
631 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 737 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
632 738
739 /*
740 * It's safe to dereference waiter as it cannot go away as
741 * long as we hold lock->wait_lock. The waiter task needs to
742 * acquire it in order to dequeue the waiter.
743 */
633 wake_up_process(waiter->task); 744 wake_up_process(waiter->task);
634} 745}
635 746
@@ -644,8 +755,8 @@ static void remove_waiter(struct rt_mutex *lock,
644{ 755{
645 int first = (waiter == rt_mutex_top_waiter(lock)); 756 int first = (waiter == rt_mutex_top_waiter(lock));
646 struct task_struct *owner = rt_mutex_owner(lock); 757 struct task_struct *owner = rt_mutex_owner(lock);
758 struct rt_mutex *next_lock = NULL;
647 unsigned long flags; 759 unsigned long flags;
648 int chain_walk = 0;
649 760
650 raw_spin_lock_irqsave(&current->pi_lock, flags); 761 raw_spin_lock_irqsave(&current->pi_lock, flags);
651 rt_mutex_dequeue(lock, waiter); 762 rt_mutex_dequeue(lock, waiter);
@@ -669,13 +780,13 @@ static void remove_waiter(struct rt_mutex *lock,
669 } 780 }
670 __rt_mutex_adjust_prio(owner); 781 __rt_mutex_adjust_prio(owner);
671 782
672 if (owner->pi_blocked_on) 783 /* Store the lock on which owner is blocked or NULL */
673 chain_walk = 1; 784 next_lock = task_blocked_on_lock(owner);
674 785
675 raw_spin_unlock_irqrestore(&owner->pi_lock, flags); 786 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
676 } 787 }
677 788
678 if (!chain_walk) 789 if (!next_lock)
679 return; 790 return;
680 791
681 /* gets dropped in rt_mutex_adjust_prio_chain()! */ 792 /* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +794,7 @@ static void remove_waiter(struct rt_mutex *lock,
683 794
684 raw_spin_unlock(&lock->wait_lock); 795 raw_spin_unlock(&lock->wait_lock);
685 796
686 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); 797 rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
687 798
688 raw_spin_lock(&lock->wait_lock); 799 raw_spin_lock(&lock->wait_lock);
689} 800}
@@ -696,6 +807,7 @@ static void remove_waiter(struct rt_mutex *lock,
696void rt_mutex_adjust_pi(struct task_struct *task) 807void rt_mutex_adjust_pi(struct task_struct *task)
697{ 808{
698 struct rt_mutex_waiter *waiter; 809 struct rt_mutex_waiter *waiter;
810 struct rt_mutex *next_lock;
699 unsigned long flags; 811 unsigned long flags;
700 812
701 raw_spin_lock_irqsave(&task->pi_lock, flags); 813 raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +818,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
706 raw_spin_unlock_irqrestore(&task->pi_lock, flags); 818 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
707 return; 819 return;
708 } 820 }
709 821 next_lock = waiter->lock;
710 raw_spin_unlock_irqrestore(&task->pi_lock, flags); 822 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
711 823
712 /* gets dropped in rt_mutex_adjust_prio_chain()! */ 824 /* gets dropped in rt_mutex_adjust_prio_chain()! */
713 get_task_struct(task); 825 get_task_struct(task);
714 rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); 826
827 rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
715} 828}
716 829
717/** 830/**
@@ -763,6 +876,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
763 return ret; 876 return ret;
764} 877}
765 878
879static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
880 struct rt_mutex_waiter *w)
881{
882 /*
883 * If the result is not -EDEADLOCK or the caller requested
884 * deadlock detection, nothing to do here.
885 */
886 if (res != -EDEADLOCK || detect_deadlock)
887 return;
888
889 /*
 890 * Yell loudly and stop the task right here.
891 */
892 rt_mutex_print_deadlock(w);
893 while (1) {
894 set_current_state(TASK_INTERRUPTIBLE);
895 schedule();
896 }
897}
898
766/* 899/*
767 * Slow path lock function: 900 * Slow path lock function:
768 */ 901 */
@@ -802,8 +935,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
802 935
803 set_current_state(TASK_RUNNING); 936 set_current_state(TASK_RUNNING);
804 937
805 if (unlikely(ret)) 938 if (unlikely(ret)) {
806 remove_waiter(lock, &waiter); 939 remove_waiter(lock, &waiter);
940 rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
941 }
807 942
808 /* 943 /*
809 * try_to_take_rt_mutex() sets the waiter bit 944 * try_to_take_rt_mutex() sets the waiter bit
@@ -859,12 +994,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
859 994
860 rt_mutex_deadlock_account_unlock(current); 995 rt_mutex_deadlock_account_unlock(current);
861 996
862 if (!rt_mutex_has_waiters(lock)) { 997 /*
863 lock->owner = NULL; 998 * We must be careful here if the fast path is enabled. If we
864 raw_spin_unlock(&lock->wait_lock); 999 * have no waiters queued we cannot set owner to NULL here
865 return; 1000 * because of:
1001 *
1002 * foo->lock->owner = NULL;
1003 * rtmutex_lock(foo->lock); <- fast path
1004 * free = atomic_dec_and_test(foo->refcnt);
1005 * rtmutex_unlock(foo->lock); <- fast path
1006 * if (free)
1007 * kfree(foo);
1008 * raw_spin_unlock(foo->lock->wait_lock);
1009 *
1010 * So for the fastpath enabled kernel:
1011 *
1012 * Nothing can set the waiters bit as long as we hold
1013 * lock->wait_lock. So we do the following sequence:
1014 *
1015 * owner = rt_mutex_owner(lock);
1016 * clear_rt_mutex_waiters(lock);
1017 * raw_spin_unlock(&lock->wait_lock);
1018 * if (cmpxchg(&lock->owner, owner, 0) == owner)
1019 * return;
1020 * goto retry;
1021 *
1022 * The fastpath disabled variant is simple as all access to
1023 * lock->owner is serialized by lock->wait_lock:
1024 *
1025 * lock->owner = NULL;
1026 * raw_spin_unlock(&lock->wait_lock);
1027 */
1028 while (!rt_mutex_has_waiters(lock)) {
1029 /* Drops lock->wait_lock ! */
1030 if (unlock_rt_mutex_safe(lock) == true)
1031 return;
1032 /* Relock the rtmutex and try again */
1033 raw_spin_lock(&lock->wait_lock);
866 } 1034 }
867 1035
1036 /*
1037 * The wakeup next waiter path does not suffer from the above
1038 * race. See the comments there.
1039 */
868 wakeup_next_waiter(lock); 1040 wakeup_next_waiter(lock);
869 1041
870 raw_spin_unlock(&lock->wait_lock); 1042 raw_spin_unlock(&lock->wait_lock);
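
Condensing the slow-path unlock shown in this hunk: the waiters bit is cleared and wait_lock dropped before the ownership cmpxchg, and the loop simply retries if a waiter sneaks in during that window. A sketch of the resulting control flow (not a drop-in replacement for the function):

raw_spin_lock(&lock->wait_lock);
rt_mutex_deadlock_account_unlock(current);

while (!rt_mutex_has_waiters(lock)) {
	/*
	 * unlock_rt_mutex_safe() clears the waiters bit, drops wait_lock
	 * and only then tries cmpxchg(&lock->owner, owner, NULL).  A
	 * failed cmpxchg means a new waiter arrived in the window, so
	 * relock and look again.
	 */
	if (unlock_rt_mutex_safe(lock))		/* drops lock->wait_lock */
		return;
	raw_spin_lock(&lock->wait_lock);
}

/* With waiters queued, wake the top one; it leaves owner set to
 * RT_MUTEX_HAS_WAITERS so new lockers are forced into the slow path. */
wakeup_next_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
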
@@ -1112,7 +1284,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1112 return 1; 1284 return 1;
1113 } 1285 }
1114 1286
1115 ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); 1287 /* We enforce deadlock detection for futexes */
1288 ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
1116 1289
1117 if (ret && !rt_mutex_owner(lock)) { 1290 if (ret && !rt_mutex_owner(lock)) {
1118 /* 1291 /*
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index a1a1dd06421d..f6a1f3c133b1 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -24,3 +24,8 @@
24#define debug_rt_mutex_print_deadlock(w) do { } while (0) 24#define debug_rt_mutex_print_deadlock(w) do { } while (0)
25#define debug_rt_mutex_detect_deadlock(w,d) (d) 25#define debug_rt_mutex_detect_deadlock(w,d) (d)
26#define debug_rt_mutex_reset_waiter(w) do { } while (0) 26#define debug_rt_mutex_reset_waiter(w) do { } while (0)
27
28static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
29{
30 WARN(1, "rtmutex deadlock detected\n");
31}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 49e0a20fd010..fcc2611d3f14 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -35,6 +35,7 @@
35 35
36static int nocompress; 36static int nocompress;
37static int noresume; 37static int noresume;
38static int nohibernate;
38static int resume_wait; 39static int resume_wait;
39static unsigned int resume_delay; 40static unsigned int resume_delay;
40static char resume_file[256] = CONFIG_PM_STD_PARTITION; 41static char resume_file[256] = CONFIG_PM_STD_PARTITION;
@@ -62,6 +63,11 @@ bool freezer_test_done;
62 63
63static const struct platform_hibernation_ops *hibernation_ops; 64static const struct platform_hibernation_ops *hibernation_ops;
64 65
66bool hibernation_available(void)
67{
68 return (nohibernate == 0);
69}
70
65/** 71/**
66 * hibernation_set_ops - Set the global hibernate operations. 72 * hibernation_set_ops - Set the global hibernate operations.
67 * @ops: Hibernation operations to use in subsequent hibernation transitions. 73 * @ops: Hibernation operations to use in subsequent hibernation transitions.
@@ -642,6 +648,11 @@ int hibernate(void)
642{ 648{
643 int error; 649 int error;
644 650
651 if (!hibernation_available()) {
652 pr_debug("PM: Hibernation not available.\n");
653 return -EPERM;
654 }
655
645 lock_system_sleep(); 656 lock_system_sleep();
646 /* The snapshot device should not be opened while we're running */ 657 /* The snapshot device should not be opened while we're running */
647 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { 658 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -734,7 +745,7 @@ static int software_resume(void)
734 /* 745 /*
735 * If the user said "noresume".. bail out early. 746 * If the user said "noresume".. bail out early.
736 */ 747 */
737 if (noresume) 748 if (noresume || !hibernation_available())
738 return 0; 749 return 0;
739 750
740 /* 751 /*
@@ -900,6 +911,9 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
900 int i; 911 int i;
901 char *start = buf; 912 char *start = buf;
902 913
914 if (!hibernation_available())
915 return sprintf(buf, "[disabled]\n");
916
903 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { 917 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
904 if (!hibernation_modes[i]) 918 if (!hibernation_modes[i])
905 continue; 919 continue;
@@ -934,6 +948,9 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
934 char *p; 948 char *p;
935 int mode = HIBERNATION_INVALID; 949 int mode = HIBERNATION_INVALID;
936 950
951 if (!hibernation_available())
952 return -EPERM;
953
937 p = memchr(buf, '\n', n); 954 p = memchr(buf, '\n', n);
938 len = p ? p - buf : n; 955 len = p ? p - buf : n;
939 956
@@ -1101,6 +1118,10 @@ static int __init hibernate_setup(char *str)
1101 noresume = 1; 1118 noresume = 1;
1102 else if (!strncmp(str, "nocompress", 10)) 1119 else if (!strncmp(str, "nocompress", 10))
1103 nocompress = 1; 1120 nocompress = 1;
1121 else if (!strncmp(str, "no", 2)) {
1122 noresume = 1;
1123 nohibernate = 1;
1124 }
1104 return 1; 1125 return 1;
1105} 1126}
1106 1127
@@ -1125,9 +1146,23 @@ static int __init resumedelay_setup(char *str)
1125 return 1; 1146 return 1;
1126} 1147}
1127 1148
1149static int __init nohibernate_setup(char *str)
1150{
1151 noresume = 1;
1152 nohibernate = 1;
1153 return 1;
1154}
1155
1156static int __init kaslr_nohibernate_setup(char *str)
1157{
1158 return nohibernate_setup(str);
1159}
1160
1128__setup("noresume", noresume_setup); 1161__setup("noresume", noresume_setup);
1129__setup("resume_offset=", resume_offset_setup); 1162__setup("resume_offset=", resume_offset_setup);
1130__setup("resume=", resume_setup); 1163__setup("resume=", resume_setup);
1131__setup("hibernate=", hibernate_setup); 1164__setup("hibernate=", hibernate_setup);
1132__setup("resumewait", resumewait_setup); 1165__setup("resumewait", resumewait_setup);
1133__setup("resumedelay=", resumedelay_setup); 1166__setup("resumedelay=", resumedelay_setup);
1167__setup("nohibernate", nohibernate_setup);
1168__setup("kaslr", kaslr_nohibernate_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 573410d6647e..8e90f330f139 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -300,13 +300,11 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
300 s += sprintf(s,"%s ", pm_states[i].label); 300 s += sprintf(s,"%s ", pm_states[i].label);
301 301
302#endif 302#endif
303#ifdef CONFIG_HIBERNATION 303 if (hibernation_available())
304 s += sprintf(s, "%s\n", "disk"); 304 s += sprintf(s, "disk ");
305#else
306 if (s != buf) 305 if (s != buf)
307 /* convert the last space to a newline */ 306 /* convert the last space to a newline */
308 *(s-1) = '\n'; 307 *(s-1) = '\n';
309#endif
310 return (s - buf); 308 return (s - buf);
311} 309}
312 310
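
The hunks above gate hibernation behind hibernation_available(): booting with "nohibernate", "hibernate=no" or the "kaslr" option makes hibernate() return -EPERM, /sys/power/disk read "[disabled]" and /sys/power/state drop its "disk" entry. A minimal userspace probe of that sysfs surface (a sketch, assuming only the standard /sys/power paths):

    /* probe.c: print the hibernation-related sysfs files touched by this series */
    #include <stdio.h>

    static void dump(const char *path)
    {
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return;
        }
        if (fgets(line, sizeof(line), f))
            printf("%s: %s", path, line);
        fclose(f);
    }

    int main(void)
    {
        dump("/sys/power/state");  /* lists "disk" only when hibernation is available */
        dump("/sys/power/disk");   /* reads "[disabled]" when it is gated off */
        return 0;
    }
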
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 98d357584cd6..526e8911460a 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -49,6 +49,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
49 struct snapshot_data *data; 49 struct snapshot_data *data;
50 int error; 50 int error;
51 51
52 if (!hibernation_available())
53 return -EPERM;
54
52 lock_system_sleep(); 55 lock_system_sleep();
53 56
54 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { 57 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ba9ed453c4ed..7de6555cfea0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -152,10 +152,6 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
152#ifdef CONFIG_SPARC 152#ifdef CONFIG_SPARC
153#endif 153#endif
154 154
155#ifdef CONFIG_SPARC64
156extern int sysctl_tsb_ratio;
157#endif
158
159#ifdef __hppa__ 155#ifdef __hppa__
160extern int pwrsw_enabled; 156extern int pwrsw_enabled;
161#endif 157#endif
diff --git a/mm/page_io.c b/mm/page_io.c
index 243a9b76e5ce..955db8b0d497 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -274,8 +274,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
274 .count = PAGE_SIZE, 274 .count = PAGE_SIZE,
275 .iov_offset = 0, 275 .iov_offset = 0,
276 .nr_segs = 1, 276 .nr_segs = 1,
277 .bvec = &bv
278 }; 277 };
278 from.bvec = &bv; /* older gcc versions are broken */
279 279
280 init_sync_kiocb(&kiocb, swap_file); 280 init_sync_kiocb(&kiocb, swap_file);
281 kiocb.ki_pos = page_file_offset(page); 281 kiocb.ki_pos = page_file_offset(page);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 67d7721d237e..1675021d8c12 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -72,6 +72,8 @@ const char *ceph_msg_type_name(int type)
72 case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack"; 72 case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
73 case CEPH_MSG_STATFS: return "statfs"; 73 case CEPH_MSG_STATFS: return "statfs";
74 case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; 74 case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
75 case CEPH_MSG_MON_GET_VERSION: return "mon_get_version";
76 case CEPH_MSG_MON_GET_VERSION_REPLY: return "mon_get_version_reply";
75 case CEPH_MSG_MDS_MAP: return "mds_map"; 77 case CEPH_MSG_MDS_MAP: return "mds_map";
76 case CEPH_MSG_CLIENT_SESSION: return "client_session"; 78 case CEPH_MSG_CLIENT_SESSION: return "client_session";
77 case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect"; 79 case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 10421a4b76f8..d1a62c69a9f4 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -126,9 +126,13 @@ static int monc_show(struct seq_file *s, void *p)
126 req = rb_entry(rp, struct ceph_mon_generic_request, node); 126 req = rb_entry(rp, struct ceph_mon_generic_request, node);
127 op = le16_to_cpu(req->request->hdr.type); 127 op = le16_to_cpu(req->request->hdr.type);
128 if (op == CEPH_MSG_STATFS) 128 if (op == CEPH_MSG_STATFS)
129 seq_printf(s, "%lld statfs\n", req->tid); 129 seq_printf(s, "%llu statfs\n", req->tid);
130 else if (op == CEPH_MSG_POOLOP)
131 seq_printf(s, "%llu poolop\n", req->tid);
132 else if (op == CEPH_MSG_MON_GET_VERSION)
 133 			seq_printf(s, "%llu mon_get_version\n", req->tid);
130 else 134 else
131 seq_printf(s, "%lld unknown\n", req->tid); 135 seq_printf(s, "%llu unknown\n", req->tid);
132 } 136 }
133 137
134 mutex_unlock(&monc->mutex); 138 mutex_unlock(&monc->mutex);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 2ac9ef35110b..067d3af2eaf6 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -296,6 +296,33 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
296 __send_subscribe(monc); 296 __send_subscribe(monc);
297 mutex_unlock(&monc->mutex); 297 mutex_unlock(&monc->mutex);
298} 298}
299EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
300
301int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
302 unsigned long timeout)
303{
304 unsigned long started = jiffies;
305 int ret;
306
307 mutex_lock(&monc->mutex);
308 while (monc->have_osdmap < epoch) {
309 mutex_unlock(&monc->mutex);
310
311 if (timeout != 0 && time_after_eq(jiffies, started + timeout))
312 return -ETIMEDOUT;
313
314 ret = wait_event_interruptible_timeout(monc->client->auth_wq,
315 monc->have_osdmap >= epoch, timeout);
316 if (ret < 0)
317 return ret;
318
319 mutex_lock(&monc->mutex);
320 }
321
322 mutex_unlock(&monc->mutex);
323 return 0;
324}
325EXPORT_SYMBOL(ceph_monc_wait_osdmap);
299 326
300/* 327/*
301 * 328 *
@@ -477,14 +504,13 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
477 return m; 504 return m;
478} 505}
479 506
480static int do_generic_request(struct ceph_mon_client *monc, 507static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
481 struct ceph_mon_generic_request *req) 508 struct ceph_mon_generic_request *req)
482{ 509{
483 int err; 510 int err;
484 511
485 /* register request */ 512 /* register request */
486 mutex_lock(&monc->mutex); 513 req->tid = tid != 0 ? tid : ++monc->last_tid;
487 req->tid = ++monc->last_tid;
488 req->request->hdr.tid = cpu_to_le64(req->tid); 514 req->request->hdr.tid = cpu_to_le64(req->tid);
489 __insert_generic_request(monc, req); 515 __insert_generic_request(monc, req);
490 monc->num_generic_requests++; 516 monc->num_generic_requests++;
@@ -496,13 +522,24 @@ static int do_generic_request(struct ceph_mon_client *monc,
496 mutex_lock(&monc->mutex); 522 mutex_lock(&monc->mutex);
497 rb_erase(&req->node, &monc->generic_request_tree); 523 rb_erase(&req->node, &monc->generic_request_tree);
498 monc->num_generic_requests--; 524 monc->num_generic_requests--;
499 mutex_unlock(&monc->mutex);
500 525
501 if (!err) 526 if (!err)
502 err = req->result; 527 err = req->result;
503 return err; 528 return err;
504} 529}
505 530
531static int do_generic_request(struct ceph_mon_client *monc,
532 struct ceph_mon_generic_request *req)
533{
534 int err;
535
536 mutex_lock(&monc->mutex);
537 err = __do_generic_request(monc, 0, req);
538 mutex_unlock(&monc->mutex);
539
540 return err;
541}
542
506/* 543/*
507 * statfs 544 * statfs
508 */ 545 */
@@ -579,6 +616,96 @@ out:
579} 616}
580EXPORT_SYMBOL(ceph_monc_do_statfs); 617EXPORT_SYMBOL(ceph_monc_do_statfs);
581 618
619static void handle_get_version_reply(struct ceph_mon_client *monc,
620 struct ceph_msg *msg)
621{
622 struct ceph_mon_generic_request *req;
623 u64 tid = le64_to_cpu(msg->hdr.tid);
624 void *p = msg->front.iov_base;
625 void *end = p + msg->front_alloc_len;
626 u64 handle;
627
628 dout("%s %p tid %llu\n", __func__, msg, tid);
629
630 ceph_decode_need(&p, end, 2*sizeof(u64), bad);
631 handle = ceph_decode_64(&p);
632 if (tid != 0 && tid != handle)
633 goto bad;
634
635 mutex_lock(&monc->mutex);
636 req = __lookup_generic_req(monc, handle);
637 if (req) {
638 *(u64 *)req->buf = ceph_decode_64(&p);
639 req->result = 0;
640 get_generic_request(req);
641 }
642 mutex_unlock(&monc->mutex);
643 if (req) {
644 complete_all(&req->completion);
645 put_generic_request(req);
646 }
647
648 return;
649bad:
650 pr_err("corrupt mon_get_version reply\n");
651 ceph_msg_dump(msg);
652}
653
654/*
655 * Send MMonGetVersion and wait for the reply.
656 *
657 * @what: one of "mdsmap", "osdmap" or "monmap"
658 */
659int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
660 u64 *newest)
661{
662 struct ceph_mon_generic_request *req;
663 void *p, *end;
664 u64 tid;
665 int err;
666
667 req = kzalloc(sizeof(*req), GFP_NOFS);
668 if (!req)
669 return -ENOMEM;
670
671 kref_init(&req->kref);
672 req->buf = newest;
673 req->buf_len = sizeof(*newest);
674 init_completion(&req->completion);
675
676 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
677 sizeof(u64) + sizeof(u32) + strlen(what),
678 GFP_NOFS, true);
679 if (!req->request) {
680 err = -ENOMEM;
681 goto out;
682 }
683
684 req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
685 GFP_NOFS, true);
686 if (!req->reply) {
687 err = -ENOMEM;
688 goto out;
689 }
690
691 p = req->request->front.iov_base;
692 end = p + req->request->front_alloc_len;
693
694 /* fill out request */
695 mutex_lock(&monc->mutex);
696 tid = ++monc->last_tid;
697 ceph_encode_64(&p, tid); /* handle */
698 ceph_encode_string(&p, end, what, strlen(what));
699
700 err = __do_generic_request(monc, tid, req);
701
702 mutex_unlock(&monc->mutex);
703out:
704 kref_put(&req->kref, release_generic_request);
705 return err;
706}
707EXPORT_SYMBOL(ceph_monc_do_get_version);
708
582/* 709/*
583 * pool ops 710 * pool ops
584 */ 711 */
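
The new ceph_monc_do_get_version() pairs naturally with the ceph_monc_wait_osdmap() export added earlier in this diff. A hedged caller sketch, not taken from the patch (the wait_for_latest_osdmap() wrapper name and the 10 second timeout are invented for the example):

    /* sketch: fetch the newest osdmap epoch, then wait until we have it */
    static int wait_for_latest_osdmap(struct ceph_client *client)
    {
        u64 newest_epoch;
        int ret;

        /* ask the monitors which osdmap epoch is current */
        ret = ceph_monc_do_get_version(&client->monc, "osdmap", &newest_epoch);
        if (ret)
            return ret;

        /* subscribe to the next map and block until that epoch arrives */
        ceph_monc_request_next_osdmap(&client->monc);
        return ceph_monc_wait_osdmap(&client->monc, newest_epoch, 10 * HZ);
    }
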
@@ -981,6 +1108,10 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
981 handle_statfs_reply(monc, msg); 1108 handle_statfs_reply(monc, msg);
982 break; 1109 break;
983 1110
1111 case CEPH_MSG_MON_GET_VERSION_REPLY:
1112 handle_get_version_reply(monc, msg);
1113 break;
1114
984 case CEPH_MSG_POOLOP_REPLY: 1115 case CEPH_MSG_POOLOP_REPLY:
985 handle_poolop_reply(monc, msg); 1116 handle_poolop_reply(monc, msg);
986 break; 1117 break;
@@ -1029,6 +1160,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
1029 case CEPH_MSG_AUTH_REPLY: 1160 case CEPH_MSG_AUTH_REPLY:
1030 m = ceph_msg_get(monc->m_auth_reply); 1161 m = ceph_msg_get(monc->m_auth_reply);
1031 break; 1162 break;
1163 case CEPH_MSG_MON_GET_VERSION_REPLY:
1164 if (le64_to_cpu(hdr->tid) != 0)
1165 return get_generic_reply(con, hdr, skip);
1166
1167 /*
 1168	 * Older OSDs don't set reply tid even if the original
 1169	 * request had a non-zero tid.  Work around this weirdness
1170 * by falling through to the allocate case.
1171 */
1032 case CEPH_MSG_MON_MAP: 1172 case CEPH_MSG_MON_MAP:
1033 case CEPH_MSG_MDS_MAP: 1173 case CEPH_MSG_MDS_MAP:
1034 case CEPH_MSG_OSD_MAP: 1174 case CEPH_MSG_OSD_MAP:
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6b1c04ca1d50..488dd1a825c0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -739,22 +739,38 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
739 __sum16 sum; 739 __sum16 sum;
740 740
741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
742 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum && 742 if (likely(!sum)) {
743 !skb->csum_complete_sw) 743 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
744 netdev_rx_csum_fault(skb->dev); 744 !skb->csum_complete_sw)
745 745 netdev_rx_csum_fault(skb->dev);
746 /* Save checksum complete for later use */ 746 }
747 skb->csum = sum; 747 skb->csum_valid = !sum;
748 skb->ip_summed = CHECKSUM_COMPLETE;
749 skb->csum_complete_sw = 1;
750
751 return sum; 748 return sum;
752} 749}
753EXPORT_SYMBOL(__skb_checksum_complete_head); 750EXPORT_SYMBOL(__skb_checksum_complete_head);
754 751
755__sum16 __skb_checksum_complete(struct sk_buff *skb) 752__sum16 __skb_checksum_complete(struct sk_buff *skb)
756{ 753{
757 return __skb_checksum_complete_head(skb, skb->len); 754 __wsum csum;
755 __sum16 sum;
756
757 csum = skb_checksum(skb, 0, skb->len, 0);
758
759 /* skb->csum holds pseudo checksum */
760 sum = csum_fold(csum_add(skb->csum, csum));
761 if (likely(!sum)) {
762 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
763 !skb->csum_complete_sw)
764 netdev_rx_csum_fault(skb->dev);
765 }
766
767 /* Save full packet checksum */
768 skb->csum = csum;
769 skb->ip_summed = CHECKSUM_COMPLETE;
770 skb->csum_complete_sw = 1;
771 skb->csum_valid = !sum;
772
773 return sum;
758} 774}
759EXPORT_SYMBOL(__skb_checksum_complete); 775EXPORT_SYMBOL(__skb_checksum_complete);
760 776
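
The rework above keeps the pseudo-header checksum handed over in skb->csum, adds a freshly computed full-packet checksum, and records the verdict in the new skb->csum_valid bit. The arithmetic behind csum_fold(csum_add(...)) is ordinary RFC 1071 one's-complement folding; a standalone userspace illustration with made-up packet contents (not kernel code):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* accumulate 16-bit words into a 32-bit sum, carries folded in later */
    static uint32_t csum_partial(const uint16_t *w, size_t n, uint32_t sum)
    {
        while (n--)
            sum += *w++;
        return sum;
    }

    /* fold to 16 bits and complement, like the kernel's csum_fold() */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint16_t pseudo[]  = { 0xc0a8, 0x0001, 0xc0a8, 0x0002, 0x0011, 0x000c };
        uint16_t payload[] = { 0x1234, 0x5678, 0x9abc, 0x0000 /* csum slot */ };

        /* sender: checksum over pseudo header + payload, stored complemented */
        uint32_t s = csum_partial(pseudo, 6, csum_partial(payload, 4, 0));
        payload[3] = csum_fold(s);

        /* receiver: full packet sum plus pseudo-header sum must fold to zero */
        uint32_t pkt = csum_partial(payload, 4, 0);
        uint16_t verdict = csum_fold(csum_partial(pseudo, 6, pkt));

        printf("fold result 0x%04x, csum_valid %d\n", (unsigned)verdict, !verdict);
        return 0;
    }
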
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bf92824af3f7..9cd5344fad73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -689,6 +689,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
689 new->ooo_okay = old->ooo_okay; 689 new->ooo_okay = old->ooo_okay;
690 new->no_fcs = old->no_fcs; 690 new->no_fcs = old->no_fcs;
691 new->encapsulation = old->encapsulation; 691 new->encapsulation = old->encapsulation;
692 new->encap_hdr_csum = old->encap_hdr_csum;
693 new->csum_valid = old->csum_valid;
694 new->csum_complete_sw = old->csum_complete_sw;
692#ifdef CONFIG_XFRM 695#ifdef CONFIG_XFRM
693 new->sp = secpath_get(old->sp); 696 new->sp = secpath_get(old->sp);
694#endif 697#endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 185ed3e59802..d92f94b7e402 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1861,6 +1861,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
1861 unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask); 1861 unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
1862 struct udp_hslot *hslot = &udp_table.hash[slot]; 1862 struct udp_hslot *hslot = &udp_table.hash[slot];
1863 1863
1864 /* Do not bother scanning a too big list */
1865 if (hslot->count > 10)
1866 return NULL;
1867
1864 rcu_read_lock(); 1868 rcu_read_lock();
1865begin: 1869begin:
1866 count = 0; 1870 count = 0;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7e5eb7554990..dcb19592761e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -34,6 +34,8 @@
34 * Sridhar Samudrala <sri@us.ibm.com> 34 * Sridhar Samudrala <sri@us.ibm.com>
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#include <net/sctp/structs.h> 39#include <net/sctp/structs.h>
38#include <net/sctp/sctp.h> 40#include <net/sctp/sctp.h>
39#include <linux/sysctl.h> 41#include <linux/sysctl.h>
@@ -46,6 +48,11 @@ static int sack_timer_min = 1;
46static int sack_timer_max = 500; 48static int sack_timer_max = 500;
47static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 49static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
48static int rwnd_scale_max = 16; 50static int rwnd_scale_max = 16;
51static int rto_alpha_min = 0;
52static int rto_beta_min = 0;
53static int rto_alpha_max = 1000;
54static int rto_beta_max = 1000;
55
49static unsigned long max_autoclose_min = 0; 56static unsigned long max_autoclose_min = 0;
50static unsigned long max_autoclose_max = 57static unsigned long max_autoclose_max =
51 (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) 58 (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
@@ -64,6 +71,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
64static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, 71static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
65 void __user *buffer, size_t *lenp, 72 void __user *buffer, size_t *lenp,
66 loff_t *ppos); 73 loff_t *ppos);
74static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
75 void __user *buffer, size_t *lenp,
76 loff_t *ppos);
67static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 77static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
68 void __user *buffer, size_t *lenp, 78 void __user *buffer, size_t *lenp,
69 loff_t *ppos); 79 loff_t *ppos);
@@ -126,15 +136,19 @@ static struct ctl_table sctp_net_table[] = {
126 .procname = "rto_alpha_exp_divisor", 136 .procname = "rto_alpha_exp_divisor",
127 .data = &init_net.sctp.rto_alpha, 137 .data = &init_net.sctp.rto_alpha,
128 .maxlen = sizeof(int), 138 .maxlen = sizeof(int),
129 .mode = 0444, 139 .mode = 0644,
130 .proc_handler = proc_dointvec, 140 .proc_handler = proc_sctp_do_alpha_beta,
141 .extra1 = &rto_alpha_min,
142 .extra2 = &rto_alpha_max,
131 }, 143 },
132 { 144 {
133 .procname = "rto_beta_exp_divisor", 145 .procname = "rto_beta_exp_divisor",
134 .data = &init_net.sctp.rto_beta, 146 .data = &init_net.sctp.rto_beta,
135 .maxlen = sizeof(int), 147 .maxlen = sizeof(int),
136 .mode = 0444, 148 .mode = 0644,
137 .proc_handler = proc_dointvec, 149 .proc_handler = proc_sctp_do_alpha_beta,
150 .extra1 = &rto_beta_min,
151 .extra2 = &rto_beta_max,
138 }, 152 },
139 { 153 {
140 .procname = "max_burst", 154 .procname = "max_burst",
@@ -403,6 +417,16 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
403 return ret; 417 return ret;
404} 418}
405 419
420static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
421 void __user *buffer, size_t *lenp,
422 loff_t *ppos)
423{
424 pr_warn_once("Changing rto_alpha or rto_beta may lead to "
425 "suboptimal rtt/srtt estimations!\n");
426
427 return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
428}
429
406static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 430static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
407 void __user *buffer, size_t *lenp, 431 void __user *buffer, size_t *lenp,
408 loff_t *ppos) 432 loff_t *ppos)
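
With the mode change from 0444 to 0644 and the proc_sctp_do_alpha_beta() handler, rto_alpha_exp_divisor and rto_beta_exp_divisor become writable, are clamped to the 0..1000 range, and the first write logs a one-time warning about suboptimal rtt/srtt estimation. A userspace sketch (requires root; the procfs path is derived from the procname shown above):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/sys/net/sctp/rto_alpha_exp_divisor";
        char buf[32];
        FILE *f = fopen(path, "r+");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("current rto_alpha: %s", buf);

        /* values outside 0..1000 are now rejected by proc_dointvec_minmax() */
        rewind(f);
        fputs("8\n", f);

        fclose(f);
        return 0;
    }
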
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index b5f08f727868..35d5a5877d04 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -289,14 +289,16 @@ EOF
289 289
290fi 290fi
291 291
292# Build header package 292# Build kernel header package
293(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles") 293(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
294(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles") 294(cd $srctree; find arch/$SRCARCH/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
295(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles") 295(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
296(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
297(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
296destdir=$kernel_headers_dir/usr/src/linux-headers-$version 298destdir=$kernel_headers_dir/usr/src/linux-headers-$version
297mkdir -p "$destdir" 299mkdir -p "$destdir"
298(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -) 300(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
299(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -) 301(cd $objtree; tar -c -f - -T -) < "$objtree/debian/hdrobjfiles" | (cd $destdir; tar -xf -)
300(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be 302(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
301ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build" 303ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
302rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles" 304rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 995c1eafaff6..e046bff33589 100644
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -125,12 +125,11 @@ esac
125# Create the tarball 125# Create the tarball
126# 126#
127( 127(
128 cd "${tmpdir}"
129 opts= 128 opts=
130 if tar --owner=root --group=root --help >/dev/null 2>&1; then 129 if tar --owner=root --group=root --help >/dev/null 2>&1; then
131 opts="--owner=root --group=root" 130 opts="--owner=root --group=root"
132 fi 131 fi
133 tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}" 132 tar cf - -C "$tmpdir" boot/ lib/ $opts | ${compress} > "${tarball}${file_ext}"
134) 133)
135 134
136echo "Tarball successfully created in ${tarball}${file_ext}" 135echo "Tarball successfully created in ${tarball}${file_ext}"
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index d35b4915b00d..d606f3d12d6b 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -12,15 +12,41 @@ config EVM
12 12
13 If you are unsure how to answer this question, answer N. 13 If you are unsure how to answer this question, answer N.
14 14
15config EVM_HMAC_VERSION 15if EVM
16 int "EVM HMAC version" 16
17menu "EVM options"
18
19config EVM_ATTR_FSUUID
20 bool "FSUUID (version 2)"
21 default y
17 depends on EVM 22 depends on EVM
18 default 2
19 help 23 help
20 This options adds EVM HMAC version support. 24 Include filesystem UUID for HMAC calculation.
21 1 - original version 25
 22	  2 - add per filesystem unique identifier (UUID) (default) 26	  Default value is 'selected', which corresponds to the former version 2;
 27	  if 'not selected', it corresponds to the former version 1.
23 28
24 WARNING: changing the HMAC calculation method or adding 29 WARNING: changing the HMAC calculation method or adding
25 additional info to the calculation, requires existing EVM 30 additional info to the calculation, requires existing EVM
26 labeled file systems to be relabeled. 31 labeled file systems to be relabeled.
32
33config EVM_EXTRA_SMACK_XATTRS
34 bool "Additional SMACK xattrs"
35 depends on EVM && SECURITY_SMACK
36 default n
37 help
38 Include additional SMACK xattrs for HMAC calculation.
39
40 In addition to the original security xattrs (eg. security.selinux,
41 security.SMACK64, security.capability, and security.ima) included
42 in the HMAC calculation, enabling this option includes newly defined
43 Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
44 security.SMACK64MMAP.
45
46 WARNING: changing the HMAC calculation method or adding
47 additional info to the calculation, requires existing EVM
48 labeled file systems to be relabeled.
49
50endmenu
51
52endif
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 37c88ddb3cfe..88bfe77efa1c 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -24,7 +24,10 @@
24extern int evm_initialized; 24extern int evm_initialized;
25extern char *evm_hmac; 25extern char *evm_hmac;
26extern char *evm_hash; 26extern char *evm_hash;
27extern int evm_hmac_version; 27
28#define EVM_ATTR_FSUUID 0x0001
29
30extern int evm_hmac_attrs;
28 31
29extern struct crypto_shash *hmac_tfm; 32extern struct crypto_shash *hmac_tfm;
30extern struct crypto_shash *hash_tfm; 33extern struct crypto_shash *hash_tfm;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 6b540f1822e0..5e9687f02e1b 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -112,7 +112,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
112 hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); 112 hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
113 hmac_misc.mode = inode->i_mode; 113 hmac_misc.mode = inode->i_mode;
114 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); 114 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
115 if (evm_hmac_version > 1) 115 if (evm_hmac_attrs & EVM_ATTR_FSUUID)
116 crypto_shash_update(desc, inode->i_sb->s_uuid, 116 crypto_shash_update(desc, inode->i_sb->s_uuid,
117 sizeof(inode->i_sb->s_uuid)); 117 sizeof(inode->i_sb->s_uuid));
118 crypto_shash_final(desc, digest); 118 crypto_shash_final(desc, digest);
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 6e0bd933b6a9..3bcb80df4d01 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -32,7 +32,7 @@ static char *integrity_status_msg[] = {
32}; 32};
33char *evm_hmac = "hmac(sha1)"; 33char *evm_hmac = "hmac(sha1)";
34char *evm_hash = "sha1"; 34char *evm_hash = "sha1";
35int evm_hmac_version = CONFIG_EVM_HMAC_VERSION; 35int evm_hmac_attrs;
36 36
37char *evm_config_xattrnames[] = { 37char *evm_config_xattrnames[] = {
38#ifdef CONFIG_SECURITY_SELINUX 38#ifdef CONFIG_SECURITY_SELINUX
@@ -40,6 +40,11 @@ char *evm_config_xattrnames[] = {
40#endif 40#endif
41#ifdef CONFIG_SECURITY_SMACK 41#ifdef CONFIG_SECURITY_SMACK
42 XATTR_NAME_SMACK, 42 XATTR_NAME_SMACK,
43#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
44 XATTR_NAME_SMACKEXEC,
45 XATTR_NAME_SMACKTRANSMUTE,
46 XATTR_NAME_SMACKMMAP,
47#endif
43#endif 48#endif
44#ifdef CONFIG_IMA_APPRAISE 49#ifdef CONFIG_IMA_APPRAISE
45 XATTR_NAME_IMA, 50 XATTR_NAME_IMA,
@@ -57,6 +62,14 @@ static int __init evm_set_fixmode(char *str)
57} 62}
58__setup("evm=", evm_set_fixmode); 63__setup("evm=", evm_set_fixmode);
59 64
65static void __init evm_init_config(void)
66{
67#ifdef CONFIG_EVM_ATTR_FSUUID
68 evm_hmac_attrs |= EVM_ATTR_FSUUID;
69#endif
70 pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
71}
72
60static int evm_find_protected_xattrs(struct dentry *dentry) 73static int evm_find_protected_xattrs(struct dentry *dentry)
61{ 74{
62 struct inode *inode = dentry->d_inode; 75 struct inode *inode = dentry->d_inode;
@@ -287,12 +300,20 @@ out:
287 * @xattr_value: pointer to the new extended attribute value 300 * @xattr_value: pointer to the new extended attribute value
288 * @xattr_value_len: pointer to the new extended attribute value length 301 * @xattr_value_len: pointer to the new extended attribute value length
289 * 302 *
290 * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that 303 * Before allowing the 'security.evm' protected xattr to be updated,
291 * the current value is valid. 304 * verify the existing value is valid. As only the kernel should have
305 * access to the EVM encrypted key needed to calculate the HMAC, prevent
306 * userspace from writing HMAC value. Writing 'security.evm' requires
307 * requires CAP_SYS_ADMIN privileges.
292 */ 308 */
293int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, 309int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
294 const void *xattr_value, size_t xattr_value_len) 310 const void *xattr_value, size_t xattr_value_len)
295{ 311{
312 const struct evm_ima_xattr_data *xattr_data = xattr_value;
313
314 if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
315 && (xattr_data->type == EVM_XATTR_HMAC))
316 return -EPERM;
296 return evm_protect_xattr(dentry, xattr_name, xattr_value, 317 return evm_protect_xattr(dentry, xattr_name, xattr_value,
297 xattr_value_len); 318 xattr_value_len);
298} 319}
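
The evm_inode_setxattr() change above means userspace can no longer install an HMAC-type security.evm value directly, even with CAP_SYS_ADMIN; only signature-type values still fall through to evm_protect_xattr(). A userspace sketch of the rejected case (the type byte 2 for EVM_XATTR_HMAC is taken from the integrity headers of this era and should be treated as an assumption of the example):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        const char *file = argc > 1 ? argv[1] : "testfile";
        unsigned char fake_hmac[21];

        fake_hmac[0] = 2;                  /* assumed EVM_XATTR_HMAC type byte */
        memset(fake_hmac + 1, 0xaa, 20);   /* bogus SHA1-sized digest */

        if (setxattr(file, "security.evm", fake_hmac, sizeof(fake_hmac), 0))
            printf("setxattr: %s (EPERM expected after this patch)\n",
                   strerror(errno));
        else
            printf("setxattr unexpectedly succeeded\n");
        return 0;
    }
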
@@ -432,6 +453,8 @@ static int __init init_evm(void)
432{ 453{
433 int error; 454 int error;
434 455
456 evm_init_config();
457
435 error = evm_init_secfs(); 458 error = evm_init_secfs();
436 if (error < 0) { 459 if (error < 0) {
437 pr_info("Error registering secfs\n"); 460 pr_info("Error registering secfs\n");
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 291bf0f3a46d..d3113d4aaa3c 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -341,7 +341,7 @@ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
341 return 0; 341 return 0;
342} 342}
343 343
344static void ima_reset_appraise_flags(struct inode *inode) 344static void ima_reset_appraise_flags(struct inode *inode, int digsig)
345{ 345{
346 struct integrity_iint_cache *iint; 346 struct integrity_iint_cache *iint;
347 347
@@ -353,18 +353,22 @@ static void ima_reset_appraise_flags(struct inode *inode)
353 return; 353 return;
354 354
355 iint->flags &= ~IMA_DONE_MASK; 355 iint->flags &= ~IMA_DONE_MASK;
356 if (digsig)
357 iint->flags |= IMA_DIGSIG;
356 return; 358 return;
357} 359}
358 360
359int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, 361int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
360 const void *xattr_value, size_t xattr_value_len) 362 const void *xattr_value, size_t xattr_value_len)
361{ 363{
364 const struct evm_ima_xattr_data *xvalue = xattr_value;
362 int result; 365 int result;
363 366
364 result = ima_protect_xattr(dentry, xattr_name, xattr_value, 367 result = ima_protect_xattr(dentry, xattr_name, xattr_value,
365 xattr_value_len); 368 xattr_value_len);
366 if (result == 1) { 369 if (result == 1) {
367 ima_reset_appraise_flags(dentry->d_inode); 370 ima_reset_appraise_flags(dentry->d_inode,
371 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
368 result = 0; 372 result = 0;
369 } 373 }
370 return result; 374 return result;
@@ -376,7 +380,7 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
376 380
377 result = ima_protect_xattr(dentry, xattr_name, NULL, 0); 381 result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
378 if (result == 1) { 382 if (result == 1) {
379 ima_reset_appraise_flags(dentry->d_inode); 383 ima_reset_appraise_flags(dentry->d_inode, 0);
380 result = 0; 384 result = 0;
381 } 385 }
382 return result; 386 return result;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 1bde8e627766..ccd0ac8fa9a0 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -27,6 +27,36 @@
27 27
28static struct crypto_shash *ima_shash_tfm; 28static struct crypto_shash *ima_shash_tfm;
29 29
30/**
31 * ima_kernel_read - read file content
32 *
33 * This is a function for reading file content instead of kernel_read().
34 * It does not perform locking checks to ensure it cannot be blocked.
35 * It does not perform security checks because it is irrelevant for IMA.
36 *
37 */
38static int ima_kernel_read(struct file *file, loff_t offset,
39 char *addr, unsigned long count)
40{
41 mm_segment_t old_fs;
42 char __user *buf = addr;
43 ssize_t ret;
44
45 if (!(file->f_mode & FMODE_READ))
46 return -EBADF;
47 if (!file->f_op->read && !file->f_op->aio_read)
48 return -EINVAL;
49
50 old_fs = get_fs();
51 set_fs(get_ds());
52 if (file->f_op->read)
53 ret = file->f_op->read(file, buf, count, &offset);
54 else
55 ret = do_sync_read(file, buf, count, &offset);
56 set_fs(old_fs);
57 return ret;
58}
59
30int ima_init_crypto(void) 60int ima_init_crypto(void)
31{ 61{
32 long rc; 62 long rc;
@@ -104,7 +134,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
104 while (offset < i_size) { 134 while (offset < i_size) {
105 int rbuf_len; 135 int rbuf_len;
106 136
107 rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE); 137 rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
108 if (rbuf_len < 0) { 138 if (rbuf_len < 0) {
109 rc = rbuf_len; 139 rc = rbuf_len;
110 break; 140 break;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index dcc98cf542d8..09baa335ebc7 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -81,7 +81,6 @@ static void ima_rdwr_violation_check(struct file *file)
81{ 81{
82 struct inode *inode = file_inode(file); 82 struct inode *inode = file_inode(file);
83 fmode_t mode = file->f_mode; 83 fmode_t mode = file->f_mode;
84 int must_measure;
85 bool send_tomtou = false, send_writers = false; 84 bool send_tomtou = false, send_writers = false;
86 char *pathbuf = NULL; 85 char *pathbuf = NULL;
87 const char *pathname; 86 const char *pathname;
@@ -92,18 +91,19 @@ static void ima_rdwr_violation_check(struct file *file)
92 mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */ 91 mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */
93 92
94 if (mode & FMODE_WRITE) { 93 if (mode & FMODE_WRITE) {
95 if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) 94 if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
96 send_tomtou = true; 95 struct integrity_iint_cache *iint;
97 goto out; 96 iint = integrity_iint_find(inode);
97 /* IMA_MEASURE is set from reader side */
98 if (iint && (iint->flags & IMA_MEASURE))
99 send_tomtou = true;
100 }
101 } else {
102 if ((atomic_read(&inode->i_writecount) > 0) &&
103 ima_must_measure(inode, MAY_READ, FILE_CHECK))
104 send_writers = true;
98 } 105 }
99 106
100 must_measure = ima_must_measure(inode, MAY_READ, FILE_CHECK);
101 if (!must_measure)
102 goto out;
103
104 if (atomic_read(&inode->i_writecount) > 0)
105 send_writers = true;
106out:
107 mutex_unlock(&inode->i_mutex); 107 mutex_unlock(&inode->i_mutex);
108 108
109 if (!send_tomtou && !send_writers) 109 if (!send_tomtou && !send_writers)
diff --git a/sound/core/control.c b/sound/core/control.c
index f038f5afafe2..f0b0e14497a5 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -288,6 +288,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
288{ 288{
289 struct snd_kcontrol *kctl; 289 struct snd_kcontrol *kctl;
290 290
291 /* Make sure that the ids assigned to the control do not wrap around */
292 if (card->last_numid >= UINT_MAX - count)
293 card->last_numid = 0;
294
291 list_for_each_entry(kctl, &card->controls, list) { 295 list_for_each_entry(kctl, &card->controls, list) {
292 if (kctl->id.numid < card->last_numid + 1 + count && 296 if (kctl->id.numid < card->last_numid + 1 + count &&
293 kctl->id.numid + kctl->count > card->last_numid + 1) { 297 kctl->id.numid + kctl->count > card->last_numid + 1) {
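
The wrap-around guard above, together with the id.index range check added a little further down, keeps numid and index arithmetic from overflowing unsigned int. The pattern is the usual pre-addition overflow test, shown standalone with invented values:

    #include <stdio.h>
    #include <limits.h>

    /* true if index + count would wrap past UINT_MAX */
    static int range_would_wrap(unsigned int index, unsigned int count)
    {
        return index > UINT_MAX - count;
    }

    int main(void)
    {
        printf("%d\n", range_would_wrap(10, 4));            /* 0: safe */
        printf("%d\n", range_would_wrap(UINT_MAX - 1, 4));  /* 1: would wrap */
        return 0;
    }
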
@@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
330{ 334{
331 struct snd_ctl_elem_id id; 335 struct snd_ctl_elem_id id;
332 unsigned int idx; 336 unsigned int idx;
337 unsigned int count;
333 int err = -EINVAL; 338 int err = -EINVAL;
334 339
335 if (! kcontrol) 340 if (! kcontrol)
@@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
337 if (snd_BUG_ON(!card || !kcontrol->info)) 342 if (snd_BUG_ON(!card || !kcontrol->info))
338 goto error; 343 goto error;
339 id = kcontrol->id; 344 id = kcontrol->id;
345 if (id.index > UINT_MAX - kcontrol->count)
346 goto error;
347
340 down_write(&card->controls_rwsem); 348 down_write(&card->controls_rwsem);
341 if (snd_ctl_find_id(card, &id)) { 349 if (snd_ctl_find_id(card, &id)) {
342 up_write(&card->controls_rwsem); 350 up_write(&card->controls_rwsem);
@@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
358 card->controls_count += kcontrol->count; 366 card->controls_count += kcontrol->count;
359 kcontrol->id.numid = card->last_numid + 1; 367 kcontrol->id.numid = card->last_numid + 1;
360 card->last_numid += kcontrol->count; 368 card->last_numid += kcontrol->count;
369 count = kcontrol->count;
361 up_write(&card->controls_rwsem); 370 up_write(&card->controls_rwsem);
362 for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) 371 for (idx = 0; idx < count; idx++, id.index++, id.numid++)
363 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); 372 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
364 return 0; 373 return 0;
365 374
@@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
388 bool add_on_replace) 397 bool add_on_replace)
389{ 398{
390 struct snd_ctl_elem_id id; 399 struct snd_ctl_elem_id id;
400 unsigned int count;
391 unsigned int idx; 401 unsigned int idx;
392 struct snd_kcontrol *old; 402 struct snd_kcontrol *old;
393 int ret; 403 int ret;
@@ -423,8 +433,9 @@ add:
423 card->controls_count += kcontrol->count; 433 card->controls_count += kcontrol->count;
424 kcontrol->id.numid = card->last_numid + 1; 434 kcontrol->id.numid = card->last_numid + 1;
425 card->last_numid += kcontrol->count; 435 card->last_numid += kcontrol->count;
436 count = kcontrol->count;
426 up_write(&card->controls_rwsem); 437 up_write(&card->controls_rwsem);
427 for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) 438 for (idx = 0; idx < count; idx++, id.index++, id.numid++)
428 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); 439 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
429 return 0; 440 return 0;
430 441
@@ -897,9 +908,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
897 result = kctl->put(kctl, control); 908 result = kctl->put(kctl, control);
898 } 909 }
899 if (result > 0) { 910 if (result > 0) {
911 struct snd_ctl_elem_id id = control->id;
900 up_read(&card->controls_rwsem); 912 up_read(&card->controls_rwsem);
901 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, 913 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
902 &control->id);
903 return 0; 914 return 0;
904 } 915 }
905 } 916 }
@@ -991,6 +1002,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
991 1002
992struct user_element { 1003struct user_element {
993 struct snd_ctl_elem_info info; 1004 struct snd_ctl_elem_info info;
1005 struct snd_card *card;
994 void *elem_data; /* element data */ 1006 void *elem_data; /* element data */
995 unsigned long elem_data_size; /* size of element data in bytes */ 1007 unsigned long elem_data_size; /* size of element data in bytes */
996 void *tlv_data; /* TLV data */ 1008 void *tlv_data; /* TLV data */
@@ -1034,7 +1046,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
1034{ 1046{
1035 struct user_element *ue = kcontrol->private_data; 1047 struct user_element *ue = kcontrol->private_data;
1036 1048
1049 mutex_lock(&ue->card->user_ctl_lock);
1037 memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size); 1050 memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
1051 mutex_unlock(&ue->card->user_ctl_lock);
1038 return 0; 1052 return 0;
1039} 1053}
1040 1054
@@ -1043,10 +1057,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
1043{ 1057{
1044 int change; 1058 int change;
1045 struct user_element *ue = kcontrol->private_data; 1059 struct user_element *ue = kcontrol->private_data;
1046 1060
1061 mutex_lock(&ue->card->user_ctl_lock);
1047 change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; 1062 change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
1048 if (change) 1063 if (change)
1049 memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); 1064 memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
1065 mutex_unlock(&ue->card->user_ctl_lock);
1050 return change; 1066 return change;
1051} 1067}
1052 1068
@@ -1066,19 +1082,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
1066 new_data = memdup_user(tlv, size); 1082 new_data = memdup_user(tlv, size);
1067 if (IS_ERR(new_data)) 1083 if (IS_ERR(new_data))
1068 return PTR_ERR(new_data); 1084 return PTR_ERR(new_data);
1085 mutex_lock(&ue->card->user_ctl_lock);
1069 change = ue->tlv_data_size != size; 1086 change = ue->tlv_data_size != size;
1070 if (!change) 1087 if (!change)
1071 change = memcmp(ue->tlv_data, new_data, size); 1088 change = memcmp(ue->tlv_data, new_data, size);
1072 kfree(ue->tlv_data); 1089 kfree(ue->tlv_data);
1073 ue->tlv_data = new_data; 1090 ue->tlv_data = new_data;
1074 ue->tlv_data_size = size; 1091 ue->tlv_data_size = size;
1092 mutex_unlock(&ue->card->user_ctl_lock);
1075 } else { 1093 } else {
1076 if (! ue->tlv_data_size || ! ue->tlv_data) 1094 int ret = 0;
1077 return -ENXIO; 1095
1078 if (size < ue->tlv_data_size) 1096 mutex_lock(&ue->card->user_ctl_lock);
1079 return -ENOSPC; 1097 if (!ue->tlv_data_size || !ue->tlv_data) {
1098 ret = -ENXIO;
1099 goto err_unlock;
1100 }
1101 if (size < ue->tlv_data_size) {
1102 ret = -ENOSPC;
1103 goto err_unlock;
1104 }
1080 if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) 1105 if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
1081 return -EFAULT; 1106 ret = -EFAULT;
1107err_unlock:
1108 mutex_unlock(&ue->card->user_ctl_lock);
1109 if (ret)
1110 return ret;
1082 } 1111 }
1083 return change; 1112 return change;
1084} 1113}
@@ -1136,8 +1165,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1136 struct user_element *ue; 1165 struct user_element *ue;
1137 int idx, err; 1166 int idx, err;
1138 1167
1139 if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
1140 return -ENOMEM;
1141 if (info->count < 1) 1168 if (info->count < 1)
1142 return -EINVAL; 1169 return -EINVAL;
1143 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : 1170 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
@@ -1146,21 +1173,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1146 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); 1173 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
1147 info->id.numid = 0; 1174 info->id.numid = 0;
1148 memset(&kctl, 0, sizeof(kctl)); 1175 memset(&kctl, 0, sizeof(kctl));
1149 down_write(&card->controls_rwsem); 1176
1150 _kctl = snd_ctl_find_id(card, &info->id); 1177 if (replace) {
1151 err = 0; 1178 err = snd_ctl_remove_user_ctl(file, &info->id);
1152 if (_kctl) { 1179 if (err)
1153 if (replace) 1180 return err;
1154 err = snd_ctl_remove(card, _kctl);
1155 else
1156 err = -EBUSY;
1157 } else {
1158 if (replace)
1159 err = -ENOENT;
1160 } 1181 }
1161 up_write(&card->controls_rwsem); 1182
1162 if (err < 0) 1183 if (card->user_ctl_count >= MAX_USER_CONTROLS)
1163 return err; 1184 return -ENOMEM;
1185
1164 memcpy(&kctl.id, &info->id, sizeof(info->id)); 1186 memcpy(&kctl.id, &info->id, sizeof(info->id));
1165 kctl.count = info->owner ? info->owner : 1; 1187 kctl.count = info->owner ? info->owner : 1;
1166 access |= SNDRV_CTL_ELEM_ACCESS_USER; 1188 access |= SNDRV_CTL_ELEM_ACCESS_USER;
@@ -1210,6 +1232,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1210 ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); 1232 ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
1211 if (ue == NULL) 1233 if (ue == NULL)
1212 return -ENOMEM; 1234 return -ENOMEM;
1235 ue->card = card;
1213 ue->info = *info; 1236 ue->info = *info;
1214 ue->info.access = 0; 1237 ue->info.access = 0;
1215 ue->elem_data = (char *)ue + sizeof(*ue); 1238 ue->elem_data = (char *)ue + sizeof(*ue);
@@ -1321,8 +1344,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
1321 } 1344 }
1322 err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); 1345 err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
1323 if (err > 0) { 1346 if (err > 0) {
1347 struct snd_ctl_elem_id id = kctl->id;
1324 up_read(&card->controls_rwsem); 1348 up_read(&card->controls_rwsem);
1325 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); 1349 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
1326 return 0; 1350 return 0;
1327 } 1351 }
1328 } else { 1352 } else {
diff --git a/sound/core/init.c b/sound/core/init.c
index 5ee83845c5de..7bdfd19e24a8 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -232,6 +232,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
232 INIT_LIST_HEAD(&card->devices); 232 INIT_LIST_HEAD(&card->devices);
233 init_rwsem(&card->controls_rwsem); 233 init_rwsem(&card->controls_rwsem);
234 rwlock_init(&card->ctl_files_rwlock); 234 rwlock_init(&card->ctl_files_rwlock);
235 mutex_init(&card->user_ctl_lock);
235 INIT_LIST_HEAD(&card->controls); 236 INIT_LIST_HEAD(&card->controls);
236 INIT_LIST_HEAD(&card->ctl_files); 237 INIT_LIST_HEAD(&card->ctl_files);
237 spin_lock_init(&card->files_lock); 238 spin_lock_init(&card->files_lock);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 9ca5e647e54b..225c73152ee9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -660,7 +660,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
660 int atomic, int hop) 660 int atomic, int hop)
661{ 661{
662 struct snd_seq_subscribers *subs; 662 struct snd_seq_subscribers *subs;
663 int err = 0, num_ev = 0; 663 int err, result = 0, num_ev = 0;
664 struct snd_seq_event event_saved; 664 struct snd_seq_event event_saved;
665 struct snd_seq_client_port *src_port; 665 struct snd_seq_client_port *src_port;
666 struct snd_seq_port_subs_info *grp; 666 struct snd_seq_port_subs_info *grp;
@@ -685,8 +685,12 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
685 subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL); 685 subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
686 err = snd_seq_deliver_single_event(client, event, 686 err = snd_seq_deliver_single_event(client, event,
687 0, atomic, hop); 687 0, atomic, hop);
688 if (err < 0) 688 if (err < 0) {
689 break; 689 /* save first error that occurs and continue */
690 if (!result)
691 result = err;
692 continue;
693 }
690 num_ev++; 694 num_ev++;
691 /* restore original event record */ 695 /* restore original event record */
692 *event = event_saved; 696 *event = event_saved;
@@ -697,7 +701,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
697 up_read(&grp->list_mutex); 701 up_read(&grp->list_mutex);
698 *event = event_saved; /* restore */ 702 *event = event_saved; /* restore */
699 snd_seq_port_unlock(src_port); 703 snd_seq_port_unlock(src_port);
700 return (err < 0) ? err : num_ev; 704 return (result < 0) ? result : num_ev;
701} 705}
702 706
703 707
@@ -709,7 +713,7 @@ static int port_broadcast_event(struct snd_seq_client *client,
709 struct snd_seq_event *event, 713 struct snd_seq_event *event,
710 int atomic, int hop) 714 int atomic, int hop)
711{ 715{
712 int num_ev = 0, err = 0; 716 int num_ev = 0, err, result = 0;
713 struct snd_seq_client *dest_client; 717 struct snd_seq_client *dest_client;
714 struct snd_seq_client_port *port; 718 struct snd_seq_client_port *port;
715 719
@@ -724,14 +728,18 @@ static int port_broadcast_event(struct snd_seq_client *client,
724 err = snd_seq_deliver_single_event(NULL, event, 728 err = snd_seq_deliver_single_event(NULL, event,
725 SNDRV_SEQ_FILTER_BROADCAST, 729 SNDRV_SEQ_FILTER_BROADCAST,
726 atomic, hop); 730 atomic, hop);
727 if (err < 0) 731 if (err < 0) {
728 break; 732 /* save first error that occurs and continue */
733 if (!result)
734 result = err;
735 continue;
736 }
729 num_ev++; 737 num_ev++;
730 } 738 }
731 read_unlock(&dest_client->ports_lock); 739 read_unlock(&dest_client->ports_lock);
732 snd_seq_client_unlock(dest_client); 740 snd_seq_client_unlock(dest_client);
733 event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */ 741 event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
734 return (err < 0) ? err : num_ev; 742 return (result < 0) ? result : num_ev;
735} 743}
736 744
737/* 745/*
@@ -741,7 +749,7 @@ static int port_broadcast_event(struct snd_seq_client *client,
741static int broadcast_event(struct snd_seq_client *client, 749static int broadcast_event(struct snd_seq_client *client,
742 struct snd_seq_event *event, int atomic, int hop) 750 struct snd_seq_event *event, int atomic, int hop)
743{ 751{
744 int err = 0, num_ev = 0; 752 int err, result = 0, num_ev = 0;
745 int dest; 753 int dest;
746 struct snd_seq_addr addr; 754 struct snd_seq_addr addr;
747 755
@@ -760,12 +768,16 @@ static int broadcast_event(struct snd_seq_client *client,
760 err = snd_seq_deliver_single_event(NULL, event, 768 err = snd_seq_deliver_single_event(NULL, event,
761 SNDRV_SEQ_FILTER_BROADCAST, 769 SNDRV_SEQ_FILTER_BROADCAST,
762 atomic, hop); 770 atomic, hop);
763 if (err < 0) 771 if (err < 0) {
764 break; 772 /* save first error that occurs and continue */
773 if (!result)
774 result = err;
775 continue;
776 }
765 num_ev += err; 777 num_ev += err;
766 } 778 }
767 event->dest = addr; /* restore */ 779 event->dest = addr; /* restore */
768 return (err < 0) ? err : num_ev; 780 return (result < 0) ? result : num_ev;
769} 781}
770 782
771 783
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 559989992bef..53a403e17c5b 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -124,7 +124,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
124 snd_use_lock_use(&f->use_lock); 124 snd_use_lock_use(&f->use_lock);
125 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ 125 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
126 if (err < 0) { 126 if (err < 0) {
127 if (err == -ENOMEM) 127 if ((err == -ENOMEM) || (err == -EAGAIN))
128 atomic_inc(&f->overflow); 128 atomic_inc(&f->overflow);
129 snd_use_lock_free(&f->use_lock); 129 snd_use_lock_free(&f->use_lock);
130 return err; 130 return err;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index cfd455a8ac1a..777a45e08e53 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -390,7 +390,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
390 struct timespec tstamp; 390 struct timespec tstamp;
391 391
392 if (timer_tstamp_monotonic) 392 if (timer_tstamp_monotonic)
393 do_posix_clock_monotonic_gettime(&tstamp); 393 ktime_get_ts(&tstamp);
394 else 394 else
395 getnstimeofday(&tstamp); 395 getnstimeofday(&tstamp);
396 if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || 396 if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
@@ -1203,7 +1203,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
1203 } 1203 }
1204 if (tu->last_resolution != resolution || ticks > 0) { 1204 if (tu->last_resolution != resolution || ticks > 0) {
1205 if (timer_tstamp_monotonic) 1205 if (timer_tstamp_monotonic)
1206 do_posix_clock_monotonic_gettime(&tstamp); 1206 ktime_get_ts(&tstamp);
1207 else 1207 else
1208 getnstimeofday(&tstamp); 1208 getnstimeofday(&tstamp);
1209 } 1209 }
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index d1c93a1e0978..e13eef99c27a 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -208,8 +208,6 @@ int snd_bebob_stream_set_rate(struct snd_bebob *bebob, unsigned int rate);
208int snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, 208int snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob,
209 bool *internal); 209 bool *internal);
210int snd_bebob_stream_discover(struct snd_bebob *bebob); 210int snd_bebob_stream_discover(struct snd_bebob *bebob);
211int snd_bebob_stream_map(struct snd_bebob *bebob,
212 struct amdtp_stream *stream);
213int snd_bebob_stream_init_duplex(struct snd_bebob *bebob); 211int snd_bebob_stream_init_duplex(struct snd_bebob *bebob);
214int snd_bebob_stream_start_duplex(struct snd_bebob *bebob, unsigned int rate); 212int snd_bebob_stream_start_duplex(struct snd_bebob *bebob, unsigned int rate);
215void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob); 213void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob);
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index bc4f82776fda..ef4d0c9f6578 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -655,8 +655,6 @@ void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob)
655 struct amdtp_stream *master, *slave; 655 struct amdtp_stream *master, *slave;
656 atomic_t *master_substreams, *slave_substreams; 656 atomic_t *master_substreams, *slave_substreams;
657 657
658 mutex_lock(&bebob->mutex);
659
660 if (bebob->master == &bebob->rx_stream) { 658 if (bebob->master == &bebob->rx_stream) {
661 slave = &bebob->tx_stream; 659 slave = &bebob->tx_stream;
662 master = &bebob->rx_stream; 660 master = &bebob->rx_stream;
@@ -669,6 +667,8 @@ void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob)
669 master_substreams = &bebob->capture_substreams; 667 master_substreams = &bebob->capture_substreams;
670 } 668 }
671 669
670 mutex_lock(&bebob->mutex);
671
672 if (atomic_read(slave_substreams) == 0) { 672 if (atomic_read(slave_substreams) == 0) {
673 amdtp_stream_pcm_abort(slave); 673 amdtp_stream_pcm_abort(slave);
674 amdtp_stream_stop(slave); 674 amdtp_stream_stop(slave);
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 996fdc44c83c..3e2ed8e82cbc 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -346,7 +346,6 @@ static void __exit snd_efw_exit(void)
346{ 346{
347 snd_efw_transaction_unregister(); 347 snd_efw_transaction_unregister();
348 driver_unregister(&efw_driver.driver); 348 driver_unregister(&efw_driver.driver);
349 mutex_destroy(&devices_mutex);
350} 349}
351 350
352module_init(snd_efw_init); 351module_init(snd_efw_init);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index d2b36be4d2f8..4f0201a95222 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -162,7 +162,6 @@ enum snd_efw_grp_type {
162 SND_EFW_CH_TYPE_GUITAR = 7, 162 SND_EFW_CH_TYPE_GUITAR = 7,
163 SND_EFW_CH_TYPE_PIEZO_GUITAR = 8, 163 SND_EFW_CH_TYPE_PIEZO_GUITAR = 8,
164 SND_EFW_CH_TYPE_GUITAR_STRING = 9, 164 SND_EFW_CH_TYPE_GUITAR_STRING = 9,
165 SND_EFW_CH_TYPE_VIRTUAL = 0x10000,
166 SND_EFW_CH_TYPE_DUMMY 165 SND_EFW_CH_TYPE_DUMMY
167}; 166};
168struct snd_efw_phys_meters { 167struct snd_efw_phys_meters {
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 4f8216fb6b62..33df8655fe81 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -58,7 +58,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
58 efw->pull_ptr += till_end; 58 efw->pull_ptr += till_end;
59 if (efw->pull_ptr >= efw->resp_buf + 59 if (efw->pull_ptr >= efw->resp_buf +
60 snd_efw_resp_buf_size) 60 snd_efw_resp_buf_size)
61 efw->pull_ptr = efw->resp_buf; 61 efw->pull_ptr -= snd_efw_resp_buf_size;
62 62
63 length -= till_end; 63 length -= till_end;
64 buf += till_end; 64 buf += till_end;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index 541569022a7c..b985fc5ebdc6 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -284,8 +284,6 @@ void snd_efw_stream_stop_duplex(struct snd_efw *efw)
284 struct amdtp_stream *master, *slave; 284 struct amdtp_stream *master, *slave;
285 atomic_t *master_substreams, *slave_substreams; 285 atomic_t *master_substreams, *slave_substreams;
286 286
287 mutex_lock(&efw->mutex);
288
289 if (efw->master == &efw->rx_stream) { 287 if (efw->master == &efw->rx_stream) {
290 slave = &efw->tx_stream; 288 slave = &efw->tx_stream;
291 master = &efw->rx_stream; 289 master = &efw->rx_stream;
@@ -298,6 +296,8 @@ void snd_efw_stream_stop_duplex(struct snd_efw *efw)
298 master_substreams = &efw->capture_substreams; 296 master_substreams = &efw->capture_substreams;
299 } 297 }
300 298
299 mutex_lock(&efw->mutex);
300
301 if (atomic_read(slave_substreams) == 0) { 301 if (atomic_read(slave_substreams) == 0) {
302 stop_stream(efw, slave); 302 stop_stream(efw, slave);
303 303
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index aa56b8ac537c..255dabc6fc33 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -8,19 +8,19 @@
8 8
9/* 9/*
10 * Fireworks have its own transaction. The transaction can be delivered by AV/C 10 * Fireworks have its own transaction. The transaction can be delivered by AV/C
11 * Vendor Specific command. But at least Windows driver and firmware version 5.5 11 * Vendor Specific command frame or usual asynchronous transaction. At least,
12 * or later don't use it. 12 * Windows driver and firmware version 5.5 or later don't use AV/C command.
13 * 13 *
14 * Transaction substance: 14 * Transaction substance:
 15 * At first, 6 data exist. Following the 6 data, parameters for each 15 * At first, 6 data exist. Following the data, parameters for each command
 16 * commands exists. All of parameters are 32 bit aligned to big endian. 16 * exist. All of the parameters are 32 bit aligned to big endian.
17 * data[0]: Length of transaction substance 17 * data[0]: Length of transaction substance
18 * data[1]: Transaction version 18 * data[1]: Transaction version
19 * data[2]: Sequence number. This is incremented by the device 19 * data[2]: Sequence number. This is incremented by the device
20 * data[3]: transaction category 20 * data[3]: Transaction category
21 * data[4]: transaction command 21 * data[4]: Transaction command
22 * data[5]: return value in response. 22 * data[5]: Return value in response.
23 * data[6-]: parameters 23 * data[6-]: Parameters
24 * 24 *
25 * Transaction address: 25 * Transaction address:
26 * command: 0xecc000000000 26 * command: 0xecc000000000
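
For reference, the substance layout described in the comment above maps onto a simple quadlet structure. A sketch only; the struct and field names are illustrative rather than taken from the driver, and every quadlet is big endian on the wire:

    #include <linux/types.h>

    struct efw_transaction_sketch {
        __be32 length;    /* data[0]: length of transaction substance */
        __be32 version;   /* data[1]: transaction version */
        __be32 seqnum;    /* data[2]: sequence number, incremented by the device */
        __be32 category;  /* data[3]: transaction category */
        __be32 command;   /* data[4]: transaction command */
        __be32 status;    /* data[5]: return value in the response */
        __be32 params[];  /* data[6-]: command parameters */
    };
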
@@ -148,7 +148,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
148 148
149 efw->push_ptr += till_end; 149 efw->push_ptr += till_end;
150 if (efw->push_ptr >= efw->resp_buf + snd_efw_resp_buf_size) 150 if (efw->push_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
151 efw->push_ptr = efw->resp_buf; 151 efw->push_ptr -= snd_efw_resp_buf_size;
152 152
153 length -= till_end; 153 length -= till_end;
154 data += till_end; 154 data += till_end;
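
The rewritten header comment above describes the transaction substance as six 32-bit big-endian quadlets followed by per-command parameters. Purely as a reading aid (this struct does not exist in the driver), the layout could be pictured as:

#include <stdint.h>

/* Illustrative view of data[0..5] and the trailing parameters; all
 * fields are 32-bit and big-endian on the wire.
 */
struct efw_transaction_header {
	uint32_t length;	/* data[0]: length of transaction substance */
	uint32_t version;	/* data[1]: transaction version */
	uint32_t seqnum;	/* data[2]: sequence number, incremented by the device */
	uint32_t category;	/* data[3]: transaction category */
	uint32_t command;	/* data[4]: transaction command */
	uint32_t status;	/* data[5]: return value in response */
	uint32_t params[];	/* data[6-]: command parameters */
};
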
diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
index 9d07e4edacdb..e9e8a4a4a9a1 100644
--- a/sound/pci/hda/hda_i915.c
+++ b/sound/pci/hda/hda_i915.c
@@ -22,20 +22,20 @@
22#include <drm/i915_powerwell.h> 22#include <drm/i915_powerwell.h>
23#include "hda_i915.h" 23#include "hda_i915.h"
24 24
25static void (*get_power)(void); 25static int (*get_power)(void);
26static void (*put_power)(void); 26static int (*put_power)(void);
27 27
28void hda_display_power(bool enable) 28int hda_display_power(bool enable)
29{ 29{
30 if (!get_power || !put_power) 30 if (!get_power || !put_power)
31 return; 31 return -ENODEV;
32 32
33 pr_debug("HDA display power %s \n", 33 pr_debug("HDA display power %s \n",
34 enable ? "Enable" : "Disable"); 34 enable ? "Enable" : "Disable");
35 if (enable) 35 if (enable)
36 get_power(); 36 return get_power();
37 else 37 else
38 put_power(); 38 return put_power();
39} 39}
40 40
41int hda_i915_init(void) 41int hda_i915_init(void)
diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h
index 5a63da2c53e5..bfd835f8f1aa 100644
--- a/sound/pci/hda/hda_i915.h
+++ b/sound/pci/hda/hda_i915.h
@@ -17,11 +17,11 @@
17#define __SOUND_HDA_I915_H 17#define __SOUND_HDA_I915_H
18 18
19#ifdef CONFIG_SND_HDA_I915 19#ifdef CONFIG_SND_HDA_I915
20void hda_display_power(bool enable); 20int hda_display_power(bool enable);
21int hda_i915_init(void); 21int hda_i915_init(void);
22int hda_i915_exit(void); 22int hda_i915_exit(void);
23#else 23#else
24static inline void hda_display_power(bool enable) {} 24static inline int hda_display_power(bool enable) { return 0; }
25static inline int hda_i915_init(void) 25static inline int hda_i915_init(void)
26{ 26{
27 return -ENODEV; 27 return -ENODEV;
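
Together, the hda_i915.c and hda_i915.h hunks above turn hda_display_power() into an int-returning call so power-well failures can be propagated. A hedged example of the caller pattern this enables; the wrapper function is invented, and only dev_err() and the new hda_display_power() prototype from hda_i915.h are real interfaces:

#include <linux/device.h>

#include "hda_i915.h"

/* Hypothetical caller: request display power and report a failure
 * instead of silently continuing.
 */
static int example_enable_display_power(struct device *dev)
{
	int err = hda_display_power(true);

	if (err < 0)
		dev_err(dev, "cannot turn on i915 display power: %d\n", err);
	return err;
}
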
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cd77b9b19b73..23fd6b9aecca 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -237,6 +237,12 @@ enum {
237 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \ 237 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \
238 AZX_DCAPS_I915_POWERWELL) 238 AZX_DCAPS_I915_POWERWELL)
239 239
240/* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
241#define AZX_DCAPS_INTEL_BROADWELL \
242 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_ALIGN_BUFSIZE | \
243 AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_PM_RUNTIME | \
244 AZX_DCAPS_I915_POWERWELL)
245
240/* quirks for ATI SB / AMD Hudson */ 246/* quirks for ATI SB / AMD Hudson */
241#define AZX_DCAPS_PRESET_ATI_SB \ 247#define AZX_DCAPS_PRESET_ATI_SB \
242 (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \ 248 (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
@@ -1367,12 +1373,6 @@ static int azx_first_init(struct azx *chip)
1367 /* initialize streams */ 1373 /* initialize streams */
1368 azx_init_stream(chip); 1374 azx_init_stream(chip);
1369 1375
1370 /* workaround for Broadwell HDMI: the first stream is broken,
1371 * so mask it by keeping it as if opened
1372 */
1373 if (pci->vendor == 0x8086 && pci->device == 0x160c)
1374 chip->azx_dev[0].opened = 1;
1375
1376 /* initialize chip */ 1376 /* initialize chip */
1377 azx_init_pci(chip); 1377 azx_init_pci(chip);
1378 azx_init_chip(chip, (probe_only[dev] & 2) == 0); 1378 azx_init_chip(chip, (probe_only[dev] & 2) == 0);
@@ -1656,8 +1656,13 @@ static int azx_probe_continue(struct azx *chip)
1656 "Error request power-well from i915\n"); 1656 "Error request power-well from i915\n");
1657 goto out_free; 1657 goto out_free;
1658 } 1658 }
1659 err = hda_display_power(true);
1660 if (err < 0) {
1661 dev_err(chip->card->dev,
1662 "Cannot turn on display power on i915\n");
1663 goto out_free;
1664 }
1659#endif 1665#endif
1660 hda_display_power(true);
1661 } 1666 }
1662 1667
1663 err = azx_first_init(chip); 1668 err = azx_first_init(chip);
@@ -1769,7 +1774,7 @@ static const struct pci_device_id azx_ids[] = {
1769 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 1774 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
1770 /* Broadwell */ 1775 /* Broadwell */
1771 { PCI_DEVICE(0x8086, 0x160c), 1776 { PCI_DEVICE(0x8086, 0x160c),
1772 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 1777 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_BROADWELL },
1773 /* 5 Series/3400 */ 1778 /* 5 Series/3400 */
1774 { PCI_DEVICE(0x8086, 0x3b56), 1779 { PCI_DEVICE(0x8086, 0x3b56),
1775 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, 1780 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index be0a9ee0b804..3e4417b0ddbe 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1594,10 +1594,18 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
1594 * Re-setup pin and infoframe. This is needed e.g. when 1594 * Re-setup pin and infoframe. This is needed e.g. when
1595 * - sink is first plugged-in (infoframe is not set up if !monitor_present) 1595 * - sink is first plugged-in (infoframe is not set up if !monitor_present)
1596 * - transcoder can change during stream playback on Haswell 1596 * - transcoder can change during stream playback on Haswell
1597 * and this can make HW reset converter selection on a pin.
1597 */ 1598 */
1598 if (eld->eld_valid && !old_eld_valid && per_pin->setup) 1599 if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
1600 if (is_haswell_plus(codec) || is_valleyview(codec)) {
1601 intel_verify_pin_cvt_connect(codec, per_pin);
1602 intel_not_share_assigned_cvt(codec, pin_nid,
1603 per_pin->mux_idx);
1604 }
1605
1599 hdmi_setup_audio_infoframe(codec, per_pin, 1606 hdmi_setup_audio_infoframe(codec, per_pin,
1600 per_pin->non_pcm); 1607 per_pin->non_pcm);
1608 }
1601 } 1609 }
1602 1610
1603 if (eld_changed) 1611 if (eld_changed)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 12fb411adf77..af76995fa966 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -929,6 +929,7 @@ struct alc_codec_rename_pci_table {
929}; 929};
930 930
931static struct alc_codec_rename_table rename_tbl[] = { 931static struct alc_codec_rename_table rename_tbl[] = {
932 { 0x10ec0221, 0xf00f, 0x1003, "ALC231" },
932 { 0x10ec0269, 0xfff0, 0x3010, "ALC277" }, 933 { 0x10ec0269, 0xfff0, 0x3010, "ALC277" },
933 { 0x10ec0269, 0xf0f0, 0x2010, "ALC259" }, 934 { 0x10ec0269, 0xf0f0, 0x2010, "ALC259" },
934 { 0x10ec0269, 0xf0f0, 0x3010, "ALC258" }, 935 { 0x10ec0269, 0xf0f0, 0x3010, "ALC258" },
@@ -937,6 +938,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
937 { 0x10ec0269, 0xffff, 0x6023, "ALC281X" }, 938 { 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
938 { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" }, 939 { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
939 { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" }, 940 { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
941 { 0x10ec0662, 0xffff, 0x4020, "ALC656" },
940 { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" }, 942 { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
941 { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" }, 943 { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
942 { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" }, 944 { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -956,6 +958,19 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
956 { 0x10ec0293, 0x1028, 0, "ALC3235" }, 958 { 0x10ec0293, 0x1028, 0, "ALC3235" },
957 { 0x10ec0255, 0x1028, 0, "ALC3234" }, 959 { 0x10ec0255, 0x1028, 0, "ALC3234" },
958 { 0x10ec0668, 0x1028, 0, "ALC3661" }, 960 { 0x10ec0668, 0x1028, 0, "ALC3661" },
961 { 0x10ec0275, 0x1028, 0, "ALC3260" },
962 { 0x10ec0899, 0x1028, 0, "ALC3861" },
963 { 0x10ec0670, 0x1025, 0, "ALC669X" },
964 { 0x10ec0676, 0x1025, 0, "ALC679X" },
965 { 0x10ec0282, 0x1043, 0, "ALC3229" },
966 { 0x10ec0233, 0x1043, 0, "ALC3236" },
967 { 0x10ec0280, 0x103c, 0, "ALC3228" },
968 { 0x10ec0282, 0x103c, 0, "ALC3227" },
969 { 0x10ec0286, 0x103c, 0, "ALC3242" },
970 { 0x10ec0290, 0x103c, 0, "ALC3241" },
971 { 0x10ec0668, 0x103c, 0, "ALC3662" },
972 { 0x10ec0283, 0x17aa, 0, "ALC3239" },
973 { 0x10ec0292, 0x17aa, 0, "ALC3232" },
959 { } /* terminator */ 974 { } /* terminator */
960}; 975};
961 976
@@ -1412,6 +1427,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
1412 SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A), 1427 SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A),
1413 SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V), 1428 SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V),
1414 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1), 1429 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1),
1430 SND_PCI_QUIRK(0x147b, 0x1045, "ABit AA8XE", ALC880_FIXUP_6ST_AUTOMUTE),
1415 SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2), 1431 SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2),
1416 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", ALC880_FIXUP_EAPD_COEF), 1432 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", ALC880_FIXUP_EAPD_COEF),
1417 SND_PCI_QUIRK(0x1584, 0x9050, "Uniwill", ALC880_FIXUP_UNIWILL_DIG), 1433 SND_PCI_QUIRK(0x1584, 0x9050, "Uniwill", ALC880_FIXUP_UNIWILL_DIG),
@@ -4230,6 +4246,7 @@ enum {
4230 ALC269_FIXUP_HEADSET_MIC, 4246 ALC269_FIXUP_HEADSET_MIC,
4231 ALC269_FIXUP_QUANTA_MUTE, 4247 ALC269_FIXUP_QUANTA_MUTE,
4232 ALC269_FIXUP_LIFEBOOK, 4248 ALC269_FIXUP_LIFEBOOK,
4249 ALC269_FIXUP_LIFEBOOK_EXTMIC,
4233 ALC269_FIXUP_AMIC, 4250 ALC269_FIXUP_AMIC,
4234 ALC269_FIXUP_DMIC, 4251 ALC269_FIXUP_DMIC,
4235 ALC269VB_FIXUP_AMIC, 4252 ALC269VB_FIXUP_AMIC,
@@ -4367,6 +4384,13 @@ static const struct hda_fixup alc269_fixups[] = {
4367 .chained = true, 4384 .chained = true,
4368 .chain_id = ALC269_FIXUP_QUANTA_MUTE 4385 .chain_id = ALC269_FIXUP_QUANTA_MUTE
4369 }, 4386 },
4387 [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
4388 .type = HDA_FIXUP_PINS,
4389 .v.pins = (const struct hda_pintbl[]) {
4390 { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
4391 { }
4392 },
4393 },
4370 [ALC269_FIXUP_AMIC] = { 4394 [ALC269_FIXUP_AMIC] = {
4371 .type = HDA_FIXUP_PINS, 4395 .type = HDA_FIXUP_PINS,
4372 .v.pins = (const struct hda_pintbl[]) { 4396 .v.pins = (const struct hda_pintbl[]) {
@@ -4741,18 +4765,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4741 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4765 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4742 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), 4766 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4743 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), 4767 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4744 SND_PCI_QUIRK(0x1028, 0x062c, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4745 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), 4768 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
4746 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4769 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4747 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4770 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4748 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4771 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4749 SND_PCI_QUIRK(0x1028, 0x064d, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4750 SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), 4772 SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
4751 SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), 4773 SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
4752 SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4753 SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4754 SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4755 SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4756 SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4774 SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4757 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4775 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4758 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4776 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4764,14 +4782,24 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4764 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4782 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4765 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), 4783 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
4766 /* ALC282 */ 4784 /* ALC282 */
4785 SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4786 SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4767 SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4787 SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4788 SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4789 SND_PCI_QUIRK(0x103c, 0x2211, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4790 SND_PCI_QUIRK(0x103c, 0x2212, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4768 SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4791 SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4792 SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4769 SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4793 SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4770 SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4794 SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4771 SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4795 SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4772 SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4796 SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4773 SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4797 SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4774 SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4798 SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4799 SND_PCI_QUIRK(0x103c, 0x226c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4800 SND_PCI_QUIRK(0x103c, 0x226d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4801 SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4802 SND_PCI_QUIRK(0x103c, 0x226f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4775 SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4803 SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4776 SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4804 SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4777 SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4805 SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4811,6 +4839,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4811 SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4839 SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4812 SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4840 SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4813 SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4841 SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4842 SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4843 SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4844 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4845 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4814 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED), 4846 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
4815 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 4847 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
4816 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4848 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4834,6 +4866,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4834 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), 4866 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
4835 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 4867 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
4836 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), 4868 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
4869 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
4837 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 4870 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
4838 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 4871 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
4839 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), 4872 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -4977,6 +5010,26 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4977#endif 5010#endif
4978 .pins = (const struct hda_pintbl[]) { 5011 .pins = (const struct hda_pintbl[]) {
4979 {0x12, 0x90a60160}, 5012 {0x12, 0x90a60160},
5013 {0x14, 0x90170120},
5014 {0x17, 0x90170140},
5015 {0x18, 0x40000000},
5016 {0x19, 0x411111f0},
5017 {0x1a, 0x411111f0},
5018 {0x1b, 0x411111f0},
5019 {0x1d, 0x41163b05},
5020 {0x1e, 0x411111f0},
5021 {0x21, 0x0321102f},
5022 },
5023 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5024 },
5025 {
5026 .codec = 0x10ec0255,
5027 .subvendor = 0x1028,
5028#ifdef CONFIG_SND_DEBUG_VERBOSE
5029 .name = "Dell",
5030#endif
5031 .pins = (const struct hda_pintbl[]) {
5032 {0x12, 0x90a60160},
4980 {0x14, 0x90170130}, 5033 {0x14, 0x90170130},
4981 {0x17, 0x40000000}, 5034 {0x17, 0x40000000},
4982 {0x18, 0x411111f0}, 5035 {0x18, 0x411111f0},
@@ -5129,7 +5182,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5129 {0x1d, 0x40700001}, 5182 {0x1d, 0x40700001},
5130 {0x1e, 0x411111f0}, 5183 {0x1e, 0x411111f0},
5131 }, 5184 },
5132 .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 5185 .value = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5133 }, 5186 },
5134 {} 5187 {}
5135}; 5188};
@@ -6014,6 +6067,27 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
6014 .name = "Dell", 6067 .name = "Dell",
6015#endif 6068#endif
6016 .pins = (const struct hda_pintbl[]) { 6069 .pins = (const struct hda_pintbl[]) {
6070 {0x12, 0x99a30140},
6071 {0x14, 0x90170110},
6072 {0x15, 0x0321101f},
6073 {0x16, 0x03011020},
6074 {0x18, 0x40000008},
6075 {0x19, 0x411111f0},
6076 {0x1a, 0x411111f0},
6077 {0x1b, 0x411111f0},
6078 {0x1d, 0x41000001},
6079 {0x1e, 0x411111f0},
6080 {0x1f, 0x411111f0},
6081 },
6082 .value = ALC668_FIXUP_AUTO_MUTE,
6083 },
6084 {
6085 .codec = 0x10ec0668,
6086 .subvendor = 0x1028,
6087#ifdef CONFIG_SND_DEBUG_VERBOSE
6088 .name = "Dell",
6089#endif
6090 .pins = (const struct hda_pintbl[]) {
6017 {0x12, 0x99a30150}, 6091 {0x12, 0x99a30150},
6018 {0x14, 0x90170110}, 6092 {0x14, 0x90170110},
6019 {0x15, 0x0321101f}, 6093 {0x15, 0x0321101f},
@@ -6190,6 +6264,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
6190 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 }, 6264 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
6191 { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 }, 6265 { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
6192 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 }, 6266 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
6267 { .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 },
6193 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 }, 6268 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
6194 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 }, 6269 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
6195 { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 }, 6270 { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
@@ -6223,10 +6298,12 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
6223 .patch = patch_alc662 }, 6298 .patch = patch_alc662 },
6224 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 }, 6299 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
6225 { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, 6300 { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
6301 { .id = 0x10ec0667, .name = "ALC667", .patch = patch_alc662 },
6226 { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, 6302 { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
6227 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, 6303 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
6228 { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, 6304 { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
6229 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, 6305 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
6306 { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
6230 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, 6307 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
6231 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, 6308 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
6232 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 }, 6309 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
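
Most of the patch_realtek.c hunks above are table additions: coefficient-based renames, PCI-subsystem renames, SND_PCI_QUIRK fixups, and pin-config quirks. A self-contained sketch of how a (codec id, PCI subvendor) rename table of the rename_pci_tbl shape is typically walked; the struct and helper below are stand-ins that ignore the subdevice column (shown as 0, i.e. wildcard, above), not the driver's code:

#include <stddef.h>

struct rename_pci_entry {
	unsigned int codec_id;		/* e.g. 0x10ec0275 */
	unsigned short subvendor;	/* e.g. 0x1028 (Dell) */
	const char *name;		/* marketing name, e.g. "ALC3260" */
};

static const char *lookup_rename(const struct rename_pci_entry *tbl,
				 unsigned int codec_id,
				 unsigned short subvendor)
{
	for (; tbl->codec_id; tbl++)	/* table ends with a zeroed terminator */
		if (tbl->codec_id == codec_id && tbl->subvendor == subvendor)
			return tbl->name;
	return NULL;			/* no match: keep the original chip name */
}
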
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 68340d7df76d..c91860e0a28d 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2779,7 +2779,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2779 unsigned long port; 2779 unsigned long port;
2780 unsigned long pos, pos1, t; 2780 unsigned long pos, pos1, t;
2781 int civ, timeout = 1000, attempt = 1; 2781 int civ, timeout = 1000, attempt = 1;
2782 struct timespec start_time, stop_time; 2782 ktime_t start_time, stop_time;
2783 2783
2784 if (chip->ac97_bus->clock != 48000) 2784 if (chip->ac97_bus->clock != 48000)
2785 return; /* specified in module option */ 2785 return; /* specified in module option */
@@ -2813,7 +2813,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2813 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE); 2813 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE);
2814 iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot); 2814 iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot);
2815 } 2815 }
2816 do_posix_clock_monotonic_gettime(&start_time); 2816 start_time = ktime_get();
2817 spin_unlock_irq(&chip->reg_lock); 2817 spin_unlock_irq(&chip->reg_lock);
2818 msleep(50); 2818 msleep(50);
2819 spin_lock_irq(&chip->reg_lock); 2819 spin_lock_irq(&chip->reg_lock);
@@ -2837,7 +2837,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2837 pos += ichdev->position; 2837 pos += ichdev->position;
2838 } 2838 }
2839 chip->in_measurement = 0; 2839 chip->in_measurement = 0;
2840 do_posix_clock_monotonic_gettime(&stop_time); 2840 stop_time = ktime_get();
2841 /* stop */ 2841 /* stop */
2842 if (chip->device_type == DEVICE_ALI) { 2842 if (chip->device_type == DEVICE_ALI) {
2843 iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16)); 2843 iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16));
@@ -2865,9 +2865,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2865 } 2865 }
2866 2866
2867 pos /= 4; 2867 pos /= 4;
2868 t = stop_time.tv_sec - start_time.tv_sec; 2868 t = ktime_us_delta(stop_time, start_time);
2869 t *= 1000000;
2870 t += (stop_time.tv_nsec - start_time.tv_nsec) / 1000;
2871 dev_info(chip->card->dev, 2869 dev_info(chip->card->dev,
2872 "%s: measured %lu usecs (%lu samples)\n", __func__, t, pos); 2870 "%s: measured %lu usecs (%lu samples)\n", __func__, t, pos);
2873 if (t == 0) { 2871 if (t == 0) {
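
The intel8x0 hunks above replace struct timespec plus manual second/nanosecond arithmetic with ktime_t and ktime_us_delta(). A minimal sketch of that measurement pattern (the work callback is a placeholder):

#include <linux/ktime.h>

/* Measure how long fn() takes, in microseconds. */
static s64 measure_usecs(void (*fn)(void))
{
	ktime_t start = ktime_get();

	fn();
	return ktime_us_delta(ktime_get(), start);
}
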
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index cbfa1e18f651..0b9571c858f8 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -225,11 +225,11 @@ config SND_SOC_ADAU1373
225config SND_SOC_ADAU1701 225config SND_SOC_ADAU1701
226 tristate "Analog Devices ADAU1701 CODEC" 226 tristate "Analog Devices ADAU1701 CODEC"
227 depends on I2C 227 depends on I2C
228 select SND_SOC_SIGMADSP 228 select SND_SOC_SIGMADSP_I2C
229 229
230config SND_SOC_ADAU17X1 230config SND_SOC_ADAU17X1
231 tristate 231 tristate
232 select SND_SOC_SIGMADSP 232 select SND_SOC_SIGMADSP_REGMAP
233 233
234config SND_SOC_ADAU1761 234config SND_SOC_ADAU1761
235 tristate 235 tristate
@@ -476,6 +476,14 @@ config SND_SOC_SIGMADSP
476 tristate 476 tristate
477 select CRC32 477 select CRC32
478 478
479config SND_SOC_SIGMADSP_I2C
480 tristate
481 select SND_SOC_SIGMADSP
482
483config SND_SOC_SIGMADSP_REGMAP
484 tristate
485 select SND_SOC_SIGMADSP
486
479config SND_SOC_SIRF_AUDIO_CODEC 487config SND_SOC_SIRF_AUDIO_CODEC
480 tristate "SiRF SoC internal audio codec" 488 tristate "SiRF SoC internal audio codec"
481 select REGMAP_MMIO 489 select REGMAP_MMIO
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index be3377b8d73f..1bd6e1cf6f82 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -77,6 +77,8 @@ snd-soc-sgtl5000-objs := sgtl5000.o
77snd-soc-alc5623-objs := alc5623.o 77snd-soc-alc5623-objs := alc5623.o
78snd-soc-alc5632-objs := alc5632.o 78snd-soc-alc5632-objs := alc5632.o
79snd-soc-sigmadsp-objs := sigmadsp.o 79snd-soc-sigmadsp-objs := sigmadsp.o
80snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
81snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
80snd-soc-si476x-objs := si476x.o 82snd-soc-si476x-objs := si476x.o
81snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o 83snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
82snd-soc-sn95031-objs := sn95031.o 84snd-soc-sn95031-objs := sn95031.o
@@ -240,6 +242,8 @@ obj-$(CONFIG_SND_SOC_RT5651) += snd-soc-rt5651.o
240obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o 242obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
241obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o 243obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
242obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o 244obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
245obj-$(CONFIG_SND_SOC_SIGMADSP_I2C) += snd-soc-sigmadsp-i2c.o
246obj-$(CONFIG_SND_SOC_SIGMADSP_REGMAP) += snd-soc-sigmadsp-regmap.o
243obj-$(CONFIG_SND_SOC_SI476X) += snd-soc-si476x.o 247obj-$(CONFIG_SND_SOC_SI476X) += snd-soc-si476x.o
244obj-$(CONFIG_SND_SOC_SN95031) +=snd-soc-sn95031.o 248obj-$(CONFIG_SND_SOC_SN95031) +=snd-soc-sn95031.o
245obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif-rx.o snd-soc-spdif-tx.o 249obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif-rx.o snd-soc-spdif-tx.o
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
new file mode 100644
index 000000000000..246081aae8ca
--- /dev/null
+++ b/sound/soc/codecs/sigmadsp-i2c.c
@@ -0,0 +1,35 @@
1/*
2 * Load Analog Devices SigmaStudio firmware files
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/i2c.h>
10#include <linux/export.h>
11#include <linux/module.h>
12
13#include "sigmadsp.h"
14
15static int sigma_action_write_i2c(void *control_data,
16 const struct sigma_action *sa, size_t len)
17{
18 return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
19 len);
20}
21
22int process_sigma_firmware(struct i2c_client *client, const char *name)
23{
24 struct sigma_firmware ssfw;
25
26 ssfw.control_data = client;
27 ssfw.write = sigma_action_write_i2c;
28
29 return _process_sigma_firmware(&client->dev, &ssfw, name);
30}
31EXPORT_SYMBOL(process_sigma_firmware);
32
33MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
34MODULE_DESCRIPTION("SigmaDSP I2C firmware loader");
35MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp-regmap.c b/sound/soc/codecs/sigmadsp-regmap.c
new file mode 100644
index 000000000000..f78ed8d2cfb2
--- /dev/null
+++ b/sound/soc/codecs/sigmadsp-regmap.c
@@ -0,0 +1,36 @@
1/*
2 * Load Analog Devices SigmaStudio firmware files
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/regmap.h>
10#include <linux/export.h>
11#include <linux/module.h>
12
13#include "sigmadsp.h"
14
15static int sigma_action_write_regmap(void *control_data,
16 const struct sigma_action *sa, size_t len)
17{
18 return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
19 sa->payload, len - 2);
20}
21
22int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
23 const char *name)
24{
25 struct sigma_firmware ssfw;
26
27 ssfw.control_data = regmap;
28 ssfw.write = sigma_action_write_regmap;
29
30 return _process_sigma_firmware(dev, &ssfw, name);
31}
32EXPORT_SYMBOL(process_sigma_firmware_regmap);
33
34MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
35MODULE_DESCRIPTION("SigmaDSP regmap firmware loader");
36MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index 4068f2491232..f2de7e049bc6 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -34,23 +34,6 @@ enum {
34 SIGMA_ACTION_END, 34 SIGMA_ACTION_END,
35}; 35};
36 36
37struct sigma_action {
38 u8 instr;
39 u8 len_hi;
40 __le16 len;
41 __be16 addr;
42 unsigned char payload[];
43} __packed;
44
45struct sigma_firmware {
46 const struct firmware *fw;
47 size_t pos;
48
49 void *control_data;
50 int (*write)(void *control_data, const struct sigma_action *sa,
51 size_t len);
52};
53
54static inline u32 sigma_action_len(struct sigma_action *sa) 37static inline u32 sigma_action_len(struct sigma_action *sa)
55{ 38{
56 return (sa->len_hi << 16) | le16_to_cpu(sa->len); 39 return (sa->len_hi << 16) | le16_to_cpu(sa->len);
@@ -138,7 +121,7 @@ process_sigma_actions(struct sigma_firmware *ssfw)
138 return 0; 121 return 0;
139} 122}
140 123
141static int _process_sigma_firmware(struct device *dev, 124int _process_sigma_firmware(struct device *dev,
142 struct sigma_firmware *ssfw, const char *name) 125 struct sigma_firmware *ssfw, const char *name)
143{ 126{
144 int ret; 127 int ret;
@@ -197,50 +180,6 @@ static int _process_sigma_firmware(struct device *dev,
197 180
198 return ret; 181 return ret;
199} 182}
200 183EXPORT_SYMBOL_GPL(_process_sigma_firmware);
201#if IS_ENABLED(CONFIG_I2C)
202
203static int sigma_action_write_i2c(void *control_data,
204 const struct sigma_action *sa, size_t len)
205{
206 return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
207 len);
208}
209
210int process_sigma_firmware(struct i2c_client *client, const char *name)
211{
212 struct sigma_firmware ssfw;
213
214 ssfw.control_data = client;
215 ssfw.write = sigma_action_write_i2c;
216
217 return _process_sigma_firmware(&client->dev, &ssfw, name);
218}
219EXPORT_SYMBOL(process_sigma_firmware);
220
221#endif
222
223#if IS_ENABLED(CONFIG_REGMAP)
224
225static int sigma_action_write_regmap(void *control_data,
226 const struct sigma_action *sa, size_t len)
227{
228 return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
229 sa->payload, len - 2);
230}
231
232int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
233 const char *name)
234{
235 struct sigma_firmware ssfw;
236
237 ssfw.control_data = regmap;
238 ssfw.write = sigma_action_write_regmap;
239
240 return _process_sigma_firmware(dev, &ssfw, name);
241}
242EXPORT_SYMBOL(process_sigma_firmware_regmap);
243
244#endif
245 184
246MODULE_LICENSE("GPL"); 185MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp.h b/sound/soc/codecs/sigmadsp.h
index e439cbd7af7d..c47cd23e9827 100644
--- a/sound/soc/codecs/sigmadsp.h
+++ b/sound/soc/codecs/sigmadsp.h
@@ -12,6 +12,26 @@
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/regmap.h> 13#include <linux/regmap.h>
14 14
15struct sigma_action {
16 u8 instr;
17 u8 len_hi;
18 __le16 len;
19 __be16 addr;
20 unsigned char payload[];
21} __packed;
22
23struct sigma_firmware {
24 const struct firmware *fw;
25 size_t pos;
26
27 void *control_data;
28 int (*write)(void *control_data, const struct sigma_action *sa,
29 size_t len);
30};
31
32int _process_sigma_firmware(struct device *dev,
33 struct sigma_firmware *ssfw, const char *name);
34
15struct i2c_client; 35struct i2c_client;
16 36
17extern int process_sigma_firmware(struct i2c_client *client, const char *name); 37extern int process_sigma_firmware(struct i2c_client *client, const char *name);
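
The sigmadsp changes above split the firmware loader into bus-specific modules: codec drivers now select SND_SOC_SIGMADSP_I2C or SND_SOC_SIGMADSP_REGMAP, and the shared struct sigma_firmware plus _process_sigma_firmware() move into sigmadsp.h. A hedged example of how an I2C codec driver would use the I2C flavour after the split; the wrapper and firmware file name are invented, only process_sigma_firmware() and its signature come from the diff:

#include <linux/i2c.h>

#include "sigmadsp.h"

/* Hypothetical helper inside a codec driver built with
 * CONFIG_SND_SOC_SIGMADSP_I2C: hand the codec's i2c_client and a
 * firmware file name to the loader.
 */
static int example_load_dsp_program(struct i2c_client *client)
{
	return process_sigma_firmware(client, "example-sigmadsp.bin");
}
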
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 6bb0ea59284f..a609aafc994d 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -923,8 +923,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
923 dma->dai.pcm_free = fsl_dma_free_dma_buffers; 923 dma->dai.pcm_free = fsl_dma_free_dma_buffers;
924 924
925 /* Store the SSI-specific information that we need */ 925 /* Store the SSI-specific information that we need */
926 dma->ssi_stx_phys = res.start + offsetof(struct ccsr_ssi, stx0); 926 dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
927 dma->ssi_srx_phys = res.start + offsetof(struct ccsr_ssi, srx0); 927 dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
928 928
929 iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL); 929 iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
930 if (iprop) 930 if (iprop)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index b912d45a2a4c..d7a60614dd21 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -762,7 +762,7 @@ static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol,
762 struct regmap *regmap = spdif_priv->regmap; 762 struct regmap *regmap = spdif_priv->regmap;
763 u32 val; 763 u32 val;
764 764
765 val = regmap_read(regmap, REG_SPDIF_SIS, &val); 765 regmap_read(regmap, REG_SPDIF_SIS, &val);
766 ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0; 766 ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0;
767 regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD); 767 regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD);
768 768
@@ -1076,7 +1076,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
1076 goto out; 1076 goto out;
1077 } else if (arate / rate[index] == 1) { 1077 } else if (arate / rate[index] == 1) {
1078 /* A little bigger than expect */ 1078 /* A little bigger than expect */
1079 sub = (arate - rate[index]) * 100000; 1079 sub = (u64)(arate - rate[index]) * 100000;
1080 do_div(sub, rate[index]); 1080 do_div(sub, rate[index]);
1081 if (sub >= savesub) 1081 if (sub >= savesub)
1082 continue; 1082 continue;
@@ -1086,7 +1086,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
1086 spdif_priv->txrate[index] = arate; 1086 spdif_priv->txrate[index] = arate;
1087 } else if (rate[index] / arate == 1) { 1087 } else if (rate[index] / arate == 1) {
1088 /* A little smaller than expect */ 1088 /* A little smaller than expect */
1089 sub = (rate[index] - arate) * 100000; 1089 sub = (u64)(rate[index] - arate) * 100000;
1090 do_div(sub, rate[index]); 1090 do_div(sub, rate[index]);
1091 if (sub >= savesub) 1091 if (sub >= savesub)
1092 continue; 1092 continue;
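
The fsl_spdif.c hunks above widen (arate - rate[index]) to u64 before multiplying by 100000, because the product can overflow 32 bits before do_div() ever sees it. A standalone program showing the difference (the sample rates are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t arate = 96000, rate = 44100;

	/* 51900 * 100000 = 5,190,000,000 overflows a 32-bit unsigned value */
	uint32_t narrow = (arate - rate) * 100000U;
	uint64_t wide = (uint64_t)(arate - rate) * 100000;

	printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}
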
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 6acb225ec6fd..2434b6d61675 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -11,6 +11,7 @@ config SND_PXA2XX_SOC
11config SND_MMP_SOC 11config SND_MMP_SOC
12 bool "Soc Audio for Marvell MMP chips" 12 bool "Soc Audio for Marvell MMP chips"
13 depends on ARCH_MMP 13 depends on ARCH_MMP
14 select MMP_SRAM
14 select SND_SOC_GENERIC_DMAENGINE_PCM 15 select SND_SOC_GENERIC_DMAENGINE_PCM
15 select SND_ARM 16 select SND_ARM
16 help 17 help
@@ -40,7 +41,7 @@ config SND_MMP_SOC_SSPA
40 41
41config SND_PXA2XX_SOC_CORGI 42config SND_PXA2XX_SOC_CORGI
42 tristate "SoC Audio support for Sharp Zaurus SL-C7x0" 43 tristate "SoC Audio support for Sharp Zaurus SL-C7x0"
43 depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx 44 depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx && I2C
44 select SND_PXA2XX_SOC_I2S 45 select SND_PXA2XX_SOC_I2S
45 select SND_SOC_WM8731 46 select SND_SOC_WM8731
46 help 47 help
@@ -49,7 +50,7 @@ config SND_PXA2XX_SOC_CORGI
49 50
50config SND_PXA2XX_SOC_SPITZ 51config SND_PXA2XX_SOC_SPITZ
51 tristate "SoC Audio support for Sharp Zaurus SL-Cxx00" 52 tristate "SoC Audio support for Sharp Zaurus SL-Cxx00"
52 depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00 53 depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00 && I2C
53 select SND_PXA2XX_SOC_I2S 54 select SND_PXA2XX_SOC_I2S
54 select SND_SOC_WM8750 55 select SND_SOC_WM8750
55 help 56 help
@@ -58,7 +59,7 @@ config SND_PXA2XX_SOC_SPITZ
58 59
59config SND_PXA2XX_SOC_Z2 60config SND_PXA2XX_SOC_Z2
60 tristate "SoC Audio support for Zipit Z2" 61 tristate "SoC Audio support for Zipit Z2"
61 depends on SND_PXA2XX_SOC && MACH_ZIPIT2 62 depends on SND_PXA2XX_SOC && MACH_ZIPIT2 && I2C
62 select SND_PXA2XX_SOC_I2S 63 select SND_PXA2XX_SOC_I2S
63 select SND_SOC_WM8750 64 select SND_SOC_WM8750
64 help 65 help
@@ -66,7 +67,7 @@ config SND_PXA2XX_SOC_Z2
66 67
67config SND_PXA2XX_SOC_POODLE 68config SND_PXA2XX_SOC_POODLE
68 tristate "SoC Audio support for Poodle" 69 tristate "SoC Audio support for Poodle"
69 depends on SND_PXA2XX_SOC && MACH_POODLE 70 depends on SND_PXA2XX_SOC && MACH_POODLE && I2C
70 select SND_PXA2XX_SOC_I2S 71 select SND_PXA2XX_SOC_I2S
71 select SND_SOC_WM8731 72 select SND_SOC_WM8731
72 help 73 help
@@ -181,7 +182,7 @@ config SND_PXA2XX_SOC_HX4700
181 182
182config SND_PXA2XX_SOC_MAGICIAN 183config SND_PXA2XX_SOC_MAGICIAN
183 tristate "SoC Audio support for HTC Magician" 184 tristate "SoC Audio support for HTC Magician"
184 depends on SND_PXA2XX_SOC && MACH_MAGICIAN 185 depends on SND_PXA2XX_SOC && MACH_MAGICIAN && I2C
185 select SND_PXA2XX_SOC_I2S 186 select SND_PXA2XX_SOC_I2S
186 select SND_PXA_SOC_SSP 187 select SND_PXA_SOC_SSP
187 select SND_SOC_UDA1380 188 select SND_SOC_UDA1380
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 91880156e1ae..4e86265f625c 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -315,7 +315,7 @@ static void rsnd_dma_of_name(struct rsnd_dma *dma,
315 dst_mod = mod[index]; 315 dst_mod = mod[index];
316 } else { 316 } else {
317 src_mod = mod[index]; 317 src_mod = mod[index];
318 dst_mod = mod[index + 1]; 318 dst_mod = mod[index - 1];
319 } 319 }
320 320
321 index = 0; 321 index = 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a74b9bf23d9f..cdc837ed144d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2755,7 +2755,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2755 unsigned int mask = (1 << fls(max)) - 1; 2755 unsigned int mask = (1 << fls(max)) - 1;
2756 unsigned int invert = mc->invert; 2756 unsigned int invert = mc->invert;
2757 unsigned int val; 2757 unsigned int val;
2758 int connect, change; 2758 int connect, change, reg_change = 0;
2759 struct snd_soc_dapm_update update; 2759 struct snd_soc_dapm_update update;
2760 int ret = 0; 2760 int ret = 0;
2761 2761
@@ -2773,20 +2773,23 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2773 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 2773 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
2774 2774
2775 change = dapm_kcontrol_set_value(kcontrol, val); 2775 change = dapm_kcontrol_set_value(kcontrol, val);
2776 if (change) {
2777 if (reg != SND_SOC_NOPM) {
2778 mask = mask << shift;
2779 val = val << shift;
2780
2781 if (snd_soc_test_bits(codec, reg, mask, val)) {
2782 update.kcontrol = kcontrol;
2783 update.reg = reg;
2784 update.mask = mask;
2785 update.val = val;
2786 card->update = &update;
2787 }
2788 2776
2777 if (reg != SND_SOC_NOPM) {
2778 mask = mask << shift;
2779 val = val << shift;
2780
2781 reg_change = snd_soc_test_bits(codec, reg, mask, val);
2782 }
2783
2784 if (change || reg_change) {
2785 if (reg_change) {
2786 update.kcontrol = kcontrol;
2787 update.reg = reg;
2788 update.mask = mask;
2789 update.val = val;
2790 card->update = &update;
2789 } 2791 }
2792 change |= reg_change;
2790 2793
2791 ret = soc_dapm_mixer_update_power(card, kcontrol, connect); 2794 ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
2792 2795
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 1bd35e8ed9f1..2185091c5227 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
17#include "../util.h" 17#include "../util.h"
18#include "../ui.h" 18#include "../ui.h"
19#include "map.h" 19#include "map.h"
20#include "annotate.h"
20 21
21struct hist_browser { 22struct hist_browser {
22 struct ui_browser b; 23 struct ui_browser b;
@@ -1592,13 +1593,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1592 bi->to.sym->name) > 0) 1593 bi->to.sym->name) > 0)
1593 annotate_t = nr_options++; 1594 annotate_t = nr_options++;
1594 } else { 1595 } else {
1595
1596 if (browser->selection != NULL && 1596 if (browser->selection != NULL &&
1597 browser->selection->sym != NULL && 1597 browser->selection->sym != NULL &&
1598 !browser->selection->map->dso->annotate_warned && 1598 !browser->selection->map->dso->annotate_warned) {
1599 asprintf(&options[nr_options], "Annotate %s", 1599 struct annotation *notes;
1600 browser->selection->sym->name) > 0) 1600
1601 annotate = nr_options++; 1601 notes = symbol__annotation(browser->selection->sym);
1602
1603 if (notes->src &&
1604 asprintf(&options[nr_options], "Annotate %s",
1605 browser->selection->sym->name) > 0)
1606 annotate = nr_options++;
1607 }
1602 } 1608 }
1603 1609
1604 if (thread != NULL && 1610 if (thread != NULL &&
@@ -1655,6 +1661,7 @@ retry_popup_menu:
1655 1661
1656 if (choice == annotate || choice == annotate_t || choice == annotate_f) { 1662 if (choice == annotate || choice == annotate_t || choice == annotate_f) {
1657 struct hist_entry *he; 1663 struct hist_entry *he;
1664 struct annotation *notes;
1658 int err; 1665 int err;
1659do_annotate: 1666do_annotate:
1660 if (!objdump_path && perf_session_env__lookup_objdump(env)) 1667 if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1678,6 +1685,10 @@ do_annotate:
1678 he->ms.map = he->branch_info->to.map; 1685 he->ms.map = he->branch_info->to.map;
1679 } 1686 }
1680 1687
1688 notes = symbol__annotation(he->ms.sym);
1689 if (!notes->src)
1690 continue;
1691
1681 /* 1692 /*
1682 * Don't let this be freed, say, by hists__decay_entry. 1693 * Don't let this be freed, say, by hists__decay_entry.
1683 */ 1694 */
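
The perf hists-browser hunks above only offer (and only act on) the Annotate menu entry when the selected symbol actually has annotation source data. A simplified stand-in for that guard; the struct below mirrors only the one field the check needs, not perf's real struct annotation:

#include <stdbool.h>
#include <stddef.h>

struct annotation_stub {
	void *src;	/* per-symbol source/disassembly data, if collected */
};

static bool can_offer_annotate(const struct annotation_stub *notes)
{
	return notes != NULL && notes->src != NULL;
}
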
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
496 u64 start; 496 u64 start;
497}; 497};
498 498
499static int symbol__in_kernel(void *arg, const char *name,
500 char type __maybe_unused, u64 start)
501{
502 struct process_args *args = arg;
503
504 if (strchr(name, '['))
505 return 0;
506
507 args->start = start;
508 return 1;
509}
510
511static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 499static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
512 size_t bufsz) 500 size_t bufsz)
513{ 501{
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
517 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 505 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
518} 506}
519 507
520/* Figure out the start address of kernel map from /proc/kallsyms */ 508const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
521static u64 machine__get_kernel_start_addr(struct machine *machine) 509
510/* Figure out the start address of kernel map from /proc/kallsyms.
511 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
512 * symbol_name if it's not that important.
513 */
514static u64 machine__get_kernel_start_addr(struct machine *machine,
515 const char **symbol_name)
522{ 516{
523 char filename[PATH_MAX]; 517 char filename[PATH_MAX];
524 struct process_args args; 518 int i;
519 const char *name;
520 u64 addr = 0;
525 521
526 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 522 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
527 523
528 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 524 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
529 return 0; 525 return 0;
530 526
531 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 527 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
532 return 0; 528 addr = kallsyms__get_function_start(filename, name);
529 if (addr)
530 break;
531 }
532
533 if (symbol_name)
534 *symbol_name = name;
533 535
534 return args.start; 536 return addr;
535} 537}
536 538
537int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 539int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
538{ 540{
539 enum map_type type; 541 enum map_type type;
540 u64 start = machine__get_kernel_start_addr(machine); 542 u64 start = machine__get_kernel_start_addr(machine, NULL);
541 543
542 for (type = 0; type < MAP__NR_TYPES; ++type) { 544 for (type = 0; type < MAP__NR_TYPES; ++type) {
543 struct kmap *kmap; 545 struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
852 return 0; 854 return 0;
853} 855}
854 856
855const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
856
857int machine__create_kernel_maps(struct machine *machine) 857int machine__create_kernel_maps(struct machine *machine)
858{ 858{
859 struct dso *kernel = machine__get_kernel(machine); 859 struct dso *kernel = machine__get_kernel(machine);
860 char filename[PATH_MAX];
861 const char *name; 860 const char *name;
862 u64 addr = 0; 861 u64 addr = machine__get_kernel_start_addr(machine, &name);
863 int i;
864
865 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
866
867 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
868 addr = kallsyms__get_function_start(filename, name);
869 if (addr)
870 break;
871 }
872 if (!addr) 862 if (!addr)
873 return -1; 863 return -1;
874 864
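
The machine.c refactor above folds the kallsyms lookup into machine__get_kernel_start_addr(): it tries the reference symbols "_text" then "_stext" and optionally reports which one matched. A hedged sketch of that loop in isolation; kallsyms__get_function_start() is the helper the diff itself calls, while the wrapper and the local u64 typedef exist only for this example:

typedef unsigned long long u64;	/* stand-in for perf's u64 */

u64 kallsyms__get_function_start(const char *filename, const char *name);

static u64 kernel_start_from_kallsyms(const char *filename,
				      const char **symbol_name)
{
	static const char *names[] = { "_text", "_stext", NULL };
	u64 addr = 0;
	int i;

	for (i = 0; names[i] != NULL; i++) {
		addr = kallsyms__get_function_start(filename, names[i]);
		if (addr)
			break;
	}
	if (symbol_name)
		*symbol_name = names[i];	/* NULL when nothing matched */
	return addr;
}
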