-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/testing/sysfs-class-scsi_host13
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml38
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt89
-rw-r--r--Documentation/SubmittingDrivers2
-rw-r--r--Documentation/SubmittingPatches2
-rw-r--r--Documentation/block/cfq-iosched.txt71
-rw-r--r--Documentation/cgroups/memory.txt85
-rw-r--r--Documentation/email-clients.txt12
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--Documentation/filesystems/befs.txt2
-rw-r--r--Documentation/hwmon/max160657
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/kernel-docs.txt11
-rw-r--r--Documentation/kernel-parameters.txt70
-rw-r--r--Documentation/networking/dmfe.txt3
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rw-r--r--Documentation/power/runtime_pm.txt3
-rw-r--r--Documentation/ramoops.txt76
-rw-r--r--Documentation/virtual/00-INDEX3
-rw-r--r--Documentation/virtual/lguest/lguest.c3
-rw-r--r--Documentation/virtual/virtio-spec.txt2200
-rw-r--r--MAINTAINERS42
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/sysinfo.h9
-rw-r--r--arch/alpha/include/asm/thread_info.h8
-rw-r--r--arch/alpha/kernel/osf_sys.c12
-rw-r--r--arch/alpha/kernel/systbls.S2
-rw-r--r--arch/arm/Kconfig12
-rw-r--r--arch/arm/boot/compressed/mmcif-sh7372.c2
-rw-r--r--arch/arm/boot/compressed/sdhi-sh7372.c2
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts12
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts6
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h11
-rw-r--r--arch/arm/include/asm/pmu.h10
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/pmu.c26
-rw-r--r--arch/arm/kernel/relocate_kernel.S3
-rw-r--r--arch/arm/kernel/setup.c15
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/mach-at91/at91sam9261.c2
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/system.h1
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c28
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h2
-rw-r--r--arch/arm/mach-davinci/sleep.S6
-rw-r--r--arch/arm/mach-dove/common.c2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ts72xx.h26
-rw-r--r--arch/arm/mach-exynos4/clock.c4
-rw-r--r--arch/arm/mach-exynos4/cpu.c11
-rw-r--r--arch/arm/mach-exynos4/include/mach/irqs.h5
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-pmu.h2
-rw-r--r--arch/arm/mach-exynos4/irq-eint.c7
-rw-r--r--arch/arm/mach-exynos4/mach-universal_c210.c4
-rw-r--r--arch/arm/mach-exynos4/mct.c10
-rw-r--r--arch/arm/mach-exynos4/platsmp.c2
-rw-r--r--arch/arm/mach-exynos4/setup-keypad.c11
-rw-r--r--arch/arm/mach-exynos4/setup-usb-phy.c2
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-footbridge/dc21285.c1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c2
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c2
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c2
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c8
-rw-r--r--arch/arm/mach-integrator/pci_v3.c2
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c10
-rw-r--r--arch/arm/mach-omap2/clockdomain.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c1
-rw-r--r--arch/arm/mach-omap2/pm.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain.c25
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c2
-rw-r--r--arch/arm/mach-orion5x/pci.c1
-rw-r--r--arch/arm/mach-prima2/clock.c1
-rw-r--r--arch/arm/mach-prima2/irq.c1
-rw-r--r--arch/arm/mach-prima2/rstc.c1
-rw-r--r--arch/arm/mach-prima2/timer.c1
-rw-r--r--arch/arm/mach-realview/include/mach/system.h1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c39
-rw-r--r--arch/arm/mach-s3c64xx/pm.c1
-rw-r--r--arch/arm/mach-s5p64x0/irq-eint.c2
-rw-r--r--arch/arm/mach-s5pv210/pm.c2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c3
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c1
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c5
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c31
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c2
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h4
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c7
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c176
-rw-r--r--arch/arm/mach-vexpress/v2m.c7
-rw-r--r--arch/arm/mm/abort-macro.S2
-rw-r--r--arch/arm/mm/cache-l2x0.c21
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S10
-rw-r--r--arch/arm/mm/proc-v6.S16
-rw-r--r--arch/arm/mm/proc-v7.S6
-rw-r--r--arch/arm/mm/proc-xsc3.S6
-rw-r--r--arch/arm/plat-omap/omap_device.c6
-rw-r--r--arch/arm/plat-s5p/clock.c2
-rw-r--r--arch/arm/plat-s5p/irq-gpioint.c6
-rw-r--r--arch/arm/plat-samsung/clock.c11
-rw-r--r--arch/arm/plat-samsung/include/plat/backlight.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h8
-rw-r--r--arch/arm/plat-samsung/include/plat/watchdog-reset.h10
-rw-r--r--arch/arm/plat-samsung/irq-vic-timer.c5
-rw-r--r--arch/arm/tools/mach-types6
-rw-r--r--arch/avr32/kernel/syscall_table.S2
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/cris/arch-v10/kernel/entry.S2
-rw-r--r--arch/cris/arch-v32/kernel/entry.S2
-rw-r--r--arch/cris/include/asm/serial.h9
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/h8300/kernel/syscalls.S2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/configs/generic_defconfig1
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/m32r/kernel/syscall_table.S2
-rw-r--r--arch/m68k/include/asm/page_mm.h2
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/microblaze/kernel/syscall_table.S2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/openrisc/include/asm/dma-mapping.h59
-rw-r--r--arch/openrisc/include/asm/sigcontext.h7
-rw-r--r--arch/openrisc/kernel/dma.c28
-rw-r--r--arch/openrisc/kernel/signal.c29
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts2
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig1
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig5
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c5
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/early.c14
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/sh/include/asm/ptrace.h2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c1
-rw-r--r--arch/sh/kernel/idle.c2
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sh/kernel/traps_32.c37
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/sigcontext.h14
-rw-r--r--arch/sparc/include/asm/spinlock_32.h11
-rw-r--r--arch/sparc/include/asm/spinlock_64.h6
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/irq.h2
-rw-r--r--arch/sparc/kernel/pcic.c4
-rw-r--r--arch/sparc/kernel/setup_64.c10
-rw-r--r--arch/sparc/kernel/signal32.c184
-rw-r--r--arch/sparc/kernel/signal_32.c172
-rw-r--r--arch/sparc/kernel/signal_64.c108
-rw-r--r--arch/sparc/kernel/sigutil.h9
-rw-r--r--arch/sparc/kernel/sigutil_32.c120
-rw-r--r--arch/sparc/kernel/sigutil_64.c93
-rw-r--r--arch/sparc/kernel/sys32.S1
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/um/Kconfig.x864
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/line.c61
-rw-r--r--arch/um/drivers/xterm.c1
-rw-r--r--arch/um/include/asm/ptrace-generic.h4
-rw-r--r--arch/um/include/shared/line.h1
-rw-r--r--arch/um/include/shared/registers.h2
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/ptrace.c28
-rw-r--r--arch/um/os-Linux/registers.c9
-rw-r--r--arch/um/os-Linux/skas/mem.c2
-rw-r--r--arch/um/os-Linux/skas/process.c19
-rw-r--r--arch/um/sys-i386/asm/ptrace.h5
-rw-r--r--arch/um/sys-i386/ptrace.c28
-rw-r--r--arch/um/sys-i386/shared/sysdep/ptrace.h1
-rw-r--r--arch/um/sys-x86_64/ptrace.c12
-rw-r--r--arch/um/sys-x86_64/shared/sysdep/ptrace.h1
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/alternative-asm.h1
-rw-r--r--arch/x86/include/asm/alternative.h4
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/pvclock.h2
-rw-r--r--arch/x86/include/asm/unistd_64.h2
-rw-r--r--arch/x86/include/asm/xen/page.h4
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/entry_32.S8
-rw-r--r--arch/x86/kernel/syscall_table_32.S2
-rw-r--r--arch/x86/kvm/Kconfig3
-rw-r--r--arch/x86/mm/fault.c1
-rw-r--r--arch/x86/pci/acpi.c14
-rw-r--r--arch/x86/platform/mrst/mrst.c4
-rw-r--r--arch/x86/platform/olpc/olpc.c4
-rw-r--r--arch/x86/vdso/vdso32/sysenter.S2
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c4
-rw-r--r--arch/x86/xen/mmu.c10
-rw-r--r--arch/x86/xen/setup.c21
-rw-r--r--arch/x86/xen/smp.c15
-rw-r--r--arch/x86/xen/time.c5
-rw-r--r--arch/x86/xen/xen-asm_32.S8
-rw-r--r--arch/xtensa/include/asm/unistd.h2
-rw-r--r--block/Kconfig10
-rw-r--r--block/Makefile1
-rw-r--r--block/blk-cgroup.c37
-rw-r--r--block/blk-core.c23
-rw-r--r--block/blk-flush.c25
-rw-r--r--block/blk-softirq.c10
-rw-r--r--block/blk-sysfs.c10
-rw-r--r--block/blk-throttle.c4
-rw-r--r--block/blk.h2
-rw-r--r--block/bsg-lib.c298
-rw-r--r--block/cfq-iosched.c21
-rw-r--r--block/genhd.c8
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/apei-base.c2
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/pata_imx.c253
-rw-r--r--drivers/ata/pata_via.c18
-rw-r--r--drivers/ata/sata_dwc_460ex.c14
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/base/devres.c1
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/firmware_class.c11
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c40
-rw-r--r--drivers/base/power/domain.c30
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/bcma/main.c12
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/floppy.c8
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bluetooth/ath3k.c1
-rw-r--r--drivers/bluetooth/btusb.c19
-rw-r--r--drivers/bluetooth/btwilink.c16
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/msm_smd_pkt.c5
-rw-r--r--drivers/clocksource/sh_cmt.c34
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c3
-rw-r--r--drivers/dma/ste_dma40.c42
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/firewire/core-cdev.c24
-rw-r--r--drivers/firewire/core-device.c15
-rw-r--r--drivers/firewire/ohci.c12
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/gpio/gpio-generic.c15
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h17
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c191
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c7
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c82
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c72
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c12
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c44
-rw-r--r--drivers/gpu/drm/radeon/ni.c16
-rw-r--r--drivers/gpu/drm/radeon/r100.c22
-rw-r--r--drivers/gpu/drm/radeon/r200.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-wacom.c24
-rw-r--r--drivers/hid/hid-wiimote.c277
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hwmon/coretemp.c7
-rw-r--r--drivers/hwmon/i5k_amb.c42
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c3
-rw-r--r--drivers/hwmon/pmbus/lm25066.c12
-rw-r--r--drivers/hwmon/pmbus/pmbus.h1
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c29
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c6
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c6
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c29
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c60
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/ad714x-i2c.c81
-rw-r--r--drivers/input/misc/ad714x-spi.c68
-rw-r--r--drivers/input/misc/ad714x.c116
-rw-r--r--drivers/input/misc/ad714x.h35
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/misc/mma8450.c2
-rw-r--r--drivers/input/misc/mpu3050.c2
-rw-r--r--drivers/input/mouse/bcm5974.c60
-rw-r--r--drivers/input/tablet/wacom_sys.c31
-rw-r--r--drivers/input/tablet/wacom_wac.c49
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c12
-rw-r--r--drivers/input/touchscreen/max11801_ts.c3
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c1
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/amd_iommu.c18
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/leds/leds-ams-delta.c1
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/leds/ledtrig-timer.c2
-rw-r--r--drivers/md/linear.h2
-rw-r--r--drivers/md/md.c28
-rw-r--r--drivers/md/raid1.c14
-rw-r--r--drivers/md/raid10.c47
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c26
-rw-r--r--drivers/media/rc/nuvoton-cir.c45
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/video/gspca/ov519.c22
-rw-r--r--drivers/media/video/gspca/sonixj.c6
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/mfd/max8997.c5
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/tps65910-irq.c2
-rw-r--r--drivers/mfd/twl4030-madc.c5
-rw-r--r--drivers/mfd/wm8350-gpio.c4
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/cb710/core.c3
-rw-r--r--drivers/misc/fsa9480.c4
-rw-r--r--drivers/misc/pti.c14
-rw-r--r--drivers/misc/ti-st/st_core.c10
-rw-r--r--drivers/misc/ti-st/st_kim.c33
-rw-r--r--drivers/misc/ti-st/st_ll.c19
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/mmc_test.c58
-rw-r--r--drivers/mmc/core/core.c37
-rw-r--r--drivers/mmc/core/host.c12
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sd.c81
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c3
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci.c53
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/ubi/debug.h2
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h124
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c27
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c60
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig11
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c302
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c18
-rw-r--r--drivers/net/ethernet/sfc/io.h6
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c46
-rw-r--r--drivers/net/ethernet/sfc/nic.c7
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/siena.c25
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h2
-rw-r--r--drivers/net/ethernet/sun/cassini.c3
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/phy/national.c17
-rw-r--r--drivers/net/ppp/ppp_generic.c7
-rw-r--r--drivers/net/rionet.c23
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c21
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c39
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c31
-rw-r--r--drivers/net/wireless/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c11
-rw-r--r--drivers/net/wireless/wl12xx/acx.c6
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c50
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c47
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci.c67
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c147
-rw-r--r--drivers/pci/setup-bus.c166
-rw-r--r--drivers/pci/setup-res.c152
-rw-r--r--drivers/power/max8997_charger.c1
-rw-r--r--drivers/power/max8998_charger.c1
-rw-r--r--drivers/power/s3c_adc_battery.c1
-rw-r--r--drivers/rapidio/rio-scan.c3
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-imxdi.c1
-rw-r--r--drivers/rtc/rtc-lib.c2
-rw-r--r--drivers/rtc/rtc-s3c.c105
-rw-r--r--drivers/rtc/rtc-twl.c60
-rw-r--r--drivers/s390/block/dasd_ioctl.c10
-rw-r--r--drivers/s390/char/sclp_cmd.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c13
-rw-r--r--drivers/scsi/hpsa.c57
-rw-r--r--drivers/scsi/isci/host.c13
-rw-r--r--drivers/scsi/isci/host.h3
-rw-r--r--drivers/scsi/isci/init.c47
-rw-r--r--drivers/scsi/isci/phy.c13
-rw-r--r--drivers/scsi/isci/registers.h12
-rw-r--r--drivers/scsi/isci/request.c30
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c2
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h2
-rw-r--r--drivers/scsi/libfc/fc_exch.c59
-rw-r--r--drivers/scsi/libfc/fc_fcp.c11
-rw-r--r--drivers/scsi/libfc/fc_lport.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c282
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c109
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/Kconfig2
-rw-r--r--drivers/sh/intc/chip.c3
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c4
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c7
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/staging/gma500/medfield.h2
-rw-r--r--drivers/staging/gma500/psb_drv.h1
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c1
-rw-r--r--drivers/staging/octeon/ethernet-spi.c1
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c1
-rw-r--r--drivers/staging/zcache/tmem.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c274
-rw-r--r--drivers/target/target_core_cdb.c92
-rw-r--r--drivers/target/target_core_device.c48
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_rd.c24
-rw-r--r--drivers/target/target_core_tpg.c64
-rw-r--r--drivers/target/target_core_transport.c215
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h12
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c90
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c13
-rw-r--r--drivers/target/tcm_fc/tfc_io.c62
-rw-r--r--drivers/tty/pty.c17
-rw-r--r--drivers/tty/serial/8250.c8
-rw-r--r--drivers/tty/serial/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250_pnp.c3
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/crisv10.c4
-rw-r--r--drivers/tty/serial/max3107-aava.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c2
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/samsung.c8
-rw-r--r--drivers/tty/serial/serial_core.c5
-rw-r--r--drivers/tty/serial/sh-sci.c72
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/gadget/f_phonet.c1
-rw-r--r--drivers/usb/host/ehci-hub.c7
-rw-r--r--drivers/usb/host/ehci-s5p.c1
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-ring.c109
-rw-r--r--drivers/usb/host/xhci.c28
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/cppi_dma.c26
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/musb_regs.h6
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c1
-rw-r--r--drivers/usb/musb/ux500_dma.c38
-rw-r--r--drivers/usb/serial/ftdi_sio.c20
-rw-r--r--drivers/usb/serial/option.c104
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/ep93xx_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c9
-rw-r--r--drivers/w1/masters/ds2490.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c4
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_smem.c4
-rw-r--r--drivers/w1/slaves/w1_therm.c4
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/w1/w1_family.c2
-rw-r--r--drivers/w1/w1_family.h2
-rw-r--r--drivers/w1/w1_int.c2
-rw-r--r--drivers/w1/w1_int.h2
-rw-r--r--drivers/w1/w1_io.c2
-rw-r--r--drivers/w1/w1_log.h2
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/w1/w1_netlink.h2
-rw-r--r--drivers/watchdog/hpwdt.c9
-rw-r--r--drivers/watchdog/lantiq_wdt.c8
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c14
-rw-r--r--drivers/xen/events.c40
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--fs/9p/v9fs_vfs.h6
-rw-r--r--fs/9p/vfs_file.c36
-rw-r--r--fs/9p/vfs_inode.c139
-rw-r--r--fs/9p/vfs_inode_dotl.c86
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/befs/linuxvfs.c23
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/btrfs/btrfs_inode.h6
-rw-r--r--fs/btrfs/ctree.h10
-rw-r--r--fs/btrfs/extent-tree.c77
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c49
-rw-r--r--fs/btrfs/free-space-cache.c20
-rw-r--r--fs/btrfs/inode.c52
-rw-r--r--fs/btrfs/ioctl.c36
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/btrfs/tree-log.c28
-rw-r--r--fs/btrfs/volumes.c51
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/btrfs/xattr.c9
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/cifs/cifs_debug.c2
-rw-r--r--fs/cifs/cifsacl.c28
-rw-r--r--fs/cifs/cifsencrypt.c54
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h56
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/connect.c7
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/cifs/transport.c51
-rw-r--r--fs/compat.c5
-rw-r--r--fs/ext3/inode.c4
-rw-r--r--fs/ext3/namei.c3
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/ext4_jbd2.h4
-rw-r--r--fs/ext4/indirect.c9
-rw-r--r--fs/ext4/inode.c27
-rw-r--r--fs/ext4/namei.c3
-rw-r--r--fs/ext4/page-io.c24
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/fat/dir.c2
-rw-r--r--fs/fat/inode.c7
-rw-r--r--fs/fuse/dev.c16
-rw-r--r--fs/fuse/file.c84
-rw-r--r--fs/fuse/fuse_i.h8
-rw-r--r--fs/fuse/inode.c13
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/meta_io.c6
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/hfsplus/super.c15
-rw-r--r--fs/hfsplus/wrapper.c4
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/inode.c24
-rw-r--r--fs/jfs/jfs_umount.c4
-rw-r--r--fs/namei.c37
-rw-r--r--fs/nfs/blocklayout/blocklayout.c1
-rw-r--r--fs/nfs/callback.h2
-rw-r--r--fs/nfs/callback_proc.c25
-rw-r--r--fs/nfs/callback_xdr.c24
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4proc.c20
-rw-r--r--fs/nfs/nfs4renewd.c12
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/objlayout/objio_osd.c28
-rw-r--r--fs/nfs/objlayout/pnfs_osd_xdr_cli.c3
-rw-r--r--fs/nfs/super.c23
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/proc/task_mmu.c80
-rw-r--r--fs/ubifs/debug.h6
-rw-r--r--fs/xfs/Makefile119
-rw-r--r--fs/xfs/kmem.c (renamed from fs/xfs/linux-2.6/kmem.c)0
-rw-r--r--fs/xfs/kmem.h (renamed from fs/xfs/linux-2.6/kmem.h)0
-rw-r--r--fs/xfs/mrlock.h (renamed from fs/xfs/linux-2.6/mrlock.h)0
-rw-r--r--fs/xfs/time.h (renamed from fs/xfs/linux-2.6/time.h)0
-rw-r--r--fs/xfs/uuid.c (renamed from fs/xfs/support/uuid.c)0
-rw-r--r--fs/xfs/uuid.h (renamed from fs/xfs/support/uuid.h)0
-rw-r--r--fs/xfs/xfs.h3
-rw-r--r--fs/xfs/xfs_acl.c (renamed from fs/xfs/linux-2.6/xfs_acl.c)0
-rw-r--r--fs/xfs/xfs_aops.c (renamed from fs/xfs/linux-2.6/xfs_aops.c)3
-rw-r--r--fs/xfs/xfs_aops.h (renamed from fs/xfs/linux-2.6/xfs_aops.h)0
-rw-r--r--fs/xfs/xfs_buf.c (renamed from fs/xfs/linux-2.6/xfs_buf.c)0
-rw-r--r--fs/xfs/xfs_buf.h (renamed from fs/xfs/linux-2.6/xfs_buf.h)0
-rw-r--r--fs/xfs/xfs_discard.c (renamed from fs/xfs/linux-2.6/xfs_discard.c)0
-rw-r--r--fs/xfs/xfs_discard.h (renamed from fs/xfs/linux-2.6/xfs_discard.h)0
-rw-r--r--fs/xfs/xfs_dquot.c (renamed from fs/xfs/quota/xfs_dquot.c)0
-rw-r--r--fs/xfs/xfs_dquot.h (renamed from fs/xfs/quota/xfs_dquot.h)0
-rw-r--r--fs/xfs/xfs_dquot_item.c (renamed from fs/xfs/quota/xfs_dquot_item.c)0
-rw-r--r--fs/xfs/xfs_dquot_item.h (renamed from fs/xfs/quota/xfs_dquot_item.h)0
-rw-r--r--fs/xfs/xfs_export.c (renamed from fs/xfs/linux-2.6/xfs_export.c)0
-rw-r--r--fs/xfs/xfs_export.h (renamed from fs/xfs/linux-2.6/xfs_export.h)0
-rw-r--r--fs/xfs/xfs_file.c (renamed from fs/xfs/linux-2.6/xfs_file.c)0
-rw-r--r--fs/xfs/xfs_fs_subr.c (renamed from fs/xfs/linux-2.6/xfs_fs_subr.c)0
-rw-r--r--fs/xfs/xfs_globals.c (renamed from fs/xfs/linux-2.6/xfs_globals.c)0
-rw-r--r--fs/xfs/xfs_ioctl.c (renamed from fs/xfs/linux-2.6/xfs_ioctl.c)0
-rw-r--r--fs/xfs/xfs_ioctl.h (renamed from fs/xfs/linux-2.6/xfs_ioctl.h)0
-rw-r--r--fs/xfs/xfs_ioctl32.c (renamed from fs/xfs/linux-2.6/xfs_ioctl32.c)0
-rw-r--r--fs/xfs/xfs_ioctl32.h (renamed from fs/xfs/linux-2.6/xfs_ioctl32.h)0
-rw-r--r--fs/xfs/xfs_iops.c (renamed from fs/xfs/linux-2.6/xfs_iops.c)14
-rw-r--r--fs/xfs/xfs_iops.h (renamed from fs/xfs/linux-2.6/xfs_iops.h)0
-rw-r--r--fs/xfs/xfs_linux.h (renamed from fs/xfs/linux-2.6/xfs_linux.h)27
-rw-r--r--fs/xfs/xfs_message.c (renamed from fs/xfs/linux-2.6/xfs_message.c)0
-rw-r--r--fs/xfs/xfs_message.h (renamed from fs/xfs/linux-2.6/xfs_message.h)0
-rw-r--r--fs/xfs/xfs_qm.c (renamed from fs/xfs/quota/xfs_qm.c)0
-rw-r--r--fs/xfs/xfs_qm.h (renamed from fs/xfs/quota/xfs_qm.h)0
-rw-r--r--fs/xfs/xfs_qm_bhv.c (renamed from fs/xfs/quota/xfs_qm_bhv.c)0
-rw-r--r--fs/xfs/xfs_qm_stats.c (renamed from fs/xfs/quota/xfs_qm_stats.c)0
-rw-r--r--fs/xfs/xfs_qm_stats.h (renamed from fs/xfs/quota/xfs_qm_stats.h)0
-rw-r--r--fs/xfs/xfs_qm_syscalls.c (renamed from fs/xfs/quota/xfs_qm_syscalls.c)0
-rw-r--r--fs/xfs/xfs_quota_priv.h (renamed from fs/xfs/quota/xfs_quota_priv.h)0
-rw-r--r--fs/xfs/xfs_quotaops.c (renamed from fs/xfs/linux-2.6/xfs_quotaops.c)2
-rw-r--r--fs/xfs/xfs_stats.c (renamed from fs/xfs/linux-2.6/xfs_stats.c)0
-rw-r--r--fs/xfs/xfs_stats.h (renamed from fs/xfs/linux-2.6/xfs_stats.h)0
-rw-r--r--fs/xfs/xfs_super.c (renamed from fs/xfs/linux-2.6/xfs_super.c)36
-rw-r--r--fs/xfs/xfs_super.h (renamed from fs/xfs/linux-2.6/xfs_super.h)0
-rw-r--r--fs/xfs/xfs_sync.c (renamed from fs/xfs/linux-2.6/xfs_sync.c)0
-rw-r--r--fs/xfs/xfs_sync.h (renamed from fs/xfs/linux-2.6/xfs_sync.h)0
-rw-r--r--fs/xfs/xfs_sysctl.c (renamed from fs/xfs/linux-2.6/xfs_sysctl.c)0
-rw-r--r--fs/xfs/xfs_sysctl.h (renamed from fs/xfs/linux-2.6/xfs_sysctl.h)0
-rw-r--r--fs/xfs/xfs_trace.c (renamed from fs/xfs/linux-2.6/xfs_trace.c)4
-rw-r--r--fs/xfs/xfs_trace.h (renamed from fs/xfs/linux-2.6/xfs_trace.h)0
-rw-r--r--fs/xfs/xfs_trans_dquot.c (renamed from fs/xfs/quota/xfs_trans_dquot.c)0
-rw-r--r--fs/xfs/xfs_vnode.h (renamed from fs/xfs/linux-2.6/xfs_vnode.h)0
-rw-r--r--fs/xfs/xfs_xattr.c (renamed from fs/xfs/linux-2.6/xfs_xattr.c)0
-rw-r--r--include/asm-generic/memory_model.h4
-rw-r--r--include/asm-generic/unistd.h2
-rw-r--r--include/linux/basic_mmio_gpio.h15
-rw-r--r--include/linux/blk_types.h13
-rw-r--r--include/linux/blkdev.h6
-rw-r--r--include/linux/blktrace_api.h5
-rw-r--r--include/linux/bsg-lib.h73
-rw-r--r--include/linux/compat.h1
-rw-r--r--include/linux/connector.h2
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/fuse.h9
-rw-r--r--include/linux/hash.h2
-rw-r--r--include/linux/irq.h11
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/loop.h5
-rw-r--r--include/linux/memcontrol.h19
-rw-r--r--include/linux/mfd/wm8994/pdata.h2
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/pci.h16
-rw-r--r--include/linux/perf_event.h24
-rw-r--r--include/linux/personality.h1
-rw-r--r--include/linux/pm_domain.h10
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/rio_regs.h18
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/snmp.h2
-rw-r--r--include/linux/swap.h6
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/ti_wilink_st.h27
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_driver.h3
-rw-r--r--include/linux/writeback.h11
-rw-r--r--include/net/9p/9p.h29
-rw-r--r--include/net/cfg80211.h3
-rw-r--r--include/net/flow.h25
-rw-r--r--include/net/request_sock.h3
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/tcp.h22
-rw-r--r--include/net/transp_v6.h1
-rw-r--r--include/sound/tlv320aic3x.h2
-rw-r--r--include/target/target_core_fabric_ops.h6
-rw-r--r--include/trace/events/block.h20
-rw-r--r--init/main.c15
-rw-r--r--kernel/events/core.c67
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/generic-chip.c4
-rw-r--r--kernel/irq/irqdesc.c37
-rw-r--r--kernel/irq/manage.c17
-rw-r--r--kernel/lockdep.c8
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/printk.c2
-rw-r--r--kernel/sched.c43
-rw-r--r--kernel/sys.c38
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/sysctl_check.c2
-rw-r--r--kernel/taskstats.c1
-rw-r--r--kernel/time/alarmtimer.c18
-rw-r--r--kernel/trace/blktrace.c21
-rw-r--r--kernel/tsacct.c15
-rw-r--r--kernel/workqueue.c7
-rw-r--r--lib/Makefile4
-rw-r--r--lib/sha1.c1
-rw-r--r--lib/xz/xz_dec_bcj.c27
-rw-r--r--mm/backing-dev.c30
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/highmem.c4
-rw-r--r--mm/memcontrol.c198
-rw-r--r--mm/mempolicy.c9
-rw-r--r--mm/page-writeback.c15
-rw-r--r--mm/slub.c2
-rw-r--r--mm/vmalloc.c15
-rw-r--r--mm/vmscan.c69
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/9p/trans_virtio.c17
-rw-r--r--net/atm/br2684.c7
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/bnep.h1
-rw-r--r--net/bluetooth/bnep/core.c13
-rw-r--r--net/bluetooth/cmtp/capi.c3
-rw-r--r--net/bluetooth/cmtp/cmtp.h1
-rw-r--r--net/bluetooth/cmtp/core.c20
-rw-r--r--net/bluetooth/hci_core.c8
-rw-r--r--net/bluetooth/hci_event.c17
-rw-r--r--net/bluetooth/hidp/core.c19
-rw-r--r--net/bluetooth/l2cap_core.c6
-rw-r--r--net/bluetooth/l2cap_sock.c30
-rw-r--r--net/bluetooth/rfcomm/core.c17
-rw-r--r--net/bluetooth/rfcomm/sock.c28
-rw-r--r--net/bluetooth/sco.c28
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_multicast.c21
-rw-r--r--net/bridge/netfilter/Kconfig2
-rw-r--r--net/caif/caif_dev.c6
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/ceph/msgpool.c40
-rw-r--r--net/ceph/osd_client.c22
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/flow.c36
-rw-r--r--net/core/neighbour.c8
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/skbuff.c22
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ipv4/af_inet.c7
-rw-r--r--net/ipv4/fib_semantics.c10
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/netfilter/ip_queue.c12
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c49
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/datagram.c5
-rw-r--r--net/ipv6/ip6_flowlabel.c8
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/netfilter/ip6_queue.c12
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/route.c33
-rw-r--r--net/ipv6/tcp_ipv6.c31
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/irda/irsysctl.c6
-rw-r--r--net/irda/qos.c6
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/netfilter/nf_conntrack_pptp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netfilter/xt_rateest.c9
-rw-r--r--net/sched/cls_rsvp.h27
-rw-r--r--net/sctp/sm_sideeffect.c5
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/socket.c10
-rw-r--r--net/wireless/core.c7
-rw-r--r--net/wireless/reg.c1
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/sysfs.c6
-rw-r--r--net/xfrm/xfrm_input.c5
-rwxr-xr-xscripts/checkpatch.pl3
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--sound/aoa/fabrics/layout.c2
-rw-r--r--sound/core/pcm_lib.c33
-rw-r--r--sound/pci/ac97/ac97_patch.c1
-rw-r--r--sound/pci/azt3328.c11
-rw-r--r--sound/pci/hda/alc268_quirks.c36
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_eld.c31
-rw-r--r--sound/pci/hda/patch_cirrus.c10
-rw-r--r--sound/pci/hda/patch_conexant.c57
-rw-r--r--sound/pci/hda/patch_realtek.c39
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/soc/blackfin/bf5xx-ad193x.c6
-rw-r--r--sound/soc/codecs/ad193x.c11
-rw-r--r--sound/soc/codecs/ad193x.h5
-rw-r--r--sound/soc/codecs/sta32x.c1
-rw-r--r--sound/soc/codecs/wm8750.c8
-rw-r--r--sound/soc/codecs/wm8903.c5
-rw-r--r--sound/soc/codecs/wm8962.c12
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/codecs/wm8996.c28
-rw-r--r--sound/soc/ep93xx/ep93xx-i2s.c5
-rw-r--r--sound/soc/fsl/fsl_dma.c2
-rw-r--r--sound/soc/fsl/mpc5200_dma.c6
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c18
-rw-r--r--sound/soc/fsl/p1022_ds.c4
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c1
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c2
-rw-r--r--sound/soc/omap/ams-delta.c6
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/omap/omap-mcbsp.c4
-rw-r--r--sound/soc/omap/omap-mcbsp.h2
-rw-r--r--sound/soc/omap/omap-pcm.c4
-rw-r--r--sound/soc/omap/omap-pcm.h2
-rw-r--r--sound/soc/omap/rx51.c2
-rw-r--r--sound/soc/samsung/Kconfig1
-rw-r--r--sound/soc/samsung/Makefile2
-rw-r--r--sound/soc/samsung/h1940_uda1380.c1
-rw-r--r--sound/soc/samsung/idma.c453
-rw-r--r--sound/soc/samsung/idma.h26
-rw-r--r--sound/soc/samsung/jive_wm8750.c2
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c1
-rw-r--r--sound/soc/samsung/speyside_wm8962.c8
-rw-r--r--sound/soc/soc-cache.c12
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-io.c23
-rw-r--r--sound/soc/soc-jack.c4
-rw-r--r--sound/soc/soc-pcm.c3
-rw-r--r--sound/soc/tegra/tegra_pcm.c9
-rw-r--r--sound/soc/tegra/tegra_wm8903.c19
-rw-r--r--sound/usb/caiaq/audio.c37
-rw-r--r--sound/usb/caiaq/device.h1
-rw-r--r--sound/usb/mixer.c3
-rw-r--r--sound/usb/quirks-table.h34
-rw-r--r--tools/perf/arch/arm/util/dwarf-regs.c3
-rw-r--r--tools/perf/builtin-probe.c14
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-stat.c7
-rw-r--r--tools/perf/util/dwarf-aux.c210
-rw-r--r--tools/perf/util/dwarf-aux.h11
-rw-r--r--tools/perf/util/evlist.c11
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/include/linux/compiler.h2
-rw-r--r--tools/perf/util/parse-events.c8
-rw-r--r--tools/perf/util/probe-finder.c231
-rw-r--r--tools/perf/util/probe-finder.h2
-rw-r--r--tools/perf/util/symbol.c59
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/ui/browsers/top.c1
-rw-r--r--tools/power/cpupower/Makefile7
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile8
l---------tools/power/cpupower/debug/x86_64/centrino-decode.c1
l---------tools/power/cpupower/debug/x86_64/powernow-k8-decode.c1
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-info.16
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-set.18
-rw-r--r--tools/power/cpupower/man/cpupower.114
-rw-r--r--tools/power/cpupower/utils/builtin.h7
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c42
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c29
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c24
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c20
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c25
-rw-r--r--tools/power/cpupower/utils/cpupower.c91
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h12
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c50
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.h2
-rw-r--r--tools/power/cpupower/utils/helpers/topology.c5
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c66
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c177
947 files changed, 13246 insertions, 5718 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1f89424c36a6..65bbd2622396 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -272,6 +272,8 @@ printk-formats.txt
 	- how to get printk format specifiers right
 prio_tree.txt
 	- info on radix-priority-search-tree use for indexing vmas.
+ramoops.txt
+	- documentation of the ramoops oops/panic logging module.
 rbtree.txt
 	- info on what red-black trees are and what they are for.
 robust-futex-ABI.txt
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
new file mode 100644
index 000000000000..29a4f892e433
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-scsi_host
@@ -0,0 +1,13 @@
+What:		/sys/class/scsi_host/hostX/isci_id
+Date:		June 2011
+Contact:	Dave Jiang <dave.jiang@intel.com>
+Description:
+		This file contains the enumerated host ID for the Intel
+		SCU controller. The Intel(R) C600 Series Chipset SATA/SAS
+		Storage Control Unit embeds up to two 4-port controllers in
+		a single PCI device. The controllers are enumerated in order
+		which usually means the lowest number scsi_host corresponds
+		with the first controller, but this association is not
+		guaranteed. The 'isci_id' attribute unambiguously identifies
+		the controller index: '0' for the first controller,
+		'1' for the second.
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 85164016ed26..23fdf79f8cf3 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-vui-sar-idc">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry>
 <entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry>
 </row>
@@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-level">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry>
 <entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry>
 </row>
@@ -1641,7 +1641,7 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-mpeg4-level">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry>
 <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry>
 </row>
@@ -1689,9 +1689,9 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-profile">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_h264_profile</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry>
 </row>
 <row><entry spanname="descr">The profile information for H264.
 Applicable to the H264 encoder.
@@ -1774,9 +1774,9 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-mpeg4-profile">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_mpeg4_profile</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry>
 </row>
 <row><entry spanname="descr">The profile information for MPEG4.
 Applicable to the MPEG4 encoder.
@@ -1820,9 +1820,9 @@ Applicable to the encoder.
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-multi-slice-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_multi_slice_mode</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry>
 </row>
 <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices.
 Applicable to the encoder.
@@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-loop-filter-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_h264_loop_filter_mode</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry>
 </row>
 <row><entry spanname="descr">Loop filter mode for H264 encoder.
 Possible values are:</entry>
@@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-entropy-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_h264_symbol_mode</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry>
 </row>
 <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC.
 Applicable to the H264 encoder.
@@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-header-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_header_mode</entry>
+<entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry>
 </row>
 <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is
 it returned together with the first frame. Applicable to encoders.
@@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE
 Applicable to the H264 encoder.</entry>
 </row>
 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_mfc51_frame_skip_mode</entry>
+<entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
 </row>
 <row><entry spanname="descr">
 Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then
@@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders.
 </entry>
 </row>
 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-mfc51-video-force-frame-type">
 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry>
-<entry>enum&nbsp;v4l2_mpeg_mfc51_force_frame_type</entry>
+<entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry>
 </row>
 <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
 Possible values are:</entry>
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 3f5e0b09bed5..53e6fca146d7 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -45,7 +45,7 @@ arrived in memory (this becomes more likely with devices behind PCI-PCI
 bridges). In order to ensure that all the data has arrived in memory,
 the interrupt handler must read a register on the device which raised
 the interrupt. PCI transaction ordering rules require that all the data
-arrives in memory before the value can be returned from the register.
+arrive in memory before the value may be returned from the register.
 Using MSIs avoids this problem as the interrupt-generating write cannot
 pass the data writes, so by the time the interrupt is raised, the driver
 knows that all the data has arrived in memory.
@@ -86,13 +86,13 @@ device.

 int pci_enable_msi(struct pci_dev *dev)

-A successful call will allocate ONE interrupt to the device, regardless
-of how many MSIs the device supports. The device will be switched from
+A successful call allocates ONE interrupt to the device, regardless
+of how many MSIs the device supports. The device is switched from
 pin-based interrupt mode to MSI mode. The dev->irq number is changed
-to a new number which represents the message signaled interrupt.
-This function should be called before the driver calls request_irq()
-since enabling MSIs disables the pin-based IRQ and the driver will not
-receive interrupts on the old interrupt.
+to a new number which represents the message signaled interrupt;
+consequently, this function should be called before the driver calls
+request_irq(), because an MSI is delivered via a vector that is
+different from the vector of a pin-based interrupt.

 4.2.2 pci_enable_msi_block

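For illustration only (not part of the patch): a minimal sketch of the ordering described above, using hypothetical foo_* names. MSI is enabled first so that request_irq() is called on the MSI vector rather than on the old pin-based one.

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		/* with MSI, all DMA data is visible by the time this runs */
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(struct pci_dev *pdev, void *priv)
	{
		int err;

		err = pci_enable_msi(pdev);	/* dev->irq now names the MSI */
		if (err)
			return err;		/* caller may fall back to INTx */

		err = request_irq(pdev->irq, foo_irq_handler, 0, "foo", priv);
		if (err)
			pci_disable_msi(pdev);	/* no free_irq(): request failed */
		return err;
	}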
@@ -111,20 +111,20 @@ the device are in the range dev->irq to dev->irq + count - 1.

 If this function returns a negative number, it indicates an error and
 the driver should not attempt to request any more MSI interrupts for
-this device. If this function returns a positive number, it will be
-less than 'count' and indicate the number of interrupts that could have
-been allocated. In neither case will the irq value have been
-updated, nor will the device have been switched into MSI mode.
+this device. If this function returns a positive number, it is
+less than 'count' and indicates the number of interrupts that could have
+been allocated. In neither case is the irq value updated or the device
+switched into MSI mode.

 The device driver must decide what action to take if
-pci_enable_msi_block() returns a value less than the number asked for.
-Some devices can make use of fewer interrupts than the maximum they
-request; in this case the driver should call pci_enable_msi_block()
+pci_enable_msi_block() returns a value less than the number requested.
+For instance, the driver could still make use of fewer interrupts;
+in this case the driver should call pci_enable_msi_block()
 again. Note that it is not guaranteed to succeed, even when the
 'count' has been reduced to the value returned from a previous call to
 pci_enable_msi_block(). This is because there are multiple constraints
 on the number of vectors that can be allocated; pci_enable_msi_block()
-will return as soon as it finds any constraint that doesn't allow the
+returns as soon as it finds any constraint that doesn't allow the
 call to succeed.

 4.2.3 pci_disable_msi
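For illustration only (not part of the patch): one way to retry with the smaller count that pci_enable_msi_block() reports, as the hunk above suggests. The foo_* name and the minimum of one vector are assumptions.

	static int foo_enable_msi_block(struct pci_dev *pdev, int nvec)
	{
		int rc;

		while (nvec >= 1) {
			rc = pci_enable_msi_block(pdev, nvec);
			if (rc == 0)
				return nvec;	/* got dev->irq .. dev->irq + nvec - 1 */
			if (rc < 0)
				return rc;	/* hard error; give up on MSI */
			nvec = rc;		/* positive: retry with the count available */
		}
		return -ENOSPC;
	}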
@@ -137,10 +137,10 @@ interrupt number and frees the previously allocated message signaled
 interrupt(s). The interrupt may subsequently be assigned to another
 device, so drivers should not cache the value of dev->irq.

-A device driver must always call free_irq() on the interrupt(s)
-for which it has called request_irq() before calling this function.
-Failure to do so will result in a BUG_ON(), the device will be left with
-MSI enabled and will leak its vector.
+Before calling this function, a device driver must always call free_irq()
+on any interrupt for which it previously called request_irq().
+Failure to do so results in a BUG_ON(), leaving the device with
+MSI enabled and thus leaking its vector.

 4.3 Using MSI-X

@@ -155,10 +155,10 @@ struct msix_entry {
 };

 This allows for the device to use these interrupts in a sparse fashion;
-for example it could use interrupts 3 and 1027 and allocate only a
+for example, it could use interrupts 3 and 1027 and yet allocate only a
 two-element array. The driver is expected to fill in the 'entry' value
-in each element of the array to indicate which entries it wants the kernel
-to assign interrupts for. It is invalid to fill in two entries with the
+in each element of the array to indicate for which entries the kernel
+should assign interrupts; it is invalid to fill in two entries with the
 same number.

 4.3.1 pci_enable_msix
@@ -168,10 +168,11 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 Calling this function asks the PCI subsystem to allocate 'nvec' MSIs.
 The 'entries' argument is a pointer to an array of msix_entry structs
 which should be at least 'nvec' entries in size. On success, the
-function will return 0 and the device will have been switched into
-MSI-X interrupt mode. The 'vector' elements in each entry will have
-been filled in with the interrupt number. The driver should then call
-request_irq() for each 'vector' that it decides to use.
+device is switched into MSI-X mode and the function returns 0.
+The 'vector' member in each entry is populated with the interrupt number;
+the driver should then call request_irq() for each 'vector' that it
+decides to use. The device driver is responsible for keeping track of the
+interrupts assigned to the MSI-X vectors so it can free them again later.

 If this function returns a negative number, it indicates an error and
 the driver should not attempt to allocate any more MSI-X interrupts for
@@ -181,16 +182,14 @@ below.
181 182
182This function, in contrast with pci_enable_msi(), does not adjust 183This function, in contrast with pci_enable_msi(), does not adjust
183dev->irq. The device will not generate interrupts for this interrupt 184dev->irq. The device will not generate interrupts for this interrupt
184number once MSI-X is enabled. The device driver is responsible for 185number once MSI-X is enabled.
185keeping track of the interrupts assigned to the MSI-X vectors so it can
186free them again later.
187 186
188Device drivers should normally call this function once per device 187Device drivers should normally call this function once per device
189during the initialization phase. 188during the initialization phase.
190 189
191It is ideal if drivers can cope with a variable number of MSI-X interrupts, 190It is ideal if drivers can cope with a variable number of MSI-X interrupts;
192there are many reasons why the platform may not be able to provide the 191there are many reasons why the platform may not be able to provide the
193exact number a driver asks for. 192exact number that a driver asks for.
194 193
195A request loop to achieve that might look like: 194A request loop to achieve that might look like:
196 195
@@ -212,15 +211,15 @@ static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
212 211
213void pci_disable_msix(struct pci_dev *dev) 212void pci_disable_msix(struct pci_dev *dev)
214 213
215This API should be used to undo the effect of pci_enable_msix(). It frees 214This function should be used to undo the effect of pci_enable_msix(). It frees
216the previously allocated message signaled interrupts. The interrupts may 215the previously allocated message signaled interrupts. The interrupts may
217subsequently be assigned to another device, so drivers should not cache 216subsequently be assigned to another device, so drivers should not cache
218the value of the 'vector' elements over a call to pci_disable_msix(). 217the value of the 'vector' elements over a call to pci_disable_msix().
219 218
220A device driver must always call free_irq() on the interrupt(s) 219Before calling this function, a device driver must always call free_irq()
221for which it has called request_irq() before calling this function. 220on any interrupt for which it previously called request_irq().
222Failure to do so will result in a BUG_ON(), the device will be left with 221Failure to do so results in a BUG_ON(), leaving the device with
223MSI enabled and will leak its vector. 222MSI-X enabled and thus leaking its vector.
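
A sketch of the corresponding MSI-X tear-down, assuming the driver kept the
msix_entry array and the number of vectors it obtained in its private
structure (the 'adapter' fields here are hypothetical):

	int i;

	for (i = 0; i < adapter->num_msix_vecs; i++)
		free_irq(adapter->msix_entries[i].vector, adapter);
	pci_disable_msix(dev);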
224 223
2254.3.3 The MSI-X Table 2244.3.3 The MSI-X Table
226 225
@@ -232,10 +231,10 @@ mask or unmask an interrupt, it should call disable_irq() / enable_irq().
2324.4 Handling devices implementing both MSI and MSI-X capabilities 2314.4 Handling devices implementing both MSI and MSI-X capabilities
233 232
234If a device implements both MSI and MSI-X capabilities, it can 233If a device implements both MSI and MSI-X capabilities, it can
235run in either MSI mode or MSI-X mode but not both simultaneously. 234run in either MSI mode or MSI-X mode, but not both simultaneously.
236This is a requirement of the PCI spec, and it is enforced by the 235This is a requirement of the PCI spec, and it is enforced by the
237PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or 236PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or
238pci_enable_msix() when MSI is already enabled will result in an error. 237pci_enable_msix() when MSI is already enabled results in an error.
239If a device driver wishes to switch between MSI and MSI-X at runtime, 238If a device driver wishes to switch between MSI and MSI-X at runtime,
240it must first quiesce the device, then switch it back to pin-interrupt 239it must first quiesce the device, then switch it back to pin-interrupt
241mode, before calling pci_enable_msi() or pci_enable_msix() and resuming 240mode, before calling pci_enable_msi() or pci_enable_msix() and resuming
@@ -251,7 +250,7 @@ the MSI-X facilities in preference to the MSI facilities. As mentioned
251above, MSI-X supports any number of interrupts between 1 and 2048. 250above, MSI-X supports any number of interrupts between 1 and 2048.
252In contrast, MSI is restricted to a maximum of 32 interrupts (and 251In contrast, MSI is restricted to a maximum of 32 interrupts (and
253must be a power of two). In addition, the MSI interrupt vectors must 252must be a power of two). In addition, the MSI interrupt vectors must
254be allocated consecutively, so the system may not be able to allocate 253be allocated consecutively, so the system might not be able to allocate
255as many vectors for MSI as it could for MSI-X. On some platforms, MSI 254as many vectors for MSI as it could for MSI-X. On some platforms, MSI
256interrupts must all be targeted at the same set of CPUs whereas MSI-X 255interrupts must all be targeted at the same set of CPUs whereas MSI-X
257interrupts can all be targeted at different CPUs. 256interrupts can all be targeted at different CPUs.
@@ -281,7 +280,7 @@ disabled to enabled and back again.
281 280
282Using 'lspci -v' (as root) may show some devices with "MSI", "Message 281Using 'lspci -v' (as root) may show some devices with "MSI", "Message
283Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities 282Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities
284has an 'Enable' flag which will be followed with either "+" (enabled) 283has an 'Enable' flag which is followed with either "+" (enabled)
285or "-" (disabled). 284or "-" (disabled).
286 285
287 286
@@ -298,7 +297,7 @@ The PCI stack provides three ways to disable MSIs:
298 297
299Some host chipsets simply don't support MSIs properly. If we're 298Some host chipsets simply don't support MSIs properly. If we're
300lucky, the manufacturer knows this and has indicated it in the ACPI 299lucky, the manufacturer knows this and has indicated it in the ACPI
301FADT table. In this case, Linux will automatically disable MSIs. 300FADT table. In this case, Linux automatically disables MSIs.
302Some boards don't include this information in the table and so we have 301Some boards don't include this information in the table and so we have
303to detect them ourselves. The complete list of these is found near the 302to detect them ourselves. The complete list of these is found near the
304quirk_disable_all_msi() function in drivers/pci/quirks.c. 303quirk_disable_all_msi() function in drivers/pci/quirks.c.
@@ -317,7 +316,7 @@ Some bridges allow you to enable MSIs by changing some bits in their
317PCI configuration space (especially the Hypertransport chipsets such 316PCI configuration space (especially the Hypertransport chipsets such
318as the nVidia nForce and Serverworks HT2000). As with host chipsets, 317as the nVidia nForce and Serverworks HT2000). As with host chipsets,
319Linux mostly knows about them and automatically enables MSIs if it can. 318Linux mostly knows about them and automatically enables MSIs if it can.
320If you have a bridge which Linux doesn't yet know about, you can enable 319If you have a bridge unknown to Linux, you can enable
321MSIs in configuration space using whatever method you know works, then 320MSIs in configuration space using whatever method you know works, then
322enable MSIs on that bridge by doing: 321enable MSIs on that bridge by doing:
323 322
@@ -327,7 +326,7 @@ where $bridge is the PCI address of the bridge you've enabled (eg
3270000:00:0e.0). 3260000:00:0e.0).
328 327
329To disable MSIs, echo 0 instead of 1. Changing this value should be 328To disable MSIs, echo 0 instead of 1. Changing this value should be
330done with caution as it can break interrupt handling for all devices 329done with caution as it could break interrupt handling for all devices
331below this bridge. 330below this bridge.
332 331
333Again, please notify linux-pci@vger.kernel.org of any bridges that need 332Again, please notify linux-pci@vger.kernel.org of any bridges that need
@@ -336,7 +335,7 @@ special handling.
3365.3. Disabling MSIs on a single device 3355.3. Disabling MSIs on a single device
337 336
338Some devices are known to have faulty MSI implementations. Usually this 337Some devices are known to have faulty MSI implementations. Usually this
339is handled in the individual device driver but occasionally it's necessary 338is handled in the individual device driver, but occasionally it's necessary
340to handle this with a quirk. Some drivers have an option to disable use 339to handle this with a quirk. Some drivers have an option to disable use
341of MSI. While this is a convenient workaround for the driver author, 340of MSI. While this is a convenient workaround for the driver author,
342it is not good practice, and should not be emulated. 341it is not good practice, and should not be emulated.
@@ -350,7 +349,7 @@ for your machine. You should also check your .config to be sure you
350have enabled CONFIG_PCI_MSI. 349have enabled CONFIG_PCI_MSI.
351 350
352Then, 'lspci -t' gives the list of bridges above a device. Reading 351Then, 'lspci -t' gives the list of bridges above a device. Reading
353/sys/bus/pci/devices/*/msi_bus will tell you whether MSI are enabled (1) 352/sys/bus/pci/devices/*/msi_bus will tell you whether MSIs are enabled (1)
354or disabled (0). If 0 is found in any of the msi_bus files belonging 353or disabled (0). If 0 is found in any of the msi_bus files belonging
355to bridges between the PCI root and the device, MSIs are disabled. 354to bridges between the PCI root and the device, MSIs are disabled.
356 355
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 319baa8b60dd..36d16bbf72c6 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,7 +130,7 @@ Linux kernel master tree:
130 ftp.??.kernel.org:/pub/linux/kernel/... 130 ftp.??.kernel.org:/pub/linux/kernel/...
131 ?? == your country code, such as "us", "uk", "fr", etc. 131 ?? == your country code, such as "us", "uk", "fr", etc.
132 132
133 http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git 133 http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
134 134
135Linux kernel mailing list: 135Linux kernel mailing list:
136 linux-kernel@vger.kernel.org 136 linux-kernel@vger.kernel.org
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 569f3532e138..4468ce24427c 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -303,7 +303,7 @@ patches that are being emailed around.
303 303
304The sign-off is a simple line at the end of the explanation for the 304The sign-off is a simple line at the end of the explanation for the
305patch, which certifies that you wrote it or otherwise have the right to 305patch, which certifies that you wrote it or otherwise have the right to
306pass it on as a open-source patch. The rules are pretty simple: if you 306pass it on as an open-source patch. The rules are pretty simple: if you
307can certify the below: 307can certify the below:
308 308
309 Developer's Certificate of Origin 1.1 309 Developer's Certificate of Origin 1.1
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index e578feed6d81..6d670f570451 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -43,3 +43,74 @@ If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
43to IOPS mode and starts providing fairness in terms of number of requests 43to IOPS mode and starts providing fairness in terms of number of requests
44dispatched. Note that this mode switching takes effect only for group 44dispatched. Note that this mode switching takes effect only for group
45scheduling. For non-cgroup users nothing should change. 45scheduling. For non-cgroup users nothing should change.
46
47CFQ IO scheduler Idling Theory
48===============================
49Idling on a queue is primarily about waiting for the next request to come
50on the same queue after completion of a request. While idling, CFQ will not
51dispatch requests from other cfq queues even if requests are pending there.
52
53The rationale behind idling is that it can cut down on the number of seeks
54on rotational media. For example, if a process is doing dependent
55sequential reads (the next read is issued only after the previous one has
56completed), then not dispatching requests from other queues should help, as
57we did not move the disk head and kept on dispatching sequential IO from
58one queue.
59
60CFQ has the following service trees, and the various queues are put on them.
61
62 sync-idle sync-noidle async
63
64All cfq queues doing synchronous sequential IO go onto the sync-idle tree.
65On this tree we idle on each queue individually.
66
67All synchronous non-sequential queues go on the sync-noidle tree. Also, any
68requests which are marked with REQ_NOIDLE go on this service tree. On this
69tree we do not idle on individual queues; instead we idle on the whole group
70of queues, i.e. the tree. So if there are 4 queues waiting for IO to dispatch,
71we will idle only once the last queue has dispatched its IO and there is
72no more IO on this service tree.
73
74All async writes go on the async service tree. There is no idling on async
75queues.
76
77CFQ has some optimizations for SSDs: if it detects a non-rotational
78medium which can support a higher queue depth (multiple requests in
79flight at a time), then it cuts down on idling of individual queues;
80all the queues move to the sync-noidle tree and only tree idling remains. This
81tree idling provides isolation from the buffered write queues on the async tree.
82
83FAQ
84===
85Q1. Why idle at all on queues marked with REQ_NOIDLE?
86
87A1. We only do tree idling (all queues on the sync-noidle tree) on queues
88 marked with REQ_NOIDLE. This helps provide isolation from all the
89 sync-idle queues. Otherwise, in the presence of many sequential readers,
90 other synchronous IO might not get a fair share of the disk.
91
92 For example, say there are 10 sequential readers doing IO and each
93 gets 100ms. If a REQ_NOIDLE request comes in, it will be scheduled
94 roughly 1 second later. If we do not idle after the REQ_NOIDLE request
95 completes, and a couple of milliseconds later another REQ_NOIDLE
96 request comes in, it will again be scheduled 1 second later. Repeat this
97 and notice how a workload can lose its share of the disk and suffer due
98 to multiple sequential readers.
99
100 fsync can generate dependent IO where a bunch of data is written in the
101 context of fsync, and later some journaling data is written. Journaling
102 data comes in only after fsync has finished its IO (at least for ext4
103 that seemed to be the case). Now if one decides not to idle on the fsync
104 thread due to REQ_NOIDLE, the next journaling write will not get
105 scheduled for another second. A process doing small fsyncs will suffer
106 badly in the presence of multiple sequential readers.
107
108 Hence, doing tree idling on threads that use the REQ_NOIDLE flag on
109 requests provides isolation from multiple sequential readers while, at
110 the same time, not idling on the individual threads.
111
112Q2. When to specify REQ_NOIDLE?
113A2. I would think that whenever one is doing a synchronous write and not
114 expecting more writes to be dispatched from the same context soon, one
115 should be able to specify REQ_NOIDLE on those writes, and that probably
116 works well for most cases.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 6f3c598971fc..06eb6d957c83 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -380,7 +380,7 @@ will be charged as a new owner of it.
380 380
3815.2 stat file 3815.2 stat file
382 382
3835.2.1 memory.stat file includes the following statistics 383memory.stat file includes the following statistics
384 384
385# per-memory cgroup local status 385# per-memory cgroup local status
386cache - # of bytes of page cache memory. 386cache - # of bytes of page cache memory.
@@ -438,89 +438,6 @@ Note:
438 file_mapped is accounted only when the memory cgroup is owner of page 438 file_mapped is accounted only when the memory cgroup is owner of page
439 cache.) 439 cache.)
440 440
4415.2.2 memory.vmscan_stat
442
443memory.vmscan_stat includes statistics for memory scanning, freeing,
444and reclaiming. The statistics show memory scanning information since
445memory cgroup creation and can be reset to 0 by writing 0 as
446
447 #echo 0 > ../memory.vmscan_stat
448
449This file contains following statistics.
450
451[param]_[file_or_anon]_pages_by_[reason]_[under_hierarchy]
452[param]_elapsed_ns_by_[reason]_[under_hierarchy]
453
454For example,
455
456 scanned_file_pages_by_limit indicates the number of scanned
457 file pages at vmscan.
458
459Now, 3 parameters are supported
460
461 scanned - the number of pages scanned by vmscan
462 rotated - the number of pages activated at vmscan
463 freed - the number of pages freed by vmscan
464
465If "rotated" is high against scanned/freed, the memcg seems busy.
466
467Now, 2 reasons are supported
468
469 limit - the memory cgroup's limit
470 system - global memory pressure + softlimit
471 (global memory pressure not under softlimit is not handled now)
472
473When under_hierarchy is added in the tail, the number indicates the
474total memcg scan of its children and itself.
475
476elapsed_ns is an elapsed time in nanoseconds. This may include sleep time
477and does not indicate CPU usage. So, please take this as just showing
478latency.
479
480Here is an example.
481
482# cat /cgroup/memory/A/memory.vmscan_stat
483scanned_pages_by_limit 9471864
484scanned_anon_pages_by_limit 6640629
485scanned_file_pages_by_limit 2831235
486rotated_pages_by_limit 4243974
487rotated_anon_pages_by_limit 3971968
488rotated_file_pages_by_limit 272006
489freed_pages_by_limit 2318492
490freed_anon_pages_by_limit 962052
491freed_file_pages_by_limit 1356440
492elapsed_ns_by_limit 351386416101
493scanned_pages_by_system 0
494scanned_anon_pages_by_system 0
495scanned_file_pages_by_system 0
496rotated_pages_by_system 0
497rotated_anon_pages_by_system 0
498rotated_file_pages_by_system 0
499freed_pages_by_system 0
500freed_anon_pages_by_system 0
501freed_file_pages_by_system 0
502elapsed_ns_by_system 0
503scanned_pages_by_limit_under_hierarchy 9471864
504scanned_anon_pages_by_limit_under_hierarchy 6640629
505scanned_file_pages_by_limit_under_hierarchy 2831235
506rotated_pages_by_limit_under_hierarchy 4243974
507rotated_anon_pages_by_limit_under_hierarchy 3971968
508rotated_file_pages_by_limit_under_hierarchy 272006
509freed_pages_by_limit_under_hierarchy 2318492
510freed_anon_pages_by_limit_under_hierarchy 962052
511freed_file_pages_by_limit_under_hierarchy 1356440
512elapsed_ns_by_limit_under_hierarchy 351386416101
513scanned_pages_by_system_under_hierarchy 0
514scanned_anon_pages_by_system_under_hierarchy 0
515scanned_file_pages_by_system_under_hierarchy 0
516rotated_pages_by_system_under_hierarchy 0
517rotated_anon_pages_by_system_under_hierarchy 0
518rotated_file_pages_by_system_under_hierarchy 0
519freed_pages_by_system_under_hierarchy 0
520freed_anon_pages_by_system_under_hierarchy 0
521freed_file_pages_by_system_under_hierarchy 0
522elapsed_ns_by_system_under_hierarchy 0
523
5245.3 swappiness 4415.3 swappiness
525 442
526Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. 443Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index a0b58e29f911..860c29a472ad 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -199,18 +199,16 @@ to coerce it into behaving.
199 199
200To beat some sense out of the internal editor, do this: 200To beat some sense out of the internal editor, do this:
201 201
202- Under account settings, composition and addressing, uncheck "Compose
203 messages in HTML format".
204
205- Edit your Thunderbird config settings so that it won't use format=flowed. 202- Edit your Thunderbird config settings so that it won't use format=flowed.
206 Go to "edit->preferences->advanced->config editor" to bring up the 203 Go to "edit->preferences->advanced->config editor" to bring up the
207 thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to 204 thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
208 "false". 205 "false".
209 206
210- Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML 207- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
211 composer, select "Preformat" from the drop-down box just under the subject 208
212 line, then close the message without saving. (This setting also applies to 209- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
213 the text composer, but the only control for it is in the HTML composer.) 210
211- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
214 212
215- Install the "toggle wordwrap" extension. Download the file from: 213- Install the "toggle wordwrap" extension. Download the file from:
216 https://addons.mozilla.org/thunderbird/addon/2351/ 214 https://addons.mozilla.org/thunderbird/addon/2351/
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c4a6e148732a..4dc465477665 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -592,3 +592,11 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
592 interface that was used by the acer-wmi driver. It will be replaced by 592 interface that was used by the acer-wmi driver. It will be replaced by
593 an information log when acer-wmi initializes. 593 an information log when acer-wmi initializes.
594Who: Lee, Chun-Yi <jlee@novell.com> 594Who: Lee, Chun-Yi <jlee@novell.com>
595
596----------------------------
597What: The XFS nodelaylog mount option
598When: 3.3
599Why: The delaylog mode that has been the default since 2.6.39 has proven
600 stable, and the old code is in the way of additional improvements in
601 the log code.
602Who: Christoph Hellwig <hch@lst.de>
diff --git a/Documentation/filesystems/befs.txt b/Documentation/filesystems/befs.txt
index 6e49c363938e..da45e6c842b8 100644
--- a/Documentation/filesystems/befs.txt
+++ b/Documentation/filesystems/befs.txt
@@ -27,7 +27,7 @@ His original code can still be found at:
27Does anyone know of a more current email address for Makoto? He doesn't 27Does anyone know of a more current email address for Makoto? He doesn't
28respond to the address given above... 28respond to the address given above...
29 29
30Current maintainer: Sergey S. Kostyliov <rathamahata@php4.ru> 30This filesystem doesn't have a maintainer.
31 31
32WHAT IS THIS DRIVER? 32WHAT IS THIS DRIVER?
33================== 33==================
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065
index 44b4f61e04f9..c11f64a1f2ad 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
62the devices explicitly. Please see Documentation/i2c/instantiating-devices for 62the devices explicitly. Please see Documentation/i2c/instantiating-devices for
63details. 63details.
64 64
65WARNING: Do not access chip registers using the i2cdump command, and do not use
66any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
67supported by this driver interpret any access to a command register (including
68read commands) as request to execute the command in question. This may result in
69power loss, board resets, and/or Flash corruption. Worst case, your board may
70turn into a brick.
71
65 72
66Sysfs entries 73Sysfs entries
67------------- 74-------------
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 845a191004b1..54078ed96b37 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -319,4 +319,6 @@ Code Seq#(hex) Include File Comments
319 <mailto:thomas@winischhofer.net> 319 <mailto:thomas@winischhofer.net>
3200xF4 00-1F video/mbxfb.h mbxfb 3200xF4 00-1F video/mbxfb.h mbxfb
321 <mailto:raph@8d.com> 321 <mailto:raph@8d.com>
3220xF6 all LTTng Linux Trace Toolkit Next Generation
323 <mailto:mathieu.desnoyers@efficios.com>
3220xFD all linux/dm-ioctl.h 3240xFD all linux/dm-ioctl.h
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 9a8674629a07..0e0734b509d8 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -620,17 +620,6 @@
620 (including this document itself) have been moved there, and might 620 (including this document itself) have been moved there, and might
621 be more up to date than the web version. 621 be more up to date than the web version.
622 622
623 * Name: "Linux Source Driver"
624 URL: http://lsd.linux.cz
625 Keywords: Browsing source code.
626 Description: "Linux Source Driver (LSD) is an application, which
627 can make browsing source codes of Linux kernel easier than you can
628 imagine. You can select between multiple versions of kernel (e.g.
629 0.01, 1.0.0, 2.0.33, 2.0.34pre13, 2.0.0, 2.1.101 etc.). With LSD
630 you can search Linux kernel (fulltext, macros, types, functions
631 and variables) and LSD can generate patches for you on the fly
632 (files, directories or kernel)".
633
634 * Name: "Linux Kernel Source Reference" 623 * Name: "Linux Kernel Source Reference"
635 Author: Thomas Graichen. 624 Author: Thomas Graichen.
636 URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4 625 URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 78926aa2531c..854ed5ca7e3f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -40,6 +40,7 @@ parameter is applicable:
40 ALSA ALSA sound support is enabled. 40 ALSA ALSA sound support is enabled.
41 APIC APIC support is enabled. 41 APIC APIC support is enabled.
42 APM Advanced Power Management support is enabled. 42 APM Advanced Power Management support is enabled.
43 ARM ARM architecture is enabled.
43 AVR32 AVR32 architecture is enabled. 44 AVR32 AVR32 architecture is enabled.
44 AX25 Appropriate AX.25 support is enabled. 45 AX25 Appropriate AX.25 support is enabled.
45 BLACKFIN Blackfin architecture is enabled. 46 BLACKFIN Blackfin architecture is enabled.
@@ -49,6 +50,7 @@ parameter is applicable:
49 EFI EFI Partitioning (GPT) is enabled 50 EFI EFI Partitioning (GPT) is enabled
50 EIDE EIDE/ATAPI support is enabled. 51 EIDE EIDE/ATAPI support is enabled.
51 FB The frame buffer device is enabled. 52 FB The frame buffer device is enabled.
53 FTRACE Function tracing enabled.
52 GCOV GCOV profiling is enabled. 54 GCOV GCOV profiling is enabled.
53 HW Appropriate hardware is enabled. 55 HW Appropriate hardware is enabled.
54 IA-64 IA-64 architecture is enabled. 56 IA-64 IA-64 architecture is enabled.
@@ -69,6 +71,7 @@ parameter is applicable:
69 Documentation/m68k/kernel-options.txt. 71 Documentation/m68k/kernel-options.txt.
70 MCA MCA bus support is enabled. 72 MCA MCA bus support is enabled.
71 MDA MDA console support is enabled. 73 MDA MDA console support is enabled.
74 MIPS MIPS architecture is enabled.
72 MOUSE Appropriate mouse support is enabled. 75 MOUSE Appropriate mouse support is enabled.
73 MSI Message Signaled Interrupts (PCI). 76 MSI Message Signaled Interrupts (PCI).
74 MTD MTD (Memory Technology Device) support is enabled. 77 MTD MTD (Memory Technology Device) support is enabled.
@@ -100,7 +103,6 @@ parameter is applicable:
100 SPARC Sparc architecture is enabled. 103 SPARC Sparc architecture is enabled.
101 SWSUSP Software suspend (hibernation) is enabled. 104 SWSUSP Software suspend (hibernation) is enabled.
102 SUSPEND System suspend states are enabled. 105 SUSPEND System suspend states are enabled.
103 FTRACE Function tracing enabled.
104 TPM TPM drivers are enabled. 106 TPM TPM drivers are enabled.
105 TS Appropriate touchscreen support is enabled. 107 TS Appropriate touchscreen support is enabled.
106 UMS USB Mass Storage support is enabled. 108 UMS USB Mass Storage support is enabled.
@@ -115,7 +117,7 @@ parameter is applicable:
115 X86-64 X86-64 architecture is enabled. 117 X86-64 X86-64 architecture is enabled.
116 More X86-64 boot options can be found in 118 More X86-64 boot options can be found in
117 Documentation/x86/x86_64/boot-options.txt . 119 Documentation/x86/x86_64/boot-options.txt .
118 X86 Either 32bit or 64bit x86 (same as X86-32+X86-64) 120 X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
119 XEN Xen support is enabled 121 XEN Xen support is enabled
120 122
121In addition, the following text indicates that the option: 123In addition, the following text indicates that the option:
@@ -376,7 +378,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
376 atkbd.softrepeat= [HW] 378 atkbd.softrepeat= [HW]
377 Use software keyboard repeat 379 Use software keyboard repeat
378 380
379 autotest [IA64] 381 autotest [IA-64]
380 382
381 baycom_epp= [HW,AX25] 383 baycom_epp= [HW,AX25]
382 Format: <io>,<mode> 384 Format: <io>,<mode>
@@ -681,8 +683,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
681 uart[8250],mmio32,<addr>[,options] 683 uart[8250],mmio32,<addr>[,options]
682 Start an early, polled-mode console on the 8250/16550 684 Start an early, polled-mode console on the 8250/16550
683 UART at the specified I/O port or MMIO address. 685 UART at the specified I/O port or MMIO address.
684 MMIO inter-register address stride is either 8bit (mmio) 686 MMIO inter-register address stride is either 8-bit
685 or 32bit (mmio32). 687 (mmio) or 32-bit (mmio32).
686 The options are the same as for ttyS, above. 688 The options are the same as for ttyS, above.
687 689
688 earlyprintk= [X86,SH,BLACKFIN] 690 earlyprintk= [X86,SH,BLACKFIN]
@@ -725,7 +727,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
725 See Documentation/block/as-iosched.txt and 727 See Documentation/block/as-iosched.txt and
726 Documentation/block/deadline-iosched.txt for details. 728 Documentation/block/deadline-iosched.txt for details.
727 729
728 elfcorehdr= [IA64,PPC,SH,X86] 730 elfcorehdr= [IA-64,PPC,SH,X86]
729 Specifies physical address of start of kernel core 731 Specifies physical address of start of kernel core
730 image elf header. Generally kexec loader will 732 image elf header. Generally kexec loader will
731 pass this option to capture kernel. 733 pass this option to capture kernel.
@@ -791,7 +793,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
791 tracer at boot up. function-list is a comma separated 793 tracer at boot up. function-list is a comma separated
792 list of functions. This list can be changed at run 794 list of functions. This list can be changed at run
793 time by the set_ftrace_filter file in the debugfs 795 time by the set_ftrace_filter file in the debugfs
794 tracing directory. 796 tracing directory.
795 797
796 ftrace_notrace=[function-list] 798 ftrace_notrace=[function-list]
797 [FTRACE] Do not trace the functions specified in 799 [FTRACE] Do not trace the functions specified in
@@ -829,7 +831,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
829 831
830 hashdist= [KNL,NUMA] Large hashes allocated during boot 832 hashdist= [KNL,NUMA] Large hashes allocated during boot
831 are distributed across NUMA nodes. Defaults on 833 are distributed across NUMA nodes. Defaults on
832 for 64bit NUMA, off otherwise. 834 for 64-bit NUMA, off otherwise.
833 Format: 0 | 1 (for off | on) 835 Format: 0 | 1 (for off | on)
834 836
835 hcl= [IA-64] SGI's Hardware Graph compatibility layer 837 hcl= [IA-64] SGI's Hardware Graph compatibility layer
@@ -998,10 +1000,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
998 DMA. 1000 DMA.
999 forcedac [x86_64] 1001 forcedac [x86_64]
1000 With this option iommu will not optimize to look 1002 With this option iommu will not optimize to look
1001 for io virtual address below 32 bit forcing dual 1003 for io virtual address below 32-bit forcing dual
1002 address cycle on pci bus for cards supporting greater 1004 address cycle on pci bus for cards supporting greater
1003 than 32 bit addressing. The default is to look 1005 than 32-bit addressing. The default is to look
1004 for translation below 32 bit and if not available 1006 for translation below 32-bit and if not available
1005 then look in the higher range. 1007 then look in the higher range.
1006 strict [Default Off] 1008 strict [Default Off]
1007 With this option on every unmap_single operation will 1009 With this option on every unmap_single operation will
@@ -1017,7 +1019,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1017 off disable Interrupt Remapping 1019 off disable Interrupt Remapping
1018 nosid disable Source ID checking 1020 nosid disable Source ID checking
1019 1021
1020 inttest= [IA64] 1022 inttest= [IA-64]
1021 1023
1022 iomem= Disable strict checking of access to MMIO memory 1024 iomem= Disable strict checking of access to MMIO memory
1023 strict regions from userspace. 1025 strict regions from userspace.
@@ -1034,7 +1036,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1034 nomerge 1036 nomerge
1035 forcesac 1037 forcesac
1036 soft 1038 soft
1037 pt [x86, IA64] 1039 pt [x86, IA-64]
1038 1040
1039 io7= [HW] IO7 for Marvel based alpha systems 1041 io7= [HW] IO7 for Marvel based alpha systems
1040 See comment before marvel_specify_io7 in 1042 See comment before marvel_specify_io7 in
@@ -1165,7 +1167,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1165 1167
1166 kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU) 1168 kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU)
1167 for all guests. 1169 for all guests.
1168 Default is 1 (enabled) if in 64bit or 32bit-PAE mode 1170 Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
1169 1171
1170 kvm-intel.ept= [KVM,Intel] Disable extended page tables 1172 kvm-intel.ept= [KVM,Intel] Disable extended page tables
1171 (virtualized MMU) support on capable Intel chips. 1173 (virtualized MMU) support on capable Intel chips.
@@ -1202,10 +1204,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1202 libata.dma=0 Disable all PATA and SATA DMA 1204 libata.dma=0 Disable all PATA and SATA DMA
1203 libata.dma=1 PATA and SATA Disk DMA only 1205 libata.dma=1 PATA and SATA Disk DMA only
1204 libata.dma=2 ATAPI (CDROM) DMA only 1206 libata.dma=2 ATAPI (CDROM) DMA only
1205 libata.dma=4 Compact Flash DMA only 1207 libata.dma=4 Compact Flash DMA only
1206 Combinations also work, so libata.dma=3 enables DMA 1208 Combinations also work, so libata.dma=3 enables DMA
1207 for disks and CDROMs, but not CFs. 1209 for disks and CDROMs, but not CFs.
1208 1210
1209 libata.ignore_hpa= [LIBATA] Ignore HPA limit 1211 libata.ignore_hpa= [LIBATA] Ignore HPA limit
1210 libata.ignore_hpa=0 keep BIOS limits (default) 1212 libata.ignore_hpa=0 keep BIOS limits (default)
1211 libata.ignore_hpa=1 ignore limits, using full disk 1213 libata.ignore_hpa=1 ignore limits, using full disk
@@ -1331,7 +1333,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1331 ltpc= [NET] 1333 ltpc= [NET]
1332 Format: <io>,<irq>,<dma> 1334 Format: <io>,<irq>,<dma>
1333 1335
1334 machvec= [IA64] Force the use of a particular machine-vector 1336 machvec= [IA-64] Force the use of a particular machine-vector
1335 (machvec) in a generic kernel. 1337 (machvec) in a generic kernel.
1336 Example: machvec=hpzx1_swiotlb 1338 Example: machvec=hpzx1_swiotlb
1337 1339
@@ -1348,9 +1350,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1348 it is equivalent to "nosmp", which also disables 1350 it is equivalent to "nosmp", which also disables
1349 the IO APIC. 1351 the IO APIC.
1350 1352
1351 max_loop= [LOOP] Maximum number of loopback devices that can 1353 max_loop= [LOOP] The number of loop block devices that get
1352 be mounted 1354 (loop.max_loop) unconditionally pre-created at init time. The default
1353 Format: <1-256> 1355 number is configured by BLK_DEV_LOOP_MIN_COUNT. Instead
1356 of statically allocating a predefined number, loop
1357 devices can be requested on-demand with the
1358 /dev/loop-control interface.
1354 1359
1355 mcatest= [IA-64] 1360 mcatest= [IA-64]
1356 1361
@@ -1734,7 +1739,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1734 1739
1735 nointroute [IA-64] 1740 nointroute [IA-64]
1736 1741
1737 nojitter [IA64] Disables jitter checking for ITC timers. 1742 nojitter [IA-64] Disables jitter checking for ITC timers.
1738 1743
1739 no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver 1744 no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
1740 1745
@@ -1800,7 +1805,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1800 1805
1801 nox2apic [X86-64,APIC] Do not enable x2APIC mode. 1806 nox2apic [X86-64,APIC] Do not enable x2APIC mode.
1802 1807
1803 nptcg= [IA64] Override max number of concurrent global TLB 1808 nptcg= [IA-64] Override max number of concurrent global TLB
1804 purges which is reported from either PAL_VM_SUMMARY or 1809 purges which is reported from either PAL_VM_SUMMARY or
1805 SAL PALO. 1810 SAL PALO.
1806 1811
@@ -2077,13 +2082,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2077 Format: { parport<nr> | timid | 0 } 2082 Format: { parport<nr> | timid | 0 }
2078 See also Documentation/parport.txt. 2083 See also Documentation/parport.txt.
2079 2084
2080 pmtmr= [X86] Manual setup of pmtmr I/O Port. 2085 pmtmr= [X86] Manual setup of pmtmr I/O Port.
2081 Override pmtimer IOPort with a hex value. 2086 Override pmtimer IOPort with a hex value.
2082 e.g. pmtmr=0x508 2087 e.g. pmtmr=0x508
2083 2088
2084 pnp.debug [PNP] 2089 pnp.debug=1 [PNP]
2085 Enable PNP debug messages. This depends on the 2090 Enable PNP debug messages (depends on the
2086 CONFIG_PNP_DEBUG_MESSAGES option. 2091 CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time
2092 via /sys/module/pnp/parameters/debug. We always show
2093 current resource usage; turning this on also shows
2094 possible settings and some assignment information.
2087 2095
2088 pnpacpi= [ACPI] 2096 pnpacpi= [ACPI]
2089 { off } 2097 { off }
@@ -2635,6 +2643,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2635 medium is write-protected). 2643 medium is write-protected).
2636 Example: quirks=0419:aaf5:rl,0421:0433:rc 2644 Example: quirks=0419:aaf5:rl,0421:0433:rc
2637 2645
2646 user_debug= [KNL,ARM]
2647 Format: <int>
2648 See arch/arm/Kconfig.debug help text.
2649 1 - undefined instruction events
2650 2 - system calls
2651 4 - invalid data aborts
2652 8 - SIGSEGV faults
2653 16 - SIGBUS faults
2654 Example: user_debug=31
2655
2638 userpte= 2656 userpte=
2639 [X86] Flags controlling user PTE allocations. 2657 [X86] Flags controlling user PTE allocations.
2640 2658
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt
index 8006c227fda2..25320bf19c86 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/dmfe.txt
@@ -1,3 +1,5 @@
1Note: This driver doesn't have a maintainer.
2
1Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux. 3Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
2 4
3This program is free software; you can redistribute it and/or 5This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
55Authors: 57Authors:
56 58
57Sten Wang <sten_wang@davicom.com.tw > : Original Author 59Sten Wang <sten_wang@davicom.com.tw > : Original Author
58Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
59 60
60Contributors: 61Contributors:
61 62
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f2716df05601..98c8d4229f0a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -992,7 +992,7 @@ bindv6only - BOOLEAN
992 TRUE: disable IPv4-mapped address feature 992 TRUE: disable IPv4-mapped address feature
993 FALSE: enable IPv4-mapped address feature 993 FALSE: enable IPv4-mapped address feature
994 994
995 Default: FALSE (as specified in RFC2553bis) 995 Default: FALSE (as specified in RFC3493)
996 996
997IPv6 Fragmentation: 997IPv6 Fragmentation:
998 998
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 4ce5450ab6e8..6066e3a6b9a9 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -431,8 +431,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
431 431
432 void pm_runtime_irq_safe(struct device *dev); 432 void pm_runtime_irq_safe(struct device *dev);
433 - set the power.irq_safe flag for the device, causing the runtime-PM 433 - set the power.irq_safe flag for the device, causing the runtime-PM
434 suspend and resume callbacks (but not the idle callback) to be invoked 434 callbacks to be invoked with interrupts off
435 with interrupts disabled
436 435
437 void pm_runtime_mark_last_busy(struct device *dev); 436 void pm_runtime_mark_last_busy(struct device *dev);
438 - set the power.last_busy field to the current time 437 - set the power.last_busy field to the current time
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
new file mode 100644
index 000000000000..8fb1ba7fe7bf
--- /dev/null
+++ b/Documentation/ramoops.txt
@@ -0,0 +1,76 @@
1Ramoops oops/panic logger
2=========================
3
4Sergiu Iordache <sergiu@chromium.org>
5
6Updated: 8 August 2011
7
80. Introduction
9
10Ramoops is an oops/panic logger that writes its logs to RAM before the system
11crashes. It works by logging oopses and panics in a circular buffer. Ramoops
12needs a system with persistent RAM so that the content of that area can
13survive after a restart.
14
151. Ramoops concepts
16
17Ramoops uses a predefined memory area to store the dump. The start and size of
18the memory area are set using two variables:
19 * "mem_address" for the start
20 * "mem_size" for the size. The memory size will be rounded down to a
21 power of two.
22
23The memory area is divided into "record_size" chunks (also rounded down to
24a power of two), and each oops/panic writes a "record_size" chunk of
25information.
26
27Dumping both oopses and panics can be done by setting the "dump_oops"
28variable to 1, while setting it to 0 dumps only the panics.
29
30The module uses a counter to record multiple dumps but the counter gets reset
31on restart (i.e. new dumps after the restart will overwrite old ones).
32
332. Setting the parameters
34
35Setting the ramoops parameters can be done in two different ways:
36 1. Use the module parameters (which have the same names as the variables
37 described above).
38 2. Use a platform device and set the platform data. The parameters can then
39 be set through that platform data. An example of doing that is:
40
41#include <linux/ramoops.h>
42[...]
43
44static struct ramoops_platform_data ramoops_data = {
45 .mem_size = <...>,
46 .mem_address = <...>,
47 .record_size = <...>,
48 .dump_oops = <...>,
49};
50
51static struct platform_device ramoops_dev = {
52 .name = "ramoops",
53 .dev = {
54 .platform_data = &ramoops_data,
55 },
56};
57
58[... inside a function ...]
59int ret;
60
61ret = platform_device_register(&ramoops_dev);
62if (ret) {
63 printk(KERN_ERR "unable to register platform device\n");
64 return ret;
65}
66
673. Dump format
68
69The data dump begins with a header, currently defined as "====" followed by a
70timestamp and a new line. The dump then continues with the actual data.
71
724. Reading the data
73
74The dump data can be read from memory (through /dev/mem or other means).
75Getting the module parameters, which are needed in order to parse the data, can
76be done through /sys/module/ramoops/parameters/* .
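
As a rough illustration only (not part of the ramoops interface; a user-space
sketch that assumes unrestricted /dev/mem access, which CONFIG_STRICT_DEVMEM
may forbid), the area could be dumped like this:

#define _XOPEN_SOURCE 500
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static unsigned long read_param(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/module/ramoops/parameters/%s", name);
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		exit(1);
	}
	fclose(f);
	return strtoul(buf, NULL, 0);
}

int main(void)
{
	unsigned long addr = read_param("mem_address");
	unsigned long size = read_param("mem_size");
	char *buf = malloc(size);
	int fd = open("/dev/mem", O_RDONLY);

	if (!buf || fd < 0) {
		perror("setup");
		return 1;
	}
	if (pread(fd, buf, size, addr) != (ssize_t)size) {
		perror("pread /dev/mem");
		return 1;
	}
	fwrite(buf, 1, size, stdout);	/* raw dump; records start with "====" */
	return 0;
}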
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX
index fe0251c4cfb7..8e601991d91c 100644
--- a/Documentation/virtual/00-INDEX
+++ b/Documentation/virtual/00-INDEX
@@ -8,3 +8,6 @@ lguest/
8 - Extremely simple hypervisor for experimental/educational use. 8 - Extremely simple hypervisor for experimental/educational use.
9uml/ 9uml/
10 - User Mode Linux, builds/runs Linux kernel as a userspace program. 10 - User Mode Linux, builds/runs Linux kernel as a userspace program.
11virtio.txt
12 - Text version of draft virtio spec.
13 See http://ozlabs.org/~rusty/virtio-spec
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index 043bd7df3139..d928c134dee6 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -1996,6 +1996,9 @@ int main(int argc, char *argv[])
1996 /* We use a simple helper to copy the arguments separated by spaces. */ 1996 /* We use a simple helper to copy the arguments separated by spaces. */
1997 concat((char *)(boot + 1), argv+optind+2); 1997 concat((char *)(boot + 1), argv+optind+2);
1998 1998
1999 /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
2000 boot->hdr.kernel_alignment = 0x1000000;
2001
1999 /* Boot protocol version: 2.07 supports the fields for lguest. */ 2002 /* Boot protocol version: 2.07 supports the fields for lguest. */
2000 boot->hdr.version = 0x207; 2003 boot->hdr.version = 0x207;
2001 2004
diff --git a/Documentation/virtual/virtio-spec.txt b/Documentation/virtual/virtio-spec.txt
new file mode 100644
index 000000000000..a350ae135b8c
--- /dev/null
+++ b/Documentation/virtual/virtio-spec.txt
@@ -0,0 +1,2200 @@
1[Generated file: see http://ozlabs.org/~rusty/virtio-spec/]
2Virtio PCI Card Specification
3v0.9.1 DRAFT
4-
5
6Rusty Russell <rusty@rustcorp.com.au>IBM Corporation (Editor)
7
82011 August 1.
9
10Purpose and Description
11
12This document describes the specifications of the “virtio” family
13of PCI devices. These devices
14are found in virtual environments,
15yet by design they are not all that different from physical PCI
16devices, and this document treats them as such. This allows the
17guest to use standard PCI drivers and discovery mechanisms.
18
19The purpose of virtio and this specification is that virtual
20environments and guests should have a straightforward, efficient,
21standard and extensible mechanism for virtual devices, rather
22than boutique per-environment or per-OS mechanisms.
23
24 Straightforward: Virtio PCI devices use normal PCI mechanisms
25 of interrupts and DMA which should be familiar to any device
26 driver author. There is no exotic page-flipping or COW
27 mechanism: it's just a PCI device.[footnote:
28This lack of page-sharing implies that the implementation of the
29device (e.g. the hypervisor or host) needs full access to the
30guest memory. Communication with untrusted parties (i.e.
31inter-guest communication) requires copying.
32]
33
34 Efficient: Virtio PCI devices consist of rings of descriptors
35 for input and output, which are neatly separated to avoid cache
36 effects from both guest and device writing to the same cache
37 lines.
38
39 Standard: Virtio PCI makes no assumptions about the environment
40 in which it operates, beyond supporting PCI. In fact the virtio
41 devices specified in the appendices do not require PCI at all:
42 they have been implemented on non-PCI buses.[footnote:
43The Linux implementation further separates the PCI virtio code
44from the specific virtio drivers: these drivers are shared with
45the non-PCI implementations (currently lguest and S/390).
46]
47
48 Extensible: Virtio PCI devices contain feature bits which are
49 acknowledged by the guest operating system during device setup.
50 This allows forwards and backwards compatibility: the device
51 offers all the features it knows about, and the driver
52 acknowledges those it understands and wishes to use.
53
54 Virtqueues
55
56The mechanism for bulk data transport on virtio PCI devices is
57pretentiously called a virtqueue. Each device can have zero or
58more virtqueues: for example, the network device has one for
59transmit and one for receive.
60
61Each virtqueue occupies two or more physically-contiguous pages
62(defined, for the purposes of this specification, as 4096 bytes),
63and consists of three parts:
64
65
66+-------------------+-----------------------------------+-----------+
67| Descriptor Table | Available Ring (padding) | Used Ring |
68+-------------------+-----------------------------------+-----------+
69
70
71When the driver wants to send buffers to the device, it puts them
72in one or more slots in the descriptor table, and writes the
73descriptor indices into the available ring. It then notifies the
74device. When the device has finished with the buffers, it writes
75the descriptors into the used ring, and sends an interrupt.
76
77Specification
78
79 PCI Discovery
80
81Any PCI device with Vendor ID 0x1AF4, and Device ID 0x1000
82through 0x103F inclusive is a virtio device[footnote:
83The actual value within this range is ignored
84]. The device must also have a Revision ID of 0 to match this
85specification.
86
87The Subsystem Device ID indicates which virtio device is
88supported by the device. The Subsystem Vendor ID should reflect
89the PCI Vendor ID of the environment (it's currently only used
90for informational purposes by the guest).
91
92
93+----------------------+--------------------+---------------+
94| Subsystem Device ID | Virtio Device | Specification |
95+----------------------+--------------------+---------------+
96+----------------------+--------------------+---------------+
97| 1 | network card | Appendix C |
98+----------------------+--------------------+---------------+
99| 2 | block device | Appendix D |
100+----------------------+--------------------+---------------+
101| 3 | console | Appendix E |
102+----------------------+--------------------+---------------+
103| 4 | entropy source | Appendix F |
104+----------------------+--------------------+---------------+
105| 5 | memory ballooning | Appendix G |
106+----------------------+--------------------+---------------+
107| 6 | ioMemory | - |
108+----------------------+--------------------+---------------+
109| 9 | 9P transport | - |
110+----------------------+--------------------+---------------+
111
112
113 Device Configuration
114
115To configure the device, we use the first I/O region of the PCI
116device. This contains a virtio header followed by a
117device-specific region.
118
119There may be different widths of accesses to the I/O region; the “
120natural” access method for each field in the virtio header must
121be used (i.e. 32-bit accesses for 32-bit fields, etc), but the
122device-specific region can be accessed using any width accesses,
123and should obtain the same results.
124
125Note that this is possible because while the virtio header is PCI
126(i.e. little) endian, the device-specific region is encoded in
127the native endian of the guest (where such distinction is
128applicable).
129
130 Device Initialization Sequence
131
132We start with an overview of device initialization, then expand
133on the details of the device and how each step is performed.
134
135 Reset the device. This is not required on initial start up.
136
137 The ACKNOWLEDGE status bit is set: we have noticed the device.
138
139 The DRIVER status bit is set: we know how to drive the device.
140
141 Device-specific setup, including reading the Device Feature
142 Bits, discovery of virtqueues for the device, optional MSI-X
143 setup, and reading and possibly writing the virtio
144 configuration space.
145
146 The subset of Device Feature Bits understood by the driver is
147 written to the device.
148
149 The DRIVER_OK status bit is set.
150
151 The device can now be used (ie. buffers added to the
152 virtqueues)[footnote:
153Historically, drivers have used the device before steps 5 and 6.
154This is only allowed if the driver does not use any features
155which would alter this early use of the device.
156]
157
158If any of these steps go irrecoverably wrong, the guest should
159set the FAILED status bit to indicate that it has given up on the
160device (it can reset the device later to restart if desired).
161
162We now cover the fields required for general setup in detail.
163
164 Virtio Header
165
166The virtio header looks as follows:
167
168
169+------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
170| Bits || 32 | 32 | 32 | 16 | 16 | 16 | 8 | 8 |
171+------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
172| Read/Write || R | R+W | R+W | R | R+W | R+W | R+W | R |
173+------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
174| Purpose || Device | Guest | Queue | Queue | Queue | Queue | Device | ISR |
175| || Features bits 0:31 | Features bits 0:31 | Address | Size | Select | Notify | Status | Status |
176+------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
177
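Read as a C structure, the header above corresponds to something like the
following (an illustrative sketch, not a definition taken from this
specification; as noted above, these fields are PCI (little) endian):

struct virtio_pci_hdr {			/* illustrative name only */
	__le32 device_features;		/* R:   Device Features bits 0:31 */
	__le32 guest_features;		/* R+W: Guest Features bits 0:31 */
	__le32 queue_address;		/* R+W: Queue Address */
	__le16 queue_size;		/* R:   Queue Size */
	__le16 queue_select;		/* R+W: Queue Select */
	__le16 queue_notify;		/* R+W: Queue Notify */
	__u8   device_status;		/* R+W: Device Status */
	__u8   isr_status;		/* R:   ISR Status */
};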
178
179If MSI-X is enabled for the device, two additional fields
180immediately follow this header:
181
182
183+------------++----------------+--------+
184| Bits || 16 | 16 |
185 +----------------+--------+
186+------------++----------------+--------+
187| Read/Write || R+W | R+W |
188+------------++----------------+--------+
189| Purpose || Configuration | Queue |
190| (MSI-X) || Vector | Vector |
191+------------++----------------+--------+
192
193
194Finally, if the high feature bits are enabled (VIRTIO_F_FEATURES_HI), this
195is immediately followed by two additional fields:
196
197
198+------------++----------------------+----------------------+
199| Bits || 32 | 32 |
200+------------++----------------------+----------------------+
201| Read/Write || R | R+W |
202+------------++----------------------+----------------------+
203| Purpose || Device | Guest |
204| || Features bits 32:63 | Features bits 32:63 |
205+------------++----------------------+----------------------+
206
207
208Immediately following these general headers, there may be
209device-specific headers:
210
211
212+------------++--------------------+
213| Bits || Device Specific |
214 +--------------------+
215+------------++--------------------+
216| Read/Write || Device Specific |
217+------------++--------------------+
218| Purpose || Device Specific... |
219| || |
220+------------++--------------------+
221
222
223 Device Status
224
225The Device Status field is updated by the guest to indicate its
226progress. This provides a simple low-level diagnostic: it's most
227useful to imagine them hooked up to traffic lights on the console
228indicating the status of each device.
229
230The device can be reset by writing a 0 to this field, otherwise
231at least one bit should be set:
232
233 ACKNOWLEDGE (1) Indicates that the guest OS has found the
234 device and recognized it as a valid virtio device.
235
236 DRIVER (2) Indicates that the guest OS knows how to drive the
237 device. Under Linux, drivers can be loadable modules so there
238 may be a significant (or infinite) delay before setting this
239 bit.
240
241 DRIVER_OK (3) Indicates that the driver is set up and ready to
242 drive the device.
243
244 FAILED (128) Indicates that something went wrong in the guest,
245 and it has given up on the device. This could be an internal
246 error, or the driver didn't like the device for some reason, or
247 even a fatal error during device operation. The device must be
248 reset before attempting to re-initialize.
249
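As a minimal sketch of the initialization sequence described earlier
(not a normative interface), a driver might update the Device Status
field as below; virtio_write_status() is a hypothetical accessor that
writes one byte to the Device Status register.

#include <stdint.h>

#define VIRTIO_STATUS_ACKNOWLEDGE   1
#define VIRTIO_STATUS_DRIVER        2
#define VIRTIO_STATUS_DRIVER_OK     4
#define VIRTIO_STATUS_FAILED      128

/* Hypothetical helper: write one byte to the Device Status field. */
extern void virtio_write_status(void *iobase, uint8_t status);

static void example_driver_setup(void *iobase, int setup_ok)
{
        virtio_write_status(iobase, 0);                          /* reset */
        virtio_write_status(iobase, VIRTIO_STATUS_ACKNOWLEDGE);
        virtio_write_status(iobase, VIRTIO_STATUS_ACKNOWLEDGE |
                                    VIRTIO_STATUS_DRIVER);

        /* ... feature negotiation and virtqueue setup happen here ... */

        if (setup_ok)
                virtio_write_status(iobase, VIRTIO_STATUS_ACKNOWLEDGE |
                                            VIRTIO_STATUS_DRIVER |
                                            VIRTIO_STATUS_DRIVER_OK);
        else
                virtio_write_status(iobase, VIRTIO_STATUS_FAILED);
}
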
250 Feature Bits
251
252The least significant 31 bits of the first configuration field
253indicate the features that the device supports (the high bit is
254reserved, and will be used to indicate the presence of future
255feature bits elsewhere). If more than 31 feature bits are
256supported, the device indicates so by setting feature bit 31 (see
257[cha:Reserved-Feature-Bits]). The bits are allocated as follows:
258
259 0 to 23 Feature bits for the specific device type
260
261 24 to 40 Feature bits reserved for extensions to the queue and
262 feature negotiation mechanisms
263
264 41 to 63 Feature bits reserved for future extensions
265
266For example, feature bit 0 for a network device (i.e. Subsystem
267Device ID 1) indicates that the device supports checksumming of
268packets.
269
270The feature bits are negotiated: the device lists all the
271features it understands in the Device Features field, and the
272guest writes the subset that it understands into the Guest
273Features field. The only way to renegotiate is to reset the
274device.
275
276In particular, new fields in the device configuration header are
277indicated by offering a feature bit, so the guest can check
278before accessing that part of the configuration space.
279
280This allows for forwards and backwards compatibility: if the
281device is enhanced with a new feature bit, older guests will not
282write that feature bit back to the Guest Features field and it
283can go into backwards compatibility mode. Similarly, if a guest
284is enhanced with a feature that the device doesn't support, it
285will not see that feature bit in the Device Features field and
286can go into backwards compatibility mode (or, for poor
287implementations, set the FAILED Device Status bit).
288
289Access to feature bits 32 to 63 is enabled by the Guest by setting
290feature bit 31. If this bit is unset, the Device must assume that
291all feature bits > 31 are unset.
292
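A minimal sketch of this negotiation, assuming hypothetical 32-bit
accessors for the Device Features and Guest Features fields and an
arbitrary set of driver-supported bits, might look like this:

#include <stdint.h>

/* Hypothetical 32-bit accessors for the feature fields at offsets 0 and 4. */
extern uint32_t virtio_read_device_features(void *iobase);
extern void     virtio_write_guest_features(void *iobase, uint32_t features);

/* Example set of features this driver knows how to use (arbitrary). */
#define DRIVER_SUPPORTED_FEATURES  ((1u << 0) | (1u << 24))

static void example_negotiate_features(void *iobase)
{
        uint32_t device_features = virtio_read_device_features(iobase);

        /* The guest may only accept a subset of what the device offers. */
        uint32_t negotiated = device_features & DRIVER_SUPPORTED_FEATURES;

        virtio_write_guest_features(iobase, negotiated);
}
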
293 Configuration/Queue Vectors
294
295When MSI-X capability is present and enabled in the device
296(through standard PCI configuration space) 4 bytes at byte offset
29720 are used to map configuration change and queue interrupts to
298MSI-X vectors. In this case, the ISR Status field is unused, and
299device specific configuration starts at byte offset 24 in virtio
300header structure. When MSI-X capability is not enabled, device
301specific configuration starts at byte offset 20 in virtio header.
302
303Writing a valid MSI-X Table entry number, 0 to 0x7FF, to one of
304Configuration/Queue Vector registers, maps interrupts triggered
305by the configuration change/selected queue events respectively to
306the corresponding MSI-X vector. To disable interrupts for a
307specific event type, unmap it by writing a special NO_VECTOR
308value:
309
310/* Vector value used to disable MSI for queue */
311
312#define VIRTIO_MSI_NO_VECTOR 0xffff
313
314Reading these registers returns vector mapped to a given event,
315or NO_VECTOR if unmapped. All queue and configuration change
316events are unmapped by default.
317
318Note that mapping an event to vector might require allocating
319internal device resources, and might fail. Devices report such
320failures by returning the NO_VECTOR value when the relevant
321Vector field is read. After mapping an event to vector, the
322driver must verify success by reading the Vector field value: on
323success, the previously written value is returned, and on
324failure, NO_VECTOR is returned. If a mapping failure is detected,
325the driver can retry mapping with fewer vectors, or disable MSI-X.
326
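A sketch of mapping the configuration change event to an MSI-X vector,
including the required read-back check, is shown below; the accessor
names are assumptions, only the NO_VECTOR value and the verify-by-read
rule come from the text above.

#include <stdint.h>

#define VIRTIO_MSI_NO_VECTOR  0xffff

/* Hypothetical 16-bit accessors for the Configuration Vector register
 * (byte offset 20 when MSI-X is enabled). */
extern void     virtio_write_config_vector(void *iobase, uint16_t vector);
extern uint16_t virtio_read_config_vector(void *iobase);

/* Returns 0 on success, -1 if the device could not allocate resources. */
static int example_map_config_vector(void *iobase, uint16_t vector)
{
        virtio_write_config_vector(iobase, vector);

        /* The device reports allocation failure by returning NO_VECTOR. */
        if (virtio_read_config_vector(iobase) == VIRTIO_MSI_NO_VECTOR)
                return -1;

        return 0;
}
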
327 Virtqueue Configuration
328
329As a device can have zero or more virtqueues for bulk data
330transport (for example, the network driver has two), the driver
331needs to configure them as part of the device-specific
332configuration.
333
334This is done as follows, for each virtqueue a device has:
335
336 Write the virtqueue index (first queue is 0) to the Queue
337 Select field.
338
339 Read the virtqueue size from the Queue Size field, which is
340 always a power of 2. This controls how big the virtqueue is
341 (see below). If this field is 0, the virtqueue does not exist.
342
343 Allocate and zero virtqueue in contiguous physical memory, on a
344 4096 byte alignment. Write the physical address, divided by
345 4096, to the Queue Address field.[footnote:
346The 4096 is based on the x86 page size, but it's also large
347enough to ensure that the separate parts of the virtqueue are on
348separate cache lines.
349]
350
351 Optionally, if MSI-X capability is present and enabled on the
352 device, select a vector to use to request interrupts triggered
353 by virtqueue events. Write the MSI-X Table entry number
354 corresponding to this vector in Queue Vector field. Read the
355 Queue Vector field: on success, previously written value is
356 returned; on failure, NO_VECTOR value is returned.
357
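The per-virtqueue sequence above can be sketched as follows; the
register accessors, the aligned allocator and the physical-address
helper are hypothetical stand-ins, and vring_size() is the helper
given just below.

#include <stdint.h>
#include <stddef.h>

#define VIRTIO_PAGE_SIZE  4096

/* Hypothetical register accessors and a page-aligned, zeroed allocator. */
extern void     virtio_write_queue_select(void *iobase, uint16_t index);
extern uint16_t virtio_read_queue_size(void *iobase);
extern void     virtio_write_queue_address(void *iobase, uint32_t pfn);
extern void    *alloc_zeroed_aligned(size_t size, size_t align);
extern uint64_t virt_to_phys(void *va);
extern unsigned vring_size(unsigned int qsz);   /* as given below */

static int example_setup_virtqueue(void *iobase, uint16_t index)
{
        virtio_write_queue_select(iobase, index);

        uint16_t qsz = virtio_read_queue_size(iobase);
        if (qsz == 0)
                return -1;              /* this virtqueue does not exist */

        void *ring = alloc_zeroed_aligned(vring_size(qsz), VIRTIO_PAGE_SIZE);
        if (!ring)
                return -1;

        /* The Queue Address field takes the physical address divided by
         * the 4096-byte page size. */
        virtio_write_queue_address(iobase,
                        (uint32_t)(virt_to_phys(ring) / VIRTIO_PAGE_SIZE));
        return 0;
}
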
358The Queue Size field controls the total number of bytes required
359for the virtqueue according to the following formula:
360
361#define ALIGN(x) (((x) + 4095) & ~4095)
362
363static inline unsigned vring_size(unsigned int qsz)
364
365{
366
367 return ALIGN(sizeof(struct vring_desc)*qsz + sizeof(u16)*(2
368+ qsz))
369
370 + ALIGN(sizeof(struct vring_used_elem)*qsz);
371
372}
373
374This currently wastes some space with padding, but also allows
375future extensions. The virtqueue layout structure looks like this
376(qsz is the Queue Size field, which is a variable, so this code
377won't compile):
378
379struct vring {
380
381 /* The actual descriptors (16 bytes each) */
382
383 struct vring_desc desc[qsz];
384
385
386
387 /* A ring of available descriptor heads with free-running
388index. */
389
390 struct vring_avail avail;
391
392
393
394 // Padding to the next 4096 boundary.
395
396 char pad[];
397
398
399
400 // A ring of used descriptor heads with free-running index.
401
402 struct vring_used used;
403
404};
405
406 A Note on Virtqueue Endianness
407
408Note that the endian of these fields and everything else in the
409virtqueue is the native endian of the guest, not little-endian as
410PCI normally is. This makes for simpler guest code, and it is
411assumed that the host already has to be deeply aware of the guest
412endian so such an “endian-aware” device is not a significant
413issue.
414
415 Descriptor Table
416
417The descriptor table refers to the buffers the guest is using for
418the device. The addresses are physical addresses, and the buffers
419can be chained via the next field. Each descriptor describes a
420buffer which is read-only or write-only, but a chain of
421descriptors can contain both read-only and write-only buffers.
422
423No descriptor chain may be more than 2^32 bytes long in total.

struct vring_desc {
424
425 /* Address (guest-physical). */
426
427 u64 addr;
428
429 /* Length. */
430
431 u32 len;
432
433/* This marks a buffer as continuing via the next field. */
434
435#define VRING_DESC_F_NEXT 1
436
437/* This marks a buffer as write-only (otherwise read-only). */
438
439#define VRING_DESC_F_WRITE 2
440
441/* This means the buffer contains a list of buffer descriptors.
442*/
443
444#define VRING_DESC_F_INDIRECT 4
445
446 /* The flags as indicated above. */
447
448 u16 flags;
449
450 /* Next field if flags & NEXT */
451
452 u16 next;
453
454};
455
456The number of descriptors in the table is specified by the Queue
457Size field for this virtqueue.
458
459 <sub:Indirect-Descriptors>Indirect Descriptors
460
461Some devices benefit by concurrently dispatching a large number
462of large requests. The VIRTIO_RING_F_INDIRECT_DESC feature can be
463used to allow this (see [cha:Reserved-Feature-Bits]). To increase
464ring capacity it is possible to store a table of indirect
465descriptors anywhere in memory, and insert a descriptor in main
466virtqueue (with flags&INDIRECT on) that refers to memory buffer
467containing this indirect descriptor table; fields addr and len
468refer to the indirect table address and length in bytes,
469respectively. The indirect table layout structure looks like this
470(len is the length of the descriptor that refers to this table,
471which is a variable, so this code won't compile):
472
473struct indirect_descriptor_table {
474
475 /* The actual descriptors (16 bytes each) */
476
477 struct vring_desc desc[len / 16];
478
479};
480
481The first indirect descriptor is located at start of the indirect
482descriptor table (index 0), additional indirect descriptors are
483chained by next field. An indirect descriptor without next field
484(with flags&NEXT off) signals the end of the indirect descriptor
485table, and transfers control back to the main virtqueue. An
486indirect descriptor can not refer to another indirect descriptor
487table (flags&INDIRECT must be off). A single indirect descriptor
488table can include both read-only and write-only descriptors;
489write-only flag (flags&WRITE) in the descriptor that refers to it
490is ignored.
491
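A sketch of building a two-element indirect descriptor table and
pointing a single main-ring descriptor at it (struct vring_desc as
defined above; the helper and its arguments are illustrative):

#include <stdint.h>

/* Flag values as defined in the Descriptor Table section above. */
#define VRING_DESC_F_NEXT      1
#define VRING_DESC_F_WRITE     2
#define VRING_DESC_F_INDIRECT  4

struct vring_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
};

/* Fill an indirect table describing one read-only and one write-only
 * element, then make the main-ring descriptor 'main' refer to it.
 * 'table' must hold at least two descriptors; addresses are
 * guest-physical. */
static void example_fill_indirect(struct vring_desc *main,
                                  struct vring_desc *table,
                                  uint64_t table_phys,
                                  uint64_t out_phys, uint32_t out_len,
                                  uint64_t in_phys, uint32_t in_len)
{
        table[0].addr  = out_phys;
        table[0].len   = out_len;
        table[0].flags = VRING_DESC_F_NEXT;         /* read-only element */
        table[0].next  = 1;

        table[1].addr  = in_phys;
        table[1].len   = in_len;
        table[1].flags = VRING_DESC_F_WRITE;        /* last: no NEXT flag */
        table[1].next  = 0;

        main->addr  = table_phys;
        main->len   = 2 * sizeof(struct vring_desc);
        main->flags = VRING_DESC_F_INDIRECT;
        main->next  = 0;
}
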
492 Available Ring
493
494The available ring refers to what descriptors we are offering the
495device: it refers to the head of a descriptor chain. The “flags”
496field is currently 0 or 1: 1 indicating that we do not need an
497interrupt when the device consumes a descriptor from the
498available ring. Alternatively, the guest can ask the device to
499delay interrupts until an entry with an index specified by the “
500used_event” field is written in the used ring (equivalently,
501until the idx field in the used ring will reach the value
502used_event + 1). The method employed by the device is controlled
503by the VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits]
504). This interrupt suppression is merely an optimization; it may
505not suppress interrupts entirely.
506
507The “idx” field indicates where we would put the next descriptor
508entry (modulo the ring size). This starts at 0, and increases.
509
510struct vring_avail {
511
512#define VRING_AVAIL_F_NO_INTERRUPT 1
513
514 u16 flags;
515
516 u16 idx;
517
518 u16 ring[qsz]; /* qsz is the Queue Size field read from device
519*/
520
521 u16 used_event;
522
523};
524
525 Used Ring
526
527The used ring is where the device returns buffers once it is done
528with them. The flags field can be used by the device to hint that
529no notification is necessary when the guest adds to the available
530ring. Alternatively, the “avail_event” field can be used by the
531device to hint that no notification is necessary until an entry
532with an index specified by the “avail_event” is written in the
533available ring (equivalently, until the idx field in the
534available ring will reach the value avail_event + 1). The method
535employed by the device is controlled by the guest through the
536VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits]
537). [footnote:
538These fields are kept here because this is the only part of the
539virtqueue written by the device
540].
541
542Each entry in the ring is a pair: the head entry of the
543descriptor chain describing the buffer (this matches an entry
544placed in the available ring by the guest earlier), and the total
545number of bytes written into the buffer. The latter is extremely useful
546for guests using untrusted buffers: if you do not know exactly
547how much has been written by the device, you usually have to zero
548the buffer to ensure no data leakage occurs.
549
550/* u32 is used here for ids for padding reasons. */
551
552struct vring_used_elem {
553
554 /* Index of start of used descriptor chain. */
555
556 u32 id;
557
558 /* Total length of the descriptor chain which was used
559(written to) */
560
561 u32 len;
562
563};
564
565
566
567struct vring_used {
568
569#define VRING_USED_F_NO_NOTIFY 1
570
571 u16 flags;
572
573 u16 idx;
574
575 struct vring_used_elem ring[qsz];
576
577 u16 avail_event;
578
579};
580
581 Helpers for Managing Virtqueues
582
583The Linux Kernel Source code contains the definitions above and
584helper routines in a more usable form, in
585include/linux/virtio_ring.h. This was explicitly licensed by IBM
586and Red Hat under the (3-clause) BSD license so that it can be
587freely used by all other projects, and is reproduced (with slight
588variation to remove Linux assumptions) in Appendix A.
589
590 Device Operation
591
592There are two parts to device operation: supplying new buffers to
593the device, and processing used buffers from the device. As an
594example, the virtio network device has two virtqueues: the
595transmit virtqueue and the receive virtqueue. The driver adds
596outgoing (read-only) packets to the transmit virtqueue, and then
597frees them after they are used. Similarly, incoming (write-only)
598buffers are added to the receive virtqueue, and processed after
599they are used.
600
601 Supplying Buffers to The Device
602
603Actual transfer of buffers from the guest OS to the device
604operates as follows:
605
606 Place the buffer(s) into free descriptor(s).
607
608 If there are no free descriptors, the guest may choose to
609 notify the device even if notifications are suppressed (to
610 reduce latency).[footnote:
611The Linux drivers do this only for read-only buffers: for
612write-only buffers, it is assumed that the driver is merely
613trying to keep the receive buffer ring full, and no notification
614of this expected condition is necessary.
615]
616
617 Place the id of the buffer in the next ring entry of the
618 available ring.
619
620 The steps (1) and (2) may be performed repeatedly if batching
621 is possible.
622
623 A memory barrier should be executed to ensure the device sees
624 the updated descriptor table and available ring before the next
625 step.
626
627 The available “idx” field should be increased by the number of
628 entries added to the available ring.
629
630 A memory barrier should be executed to ensure that we update
631 the idx field before checking for notification suppression.
632
633 If notifications are not suppressed, the device should be
634 notified of the new buffers.
635
636Note that the above code does not take precautions against the
637available ring buffer wrapping around: this is not possible since
638the ring buffer is the same size as the descriptor table, so step
639(1) will prevent such a condition.
640
641In addition, the maximum queue size is 32768 (it must be a power
642of 2 which fits in 16 bits), so the 16-bit “idx” value can always
643distinguish between a full and empty buffer.
644
645Here is a description of each stage in more detail.
646
647 Placing Buffers Into The Descriptor Table
648
649A buffer consists of zero or more read-only physically-contiguous
650elements followed by zero or more physically-contiguous
651write-only elements (it must have at least one element). This
652algorithm maps it into the descriptor table:
653
654 for each buffer element, b:
655
656 Get the next free descriptor table entry, d
657
658 Set d.addr to the physical address of the start of b
659
660 Set d.len to the length of b.
661
662 If b is write-only, set d.flags to VRING_DESC_F_WRITE,
663 otherwise 0.
664
665 If there is a buffer element after this:
666
667 Set d.next to the index of the next free descriptor element.
668
669 Set the VRING_DESC_F_NEXT bit in d.flags.
670
671In practice, the d.next fields are usually used to chain free
672descriptors, and a separate count kept to check there are enough
673free descriptors before beginning the mappings.
674
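A simplified sketch of this mapping step, ignoring free-descriptor
bookkeeping and assuming the elements go into consecutive descriptors,
might look like this:

#include <stdint.h>

#define VRING_DESC_F_NEXT   1
#define VRING_DESC_F_WRITE  2

struct vring_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
};

struct buffer_elem {
        uint64_t phys;        /* guest-physical address of this element */
        uint32_t len;
        int      write_only;  /* nonzero for device-writable elements   */
};

/* Map 'count' buffer elements into consecutive free descriptors starting
 * at 'head' (free-descriptor management is omitted).  Returns the index
 * of the head descriptor of the chain. */
static uint16_t example_map_buffer(struct vring_desc *desc, uint16_t head,
                                   const struct buffer_elem *elems,
                                   unsigned count)
{
        for (unsigned i = 0; i < count; i++) {
                struct vring_desc *d = &desc[head + i];

                d->addr  = elems[i].phys;
                d->len   = elems[i].len;
                d->flags = elems[i].write_only ? VRING_DESC_F_WRITE : 0;

                if (i + 1 < count) {          /* chain to the next element */
                        d->next   = head + i + 1;
                        d->flags |= VRING_DESC_F_NEXT;
                }
        }
        return head;
}
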
675 Updating The Available Ring
676
677The head of the buffer we mapped is the first d in the algorithm
678above. A naive implementation would do the following:
679
680avail->ring[avail->idx % qsz] = head;
681
682However, in general we can add many descriptors before we update
683the “idx” field (at which point they become visible to the
684device), so we keep a counter of how many we've added:
685
686avail->ring[(avail->idx + added++) % qsz] = head;
687
688 Updating The Index Field
689
690Once the idx field of the virtqueue is updated, the device will
691be able to access the descriptor entries we've created and the
692memory they refer to. This is why a memory barrier is generally
693used before the idx update, to ensure it sees the most up-to-date
694copy.
695
696The idx field always increments, and we let it wrap naturally at
69765536:
698
699avail->idx += added;
700
701 <sub:Notifying-The-Device>Notifying The Device
702
703Device notification occurs by writing the 16-bit virtqueue index
704of this virtqueue to the Queue Notify field of the virtio header
705in the first I/O region of the PCI device. This can be expensive,
706however, so the device can suppress such notifications if it
707doesn't need them. We have to be careful to expose the new idx
708value before checking the suppression flag: it's OK to notify
709gratuitously, but not to omit a required notification. So again,
710we use a memory barrier here before reading the flags or the
711avail_event field.
712
713If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated, and if
714the VRING_USED_F_NO_NOTIFY flag is not set, we go ahead and write
715to the Queue Notify field.
716
717If the VIRTIO_F_RING_EVENT_IDX feature is negotiated, we read the
718avail_event field in the used ring structure. If the available
719index crossed the avail_event field value since the last
720notification, we go ahead and write to the Queue Notify field.
721The avail_event field wraps naturally at 65536 as well:
722
723(u16)(new_idx - avail_event - 1) < (u16)(new_idx - old_idx)
724
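Putting the two cases together, a driver's notification decision can be
sketched as below; the helper names are illustrative, and the
event-index test is the same expression as vring_need_event() in
Appendix A.

#include <stdint.h>

#define VRING_USED_F_NO_NOTIFY  1

/* Same test as vring_need_event() in Appendix A. */
static int example_need_notify(uint16_t avail_event,
                               uint16_t new_idx, uint16_t old_idx)
{
        return (uint16_t)(new_idx - avail_event - 1) <
               (uint16_t)(new_idx - old_idx);
}

/* Decide whether to write the virtqueue index to the Queue Notify field.
 * 'event_idx_negotiated' reflects VIRTIO_F_RING_EVENT_IDX; used_flags and
 * avail_event are read from the used ring after a memory barrier. */
static int example_should_notify(int event_idx_negotiated,
                                 uint16_t used_flags, uint16_t avail_event,
                                 uint16_t new_idx, uint16_t old_idx)
{
        if (!event_idx_negotiated)
                return !(used_flags & VRING_USED_F_NO_NOTIFY);

        return example_need_notify(avail_event, new_idx, old_idx);
}
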
725 <sub:Receiving-Used-Buffers>Receiving Used Buffers From The
726 Device
727
728Once the device has used a buffer (read from or written to it, or
729parts of both, depending on the nature of the virtqueue and the
730device), it sends an interrupt, following an algorithm very
731similar to the algorithm used for the driver to send the device a
732buffer:
733
734 Write the head descriptor number to the next field in the used
735 ring.
736
737 Update the used ring idx.
738
739 Determine whether an interrupt is necessary:
740
741 If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated: check
742 whether the VRING_AVAIL_F_NO_INTERRUPT flag is not set in
743 avail->flags
744
745 If the VIRTIO_F_RING_EVENT_IDX feature is negotiated: check
746 whether the used index crossed the used_event field value
747 since the last update. The used_event field wraps naturally
748 at 65536 as well:(u16)(new_idx - used_event - 1) < (u16)(new_idx - old_idx)
749
750 If an interrupt is necessary:
751
752 If MSI-X capability is disabled:
753
754 Set the lower bit of the ISR Status field for the device.
755
756 Send the appropriate PCI interrupt for the device.
757
758 If MSI-X capability is enabled:
759
760 Request the appropriate MSI-X interrupt message for the
761 device, Queue Vector field sets the MSI-X Table entry
762 number.
763
764 If Queue Vector field value is NO_VECTOR, no interrupt
765 message is requested for this event.
766
767The guest interrupt handler should:
768
769 If MSI-X capability is disabled: read the ISR Status field,
770 which will reset it to zero. If the lower bit is zero, the
771 interrupt was not for this device. Otherwise, the guest driver
772 should look through the used rings of each virtqueue for the
773 device, to see if any progress has been made by the device
774 which requires servicing.
775
776 If MSI-X capability is enabled: look through the used rings of
777 each virtqueue mapped to the specific MSI-X vector for the
778 device, to see if any progress has been made by the device
779 which requires servicing.
780
781For each ring, the guest should then disable interrupts by
782setting the VRING_AVAIL_F_NO_INTERRUPT flag in the avail
783structure, if required. It can then process used ring entries,
784finally enabling interrupts by clearing the flag or updating
785the EVENT_IDX field in the available structure. The guest
786should then execute a memory barrier, and then recheck the ring
787empty condition. This is necessary to handle the case where,
788after the last check and before enabling interrupts, an
789interrupt has been suppressed by the device:
790
791vring_disable_interrupts(vq);
792
793for (;;) {
794
795 if (vq->last_seen_used == vring.used->idx) {
796
797 vring_enable_interrupts(vq);
798
799 mb();
800
801 if (vq->last_seen_used == vring.used->idx)
802
803 break;
804
805 }
806
807 struct vring_used_elem *e =
808vring.used->ring[vq->last_seen_used%vsz];
809
810 process_buffer(e);
811
812 vq->last_seen_used++;
813
814}
815
816 Dealing With Configuration Changes
817
818Some virtio PCI devices can change the device configuration
819state, as reflected in the virtio header in the PCI configuration
820space. In this case:
821
822 If MSI-X capability is disabled: an interrupt is delivered and
823 the second highest bit is set in the ISR Status field to
824 indicate that the driver should re-examine the configuration
825 space. Note that a single interrupt can indicate both that one
826 or more virtqueue has been used and that the configuration
827 space has changed: even if the config bit is set, virtqueues
828 must be scanned.
829
830 If MSI-X capability is enabled: an interrupt message is
831 requested. The Configuration Vector field sets the MSI-X Table
832 entry number to use. If Configuration Vector field value is
833 NO_VECTOR, no interrupt message is requested for this event.
834
835Creating New Device Types
836
837Various considerations are necessary when creating a new device
838type:
839
840 How Many Virtqueues?
841
842It is possible that a very simple device will operate entirely
843through its configuration space, but most will need at least one
844virtqueue in which it will place requests. A device with both
845input and output (eg. console and network devices described here)
846needs two queues: one which the driver fills with buffers to
847receive input, and one into which the driver places buffers to
848transmit output.
849
850 What Configuration Space Layout?
851
852Configuration space is generally used for rarely-changing or
853initialization-time parameters. But it is a limited resource, so
854it might be better to use a virtqueue to update configuration
855information (the network device does this for filtering,
856otherwise the table in the config space could potentially be very
857large).
858
859Note that this space is generally the guest's native endian,
860rather than PCI's little-endian.
861
862 What Device Number?
863
864Currently device numbers are assigned quite freely: a simple
865request mail to the author of this document or the Linux
866virtualization mailing list[footnote:
867
868https://lists.linux-foundation.org/mailman/listinfo/virtualization
869] will be sufficient to secure a unique one.
870
871Meanwhile for experimental drivers, use 65535 and work backwards.
872
873 How many MSI-X vectors?
874
875Using the optional MSI-X capability, devices can speed up
876interrupt processing by removing the need for the guest driver to
877read the ISR Status register (which might be expensive), by
878reducing interrupt sharing between devices and queues within the
879device, and by handling interrupts from multiple CPUs. However, some
880systems impose a limit (which might be as low as 256) on the
881total number of MSI-X vectors that can be allocated to all
882devices. Devices and/or device drivers should take this into
883account, limiting the number of vectors used unless the device is
884expected to cause a high volume of interrupts. Devices can
885control the number of vectors used by limiting the MSI-X Table
886Size or not presenting MSI-X capability in PCI configuration
887space. Drivers can control this by mapping events to as small a
888number of vectors as possible, or by disabling MSI-X capability
889altogether.
890
891 Message Framing
892
893The descriptors used for a buffer should not affect the semantics
894of the message, except for the total length of the buffer. For
895example, a network buffer consists of a 10 byte header followed
896by the network packet. Whether this is presented in the ring
897descriptor chain as (say) a 10 byte buffer and a 1514 byte
898buffer, or a single 1524 byte buffer, or even three buffers,
899should have no effect.
900
901In particular, no implementation should use the descriptor
902boundaries to determine the size of any header in a request.[footnote:
903The current qemu device implementations mistakenly insist that
904the first descriptor cover the header in these cases exactly, so
905a cautious driver should arrange it so.
906]
907
908 Device Improvements
909
910Any change to configuration space, or new virtqueues, or
911behavioural changes, should be indicated by negotiation of a new
912feature bit. This establishes clarity[footnote:
913Even if it does mean documenting design or implementation
914mistakes!
915] and avoids future expansion problems.
916
917Clusters of functionality which are always implemented together
918can use a single bit, but if one feature makes sense without the
919others they should not be gratuitously grouped together to
920conserve feature bits. We can always extend the spec when the
921first person needs more than 24 feature bits for their device.
922
923[LaTeX Command: printnomenclature]
924
925Appendix A: virtio_ring.h
926
927#ifndef VIRTIO_RING_H
928
929#define VIRTIO_RING_H
930
931/* An interface for efficient virtio implementation.
932
933 *
934
935 * This header is BSD licensed so anyone can use the definitions
936
937 * to implement compatible drivers/servers.
938
939 *
940
941 * Copyright 2007, 2009, IBM Corporation
942
943 * Copyright 2011, Red Hat, Inc
944
945 * All rights reserved.
946
947 *
948
949 * Redistribution and use in source and binary forms, with or
950without
951
952 * modification, are permitted provided that the following
953conditions
954
955 * are met:
956
957 * 1. Redistributions of source code must retain the above
958copyright
959
960 * notice, this list of conditions and the following
961disclaimer.
962
963 * 2. Redistributions in binary form must reproduce the above
964copyright
965
966 * notice, this list of conditions and the following
967disclaimer in the
968
969 * documentation and/or other materials provided with the
970distribution.
971
972 * 3. Neither the name of IBM nor the names of its contributors
973
974 * may be used to endorse or promote products derived from
975this software
976
977 * without specific prior written permission.
978
979 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
980CONTRIBUTORS ``AS IS'' AND
981
982 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
983TO, THE
984
985 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
986PARTICULAR PURPOSE
987
988 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE
989LIABLE
990
991 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
992CONSEQUENTIAL
993
994 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
995SUBSTITUTE GOODS
996
997 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
998INTERRUPTION)
999
1000 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1001CONTRACT, STRICT
1002
1003 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1004IN ANY WAY
1005
1006 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1007POSSIBILITY OF
1008
1009 * SUCH DAMAGE.
1010
1011 */
1012
1013
1014
1015/* This marks a buffer as continuing via the next field. */
1016
1017#define VRING_DESC_F_NEXT 1
1018
1019/* This marks a buffer as write-only (otherwise read-only). */
1020
1021#define VRING_DESC_F_WRITE 2
1022
1023
1024
1025/* The Host uses this in used->flags to advise the Guest: don't
1026kick me
1027
1028 * when you add a buffer. It's unreliable, so it's simply an
1029
1030 * optimization. Guest will still kick if it's out of buffers.
1031*/
1032
1033#define VRING_USED_F_NO_NOTIFY 1
1034
1035/* The Guest uses this in avail->flags to advise the Host: don't
1036
1037 * interrupt me when you consume a buffer. It's unreliable, so
1038it's
1039
1040 * simply an optimization. */
1041
1042#define VRING_AVAIL_F_NO_INTERRUPT 1
1043
1044
1045
1046/* Virtio ring descriptors: 16 bytes.
1047
1048 * These can chain together via "next". */
1049
1050struct vring_desc {
1051
1052 /* Address (guest-physical). */
1053
1054 uint64_t addr;
1055
1056 /* Length. */
1057
1058 uint32_t len;
1059
1060 /* The flags as indicated above. */
1061
1062 uint16_t flags;
1063
1064 /* We chain unused descriptors via this, too */
1065
1066 uint16_t next;
1067
1068};
1069
1070
1071
1072struct vring_avail {
1073
1074 uint16_t flags;
1075
1076 uint16_t idx;
1077
1078 uint16_t ring[];
1079
1080 uint16_t used_event;
1081
1082};
1083
1084
1085
1086/* u32 is used here for ids for padding reasons. */
1087
1088struct vring_used_elem {
1089
1090 /* Index of start of used descriptor chain. */
1091
1092 uint32_t id;
1093
1094 /* Total length of the descriptor chain which was written
1095to. */
1096
1097 uint32_t len;
1098
1099};
1100
1101
1102
1103struct vring_used {
1104
1105 uint16_t flags;
1106
1107 uint16_t idx;
1108
1109 struct vring_used_elem ring[];
1110
1111 uint16_t avail_event;
1112
1113};
1114
1115
1116
1117struct vring {
1118
1119 unsigned int num;
1120
1121
1122
1123 struct vring_desc *desc;
1124
1125 struct vring_avail *avail;
1126
1127 struct vring_used *used;
1128
1129};
1130
1131
1132
1133/* The standard layout for the ring is a continuous chunk of
1134memory which
1135
1136 * looks like this. We assume num is a power of 2.
1137
1138 *
1139
1140 * struct vring {
1141
1142 * // The actual descriptors (16 bytes each)
1143
1144 * struct vring_desc desc[num];
1145
1146 *
1147
1148 * // A ring of available descriptor heads with free-running
1149index.
1150
1151 * __u16 avail_flags;
1152
1153 * __u16 avail_idx;
1154
1155 * __u16 available[num];
1156
1157 *
1158
1159 * // Padding to the next align boundary.
1160
1161 * char pad[];
1162
1163 *
1164
1165 * // A ring of used descriptor heads with free-running
1166index.
1167
1168 * __u16 used_flags;
1169
1170 * __u16 used_idx;
1171
1172 * struct vring_used_elem used[num];
1173
1174 * };
1175
1176 * Note: for virtio PCI, align is 4096.
1177
1178 */
1179
1180static inline void vring_init(struct vring *vr, unsigned int num,
1181void *p,
1182
1183 unsigned long align)
1184
1185{
1186
1187 vr->num = num;
1188
1189 vr->desc = p;
1190
1191 vr->avail = p + num*sizeof(struct vring_desc);
1192
1193 vr->used = (void *)(((unsigned long)&vr->avail->ring[num]
1194
1195 + align-1)
1196
1197 & ~(align - 1));
1198
1199}
1200
1201
1202
1203static inline unsigned vring_size(unsigned int num, unsigned long
1204align)
1205
1206{
1207
1208 return ((sizeof(struct vring_desc)*num +
1209sizeof(uint16_t)*(2+num)
1210
1211 + align - 1) & ~(align - 1))
1212
1213 + sizeof(uint16_t)*3 + sizeof(struct
1214vring_used_elem)*num;
1215
1216}
1217
1218
1219
1220static inline int vring_need_event(uint16_t event_idx, uint16_t
1221new_idx, uint16_t old_idx)
1222
1223{
1224
1225 return (uint16_t)(new_idx - event_idx - 1) <
1226(uint16_t)(new_idx - old_idx);
1227
1228}
1229
1230#endif /* VIRTIO_RING_H */
1231
1232<cha:Reserved-Feature-Bits>Appendix B: Reserved Feature Bits
1233
1234Currently there are five device-independent feature bits defined:
1235
1236 VIRTIO_F_NOTIFY_ON_EMPTY (24) Negotiating this feature
1237 indicates that the driver wants an interrupt if the device runs
1238 out of available descriptors on a virtqueue, even though
1239 interrupts are suppressed using the VRING_AVAIL_F_NO_INTERRUPT
1240 flag or the used_event field. An example of this is the
1241 networking driver: it doesn't need to know every time a packet
1242 is transmitted, but it does need to free the transmitted
1243 packets a finite time after they are transmitted. It can avoid
1244 using a timer if the device interrupts it when all the packets
1245 are transmitted.
1246
1247 VIRTIO_F_RING_INDIRECT_DESC (28) Negotiating this feature
1248 indicates that the driver can use descriptors with the
1249 VRING_DESC_F_INDIRECT flag set, as described in [sub:Indirect-Descriptors]
1250 .
1251
1252 VIRTIO_F_RING_EVENT_IDX(29) This feature enables the used_event
1253 and the avail_event fields. If set, it indicates that the
1254 device should ignore the flags field in the available ring
1255 structure. Instead, the used_event field in this structure is
1256 used by guest to suppress device interrupts. Further, the
1257 driver should ignore the flags field in the used ring
1258 structure. Instead, the avail_event field in this structure is
1259 used by the device to suppress notifications. If unset, the
1260 driver should ignore the used_event field; the device should
1261 ignore the avail_event field; the flags field is used.
1262
1263 VIRTIO_F_BAD_FEATURE(30) This feature should never be
1264 negotiated by the guest; doing so is an indication that the
1265 guest is faulty[footnote:
1266An experimental virtio PCI driver contained in Linux version
12672.6.25 had this problem, and this feature bit can be used to
1268detect it.
1269]
1270
1271 VIRTIO_F_FEATURES_HIGH(31) This feature indicates that the
1272 device supports feature bits 32:63. If unset, feature bits
1273 32:63 are unset.
1274
1275Appendix C: Network Device
1276
1277The virtio network device is a virtual ethernet card, and is the
1278most complex of the devices supported so far by virtio. It has
1279enhanced rapidly and demonstrates clearly how support for new
1280features should be added to an existing device. Empty buffers are
1281placed in one virtqueue for receiving packets, and outgoing
1282packets are enqueued into another for transmission in that order.
1283A third command queue is used to control advanced filtering
1284features.
1285
1286 Configuration
1287
1288 Subsystem Device ID 1
1289
1290 Virtqueues 0:receiveq. 1:transmitq. 2:controlq[footnote:
1291Only if VIRTIO_NET_F_CTRL_VQ set
1292]
1293
1294 Feature bits
1295
1296 VIRTIO_NET_F_CSUM (0) Device handles packets with partial
1297 checksum
1298
1299 VIRTIO_NET_F_GUEST_CSUM (1) Guest handles packets with partial
1300 checksum
1301
1302 VIRTIO_NET_F_MAC (5) Device has given MAC address.
1303
1304 VIRTIO_NET_F_GSO (6) (Deprecated) device handles packets with
1305 any GSO type.[footnote:
1306It was supposed to indicate segmentation offload support, but
1307upon further investigation it became clear that multiple bits
1308were required.
1309]
1310
1311 VIRTIO_NET_F_GUEST_TSO4 (7) Guest can receive TSOv4.
1312
1313 VIRTIO_NET_F_GUEST_TSO6 (8) Guest can receive TSOv6.
1314
1315 VIRTIO_NET_F_GUEST_ECN (9) Guest can receive TSO with ECN.
1316
1317 VIRTIO_NET_F_GUEST_UFO (10) Guest can receive UFO.
1318
1319 VIRTIO_NET_F_HOST_TSO4 (11) Device can receive TSOv4.
1320
1321 VIRTIO_NET_F_HOST_TSO6 (12) Device can receive TSOv6.
1322
1323 VIRTIO_NET_F_HOST_ECN (13) Device can receive TSO with ECN.
1324
1325 VIRTIO_NET_F_HOST_UFO (14) Device can receive UFO.
1326
1327 VIRTIO_NET_F_MRG_RXBUF (15) Guest can merge receive buffers.
1328
1329 VIRTIO_NET_F_STATUS (16) Configuration status field is
1330 available.
1331
1332 VIRTIO_NET_F_CTRL_VQ (17) Control channel is available.
1333
1334 VIRTIO_NET_F_CTRL_RX (18) Control channel RX mode support.
1335
1336 VIRTIO_NET_F_CTRL_VLAN (19) Control channel VLAN filtering.
1337
1338 Device configuration layout Two configuration fields are
1339 currently defined. The mac address field always exists (though
1340 is only valid if VIRTIO_NET_F_MAC is set), and the status field
1341 only exists if VIRTIO_NET_F_STATUS is set. Only one bit is
1342 currently defined for the status field: VIRTIO_NET_S_LINK_UP.

#define VIRTIO_NET_S_LINK_UP 1
1343
1344
1345
1346struct virtio_net_config {
1347
1348 u8 mac[6];
1349
1350 u16 status;
1351
1352};
1353
1354 Device Initialization
1355
1356 The initialization routine should identify the receive and
1357 transmission virtqueues.
1358
1359 If the VIRTIO_NET_F_MAC feature bit is set, the configuration
1360 space “mac” entry indicates the “physical” address of the
1361 network card, otherwise a private MAC address should be
1362 assigned. All guests are expected to negotiate this feature if
1363 it is set.
1364
1365 If the VIRTIO_NET_F_CTRL_VQ feature bit is negotiated, identify
1366 the control virtqueue.
1367
1368 If the VIRTIO_NET_F_STATUS feature bit is negotiated, the link
1369 status can be read from the bottom bit of the “status” config
1370 field. Otherwise, the link should be assumed active.
1371
1372 The receive virtqueue should be filled with receive buffers.
1373 This is described in detail below in “Setting Up Receive
1374 Buffers”.
1375
1376 A driver can indicate that it will generate checksumless
1377 packets by negotiating the VIRTIO_NET_F_CSUM feature. This “
1378 checksum offload” is a common feature on modern network cards.
1379
1380 If that feature is negotiated, a driver can use TCP or UDP
1381 segmentation offload by negotiating the VIRTIO_NET_F_HOST_TSO4
1382 (IPv4 TCP), VIRTIO_NET_F_HOST_TSO6 (IPv6 TCP) and
1383 VIRTIO_NET_F_HOST_UFO (UDP fragmentation) features. It should
1384 not send TCP packets requiring segmentation offload which have
1385 the Explicit Congestion Notification bit set, unless the
1386 VIRTIO_NET_F_HOST_ECN feature is negotiated.[footnote:
1387This is a common restriction in real, older network cards.
1388]
1389
1390 The converse features are also available: a driver can save the
1391 virtual device some work by negotiating these features.[footnote:
1392For example, a network packet transported between two guests on
1393the same system may not require checksumming at all, nor
1394segmentation, if both guests are amenable.
1395] The VIRTIO_NET_F_GUEST_CSUM feature indicates that partially
1396 checksummed packets can be received, and if it can do that then
1397 the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1398 VIRTIO_NET_F_GUEST_UFO and VIRTIO_NET_F_GUEST_ECN are the input
1399 equivalents of the features described above. See “Receiving
1400 Packets” below.
1401
1402 Device Operation
1403
1404Packets are transmitted by placing them in the transmitq, and
1405buffers for incoming packets are placed in the receiveq. In each
1406case, the packet itself is preceded by a header:
1407
1408struct virtio_net_hdr {
1409
1410#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1
1411
1412 u8 flags;
1413
1414#define VIRTIO_NET_HDR_GSO_NONE 0
1415
1416#define VIRTIO_NET_HDR_GSO_TCPV4 1
1417
1418#define VIRTIO_NET_HDR_GSO_UDP 3
1419
1420#define VIRTIO_NET_HDR_GSO_TCPV6 4
1421
1422#define VIRTIO_NET_HDR_GSO_ECN 0x80
1423
1424 u8 gso_type;
1425
1426 u16 hdr_len;
1427
1428 u16 gso_size;
1429
1430 u16 csum_start;
1431
1432 u16 csum_offset;
1433
1434/* Only if VIRTIO_NET_F_MRG_RXBUF: */
1435
1436 u16 num_buffers;
1437
1438};
1439
1440The controlq is used to control device features such as
1441filtering.
1442
1443 Packet Transmission
1444
1445Transmitting a single packet is simple, but varies depending on
1446the different features the driver negotiated.
1447
1448 If the driver negotiated VIRTIO_NET_F_CSUM, and the packet has
1449 not been fully checksummed, then the virtio_net_hdr's fields
1450 are set as follows. Otherwise, the packet must be fully
1451 checksummed, and flags is zero.
1452
1453 flags has the VIRTIO_NET_HDR_F_NEEDS_CSUM set,
1454
1455 <ite:csum_start-is-set>csum_start is set to the offset within
1456 the packet to begin checksumming, and
1457
1458 csum_offset indicates how many bytes after the csum_start the
1459 new (16 bit ones' complement) checksum should be placed.[footnote:
1460For example, consider a partially checksummed TCP (IPv4) packet.
1461It will have a 14 byte ethernet header and 20 byte IP header
1462followed by the TCP header (with the TCP checksum field 16 bytes
1463into that header). csum_start will be 14+20 = 34 (the TCP
1464checksum includes the header), and csum_offset will be 16. The
1465value in the TCP checksum field will be the sum of the TCP pseudo
1466header, so that replacing it by the ones' complement checksum of
1467the TCP header and body will give the correct result.
1468]
1469
1470 <enu:If-the-driver>If the driver negotiated
1471 VIRTIO_NET_F_HOST_TSO4, TSO6 or UFO, and the packet requires
1472 TCP segmentation or UDP fragmentation, then the “gso_type”
1473 field is set to VIRTIO_NET_HDR_GSO_TCPV4, TCPV6 or UDP.
1474 (Otherwise, it is set to VIRTIO_NET_HDR_GSO_NONE). In this
1475 case, packets larger than 1514 bytes can be transmitted: the
1476 metadata indicates how to replicate the packet header to cut it
1477 into smaller packets. The other gso fields are set:
1478
1479 hdr_len is a hint to the device as to how much of the header
1480 needs to be kept to copy into each packet, usually set to the
1481 length of the headers, including the transport header.[footnote:
1482Due to various bugs in implementations, this field is not useful
1483as a guarantee of the transport header size.
1484]
1485
1486 gso_size is the size of the packet beyond that header (ie.
1487 MSS).
1488
1489 If the driver negotiated the VIRTIO_NET_F_HOST_ECN feature, the
1490 VIRTIO_NET_HDR_GSO_ECN bit may be set in “gso_type” as well,
1491 indicating that the TCP packet has the ECN bit set.[footnote:
1492This case is not handled by some older hardware, so is called out
1493specifically in the protocol.
1494]
1495
1496 If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature,
1497 the num_buffers field is set to zero.
1498
1499 The header and packet are added as one output buffer to the
1500 transmitq, and the device is notified of the new entry (see [sub:Notifying-The-Device]
1501 ).[footnote:
1502Note that the header will be two bytes longer for the
1503VIRTIO_NET_F_MRG_RXBUF case.
1504]
1505
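As a sketch of the common case (no segmentation offload, partial
checksum as in the footnote above), the header for an outgoing
TCP/IPv4 packet could be filled as below; the function name is
illustrative.

#include <stdint.h>
#include <string.h>

#define VIRTIO_NET_HDR_F_NEEDS_CSUM  1
#define VIRTIO_NET_HDR_GSO_NONE      0

struct virtio_net_hdr {
        uint8_t  flags;
        uint8_t  gso_type;
        uint16_t hdr_len;
        uint16_t gso_size;
        uint16_t csum_start;
        uint16_t csum_offset;
        /* uint16_t num_buffers;  only if VIRTIO_NET_F_MRG_RXBUF */
};

/* Fill the header for a partially checksummed TCP/IPv4 packet with a
 * 14-byte ethernet header and a 20-byte IP header, as in the footnote
 * above: checksumming starts at the TCP header (offset 34) and the
 * result is stored 16 bytes into it.  No segmentation offload is
 * requested. */
static void example_fill_tx_header(struct virtio_net_hdr *hdr)
{
        memset(hdr, 0, sizeof(*hdr));
        hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
        hdr->gso_type    = VIRTIO_NET_HDR_GSO_NONE;
        hdr->csum_start  = 14 + 20;   /* ethernet + IP headers */
        hdr->csum_offset = 16;        /* TCP checksum field offset */
}
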
1506 Packet Transmission Interrupt
1507
1508Often a driver will suppress transmission interrupts using the
1509VRING_AVAIL_F_NO_INTERRUPT flag (see [sub:Receiving-Used-Buffers]
1510) and check for used packets in the transmit path of following
1511packets. However, it will still receive interrupts if the
1512VIRTIO_F_NOTIFY_ON_EMPTY feature is negotiated, indicating that
1513the transmission queue is completely emptied.
1514
1515The normal behavior in this interrupt handler is to retrieve the
1516newly used descriptors from the used ring and free the corresponding
1517headers and packets.
1518
1519 Setting Up Receive Buffers
1520
1521It is generally a good idea to keep the receive virtqueue as
1522fully populated as possible: if it runs out, network performance
1523will suffer.
1524
1525If the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6 or
1526VIRTIO_NET_F_GUEST_UFO features are used, the Guest will need to
1527accept packets of up to 65550 bytes long (the maximum size of a
1528TCP or UDP packet, plus the 14 byte ethernet header), otherwise
15291514 bytes. So unless VIRTIO_NET_F_MRG_RXBUF is negotiated, every
1530buffer in the receive queue needs to be at least this length [footnote:
1531Obviously each one can be split across multiple descriptor
1532elements.
1533].
1534
1535If VIRTIO_NET_F_MRG_RXBUF is negotiated, each buffer must be at
1536least the size of the struct virtio_net_hdr.
1537
1538 Packet Receive Interrupt
1539
1540When a packet is copied into a buffer in the receiveq, the
1541optimal path is to disable further interrupts for the receiveq
1542(see [sub:Receiving-Used-Buffers]) and process packets until no
1543more are found, then re-enable them.
1544
1545Processing packet involves:
1546
1547 If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature,
1548 then the “num_buffers” field indicates how many descriptors
1549 this packet is spread over (including this one). This allows
1550 receipt of large packets without having to allocate large
1551 buffers. In this case, there will be at least “num_buffers” in
1552 the used ring, and they should be chained together to form a
1553 single packet. The other buffers will not begin with a struct
1554 virtio_net_hdr.
1555
1556 If the VIRTIO_NET_F_MRG_RXBUF feature was not negotiated, or
1557 the “num_buffers” field is one, then the entire packet will be
1558 contained within this buffer, immediately following the struct
1559 virtio_net_hdr.
1560
1561 If the VIRTIO_NET_F_GUEST_CSUM feature was negotiated, the
1562 VIRTIO_NET_HDR_F_NEEDS_CSUM bit in the “flags” field may be
1563 set: if so, the checksum on the packet is incomplete and the “
1564 csum_start” and “csum_offset” fields indicate how to calculate
1565 it (see [ite:csum_start-is-set]).
1566
1567 If the VIRTIO_NET_F_GUEST_TSO4, TSO6 or UFO options were
1568 negotiated, then the “gso_type” may be something other than
1569 VIRTIO_NET_HDR_GSO_NONE, and the “gso_size” field indicates the
1570 desired MSS (see [enu:If-the-driver]).

 Control Virtqueue
1571
1572The driver uses the control virtqueue (if VIRTIO_NET_F_CTRL_VQ is
1573negotiated) to send commands to manipulate various features of
1574the device which would not easily map into the configuration
1575space.
1576
1577All commands are of the following form:
1578
1579struct virtio_net_ctrl {
1580
1581 u8 class;
1582
1583 u8 command;
1584
1585 u8 command-specific-data[];
1586
1587 u8 ack;
1588
1589};
1590
1591
1592
1593/* ack values */
1594
1595#define VIRTIO_NET_OK 0
1596
1597#define VIRTIO_NET_ERR 1
1598
1599The class, command and command-specific-data are set by the
1600driver, and the device sets the ack byte. There is little it can
1601do except issue a diagnostic if the ack byte is not
1602VIRTIO_NET_OK.
1603
1604 Packet Receive Filtering
1605
1606If the VIRTIO_NET_F_CTRL_RX feature is negotiated, the driver can
1607send control commands for promiscuous mode, multicast receiving,
1608and filtering of MAC addresses.
1609
1610Note that in general, these commands are best-effort: unwanted
1611packets may still arrive.
1612
1613 Setting Promiscuous Mode
1614
1615#define VIRTIO_NET_CTRL_RX 0
1616
1617 #define VIRTIO_NET_CTRL_RX_PROMISC 0
1618
1619 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
1620
1621The class VIRTIO_NET_CTRL_RX has two commands:
1622VIRTIO_NET_CTRL_RX_PROMISC turns promiscuous mode on and off, and
1623VIRTIO_NET_CTRL_RX_ALLMULTI turns all-multicast receive on and
1624off. The command-specific-data is one byte containing 0 (off) or
16251 (on).
1626
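A sketch of issuing the promiscuous-mode command: the controlq_send()
helper, which is assumed to queue the read-only command bytes followed
by a write-only ack byte on the control virtqueue and wait for
completion, is an illustration only.

#include <stdint.h>

#define VIRTIO_NET_CTRL_RX          0
#define VIRTIO_NET_CTRL_RX_PROMISC  0

#define VIRTIO_NET_OK   0
#define VIRTIO_NET_ERR  1

/* Hypothetical helper: queue 'out' as a read-only buffer followed by a
 * write-only ack byte on the control virtqueue, notify the device and
 * wait for the buffer to be used. */
extern int controlq_send(const void *out, uint32_t out_len, uint8_t *ack);

/* Turn promiscuous mode on (1) or off (0). */
static int example_set_promisc(uint8_t on)
{
        /* class, command, one byte of command-specific-data */
        uint8_t cmd[3] = { VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, on };
        uint8_t ack = VIRTIO_NET_ERR;

        if (controlq_send(cmd, sizeof(cmd), &ack) < 0)
                return -1;

        return ack == VIRTIO_NET_OK ? 0 : -1;
}
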
1627 Setting MAC Address Filtering
1628
1629struct virtio_net_ctrl_mac {
1630
1631 u32 entries;
1632
1633 u8 macs[entries][ETH_ALEN];
1634
1635};
1636
1637
1638
1639#define VIRTIO_NET_CTRL_MAC 1
1640
1641 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
1642
1643The device can filter incoming packets by any number of
1644destination MAC addresses.[footnote:
1645Since there are no guarantees, it can use a hash filter or
1646silently switch to allmulti or promiscuous mode if it is given
1647too many addresses.
1648] This table is set using the class VIRTIO_NET_CTRL_MAC and the
1649command VIRTIO_NET_CTRL_MAC_TABLE_SET. The command-specific-data
1650is two variable length tables of 6-byte MAC addresses. The first
1651table contains unicast addresses, and the second contains
1652multicast addresses.
1653
1654 VLAN Filtering
1655
1656If the driver negotiates the VIRTIO_NET_F_CTRL_VLAN feature, it
1657can control a VLAN filter table in the device.
1658
1659#define VIRTIO_NET_CTRL_VLAN 2
1660
1661 #define VIRTIO_NET_CTRL_VLAN_ADD 0
1662
1663 #define VIRTIO_NET_CTRL_VLAN_DEL 1
1664
1665Both the VIRTIO_NET_CTRL_VLAN_ADD and VIRTIO_NET_CTRL_VLAN_DEL
1666commands take a 16-bit VLAN id as the command-specific-data.
1667
1668Appendix D: Block Device
1669
1670The virtio block device is a simple virtual block device (ie.
1671disk). Read and write requests (and other exotic requests) are
1672placed in the queue, and serviced (probably out of order) by the
1673device except where noted.
1674
1675 Configuration
1676
1677 Subsystem Device ID 2
1678
1679 Virtqueues 0:requestq.
1680
1681 Feature bits
1682
1683 VIRTIO_BLK_F_BARRIER (0) Host supports request barriers.
1684
1685 VIRTIO_BLK_F_SIZE_MAX (1) Maximum size of any single segment is
1686 in “size_max”.
1687
1688 VIRTIO_BLK_F_SEG_MAX (2) Maximum number of segments in a
1689 request is in “seg_max”.
1690
1691 VIRTIO_BLK_F_GEOMETRY (4) Disk-style geometry specified in “
1692 geometry”.
1693
1694 VIRTIO_BLK_F_RO (5) Device is read-only.
1695
1696 VIRTIO_BLK_F_BLK_SIZE (6) Block size of disk is in “blk_size”.
1697
1698 VIRTIO_BLK_F_SCSI (7) Device supports scsi packet commands.
1699
1700 VIRTIO_BLK_F_FLUSH (9) Cache flush command support.
1701
1702
1703
1704 Device configuration layout The capacity of the device
1705 (expressed in 512-byte sectors) is always present. The
1706 availability of the others all depend on various feature bits
1707 as indicated above. struct virtio_blk_config {
1708
1709 u64 capacity;
1710
1711 u32 size_max;
1712
1713 u32 seg_max;
1714
1715 struct virtio_blk_geometry {
1716
1717 u16 cylinders;
1718
1719 u8 heads;
1720
1721 u8 sectors;
1722
1723 } geometry;
1724
1725 u32 blk_size;
1726
1727
1728
1729};
1730
1731 Device Initialization
1732
1733 The device size should be read from the “capacity”
1734 configuration field. No requests should be submitted which go
1735 beyond this limit.
1736
1737 If the VIRTIO_BLK_F_BLK_SIZE feature is negotiated, the
1738 blk_size field can be read to determine the optimal sector size
1739 for the driver to use. This does not affect the units used in
1740 the protocol (always 512 bytes), but awareness of the correct
1741 value can affect performance.
1742
1743 If the VIRTIO_BLK_F_RO feature is set by the device, any write
1744 requests will fail.
1745
1746
1747
1748 Device Operation
1749
1750The driver queues requests to the virtqueue, and they are used by
1751the device (not necessarily in order). Each request is of form:
1752
1753struct virtio_blk_req {
1754
1755
1756
1757 u32 type;
1758
1759 u32 ioprio;
1760
1761 u64 sector;
1762
1763 char data[][512];
1764
1765 u8 status;
1766
1767};
1768
1769If the device has VIRTIO_BLK_F_SCSI feature, it can also support
1770scsi packet command requests, each of these requests is of form:

struct virtio_scsi_pc_req {
1771
1772 u32 type;
1773
1774 u32 ioprio;
1775
1776 u64 sector;
1777
1778 char cmd[];
1779
1780 char data[][512];
1781
1782#define SCSI_SENSE_BUFFERSIZE 96
1783
1784 u8 sense[SCSI_SENSE_BUFFERSIZE];
1785
1786 u32 errors;
1787
1788 u32 data_len;
1789
1790 u32 sense_len;
1791
1792 u32 residual;
1793
1794 u8 status;
1795
1796};
1797
1798The type of the request is either a read (VIRTIO_BLK_T_IN), a
1799write (VIRTIO_BLK_T_OUT), a scsi packet command
1800(VIRTIO_BLK_T_SCSI_CMD or VIRTIO_BLK_T_SCSI_CMD_OUT[footnote:
1801the SCSI_CMD and SCSI_CMD_OUT types are equivalent, the device
1802does not distinguish between them
1803]) or a flush (VIRTIO_BLK_T_FLUSH or VIRTIO_BLK_T_FLUSH_OUT[footnote:
1804the FLUSH and FLUSH_OUT types are equivalent, the device does not
1805distinguish between them
1806]). If the device has VIRTIO_BLK_F_BARRIER feature the high bit
1807(VIRTIO_BLK_T_BARRIER) indicates that this request acts as a
1808barrier and that all preceding requests must be complete before
1809this one, and all following requests must not be started until
1810this is complete. Note that a barrier does not flush caches in
1811the underlying backend device in the host, and thus does not
1812serve as a data consistency guarantee. The driver must use a
1813FLUSH request to flush the host cache.
1814
1815#define VIRTIO_BLK_T_IN 0
1816
1817#define VIRTIO_BLK_T_OUT 1
1818
1819#define VIRTIO_BLK_T_SCSI_CMD 2
1820
1821#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
1822
1823#define VIRTIO_BLK_T_FLUSH 4
1824
1825#define VIRTIO_BLK_T_FLUSH_OUT 5
1826
1827#define VIRTIO_BLK_T_BARRIER 0x80000000
1828
1829The ioprio field is a hint about the relative priorities of
1830requests to the device: higher numbers indicate more important
1831requests.
1832
1833The sector number indicates the offset (multiplied by 512) where
1834the read or write is to occur. This field is unused and set to 0
1835for scsi packet commands and for flush commands.
1836
1837The cmd field is only present for scsi packet command requests,
1838and indicates the command to perform. This field must reside in a
1839single, separate read-only buffer; command length can be derived
1840from the length of this buffer.
1841
1842Note that these first three (four for scsi packet commands)
1843fields are always read-only: the data field is either read-only
1844or write-only, depending on the request. The size of the read or
1845write can be derived from the total size of the request buffers.
1846
1847The sense field is only present for scsi packet command requests,
1848and indicates the buffer for scsi sense data.
1849
1850The data_len field is only present for scsi packet command
1851requests, this field is deprecated, and should be ignored by the
1852driver. Historically, devices copied data length there.
1853
1854The sense_len field is only present for scsi packet command
1855requests and indicates the number of bytes actually written to
1856the sense buffer.
1857
1858The residual field is only present for scsi packet command
1859requests and indicates the residual size, calculated as data
1860length - number of bytes actually transferred.
1861
1862The final status byte is written by the device: either
1863VIRTIO_BLK_S_OK for success, VIRTIO_BLK_S_IOERR for host or guest
1864error or VIRTIO_BLK_S_UNSUPP for a request unsupported by host:

#define VIRTIO_BLK_S_OK 0
1865
1866#define VIRTIO_BLK_S_IOERR 1
1867
1868#define VIRTIO_BLK_S_UNSUPP 2
1869
1870Historically, devices assumed that the fields type, ioprio and
1871sector reside in a single, separate read-only buffer; the fields
1872errors, data_len, sense_len and residual reside in a single,
1873separate write-only buffer; the sense field in a separate
1874write-only buffer of size 96 bytes, by itself; and the status
1875field in a separate write-only buffer of size 1 byte, by itself.
1878
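As a sketch of a simple read, assuming a hypothetical requestq_send()
helper that places a read-only header descriptor, the data buffer and
a write-only status byte on the requestq and waits for the device to
use them:

#include <stdint.h>
#include <string.h>

#define VIRTIO_BLK_T_IN   0
#define VIRTIO_BLK_T_OUT  1

#define VIRTIO_BLK_S_OK     0
#define VIRTIO_BLK_S_IOERR  1

/* Header part of struct virtio_blk_req (the data and status parts live
 * in separate descriptors, see the note on historical framing above). */
struct virtio_blk_outhdr {
        uint32_t type;
        uint32_t ioprio;
        uint64_t sector;
};

/* Hypothetical helper: queue a three-element chain on the requestq
 * (read-only header, then the data buffer, then the write-only status
 * byte), notify the device and wait for completion. */
extern int requestq_send(const struct virtio_blk_outhdr *hdr,
                         void *data, uint32_t data_len, int data_is_write,
                         uint8_t *status);

/* Read 'len' bytes (a multiple of 512) starting at the given sector. */
static int example_blk_read(uint64_t sector, void *buf, uint32_t len)
{
        struct virtio_blk_outhdr hdr;
        uint8_t status = VIRTIO_BLK_S_IOERR;

        memset(&hdr, 0, sizeof(hdr));
        hdr.type   = VIRTIO_BLK_T_IN;
        hdr.ioprio = 0;
        hdr.sector = sector;

        /* For a read the data buffer is write-only for the device. */
        if (requestq_send(&hdr, buf, len, /*data_is_write=*/1, &status) < 0)
                return -1;

        return status == VIRTIO_BLK_S_OK ? 0 : -1;
}
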
1879Appendix E: Console Device
1880
1881The virtio console device is a simple device for data input and
1882output. A device may have one or more ports. Each port has a pair
1883of input and output virtqueues. Moreover, a device has a pair of
1884control IO virtqueues. The control virtqueues are used to
1885communicate information between the device and the driver about
1886ports being opened and closed on either side of the connection,
1887indication from the host about whether a particular port is a
1888console port, adding new ports, port hot-plug/unplug, etc., and
1889indication from the guest about whether a port or a device was
1890successfully added, port open/close, etc. For data IO, one or
1891more empty buffers are placed in the receive queue for incoming
1892data and outgoing characters are placed in the transmit queue.
1893
1894 Configuration
1895
1896 Subsystem Device ID 3
1897
1898 Virtqueues 0:receiveq(port0). 1:transmitq(port0), 2:control
1899 receiveq[footnote:
1900Ports 2 onwards only if VIRTIO_CONSOLE_F_MULTIPORT is set
1901], 3:control transmitq, 4:receiveq(port1), 5:transmitq(port1),
1902 ...
1903
1904 Feature bits
1905
1906 VIRTIO_CONSOLE_F_SIZE (0) Configuration cols and rows fields
1907 are valid.
1908
1909 VIRTIO_CONSOLE_F_MULTIPORT(1) Device has support for multiple
1910 ports; configuration fields nr_ports and max_nr_ports are
1911 valid and control virtqueues will be used.
1912
1913 Device configuration layout The size of the console is supplied
1914 in the configuration space if the VIRTIO_CONSOLE_F_SIZE feature
1915 is set. Furthermore, if the VIRTIO_CONSOLE_F_MULTIPORT feature
1916 is set, the maximum number of ports supported by the device can
1917 be fetched.struct virtio_console_config {
1918
1919 u16 cols;
1920
1921 u16 rows;
1922
1923
1924
1925 u32 max_nr_ports;
1926
1927};
1928
1929 Device Initialization
1930
1931 If the VIRTIO_CONSOLE_F_SIZE feature is negotiated, the driver
1932 can read the console dimensions from the configuration fields.
1933
 If the VIRTIO_CONSOLE_F_MULTIPORT feature is negotiated, the
 driver can spawn multiple ports, not all of which may be
 attached to a console. Some could be generic ports. In this
 case, the control virtqueues are enabled and, according to the
 max_nr_ports configuration-space value, the appropriate number
 of virtqueues are created. A control message indicating that
 the driver is ready is sent to the host. The host can then send
 control messages for adding new ports to the device. After
 creating and initializing each port, a
 VIRTIO_CONSOLE_PORT_READY control message is sent to the host
 for that port so the host can let the driver know of any
 additional configuration options set for that port.

 The receiveq for each port is populated with one or more
 receive buffers.

 Device Operation

 For output, a buffer containing the characters is placed in the
 port's transmitq.[footnote:
Because this is of high importance and low bandwidth, the current
Linux implementation polls for the buffer to be used, rather than
waiting for an interrupt, simplifying the implementation
significantly. However, for generic serial ports with the
O_NONBLOCK flag set, the polling limitation is relaxed and the
consumed buffers are freed upon the next write or poll call or
when a port is closed or hot-unplugged.
]

 When a buffer is used in the receiveq (signalled by an
 interrupt), its contents are the input to the port associated
 with the virtqueue for which the notification was received.

 If the driver negotiated the VIRTIO_CONSOLE_F_SIZE feature, a
 configuration change interrupt may occur. The updated size can
 be read from the configuration fields.

 If the driver negotiated the VIRTIO_CONSOLE_F_MULTIPORT
 feature, active ports are announced by the host using the
 VIRTIO_CONSOLE_PORT_ADD control message. The same message is
 used for port hot-plug as well.

 If the host specified a port `name', a sysfs attribute is
 created with the name filled in, so that udev rules can be
 written that create a symlink from the port's name to the
 char device, for port discovery by applications in the guest.

 Changes to ports' state are effected by control messages.
 Appropriate action is taken on the port indicated in the
 control message. The layout of the control buffer structure
 and the associated events are:

struct virtio_console_control {
        uint32_t id;    /* Port number */
        uint16_t event; /* The kind of control event */
        uint16_t value; /* Extra information for the event */
};

/* Some events for the internal messages (control packets) */

#define VIRTIO_CONSOLE_DEVICE_READY  0
#define VIRTIO_CONSOLE_PORT_ADD      1
#define VIRTIO_CONSOLE_PORT_REMOVE   2
#define VIRTIO_CONSOLE_PORT_READY    3
#define VIRTIO_CONSOLE_CONSOLE_PORT  4
#define VIRTIO_CONSOLE_RESIZE        5
#define VIRTIO_CONSOLE_PORT_OPEN     6
#define VIRTIO_CONSOLE_PORT_NAME     7

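As a non-normative sketch of the guest side of this protocol,
sending the readiness messages mentioned under Device Initialization
might look roughly as follows. control_txq_add()/control_txq_kick()
are placeholders for the driver's own virtqueue primitives, and the
use of value = 1 to mean success is an assumption of this sketch:

#include <stdint.h>

/* struct virtio_console_control as defined above. */
struct virtio_console_control {
        uint32_t id;            /* Port number */
        uint16_t event;         /* The kind of control event */
        uint16_t value;         /* Extra information for the event */
};

/* Placeholder virtqueue primitives for the control transmitq. */
extern int  control_txq_add(const void *buf, unsigned int len);
extern void control_txq_kick(void);

static struct virtio_console_control device_ready_msg, port_ready_msg;

static void send_control(struct virtio_console_control *msg,
                         uint32_t port, uint16_t event, uint16_t value)
{
        msg->id = port;
        msg->event = event;
        msg->value = value;
        control_txq_add(msg, sizeof(*msg));     /* buffer must stay valid until used */
        control_txq_kick();
}

static void announce_ready(void)
{
        /* Tell the host the driver is up (value 1 taken to mean success)... */
        send_control(&device_ready_msg, 0, VIRTIO_CONSOLE_DEVICE_READY, 1);
        /* ...and that port 0 has been created and initialized. */
        send_control(&port_ready_msg, 0, VIRTIO_CONSOLE_PORT_READY, 1);
}
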
Appendix F: Entropy Device

The virtio entropy device supplies high-quality randomness for
guest use.

 Configuration

 Subsystem Device ID 4

 Virtqueues 0:requestq.

 Feature bits None currently defined.

 Device configuration layout None currently defined.

 Device Initialization

 The virtqueue is initialized.

 Device Operation

When the driver requires random bytes, it places the descriptor
of one or more buffers in the queue. Each buffer is completely
filled with random data by the device.

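A minimal, non-normative sketch of such a request; the requestq
helpers used here are placeholders, not names from the
specification:

#include <stdint.h>

/* Placeholder requestq primitives. The buffer handed in must be
 * device-writable (the device fills it, the driver reads it back). */
extern int          requestq_add_inbuf(void *buf, unsigned int len);
extern void         requestq_kick(void);
extern unsigned int requestq_wait_used(void);   /* bytes written by the device */

/* Ask the device for `len` random bytes.  Per the text above the device
 * fills the buffer completely, so the return value should equal `len`. */
static unsigned int read_entropy(uint8_t *buf, unsigned int len)
{
        requestq_add_inbuf(buf, len);
        requestq_kick();
        return requestq_wait_used();
}
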
Appendix G: Memory Balloon Device

The virtio memory balloon device is a primitive device for
managing guest memory: the device asks for a certain amount of
memory, and the guest supplies it (or withdraws it, if the device
has more than it asks for). This allows the guest to adapt to
changes in the allowance of underlying physical memory. If the
feature is negotiated, the device can also be used to communicate
guest memory statistics to the host.

 Configuration

 Subsystem Device ID 5

 Virtqueues 0:inflateq. 1:deflateq. 2:statsq.[footnote:
Only if VIRTIO_BALLOON_F_STATS_VQ is set
]

 Feature bits

 VIRTIO_BALLOON_F_MUST_TELL_HOST (0) Host must be told before
 pages from the balloon are used.

 VIRTIO_BALLOON_F_STATS_VQ (1) A virtqueue for reporting guest
 memory statistics is present.

 Device configuration layout Both fields of this configuration
 are always available. Note that they are little-endian, despite
 the convention that device configuration fields are guest endian:

struct virtio_balloon_config {
        u32 num_pages;
        u32 actual;
};

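Because these two fields are always little-endian, a driver on a
big-endian guest must byte-swap them explicitly. A small sketch
follows; the conversion helper is written out by hand here, and any
equivalent le32-to-CPU conversion will do:

#include <stdint.h>

/* Portable little-endian-to-CPU conversion for the balloon config fields. */
static uint32_t balloon_le32_to_cpu(uint32_t v)
{
        const uint8_t *b = (const uint8_t *)&v;

        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

struct virtio_balloon_config {
        uint32_t num_pages;     /* little-endian on the wire */
        uint32_t actual;        /* little-endian on the wire */
};

/* Target balloon size requested by the device, in CPU byte order. */
static uint32_t balloon_target_pages(const struct virtio_balloon_config *cfg)
{
        return balloon_le32_to_cpu(cfg->num_pages);
}
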
 Device Initialization

 The inflate and deflate virtqueues are identified.

 If the VIRTIO_BALLOON_F_STATS_VQ feature bit is negotiated:

 Identify the stats virtqueue.

 Add one empty buffer to the stats virtqueue and notify the
 host.

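A sketch of this last step, under the assumption of generic
virtqueue helpers; find_vq(), vq_add_outbuf() and vq_kick() are
placeholders, and the buffer size is arbitrary:

/* Placeholder virtqueue helpers -- not names defined by the specification. */
extern void *find_vq(unsigned int index);
extern int   vq_add_outbuf(void *vq, void *buf, unsigned int len);
extern void  vq_kick(void *vq);

static unsigned char stats_buf[64];     /* later filled with virtio_balloon_stat entries */

static void stats_vq_init(void)
{
        void *statsq = find_vq(2);      /* virtqueue index 2: statsq */

        /* Offer one (initially empty) device-readable buffer and notify the
         * host; the device keeps it until it wants memory statistics. */
        vq_add_outbuf(statsq, stats_buf, sizeof(stats_buf));
        vq_kick(statsq);
}
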
Device operation begins immediately.

 Device Operation

 Memory Ballooning The device is driven by the receipt of a
 configuration change interrupt.

 The “num_pages” configuration field is examined. If this is
 greater than the “actual” number of pages, memory must be given
 to the balloon. If it is less than the “actual” number of
 pages, memory may be taken back from the balloon for general
 use.

 To supply memory to the balloon (aka. inflate):

 The driver constructs an array of addresses of unused memory
 pages. These addresses are divided by 4096[footnote:
This is historical, and independent of the guest page size
] and the descriptor describing the resulting 32-bit array is
 added to the inflateq.

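A sketch of building that array for the inflateq; the queue helpers
are placeholders and the batching is simplified. The divisor of 4096
comes from the paragraph above and is independent of the guest page
size:

#include <stdint.h>

#define BALLOON_PAGE_SHIFT 12           /* divide addresses by 4096 */

/* Placeholder inflateq primitives -- not spec-defined names. */
extern int  inflateq_add_outbuf(const void *buf, unsigned int len);
extern void inflateq_kick(void);

/* Give `count` unused pages (identified by physical address) to the balloon. */
static void balloon_inflate(const uint64_t *page_addrs, unsigned int count)
{
        static uint32_t pfns[256];      /* 32-bit entries, as required above */
        unsigned int i;

        if (count > 256)
                count = 256;            /* a real driver would loop in batches */

        for (i = 0; i < count; i++)
                pfns[i] = (uint32_t)(page_addrs[i] >> BALLOON_PAGE_SHIFT);

        inflateq_add_outbuf(pfns, count * sizeof(pfns[0]));
        inflateq_kick();
}
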
 To remove memory from the balloon (aka. deflate):

 The driver constructs an array of addresses of memory pages it
 has previously given to the balloon, as described above. This
 descriptor is added to the deflateq.

 If the VIRTIO_BALLOON_F_MUST_TELL_HOST feature is set, the
 guest may not use these requested pages until that descriptor
 in the deflateq has been used by the device.

 Otherwise, the guest may begin to re-use pages previously given
 to the balloon before the device has acknowledged their
 withdrawal.[footnote:
In this case, deflation advice is merely a courtesy
]

 In either case, once the device has completed the inflation or
 deflation, the “actual” field of the configuration should be
 updated to reflect the new number of pages in the balloon.[footnote:
As updates to configuration space are not atomic, this field
isn't particularly reliable, but can be used to diagnose buggy
guests.
]

 Memory Statistics

The stats virtqueue is atypical because communication is driven
by the device (not the driver). The channel becomes active at
driver initialization time when the driver adds an empty buffer
and notifies the device. A request for memory statistics proceeds
as follows:

 The device pushes the buffer onto the used ring and sends an
 interrupt.

 The driver pops the used buffer and discards it.

 The driver collects memory statistics and writes them into a
 new buffer.

 The driver adds the buffer to the virtqueue and notifies the
 device.

 The device pops the buffer (retaining it to initiate a
 subsequent request) and consumes the statistics.

 Memory Statistics Format Each statistic consists of a 16-bit
 tag and a 64-bit value. Both quantities are represented in the
 native endian of the guest. All statistics are optional and the
 driver may choose which ones to supply. To guarantee backwards
 compatibility, unsupported statistics should be omitted.

struct virtio_balloon_stat {
#define VIRTIO_BALLOON_S_SWAP_IN  0
#define VIRTIO_BALLOON_S_SWAP_OUT 1
#define VIRTIO_BALLOON_S_MAJFLT   2
#define VIRTIO_BALLOON_S_MINFLT   3
#define VIRTIO_BALLOON_S_MEMFREE  4
#define VIRTIO_BALLOON_S_MEMTOT   5
        u16 tag;
        u64 val;
} __attribute__((packed));

 Tags

 VIRTIO_BALLOON_S_SWAP_IN The amount of memory that has been
 swapped in (in bytes).

 VIRTIO_BALLOON_S_SWAP_OUT The amount of memory that has been
 swapped out to disk (in bytes).

 VIRTIO_BALLOON_S_MAJFLT The number of major page faults that
 have occurred.

 VIRTIO_BALLOON_S_MINFLT The number of minor page faults that
 have occurred.

 VIRTIO_BALLOON_S_MEMFREE The amount of memory not being used
 for any purpose (in bytes).

 VIRTIO_BALLOON_S_MEMTOT The total amount of memory available
 (in bytes).

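To make the statistics format above concrete, here is a
non-normative sketch of a driver filling a buffer before handing it
back on the statsq as described earlier. The values are invented and
statsq_add_outbuf() is a placeholder, not a spec-defined name:

#include <stdint.h>

/* Mirrors struct virtio_balloon_stat and a few of the tags defined above. */
struct virtio_balloon_stat {
        uint16_t tag;
        uint64_t val;
} __attribute__((packed));

#define VIRTIO_BALLOON_S_SWAP_IN  0
#define VIRTIO_BALLOON_S_MEMFREE  4
#define VIRTIO_BALLOON_S_MEMTOT   5

/* Placeholder statsq primitive. */
extern int statsq_add_outbuf(const void *buf, unsigned int len);

/* All statistics are optional; report only what the driver knows about.
 * Values are in guest-native endian, amounts of memory are in bytes. */
static void report_stats(void)
{
        static struct virtio_balloon_stat stats[3];
        unsigned int n = 0;

        stats[n].tag = VIRTIO_BALLOON_S_MEMTOT;
        stats[n++].val = 512ULL << 20;          /* invented: 512 MiB total */
        stats[n].tag = VIRTIO_BALLOON_S_MEMFREE;
        stats[n++].val = 128ULL << 20;          /* invented: 128 MiB free */
        stats[n].tag = VIRTIO_BALLOON_S_SWAP_IN;
        stats[n++].val = 0;

        statsq_add_outbuf(stats, n * sizeof(stats[0]));
}
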
diff --git a/MAINTAINERS b/MAINTAINERS
index 3891a12eb6a7..5faf685ee6c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1286,7 +1286,6 @@ F: drivers/input/misc/ati_remote2.c
1286ATLX ETHERNET DRIVERS 1286ATLX ETHERNET DRIVERS
1287M: Jay Cliburn <jcliburn@gmail.com> 1287M: Jay Cliburn <jcliburn@gmail.com>
1288M: Chris Snook <chris.snook@gmail.com> 1288M: Chris Snook <chris.snook@gmail.com>
1289M: Jie Yang <yangjie@qca.qualcomm.com>
1290L: netdev@vger.kernel.org 1289L: netdev@vger.kernel.org
1291W: http://sourceforge.net/projects/atl1 1290W: http://sourceforge.net/projects/atl1
1292W: http://atl1.sourceforge.net 1291W: http://atl1.sourceforge.net
@@ -1582,7 +1581,6 @@ F: drivers/scsi/bfa/
1582 1581
1583BROCADE BNA 10 GIGABIT ETHERNET DRIVER 1582BROCADE BNA 10 GIGABIT ETHERNET DRIVER
1584M: Rasesh Mody <rmody@brocade.com> 1583M: Rasesh Mody <rmody@brocade.com>
1585M: Debashis Dutt <ddutt@brocade.com>
1586L: netdev@vger.kernel.org 1584L: netdev@vger.kernel.org
1587S: Supported 1585S: Supported
1588F: drivers/net/ethernet/brocade/bna/ 1586F: drivers/net/ethernet/brocade/bna/
@@ -1766,7 +1764,6 @@ F: Documentation/zh_CN/
1766 1764
1767CISCO VIC ETHERNET NIC DRIVER 1765CISCO VIC ETHERNET NIC DRIVER
1768M: Christian Benvenuti <benve@cisco.com> 1766M: Christian Benvenuti <benve@cisco.com>
1769M: Vasanthy Kolluri <vkolluri@cisco.com>
1770M: Roopa Prabhu <roprabhu@cisco.com> 1767M: Roopa Prabhu <roprabhu@cisco.com>
1771M: David Wang <dwang2@cisco.com> 1768M: David Wang <dwang2@cisco.com>
1772S: Supported 1769S: Supported
@@ -1891,7 +1888,7 @@ S: Maintained
1891F: drivers/connector/ 1888F: drivers/connector/
1892 1889
1893CONTROL GROUPS (CGROUPS) 1890CONTROL GROUPS (CGROUPS)
1894M: Paul Menage <menage@google.com> 1891M: Paul Menage <paul@paulmenage.org>
1895M: Li Zefan <lizf@cn.fujitsu.com> 1892M: Li Zefan <lizf@cn.fujitsu.com>
1896L: containers@lists.linux-foundation.org 1893L: containers@lists.linux-foundation.org
1897S: Maintained 1894S: Maintained
@@ -1940,7 +1937,7 @@ S: Maintained
1940F: tools/power/cpupower 1937F: tools/power/cpupower
1941 1938
1942CPUSETS 1939CPUSETS
1943M: Paul Menage <menage@google.com> 1940M: Paul Menage <paul@paulmenage.org>
1944W: http://www.bullopensource.org/cpuset/ 1941W: http://www.bullopensource.org/cpuset/
1945W: http://oss.sgi.com/projects/cpusets/ 1942W: http://oss.sgi.com/projects/cpusets/
1946S: Supported 1943S: Supported
@@ -2657,11 +2654,11 @@ F: drivers/net/wan/dlci.c
2657F: drivers/net/wan/sdla.c 2654F: drivers/net/wan/sdla.c
2658 2655
2659FRAMEBUFFER LAYER 2656FRAMEBUFFER LAYER
2660M: Paul Mundt <lethal@linux-sh.org> 2657M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
2661L: linux-fbdev@vger.kernel.org 2658L: linux-fbdev@vger.kernel.org
2662W: http://linux-fbdev.sourceforge.net/ 2659W: http://linux-fbdev.sourceforge.net/
2663Q: http://patchwork.kernel.org/project/linux-fbdev/list/ 2660Q: http://patchwork.kernel.org/project/linux-fbdev/list/
2664T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git 2661T: git git://github.com/schandinat/linux-2.6.git fbdev-next
2665S: Maintained 2662S: Maintained
2666F: Documentation/fb/ 2663F: Documentation/fb/
2667F: Documentation/devicetree/bindings/fb/ 2664F: Documentation/devicetree/bindings/fb/
@@ -3271,6 +3268,17 @@ F: Documentation/input/multi-touch-protocol.txt
3271F: drivers/input/input-mt.c 3268F: drivers/input/input-mt.c
3272K: \b(ABS|SYN)_MT_ 3269K: \b(ABS|SYN)_MT_
3273 3270
3271INTEL C600 SERIES SAS CONTROLLER DRIVER
3272M: Intel SCU Linux support <intel-linux-scu@intel.com>
3273M: Dan Williams <dan.j.williams@intel.com>
3274M: Dave Jiang <dave.jiang@intel.com>
3275M: Ed Nadolski <edmund.nadolski@intel.com>
3276L: linux-scsi@vger.kernel.org
3277T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
3278S: Maintained
3279F: drivers/scsi/isci/
3280F: firmware/isci/
3281
3274INTEL IDLE DRIVER 3282INTEL IDLE DRIVER
3275M: Len Brown <lenb@kernel.org> 3283M: Len Brown <lenb@kernel.org>
3276L: linux-pm@lists.linux-foundation.org 3284L: linux-pm@lists.linux-foundation.org
@@ -4405,7 +4413,8 @@ L: netfilter@vger.kernel.org
4405L: coreteam@netfilter.org 4413L: coreteam@netfilter.org
4406W: http://www.netfilter.org/ 4414W: http://www.netfilter.org/
4407W: http://www.iptables.org/ 4415W: http://www.iptables.org/
4408T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git 4416T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
4417T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
4409S: Supported 4418S: Supported
4410F: include/linux/netfilter* 4419F: include/linux/netfilter*
4411F: include/linux/netfilter/ 4420F: include/linux/netfilter/
@@ -4451,8 +4460,8 @@ M: "David S. Miller" <davem@davemloft.net>
4451L: netdev@vger.kernel.org 4460L: netdev@vger.kernel.org
4452W: http://www.linuxfoundation.org/en/Net 4461W: http://www.linuxfoundation.org/en/Net
4453W: http://patchwork.ozlabs.org/project/netdev/list/ 4462W: http://patchwork.ozlabs.org/project/netdev/list/
4454T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git 4463T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
4455T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git 4464T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
4456S: Maintained 4465S: Maintained
4457F: net/ 4466F: net/
4458F: include/net/ 4467F: include/net/
@@ -4617,7 +4626,7 @@ F: arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
4617F: arch/arm/mach-omap2/clockdomain44xx.c 4626F: arch/arm/mach-omap2/clockdomain44xx.c
4618 4627
4619OMAP AUDIO SUPPORT 4628OMAP AUDIO SUPPORT
4620M: Jarkko Nikula <jhnikula@gmail.com> 4629M: Jarkko Nikula <jarkko.nikula@bitmer.com>
4621L: alsa-devel@alsa-project.org (subscribers-only) 4630L: alsa-devel@alsa-project.org (subscribers-only)
4622L: linux-omap@vger.kernel.org 4631L: linux-omap@vger.kernel.org
4623S: Maintained 4632S: Maintained
@@ -4787,7 +4796,7 @@ F: drivers/net/wireless/orinoco/
4787 4796
4788OSD LIBRARY and FILESYSTEM 4797OSD LIBRARY and FILESYSTEM
4789M: Boaz Harrosh <bharrosh@panasas.com> 4798M: Boaz Harrosh <bharrosh@panasas.com>
4790M: Benny Halevy <bhalevy@panasas.com> 4799M: Benny Halevy <bhalevy@tonian.com>
4791L: osd-dev@open-osd.org 4800L: osd-dev@open-osd.org
4792W: http://open-osd.org 4801W: http://open-osd.org
4793T: git git://git.open-osd.org/open-osd.git 4802T: git git://git.open-osd.org/open-osd.git
@@ -4984,7 +4993,7 @@ M: Paul Mackerras <paulus@samba.org>
4984M: Ingo Molnar <mingo@elte.hu> 4993M: Ingo Molnar <mingo@elte.hu>
4985M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 4994M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
4986S: Supported 4995S: Supported
4987F: kernel/perf_event*.c 4996F: kernel/events/*
4988F: include/linux/perf_event.h 4997F: include/linux/perf_event.h
4989F: arch/*/kernel/perf_event*.c 4998F: arch/*/kernel/perf_event*.c
4990F: arch/*/kernel/*/perf_event*.c 4999F: arch/*/kernel/*/perf_event*.c
@@ -5546,6 +5555,7 @@ F: include/media/*7146*
5546 5555
5547SAMSUNG AUDIO (ASoC) DRIVERS 5556SAMSUNG AUDIO (ASoC) DRIVERS
5548M: Jassi Brar <jassisinghbrar@gmail.com> 5557M: Jassi Brar <jassisinghbrar@gmail.com>
5558M: Sangbeom Kim <sbkim73@samsung.com>
5549L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5559L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5550S: Supported 5560S: Supported
5551F: sound/soc/samsung 5561F: sound/soc/samsung
@@ -7095,7 +7105,7 @@ S: Supported
7095F: drivers/mmc/host/vub300.c 7105F: drivers/mmc/host/vub300.c
7096 7106
7097W1 DALLAS'S 1-WIRE BUS 7107W1 DALLAS'S 1-WIRE BUS
7098M: Evgeniy Polyakov <johnpol@2ka.mipt.ru> 7108M: Evgeniy Polyakov <zbr@ioremap.net>
7099S: Maintained 7109S: Maintained
7100F: Documentation/w1/ 7110F: Documentation/w1/
7101F: drivers/w1/ 7111F: drivers/w1/
@@ -7207,6 +7217,9 @@ W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
7207S: Supported 7217S: Supported
7208F: Documentation/hwmon/wm83?? 7218F: Documentation/hwmon/wm83??
7209F: drivers/leds/leds-wm83*.c 7219F: drivers/leds/leds-wm83*.c
7220F: drivers/input/misc/wm831x-on.c
7221F: drivers/input/touchscreen/wm831x-ts.c
7222F: drivers/input/touchscreen/wm97*.c
7210F: drivers/mfd/wm8*.c 7223F: drivers/mfd/wm8*.c
7211F: drivers/power/wm83*.c 7224F: drivers/power/wm83*.c
7212F: drivers/rtc/rtc-wm83*.c 7225F: drivers/rtc/rtc-wm83*.c
@@ -7216,6 +7229,7 @@ F: drivers/watchdog/wm83*_wdt.c
7216F: include/linux/mfd/wm831x/ 7229F: include/linux/mfd/wm831x/
7217F: include/linux/mfd/wm8350/ 7230F: include/linux/mfd/wm8350/
7218F: include/linux/mfd/wm8400* 7231F: include/linux/mfd/wm8400*
7232F: include/linux/wm97xx.h
7219F: include/sound/wm????.h 7233F: include/sound/wm????.h
7220F: sound/soc/codecs/wm* 7234F: sound/soc/codecs/wm*
7221 7235
diff --git a/Makefile b/Makefile
index b4ca4e111c9a..733dcba61f34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 1 2PATCHLEVEL = 1
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc7
5NAME = Sneaky Weasel 5NAME = "Divemaster Edition"
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 60cde53d266c..8bb936226dee 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE
51 def_bool y 51 def_bool y
52 52
53config GENERIC_GPIO 53config GENERIC_GPIO
54 def_bool y 54 bool
55 55
56config ZONE_DMA 56config ZONE_DMA
57 bool 57 bool
diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h
index 086aba284df2..e77d77cd07b8 100644
--- a/arch/alpha/include/asm/sysinfo.h
+++ b/arch/alpha/include/asm/sysinfo.h
@@ -27,13 +27,4 @@
27#define UAC_NOFIX 2 27#define UAC_NOFIX 2
28#define UAC_SIGBUS 4 28#define UAC_SIGBUS 4
29 29
30
31#ifdef __KERNEL__
32
33/* This is the shift that is applied to the UAC bits as stored in the
34 per-thread flags. See thread_info.h. */
35#define UAC_SHIFT 6
36
37#endif
38
39#endif /* __ASM_ALPHA_SYSINFO_H */ 30#endif /* __ASM_ALPHA_SYSINFO_H */
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 6f32f9c84a2d..ff73db022342 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -74,9 +74,9 @@ register struct thread_info *__current_thread_info __asm__("$8");
74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
75#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ 75#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
76#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ 76#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
77#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ 77#define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */
78#define TIF_UAC_NOFIX 11 78#define TIF_UAC_NOFIX 11 /* ! flags as they match */
79#define TIF_UAC_SIGBUS 12 79#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
80#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ 80#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
81#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ 81#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
82#define TIF_FREEZE 16 /* is freezing for suspend */ 82#define TIF_FREEZE 16 /* is freezing for suspend */
@@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
97#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ 97#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
98 | _TIF_SYSCALL_TRACE) 98 | _TIF_SYSCALL_TRACE)
99 99
100#define ALPHA_UAC_SHIFT 10 100#define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT
101#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ 101#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \
102 1 << TIF_UAC_SIGBUS) 102 1 << TIF_UAC_SIGBUS)
103 103
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 326f0a2d56e5..01e8715e26d9 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -42,6 +42,7 @@
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/system.h> 43#include <asm/system.h>
44#include <asm/sysinfo.h> 44#include <asm/sysinfo.h>
45#include <asm/thread_info.h>
45#include <asm/hwrpb.h> 46#include <asm/hwrpb.h>
46#include <asm/processor.h> 47#include <asm/processor.h>
47 48
@@ -633,9 +634,10 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
633 case GSI_UACPROC: 634 case GSI_UACPROC:
634 if (nbytes < sizeof(unsigned int)) 635 if (nbytes < sizeof(unsigned int))
635 return -EINVAL; 636 return -EINVAL;
636 w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; 637 w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) &
637 if (put_user(w, (unsigned int __user *)buffer)) 638 UAC_BITMASK;
638 return -EFAULT; 639 if (put_user(w, (unsigned int __user *)buffer))
640 return -EFAULT;
639 return 1; 641 return 1;
640 642
641 case GSI_PROC_TYPE: 643 case GSI_PROC_TYPE:
@@ -756,8 +758,8 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
756 case SSIN_UACPROC: 758 case SSIN_UACPROC:
757 again: 759 again:
758 old = current_thread_info()->flags; 760 old = current_thread_info()->flags;
759 new = old & ~(UAC_BITMASK << UAC_SHIFT); 761 new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT);
760 new = new | (w & UAC_BITMASK) << UAC_SHIFT; 762 new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT;
761 if (cmpxchg(&current_thread_info()->flags, 763 if (cmpxchg(&current_thread_info()->flags,
762 old, new) != old) 764 old, new) != old)
763 goto again; 765 goto again;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index b9c28f3f1956..6acea1f96de3 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -360,7 +360,7 @@ sys_call_table:
360 .quad sys_newuname 360 .quad sys_newuname
361 .quad sys_nanosleep /* 340 */ 361 .quad sys_nanosleep /* 340 */
362 .quad sys_mremap 362 .quad sys_mremap
363 .quad sys_nfsservctl 363 .quad sys_ni_syscall /* old nfsservctl */
364 .quad sys_setresuid 364 .quad sys_setresuid
365 .quad sys_getresuid 365 .quad sys_getresuid
366 .quad sys_pciconfig_read /* 345 */ 366 .quad sys_pciconfig_read /* 345 */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5ebc5d922ea1..3269576dbfa8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
1271 This workaround defines cpu_relax() as smp_mb(), preventing correctly 1271 This workaround defines cpu_relax() as smp_mb(), preventing correctly
1272 written polling loops from denying visibility of updates to memory. 1272 written polling loops from denying visibility of updates to memory.
1273 1273
1274config ARM_ERRATA_364296
1275 bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
1276 depends on CPU_V6 && !SMP
1277 help
1278 This options enables the workaround for the 364296 ARM1136
1279 r0p2 erratum (possible cache data corruption with
1280 hit-under-miss enabled). It sets the undocumented bit 31 in
1281 the auxiliary control register and the FI bit in the control
1282 register, thus disabling hit-under-miss without putting the
1283 processor into full low interrupt latency mode. ARM11MPCore
1284 is not affected.
1285
1274endmenu 1286endmenu
1275 1287
1276source "arch/arm/common/Kconfig" 1288source "arch/arm/common/Kconfig"
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index b6f61d9a5a1b..672ae95db5c3 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -82,7 +82,7 @@ asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
82 82
83 83
84 /* Disable clock to MMC hardware block */ 84 /* Disable clock to MMC hardware block */
85 __raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3); 85 __raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
86 86
87 mmc_update_progress(MMC_PROGRESS_DONE); 87 mmc_update_progress(MMC_PROGRESS_DONE);
88} 88}
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
index d403a8b24d7f..d279294f2381 100644
--- a/arch/arm/boot/compressed/sdhi-sh7372.c
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -85,7 +85,7 @@ asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
85 goto err; 85 goto err;
86 86
87 /* Disable clock to SDHI1 hardware block */ 87 /* Disable clock to SDHI1 hardware block */
88 __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3); 88 __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
89 89
90 mmc_update_progress(MMC_PROGRESS_DONE); 90 mmc_update_progress(MMC_PROGRESS_DONE);
91 91
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 4c053340ce33..e5818668d091 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -57,14 +57,14 @@
57 }; 57 };
58 58
59 sdhci@c8000200 { 59 sdhci@c8000200 {
60 gpios = <&gpio 69 0>, /* cd, gpio PI5 */ 60 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
61 <&gpio 57 0>, /* wp, gpio PH1 */ 61 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
62 <&gpio 155 0>; /* power, gpio PT3 */ 62 power-gpios = <&gpio 155 0>; /* gpio PT3 */
63 }; 63 };
64 64
65 sdhci@c8000600 { 65 sdhci@c8000600 {
66 gpios = <&gpio 58 0>, /* cd, gpio PH2 */ 66 cd-gpios = <&gpio 58 0>; /* gpio PH2 */
67 <&gpio 59 0>, /* wp, gpio PH3 */ 67 wp-gpios = <&gpio 59 0>; /* gpio PH3 */
68 <&gpio 70 0>; /* power, gpio PI6 */ 68 power-gpios = <&gpio 70 0>; /* gpio PI6 */
69 }; 69 };
70}; 70};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index 1940cae00748..64cedca6fc79 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -21,8 +21,8 @@
21 }; 21 };
22 22
23 sdhci@c8000400 { 23 sdhci@c8000400 {
24 gpios = <&gpio 69 0>, /* cd, gpio PI5 */ 24 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
25 <&gpio 57 0>, /* wp, gpio PH1 */ 25 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
26 <&gpio 70 0>; /* power, gpio PI6 */ 26 power-gpios = <&gpio 70 0>; /* gpio PI6 */
27 }; 27 };
28}; 28};
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 16bd48031583..99a6ed7e1bfd 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -45,8 +45,13 @@
45#define L2X0_CLEAN_INV_LINE_PA 0x7F0 45#define L2X0_CLEAN_INV_LINE_PA 0x7F0
46#define L2X0_CLEAN_INV_LINE_IDX 0x7F8 46#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
47#define L2X0_CLEAN_INV_WAY 0x7FC 47#define L2X0_CLEAN_INV_WAY 0x7FC
48#define L2X0_LOCKDOWN_WAY_D 0x900 48/*
49#define L2X0_LOCKDOWN_WAY_I 0x904 49 * The lockdown registers repeat 8 times for L310, the L210 has only one
50 * D and one I lockdown register at 0x0900 and 0x0904.
51 */
52#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
53#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
54#define L2X0_LOCKDOWN_STRIDE 0x08
50#define L2X0_TEST_OPERATION 0xF00 55#define L2X0_TEST_OPERATION 0xF00
51#define L2X0_LINE_DATA 0xF10 56#define L2X0_LINE_DATA 0xF10
52#define L2X0_LINE_TAG 0xF30 57#define L2X0_LINE_TAG 0xF30
@@ -64,7 +69,7 @@
64#define L2X0_AUX_CTRL_MASK 0xc0000fff 69#define L2X0_AUX_CTRL_MASK 0xc0000fff
65#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 70#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
66#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 71#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
67#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17) 72#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
68#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 73#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
69#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 74#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
70#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 75#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 67c70a31a1be..b7e82c4aced6 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -41,7 +41,7 @@ struct arm_pmu_platdata {
41 * encoded error on failure. 41 * encoded error on failure.
42 */ 42 */
43extern struct platform_device * 43extern struct platform_device *
44reserve_pmu(enum arm_pmu_type device); 44reserve_pmu(enum arm_pmu_type type);
45 45
46/** 46/**
47 * release_pmu() - Relinquish control of the performance counters 47 * release_pmu() - Relinquish control of the performance counters
@@ -62,26 +62,26 @@ release_pmu(enum arm_pmu_type type);
62 * the actual hardware initialisation. 62 * the actual hardware initialisation.
63 */ 63 */
64extern int 64extern int
65init_pmu(enum arm_pmu_type device); 65init_pmu(enum arm_pmu_type type);
66 66
67#else /* CONFIG_CPU_HAS_PMU */ 67#else /* CONFIG_CPU_HAS_PMU */
68 68
69#include <linux/err.h> 69#include <linux/err.h>
70 70
71static inline struct platform_device * 71static inline struct platform_device *
72reserve_pmu(enum arm_pmu_type device) 72reserve_pmu(enum arm_pmu_type type)
73{ 73{
74 return ERR_PTR(-ENODEV); 74 return ERR_PTR(-ENODEV);
75} 75}
76 76
77static inline int 77static inline int
78release_pmu(struct platform_device *pdev) 78release_pmu(enum arm_pmu_type type)
79{ 79{
80 return -ENODEV; 80 return -ENODEV;
81} 81}
82 82
83static inline int 83static inline int
84init_pmu(enum arm_pmu_type device) 84init_pmu(enum arm_pmu_type type)
85{ 85{
86 return -ENODEV; 86 return -ENODEV;
87} 87}
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 80f7896cc016..9943e9e74a1b 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -178,7 +178,7 @@
178 CALL(sys_ni_syscall) /* vm86 */ 178 CALL(sys_ni_syscall) /* vm86 */
179 CALL(sys_ni_syscall) /* was sys_query_module */ 179 CALL(sys_ni_syscall) /* was sys_query_module */
180 CALL(sys_poll) 180 CALL(sys_poll)
181 CALL(sys_nfsservctl) 181 CALL(sys_ni_syscall) /* was nfsservctl */
182/* 170 */ CALL(sys_setresgid16) 182/* 170 */ CALL(sys_setresgid16)
183 CALL(sys_getresgid16) 183 CALL(sys_getresgid16)
184 CALL(sys_prctl) 184 CALL(sys_prctl)
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2b70709376c3..c53474fe84df 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -31,7 +31,7 @@ static int __devinit pmu_register(struct platform_device *pdev,
31{ 31{
32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { 32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
33 pr_warning("received registration request for unknown " 33 pr_warning("received registration request for unknown "
34 "device %d\n", type); 34 "PMU device type %d\n", type);
35 return -EINVAL; 35 return -EINVAL;
36 } 36 }
37 37
@@ -112,17 +112,17 @@ static int __init register_pmu_driver(void)
112device_initcall(register_pmu_driver); 112device_initcall(register_pmu_driver);
113 113
114struct platform_device * 114struct platform_device *
115reserve_pmu(enum arm_pmu_type device) 115reserve_pmu(enum arm_pmu_type type)
116{ 116{
117 struct platform_device *pdev; 117 struct platform_device *pdev;
118 118
119 if (test_and_set_bit_lock(device, &pmu_lock)) { 119 if (test_and_set_bit_lock(type, &pmu_lock)) {
120 pdev = ERR_PTR(-EBUSY); 120 pdev = ERR_PTR(-EBUSY);
121 } else if (pmu_devices[device] == NULL) { 121 } else if (pmu_devices[type] == NULL) {
122 clear_bit_unlock(device, &pmu_lock); 122 clear_bit_unlock(type, &pmu_lock);
123 pdev = ERR_PTR(-ENODEV); 123 pdev = ERR_PTR(-ENODEV);
124 } else { 124 } else {
125 pdev = pmu_devices[device]; 125 pdev = pmu_devices[type];
126 } 126 }
127 127
128 return pdev; 128 return pdev;
@@ -130,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
130EXPORT_SYMBOL_GPL(reserve_pmu); 130EXPORT_SYMBOL_GPL(reserve_pmu);
131 131
132int 132int
133release_pmu(enum arm_pmu_type device) 133release_pmu(enum arm_pmu_type type)
134{ 134{
135 if (WARN_ON(!pmu_devices[device])) 135 if (WARN_ON(!pmu_devices[type]))
136 return -EINVAL; 136 return -EINVAL;
137 clear_bit_unlock(device, &pmu_lock); 137 clear_bit_unlock(type, &pmu_lock);
138 return 0; 138 return 0;
139} 139}
140EXPORT_SYMBOL_GPL(release_pmu); 140EXPORT_SYMBOL_GPL(release_pmu);
@@ -182,17 +182,17 @@ init_cpu_pmu(void)
182} 182}
183 183
184int 184int
185init_pmu(enum arm_pmu_type device) 185init_pmu(enum arm_pmu_type type)
186{ 186{
187 int err = 0; 187 int err = 0;
188 188
189 switch (device) { 189 switch (type) {
190 case ARM_PMU_DEVICE_CPU: 190 case ARM_PMU_DEVICE_CPU:
191 err = init_cpu_pmu(); 191 err = init_cpu_pmu();
192 break; 192 break;
193 default: 193 default:
194 pr_warning("attempt to initialise unknown device %d\n", 194 pr_warning("attempt to initialise PMU of unknown "
195 device); 195 "type %d\n", type);
196 err = -EINVAL; 196 err = -EINVAL;
197 } 197 }
198 198
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 9cf4cbf8f95b..d0cdedf4864d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -57,7 +57,8 @@ relocate_new_kernel:
57 mov r0,#0 57 mov r0,#0
58 ldr r1,kexec_mach_type 58 ldr r1,kexec_mach_type
59 ldr r2,kexec_boot_atags 59 ldr r2,kexec_boot_atags
60 mov pc,lr 60 ARM( mov pc, lr )
61 THUMB( bx lr )
61 62
62 .align 63 .align
63 64
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 70bca649e925..e514c76043b4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
280 if (arch >= CPU_ARCH_ARMv6) { 280 if (arch >= CPU_ARCH_ARMv6) {
281 if ((cachetype & (7 << 29)) == 4 << 29) { 281 if ((cachetype & (7 << 29)) == 4 << 29) {
282 /* ARMv7 register format */ 282 /* ARMv7 register format */
283 arch = CPU_ARCH_ARMv7;
283 cacheid = CACHEID_VIPT_NONALIASING; 284 cacheid = CACHEID_VIPT_NONALIASING;
284 if ((cachetype & (3 << 14)) == 1 << 14) 285 if ((cachetype & (3 << 14)) == 1 << 14)
285 cacheid |= CACHEID_ASID_TAGGED; 286 cacheid |= CACHEID_ASID_TAGGED;
286 else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
287 cacheid |= CACHEID_VIPT_I_ALIASING;
288 } else if (cachetype & (1 << 23)) {
289 cacheid = CACHEID_VIPT_ALIASING;
290 } else { 287 } else {
291 cacheid = CACHEID_VIPT_NONALIASING; 288 arch = CPU_ARCH_ARMv6;
292 if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6)) 289 if (cachetype & (1 << 23))
293 cacheid |= CACHEID_VIPT_I_ALIASING; 290 cacheid = CACHEID_VIPT_ALIASING;
291 else
292 cacheid = CACHEID_VIPT_NONALIASING;
294 } 293 }
294 if (cpu_has_aliasing_icache(arch))
295 cacheid |= CACHEID_VIPT_I_ALIASING;
295 } else { 296 } else {
296 cacheid = CACHEID_VIVT; 297 cacheid = CACHEID_VIVT;
297 } 298 }
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d40cee6..01c186222f3b 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
137 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); 137 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
138 clk->min_delta_ns = clockevent_delta2ns(0xf, clk); 138 clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
139 139
140 clockevents_register_device(clk);
141
140 /* Make sure our local interrupt controller has this enabled */ 142 /* Make sure our local interrupt controller has this enabled */
141 gic_enable_ppi(clk->irq); 143 gic_enable_ppi(clk->irq);
142
143 clockevents_register_device(clk);
144} 144}
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index d522b47e30b5..6c8e3b5f669f 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
157 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), 157 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
158 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), 158 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
159 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), 159 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
160 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk), 160 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
161 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), 161 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
162 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), 162 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
163 CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk), 163 CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
index 6bd83ed90afe..d87bfc397d39 100644
--- a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
+++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
@@ -8,7 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <mach/hardware.h>
12#include <asm/hardware/entry-macro-gic.S> 11#include <asm/hardware/entry-macro-gic.S>
13 12
14 .macro disable_fiq 13 .macro disable_fiq
diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h
index 58bb03ae3cf4..4f16c9b79f78 100644
--- a/arch/arm/mach-cns3xxx/include/mach/system.h
+++ b/arch/arm/mach-cns3xxx/include/mach/system.h
@@ -13,7 +13,6 @@
13 13
14#include <linux/io.h> 14#include <linux/io.h>
15#include <asm/proc-fns.h> 15#include <asm/proc-fns.h>
16#include <mach/hardware.h>
17 16
18static inline void arch_idle(void) 17static inline void arch_idle(void)
19{ 18{
diff --git a/arch/arm/mach-cns3xxx/include/mach/uncompress.h b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
index de8ead9b91f7..a91b6058ab4f 100644
--- a/arch/arm/mach-cns3xxx/include/mach/uncompress.h
+++ b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <asm/mach-types.h> 10#include <asm/mach-types.h>
11#include <mach/hardware.h>
12#include <mach/cns3xxx.h> 11#include <mach/cns3xxx.h>
13 12
14#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00)) 13#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 06fd25d70aec..0f8fca48a5ed 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
49 return &cns3xxx_pcie[root->domain]; 49 return &cns3xxx_pcie[root->domain];
50} 50}
51 51
52static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev) 52static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
53{ 53{
54 return sysdata_to_cnspci(dev->sysdata); 54 return sysdata_to_cnspci(dev->sysdata);
55} 55}
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index bd5394537c88..008d51407cd7 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
115 }, 115 },
116}; 116};
117 117
118#ifdef CONFIG_MTD
119static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
120{
121 char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
122 size_t retlen;
123
124 if (!strcmp(mtd->name, "MAC-Address")) {
125 mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
126 if (retlen == ETH_ALEN)
127 pr_info("Read MAC addr from SPI Flash: %pM\n",
128 mac_addr);
129 }
130}
131
132static struct mtd_notifier da850evm_spi_notifier = {
133 .add = da850_evm_m25p80_notify_add,
134};
135
136static void da850_evm_setup_mac_addr(void)
137{
138 register_mtd_user(&da850evm_spi_notifier);
139}
140#else
141static void da850_evm_setup_mac_addr(void) { }
142#endif
143
118static struct mtd_partition da850_evm_norflash_partition[] = { 144static struct mtd_partition da850_evm_norflash_partition[] = {
119 { 145 {
120 .name = "bootloaders + env", 146 .name = "bootloaders + env",
@@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void)
1244 if (ret) 1270 if (ret)
1245 pr_warning("da850_evm_init: sata registration failed: %d\n", 1271 pr_warning("da850_evm_init: sata registration failed: %d\n",
1246 ret); 1272 ret);
1273
1274 da850_evm_setup_mac_addr();
1247} 1275}
1248 1276
1249#ifdef CONFIG_SERIAL_8250_CONSOLE 1277#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 47fd0bc3d3e7..fa59c097223d 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -243,7 +243,7 @@
243#define PSC_STATE_DISABLE 2 243#define PSC_STATE_DISABLE 2
244#define PSC_STATE_ENABLE 3 244#define PSC_STATE_ENABLE 3
245 245
246#define MDSTAT_STATE_MASK 0x1f 246#define MDSTAT_STATE_MASK 0x3f
247#define MDCTL_FORCE BIT(31) 247#define MDCTL_FORCE BIT(31)
248 248
249#ifndef __ASSEMBLER__ 249#ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index fb5e72b532b0..5f1e045a3ad1 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -217,7 +217,11 @@ ddr2clk_stop_done:
217ENDPROC(davinci_ddr_psc_config) 217ENDPROC(davinci_ddr_psc_config)
218 218
219CACHE_FLUSH: 219CACHE_FLUSH:
220 .word arm926_flush_kern_cache_all 220#ifdef CONFIG_CPU_V6
221 .word v6_flush_kern_cache_all
222#else
223 .word arm926_flush_kern_cache_all
224#endif
221 225
222ENTRY(davinci_cpu_suspend_sz) 226ENTRY(davinci_cpu_suspend_sz)
223 .word . - davinci_cpu_suspend 227 .word . - davinci_cpu_suspend
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 83dce859886d..a9e0dae86a26 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -158,7 +158,7 @@ void __init dove_spi0_init(void)
158 158
159void __init dove_spi1_init(void) 159void __init dove_spi1_init(void)
160{ 160{
161 orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk()); 161 orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
162} 162}
163 163
164/***************************************************************************** 164/*****************************************************************************
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 0eabec62cd9d..f1397a13e76b 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -6,7 +6,7 @@
6 * TS72xx memory map: 6 * TS72xx memory map:
7 * 7 *
8 * virt phys size 8 * virt phys size
9 * febff000 22000000 4K model number register 9 * febff000 22000000 4K model number register (bits 0-2)
10 * febfe000 22400000 4K options register 10 * febfe000 22400000 4K options register
11 * febfd000 22800000 4K options register #2 11 * febfd000 22800000 4K options register #2
12 * febf9000 10800000 4K TS-5620 RTC index register 12 * febf9000 10800000 4K TS-5620 RTC index register
@@ -20,6 +20,9 @@
20#define TS72XX_MODEL_TS7200 0x00 20#define TS72XX_MODEL_TS7200 0x00
21#define TS72XX_MODEL_TS7250 0x01 21#define TS72XX_MODEL_TS7250 0x01
22#define TS72XX_MODEL_TS7260 0x02 22#define TS72XX_MODEL_TS7260 0x02
23#define TS72XX_MODEL_TS7300 0x03
24#define TS72XX_MODEL_TS7400 0x04
25#define TS72XX_MODEL_MASK 0x07
23 26
24 27
25#define TS72XX_OPTIONS_PHYS_BASE 0x22400000 28#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
@@ -51,19 +54,34 @@
51 54
52#ifndef __ASSEMBLY__ 55#ifndef __ASSEMBLY__
53 56
57static inline int ts72xx_model(void)
58{
59 return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
60}
61
54static inline int board_is_ts7200(void) 62static inline int board_is_ts7200(void)
55{ 63{
56 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200; 64 return ts72xx_model() == TS72XX_MODEL_TS7200;
57} 65}
58 66
59static inline int board_is_ts7250(void) 67static inline int board_is_ts7250(void)
60{ 68{
61 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250; 69 return ts72xx_model() == TS72XX_MODEL_TS7250;
62} 70}
63 71
64static inline int board_is_ts7260(void) 72static inline int board_is_ts7260(void)
65{ 73{
66 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260; 74 return ts72xx_model() == TS72XX_MODEL_TS7260;
75}
76
77static inline int board_is_ts7300(void)
78{
79 return ts72xx_model() == TS72XX_MODEL_TS7300;
80}
81
82static inline int board_is_ts7400(void)
83{
84 return ts72xx_model() == TS72XX_MODEL_TS7400;
67} 85}
68 86
69static inline int is_max197_installed(void) 87static inline int is_max197_installed(void)
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 851dea018578..79d6cd0c8e7b 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
520 .ctrlbit = (1 << 21), 520 .ctrlbit = (1 << 21),
521 }, { 521 }, {
522 .name = "ac97", 522 .name = "ac97",
523 .id = -1, 523 .devname = "samsung-ac97",
524 .enable = exynos4_clk_ip_peril_ctrl, 524 .enable = exynos4_clk_ip_peril_ctrl,
525 .ctrlbit = (1 << 27), 525 .ctrlbit = (1 << 27),
526 }, { 526 }, {
@@ -1160,7 +1160,7 @@ void __init_or_cpufreq exynos4_setup_clocks(void)
1160 1160
1161 vpllsrc = clk_get_rate(&clk_vpllsrc.clk); 1161 vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
1162 vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0), 1162 vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
1163 __raw_readl(S5P_VPLL_CON1), pll_4650); 1163 __raw_readl(S5P_VPLL_CON1), pll_4650c);
1164 1164
1165 clk_fout_apll.ops = &exynos4_fout_apll_ops; 1165 clk_fout_apll.ops = &exynos4_fout_apll_ops;
1166 clk_fout_mpll.rate = mpll; 1166 clk_fout_mpll.rate = mpll;
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 2d8a40c9e6e5..746d6fc6d397 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -24,12 +24,13 @@
24#include <plat/exynos4.h> 24#include <plat/exynos4.h>
25#include <plat/adc-core.h> 25#include <plat/adc-core.h>
26#include <plat/sdhci.h> 26#include <plat/sdhci.h>
27#include <plat/devs.h>
28#include <plat/fb-core.h> 27#include <plat/fb-core.h>
29#include <plat/fimc-core.h> 28#include <plat/fimc-core.h>
30#include <plat/iic-core.h> 29#include <plat/iic-core.h>
30#include <plat/reset.h>
31 31
32#include <mach/regs-irq.h> 32#include <mach/regs-irq.h>
33#include <mach/regs-pmu.h>
33 34
34extern int combiner_init(unsigned int combiner_nr, void __iomem *base, 35extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
35 unsigned int irq_start); 36 unsigned int irq_start);
@@ -128,6 +129,11 @@ static void exynos4_idle(void)
128 local_irq_enable(); 129 local_irq_enable();
129} 130}
130 131
132static void exynos4_sw_reset(void)
133{
134 __raw_writel(0x1, S5P_SWRESET);
135}
136
131/* 137/*
132 * exynos4_map_io 138 * exynos4_map_io
133 * 139 *
@@ -241,5 +247,8 @@ int __init exynos4_init(void)
241 /* set idle function */ 247 /* set idle function */
242 pm_idle = exynos4_idle; 248 pm_idle = exynos4_idle;
243 249
250 /* set sw_reset function */
251 s5p_reset_hook = exynos4_sw_reset;
252
244 return sysdev_register(&exynos4_sysdev); 253 return sysdev_register(&exynos4_sysdev);
245} 254}
diff --git a/arch/arm/mach-exynos4/include/mach/irqs.h b/arch/arm/mach-exynos4/include/mach/irqs.h
index 934d2a493982..f8952f8f3757 100644
--- a/arch/arm/mach-exynos4/include/mach/irqs.h
+++ b/arch/arm/mach-exynos4/include/mach/irqs.h
@@ -80,9 +80,8 @@
80#define IRQ_HSMMC3 IRQ_SPI(76) 80#define IRQ_HSMMC3 IRQ_SPI(76)
81#define IRQ_DWMCI IRQ_SPI(77) 81#define IRQ_DWMCI IRQ_SPI(77)
82 82
83#define IRQ_MIPICSI0 IRQ_SPI(78) 83#define IRQ_MIPI_CSIS0 IRQ_SPI(78)
84 84#define IRQ_MIPI_CSIS1 IRQ_SPI(80)
85#define IRQ_MIPICSI1 IRQ_SPI(80)
86 85
87#define IRQ_ONENAND_AUDI IRQ_SPI(82) 86#define IRQ_ONENAND_AUDI IRQ_SPI(82)
88#define IRQ_ROTATOR IRQ_SPI(83) 87#define IRQ_ROTATOR IRQ_SPI(83)
diff --git a/arch/arm/mach-exynos4/include/mach/regs-pmu.h b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
index fa49bbb8e7b0..cdf9b47c303c 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
@@ -29,6 +29,8 @@
29#define S5P_USE_STANDBY_WFE1 (1 << 25) 29#define S5P_USE_STANDBY_WFE1 (1 << 25)
30#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24)) 30#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24))
31 31
32#define S5P_SWRESET S5P_PMUREG(0x0400)
33
32#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600) 34#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600)
33#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604) 35#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604)
34#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608) 36#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608)
diff --git a/arch/arm/mach-exynos4/irq-eint.c b/arch/arm/mach-exynos4/irq-eint.c
index 9d87d2ac7f68..badb8c66fc9b 100644
--- a/arch/arm/mach-exynos4/irq-eint.c
+++ b/arch/arm/mach-exynos4/irq-eint.c
@@ -23,6 +23,8 @@
23 23
24#include <mach/regs-gpio.h> 24#include <mach/regs-gpio.h>
25 25
26#include <asm/mach/irq.h>
27
26static DEFINE_SPINLOCK(eint_lock); 28static DEFINE_SPINLOCK(eint_lock);
27 29
28static unsigned int eint0_15_data[16]; 30static unsigned int eint0_15_data[16];
@@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)
184 186
185static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) 187static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
186{ 188{
189 struct irq_chip *chip = irq_get_chip(irq);
190 chained_irq_enter(chip, desc);
187 exynos4_irq_demux_eint(IRQ_EINT(16)); 191 exynos4_irq_demux_eint(IRQ_EINT(16));
188 exynos4_irq_demux_eint(IRQ_EINT(24)); 192 exynos4_irq_demux_eint(IRQ_EINT(24));
193 chained_irq_exit(chip, desc);
189} 194}
190 195
191static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) 196static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
@@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
193 u32 *irq_data = irq_get_handler_data(irq); 198 u32 *irq_data = irq_get_handler_data(irq);
194 struct irq_chip *chip = irq_get_chip(irq); 199 struct irq_chip *chip = irq_get_chip(irq);
195 200
201 chained_irq_enter(chip, desc);
196 chip->irq_mask(&desc->irq_data); 202 chip->irq_mask(&desc->irq_data);
197 203
198 if (chip->irq_ack) 204 if (chip->irq_ack)
@@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
201 generic_handle_irq(*irq_data); 207 generic_handle_irq(*irq_data);
202 208
203 chip->irq_unmask(&desc->irq_data); 209 chip->irq_unmask(&desc->irq_data);
210 chained_irq_exit(chip, desc);
204} 211}
205 212
206int __init exynos4_init_irq_eint(void) 213int __init exynos4_init_irq_eint(void)
diff --git a/arch/arm/mach-exynos4/mach-universal_c210.c b/arch/arm/mach-exynos4/mach-universal_c210.c
index 0e280d12301e..b3b5d8911004 100644
--- a/arch/arm/mach-exynos4/mach-universal_c210.c
+++ b/arch/arm/mach-exynos4/mach-universal_c210.c
@@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
79}; 79};
80 80
81static struct regulator_consumer_supply max8952_consumer = 81static struct regulator_consumer_supply max8952_consumer =
82 REGULATOR_SUPPLY("vddarm", NULL); 82 REGULATOR_SUPPLY("vdd_arm", NULL);
83 83
84static struct max8952_platform_data universal_max8952_pdata __initdata = { 84static struct max8952_platform_data universal_max8952_pdata __initdata = {
85 .gpio_vid0 = EXYNOS4_GPX0(3), 85 .gpio_vid0 = EXYNOS4_GPX0(3),
@@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
105}; 105};
106 106
107static struct regulator_consumer_supply lp3974_buck1_consumer = 107static struct regulator_consumer_supply lp3974_buck1_consumer =
108 REGULATOR_SUPPLY("vddint", NULL); 108 REGULATOR_SUPPLY("vdd_int", NULL);
109 109
110static struct regulator_consumer_supply lp3974_buck2_consumer = 110static struct regulator_consumer_supply lp3974_buck2_consumer =
111 REGULATOR_SUPPLY("vddg3d", NULL); 111 REGULATOR_SUPPLY("vddg3d", NULL);
diff --git a/arch/arm/mach-exynos4/mct.c b/arch/arm/mach-exynos4/mct.c
index 1ae059b7ad7b..ddd86864fb83 100644
--- a/arch/arm/mach-exynos4/mct.c
+++ b/arch/arm/mach-exynos4/mct.c
@@ -132,12 +132,18 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
132 return ((cycle_t)hi << 32) | lo; 132 return ((cycle_t)hi << 32) | lo;
133} 133}
134 134
135static void exynos4_frc_resume(struct clocksource *cs)
136{
137 exynos4_mct_frc_start(0, 0);
138}
139
135struct clocksource mct_frc = { 140struct clocksource mct_frc = {
136 .name = "mct-frc", 141 .name = "mct-frc",
137 .rating = 400, 142 .rating = 400,
138 .read = exynos4_frc_read, 143 .read = exynos4_frc_read,
139 .mask = CLOCKSOURCE_MASK(64), 144 .mask = CLOCKSOURCE_MASK(64),
140 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 145 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
146 .resume = exynos4_frc_resume,
141}; 147};
142 148
143static void __init exynos4_clocksource_init(void) 149static void __init exynos4_clocksource_init(void)
@@ -389,9 +395,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
389} 395}
390 396
391/* Setup the local clock events for a CPU */ 397/* Setup the local clock events for a CPU */
392void __cpuinit local_timer_setup(struct clock_event_device *evt) 398int __cpuinit local_timer_setup(struct clock_event_device *evt)
393{ 399{
394 exynos4_mct_tick_init(evt); 400 exynos4_mct_tick_init(evt);
401
402 return 0;
395} 403}
396 404
397int local_timer_ack(void) 405int local_timer_ack(void)
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index 7c2282c6ba81..df6ef1b2f98b 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -106,6 +106,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
106 */ 106 */
107 spin_lock(&boot_lock); 107 spin_lock(&boot_lock);
108 spin_unlock(&boot_lock); 108 spin_unlock(&boot_lock);
109
110 set_cpu_online(cpu, true);
109} 111}
110 112
111int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 113int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
diff --git a/arch/arm/mach-exynos4/setup-keypad.c b/arch/arm/mach-exynos4/setup-keypad.c
index 1ee0ebff111f..7862bfb5933d 100644
--- a/arch/arm/mach-exynos4/setup-keypad.c
+++ b/arch/arm/mach-exynos4/setup-keypad.c
@@ -19,15 +19,16 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
19 19
20 if (rows > 8) { 20 if (rows > 8) {
21 /* Set all the necessary GPX2 pins: KP_ROW[0~7] */ 21 /* Set all the necessary GPX2 pins: KP_ROW[0~7] */
22 s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3)); 22 s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3),
23 S3C_GPIO_PULL_UP);
23 24
24 /* Set all the necessary GPX3 pins: KP_ROW[8~] */ 25 /* Set all the necessary GPX3 pins: KP_ROW[8~] */
25 s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8), 26 s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8),
26 S3C_GPIO_SFN(3)); 27 S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
27 } else { 28 } else {
28 /* Set all the necessary GPX2 pins: KP_ROW[x] */ 29 /* Set all the necessary GPX2 pins: KP_ROW[x] */
29 s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows, 30 s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3),
30 S3C_GPIO_SFN(3)); 31 S3C_GPIO_PULL_UP);
31 } 32 }
32 33
33 /* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */ 34 /* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
diff --git a/arch/arm/mach-exynos4/setup-usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b824b9..39aca045f660 100644
--- a/arch/arm/mach-exynos4/setup-usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
@@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
82 82
83 rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK); 83 rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
84 writel(rstcon, EXYNOS4_RSTCON); 84 writel(rstcon, EXYNOS4_RSTCON);
85 udelay(50); 85 udelay(80);
86 86
87 clk_disable(otg_clk); 87 clk_disable(otg_clk);
88 clk_put(otg_clk); 88 clk_put(otg_clk);
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index dc26fff22cf0..c8e7afcf14ec 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
62config ARCH_NETWINDER 62config ARCH_NETWINDER
63 bool "NetWinder" 63 bool "NetWinder"
64 select CLKSRC_I8253 64 select CLKSRC_I8253
65 select CLKEVT_I8253
65 select FOOTBRIDGE_HOST 66 select FOOTBRIDGE_HOST
66 select ISA 67 select ISA
67 select ISA_DMA 68 select ISA_DMA
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 1331fff51ae2..18c32a5541d9 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,6 +18,7 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <video/vga.h>
21 22
22#include <asm/irq.h> 23#include <asm/irq.h>
23#include <asm/system.h> 24#include <asm/system.h>
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 87887ac5806b..f851fe903687 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
310 .init = eukrea_cpuimx27_timer_init, 310 .init = eukrea_cpuimx27_timer_init,
311}; 311};
312 312
313MACHINE_START(CPUIMX27, "EUKREA CPUIMX27") 313MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
314 .boot_params = MX27_PHYS_OFFSET + 0x100, 314 .boot_params = MX27_PHYS_OFFSET + 0x100,
315 .map_io = mx27_map_io, 315 .map_io = mx27_map_io,
316 .init_early = imx27_init_early, 316 .init_early = imx27_init_early,
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index f39a478ba1a6..4bd083ba9af2 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
192 .init = eukrea_cpuimx35_timer_init, 192 .init = eukrea_cpuimx35_timer_init,
193}; 193};
194 194
195MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35") 195MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
196 /* Maintainer: Eukrea Electromatique */ 196 /* Maintainer: Eukrea Electromatique */
197 .boot_params = MX3x_PHYS_OFFSET + 0x100, 197 .boot_params = MX3x_PHYS_OFFSET + 0x100,
198 .map_io = mx35_map_io, 198 .map_io = mx35_map_io,
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index da36da52969d..2442d5da883d 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
161 .init = eukrea_cpuimx25_timer_init, 161 .init = eukrea_cpuimx25_timer_init,
162}; 162};
163 163
164MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25") 164MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
165 /* Maintainer: Eukrea Electromatique */ 165 /* Maintainer: Eukrea Electromatique */
166 .boot_params = MX25_PHYS_OFFSET + 0x100, 166 .boot_params = MX25_PHYS_OFFSET + 0x100,
167 .map_io = mx25_map_io, 167 .map_io = mx25_map_io,
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2fbbdd5eac35..8cdc730dcb3a 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -32,6 +32,7 @@
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/mtd/physmap.h> 34#include <linux/mtd/physmap.h>
35#include <video/vga.h>
35 36
36#include <mach/hardware.h> 37#include <mach/hardware.h>
37#include <mach/platform.h> 38#include <mach/platform.h>
@@ -154,6 +155,7 @@ static struct map_desc ap_io_desc[] __initdata = {
154static void __init ap_map_io(void) 155static void __init ap_map_io(void)
155{ 156{
156 iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc)); 157 iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc));
158 vga_base = PCI_MEMORY_VADDR;
157} 159}
158 160
159#define INTEGRATOR_SC_VALID_INT 0x003fffff 161#define INTEGRATOR_SC_VALID_INT 0x003fffff
@@ -337,15 +339,15 @@ static unsigned long timer_reload;
337static void integrator_clocksource_init(u32 khz) 339static void integrator_clocksource_init(u32 khz)
338{ 340{
339 void __iomem *base = (void __iomem *)TIMER2_VA_BASE; 341 void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
340 u32 ctrl = TIMER_CTRL_ENABLE; 342 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
341 343
342 if (khz >= 1500) { 344 if (khz >= 1500) {
343 khz /= 16; 345 khz /= 16;
344 ctrl = TIMER_CTRL_DIV16; 346 ctrl |= TIMER_CTRL_DIV16;
345 } 347 }
346 348
347 writel(ctrl, base + TIMER_CTRL);
348 writel(0xffff, base + TIMER_LOAD); 349 writel(0xffff, base + TIMER_LOAD);
350 writel(ctrl, base + TIMER_CTRL);
349 351
350 clocksource_mmio_init(base + TIMER_VALUE, "timer2", 352 clocksource_mmio_init(base + TIMER_VALUE, "timer2",
351 khz * 1000, 200, 16, clocksource_mmio_readl_down); 353 khz * 1000, 200, 16, clocksource_mmio_readl_down);
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index dd56bfb351e3..11b86e5b71c2 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -27,7 +27,6 @@
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <video/vga.h>
31 30
32#include <mach/hardware.h> 31#include <mach/hardware.h>
33#include <mach/platform.h> 32#include <mach/platform.h>
@@ -505,7 +504,6 @@ void __init pci_v3_preinit(void)
505 504
506 pcibios_min_io = 0x6000; 505 pcibios_min_io = 0x6000;
507 pcibios_min_mem = 0x00100000; 506 pcibios_min_mem = 0x00100000;
508 vga_base = PCI_MEMORY_VADDR;
509 507
510 /* 508 /*
511 * Hook in our fault handler for PCI errors 509 * Hook in our fault handler for PCI errors
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index ffd55b1c4396..b9b844683147 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = {
3078 .name = "gpt12_fck", 3078 .name = "gpt12_fck",
3079 .ops = &clkops_null, 3079 .ops = &clkops_null,
3080 .parent = &secure_32k_fck, 3080 .parent = &secure_32k_fck,
3081 .clkdm_name = "wkup_clkdm",
3081 .recalc = &followparent_recalc, 3082 .recalc = &followparent_recalc,
3082}; 3083};
3083 3084
@@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = {
3085 .name = "wdt1_fck", 3086 .name = "wdt1_fck",
3086 .ops = &clkops_null, 3087 .ops = &clkops_null,
3087 .parent = &secure_32k_fck, 3088 .parent = &secure_32k_fck,
3089 .clkdm_name = "wkup_clkdm",
3088 .recalc = &followparent_recalc, 3090 .recalc = &followparent_recalc,
3089}; 3091};
3090 3092
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2af0e3f00ce1..c0b6fbda3408 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void)
3376 } else if (cpu_is_omap446x()) { 3376 } else if (cpu_is_omap446x()) {
3377 cpu_mask = RATE_IN_4460; 3377 cpu_mask = RATE_IN_4460;
3378 cpu_clkflg = CK_446X; 3378 cpu_clkflg = CK_446X;
3379 } else {
3380 return 0;
3379 } 3381 }
3380 3382
3381 clk_init(&omap2_clk_functions); 3383 clk_init(&omap2_clk_functions);
3382 omap2_clk_disable_clkdm_control(); 3384
3385 /*
3386 * Must stay commented until all OMAP SoC drivers are
3387 * converted to runtime PM, or drivers may start crashing
3388 *
3389 * omap2_clk_disable_clkdm_control();
3390 */
3383 3391
3384 for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks); 3392 for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
3385 c++) 3393 c++)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index ab7db083f97f..8f0890685d7b 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm)
747 spin_lock_irqsave(&clkdm->lock, flags); 747 spin_lock_irqsave(&clkdm->lock, flags);
748 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; 748 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
749 ret = arch_clkdm->clkdm_wakeup(clkdm); 749 ret = arch_clkdm->clkdm_wakeup(clkdm);
750 ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
750 spin_unlock_irqrestore(&clkdm->lock, flags); 751 spin_unlock_irqrestore(&clkdm->lock, flags);
751 return ret; 752 return ret;
752} 753}
@@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
818 spin_lock_irqsave(&clkdm->lock, flags); 819 spin_lock_irqsave(&clkdm->lock, flags);
819 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; 820 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
820 arch_clkdm->clkdm_deny_idle(clkdm); 821 arch_clkdm->clkdm_deny_idle(clkdm);
822 pwrdm_state_switch(clkdm->pwrdm.ptr);
821 spin_unlock_irqrestore(&clkdm->lock, flags); 823 spin_unlock_irqrestore(&clkdm->lock, flags);
822} 824}
823 825
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 16743c7d6e8e..408193d8e044 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
192 .pa_end = OMAP243X_HS_BASE + SZ_4K - 1, 192 .pa_end = OMAP243X_HS_BASE + SZ_4K - 1,
193 .flags = ADDR_TYPE_RT 193 .flags = ADDR_TYPE_RT
194 }, 194 },
195 { }
195}; 196};
196 197
197/* l4_core ->usbhsotg interface */ 198/* l4_core ->usbhsotg interface */
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 3feb35911a32..472bf22d5e84 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
130 } else { 130 } else {
131 hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]); 131 hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
132 clkdm_wakeup(pwrdm->pwrdm_clkdms[0]); 132 clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
133 pwrdm_wait_transition(pwrdm);
134 sleep_switch = FORCEWAKEUP_SWITCH; 133 sleep_switch = FORCEWAKEUP_SWITCH;
135 } 134 }
136 } 135 }
@@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
156 return ret; 155 return ret;
157 } 156 }
158 157
159 pwrdm_wait_transition(pwrdm);
160 pwrdm_state_switch(pwrdm); 158 pwrdm_state_switch(pwrdm);
161err: 159err:
162 return ret; 160 return ret;
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 9af08473bf10..ef71fdd40fc4 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
195 195
196/** 196/**
197 * pwrdm_init - set up the powerdomain layer 197 * pwrdm_init - set up the powerdomain layer
198 * @pwrdm_list: array of struct powerdomain pointers to register 198 * @pwrdms: array of struct powerdomain pointers to register
199 * @custom_funcs: func pointers for arch specific implementations 199 * @custom_funcs: func pointers for arch specific implementations
200 * 200 *
201 * Loop through the array of powerdomains @pwrdm_list, registering all 201 * Loop through the array of powerdomains @pwrdms, registering all
202 * that are available on the current CPU. If pwrdm_list is supplied 202 * that are available on the current CPU. Also, program all
203 * and not null, all of the referenced powerdomains will be 203 * powerdomain target state as ON; this is to prevent domains from
204 * registered. No return value. XXX pwrdm_list is not really a 204 * hitting low power states (if bootloader has target states set to
205 * "list"; it is an array. Rename appropriately. 205 * something other than ON) and potentially even losing context while
206 * PM is not fully initialized. The PM late init code can then program
207 * the desired target state for all the power domains. No return
208 * value.
206 */ 209 */
207void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs) 210void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs)
208{ 211{
209 struct powerdomain **p = NULL; 212 struct powerdomain **p = NULL;
213 struct powerdomain *temp_p;
210 214
211 if (!custom_funcs) 215 if (!custom_funcs)
212 WARN(1, "powerdomain: No custom pwrdm functions registered\n"); 216 WARN(1, "powerdomain: No custom pwrdm functions registered\n");
213 else 217 else
214 arch_pwrdm = custom_funcs; 218 arch_pwrdm = custom_funcs;
215 219
216 if (pwrdm_list) { 220 if (pwrdms) {
217 for (p = pwrdm_list; *p; p++) 221 for (p = pwrdms; *p; p++)
218 _pwrdm_register(*p); 222 _pwrdm_register(*p);
219 } 223 }
224
225 list_for_each_entry(temp_p, &pwrdm_list, node)
226 pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON);
220} 227}
221 228
222/** 229/**
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index a6eddae82a0b..c105556a0ee1 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
77 /* 77 /*
78 * Check for devices with hard-wired IRQs. 78 * Check for devices with hard-wired IRQs.
79 */ 79 */
80 irq = orion5x_pci_map_irq(const dev, slot, pin); 80 irq = orion5x_pci_map_irq(dev, slot, pin);
81 if (irq != -1) 81 if (irq != -1)
82 return irq; 82 return irq;
83 83
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 28b8760ab9fa..bc4a920e26ee 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/mbus.h> 16#include <linux/mbus.h>
17#include <video/vga.h>
17#include <asm/irq.h> 18#include <asm/irq.h>
18#include <asm/mach/pci.h> 19#include <asm/mach/pci.h>
19#include <plat/pcie.h> 20#include <plat/pcie.h>
diff --git a/arch/arm/mach-prima2/clock.c b/arch/arm/mach-prima2/clock.c
index f9a2aaf63f71..615a4e75ceab 100644
--- a/arch/arm/mach-prima2/clock.c
+++ b/arch/arm/mach-prima2/clock.c
@@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void)
481 481
482static struct of_device_id clkc_ids[] = { 482static struct of_device_id clkc_ids[] = {
483 { .compatible = "sirf,prima2-clkc" }, 483 { .compatible = "sirf,prima2-clkc" },
484 {},
484}; 485};
485 486
486void __init sirfsoc_of_clk_init(void) 487void __init sirfsoc_of_clk_init(void)
diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c
index c3404cbb6ff7..7af254d046ba 100644
--- a/arch/arm/mach-prima2/irq.c
+++ b/arch/arm/mach-prima2/irq.c
@@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void)
51 51
52static struct of_device_id intc_ids[] = { 52static struct of_device_id intc_ids[] = {
53 { .compatible = "sirf,prima2-intc" }, 53 { .compatible = "sirf,prima2-intc" },
54 {},
54}; 55};
55 56
56void __init sirfsoc_of_irq_init(void) 57void __init sirfsoc_of_irq_init(void)
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index d074786e83d4..492cfa8d2610 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock);
19 19
20static struct of_device_id rstc_ids[] = { 20static struct of_device_id rstc_ids[] = {
21 { .compatible = "sirf,prima2-rstc" }, 21 { .compatible = "sirf,prima2-rstc" },
22 {},
22}; 23};
23 24
24static int __init sirfsoc_of_rstc_init(void) 25static int __init sirfsoc_of_rstc_init(void)
diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c
index 44027f34a88a..ed7ec48d11da 100644
--- a/arch/arm/mach-prima2/timer.c
+++ b/arch/arm/mach-prima2/timer.c
@@ -190,6 +190,7 @@ static void __init sirfsoc_timer_init(void)
190 190
191static struct of_device_id timer_ids[] = { 191static struct of_device_id timer_ids[] = {
192 { .compatible = "sirf,prima2-tick" }, 192 { .compatible = "sirf,prima2-tick" },
193 {},
193}; 194};
194 195
195static void __init sirfsoc_of_timer_map(void) 196static void __init sirfsoc_of_timer_map(void)
diff --git a/arch/arm/mach-realview/include/mach/system.h b/arch/arm/mach-realview/include/mach/system.h
index a30f2e3ec178..6657ff231161 100644
--- a/arch/arm/mach-realview/include/mach/system.h
+++ b/arch/arm/mach-realview/include/mach/system.h
@@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
44 */ 44 */
45 if (realview_reset) 45 if (realview_reset)
46 realview_reset(mode); 46 realview_reset(mode);
47 dsb();
47} 48}
48 49
49#endif 50#endif
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index ecbea92bf83b..a9f3183e0290 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -262,45 +262,6 @@ static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = {
262 .cols = 8, 262 .cols = 8,
263}; 263};
264 264
265static int smdk6410_backlight_init(struct device *dev)
266{
267 int ret;
268
269 ret = gpio_request(S3C64XX_GPF(15), "Backlight");
270 if (ret) {
271 printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
272 return ret;
273 }
274
275 /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */
276 s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
277
278 return 0;
279}
280
281static void smdk6410_backlight_exit(struct device *dev)
282{
283 s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT);
284 gpio_free(S3C64XX_GPF(15));
285}
286
287static struct platform_pwm_backlight_data smdk6410_backlight_data = {
288 .pwm_id = 1,
289 .max_brightness = 255,
290 .dft_brightness = 255,
291 .pwm_period_ns = 78770,
292 .init = smdk6410_backlight_init,
293 .exit = smdk6410_backlight_exit,
294};
295
296static struct platform_device smdk6410_backlight_device = {
297 .name = "pwm-backlight",
298 .dev = {
299 .parent = &s3c_device_timer[1].dev,
300 .platform_data = &smdk6410_backlight_data,
301 },
302};
303
304static struct map_desc smdk6410_iodesc[] = {}; 265static struct map_desc smdk6410_iodesc[] = {};
305 266
306static struct platform_device *smdk6410_devices[] __initdata = { 267static struct platform_device *smdk6410_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 8bad64370689..055e2858b0dd 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -16,6 +16,7 @@
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <linux/serial_core.h> 17#include <linux/serial_core.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/gpio.h>
19 20
20#include <mach/map.h> 21#include <mach/map.h>
21#include <mach/irqs.h> 22#include <mach/irqs.h>
diff --git a/arch/arm/mach-s5p64x0/irq-eint.c b/arch/arm/mach-s5p64x0/irq-eint.c
index 69ed4545112b..fe7380f5c3cd 100644
--- a/arch/arm/mach-s5p64x0/irq-eint.c
+++ b/arch/arm/mach-s5p64x0/irq-eint.c
@@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
129 } 129 }
130 130
131 ct = gc->chip_types; 131 ct = gc->chip_types;
132 ct->chip.irq_ack = irq_gc_ack; 132 ct->chip.irq_ack = irq_gc_ack_set_bit;
133 ct->chip.irq_mask = irq_gc_mask_set_bit; 133 ct->chip.irq_mask = irq_gc_mask_set_bit;
134 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 134 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
135 ct->chip.irq_set_type = s5p64x0_irq_eint_set_type; 135 ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 309e388a8a83..f149d278377b 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
88 SAVE_ITEM(S3C2410_TCNTO(0)), 88 SAVE_ITEM(S3C2410_TCNTO(0)),
89}; 89};
90 90
91void s5pv210_cpu_suspend(unsigned long arg) 91static int s5pv210_cpu_suspend(unsigned long arg)
92{ 92{
93 unsigned long tmp; 93 unsigned long tmp;
94 94
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index ce5c2513c6ce..cdfdd624d21d 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -341,6 +341,7 @@ static struct platform_device mipidsi0_device = {
341static struct sh_mobile_sdhi_info sdhi0_info = { 341static struct sh_mobile_sdhi_info sdhi0_info = {
342 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 342 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
343 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 343 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
344 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
344 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 345 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
345 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, 346 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
346}; 347};
@@ -382,7 +383,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
382} 383}
383 384
384static struct sh_mobile_sdhi_info sh_sdhi1_info = { 385static struct sh_mobile_sdhi_info sh_sdhi1_info = {
385 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, 386 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
386 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, 387 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
387 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 388 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
388 .set_pwr = ag5evm_sdhi1_set_pwr, 389 .set_pwr = ag5evm_sdhi1_set_pwr,
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 9e0856b2f9e9..523f608eb8cf 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1412,6 +1412,7 @@ static void __init ap4evb_init(void)
1412 fsi_init_pm_clock(); 1412 fsi_init_pm_clock();
1413 sh7372_pm_init(); 1413 sh7372_pm_init();
1414 pm_clk_add(&fsi_device.dev, "spu2"); 1414 pm_clk_add(&fsi_device.dev, "spu2");
1415 pm_clk_add(&lcdc1_device.dev, "hdmi");
1415} 1416}
1416 1417
1417static void __init ap4evb_timer_init(void) 1418static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index d41c01f83f15..17c19dc25604 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -641,6 +641,8 @@ static struct usbhs_private usbhs0_private = {
641 }, 641 },
642 .driver_param = { 642 .driver_param = {
643 .buswait_bwait = 4, 643 .buswait_bwait = 4,
644 .d0_tx_id = SHDMA_SLAVE_USB0_TX,
645 .d1_rx_id = SHDMA_SLAVE_USB0_RX,
644 }, 646 },
645 }, 647 },
646}; 648};
@@ -810,6 +812,8 @@ static struct usbhs_private usbhs1_private = {
810 .buswait_bwait = 4, 812 .buswait_bwait = 4,
811 .pipe_type = usbhs1_pipe_cfg, 813 .pipe_type = usbhs1_pipe_cfg,
812 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg), 814 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
815 .d0_tx_id = SHDMA_SLAVE_USB1_TX,
816 .d1_rx_id = SHDMA_SLAVE_USB1_RX,
813 }, 817 },
814 }, 818 },
815}; 819};
@@ -1588,6 +1592,7 @@ static void __init mackerel_init(void)
1588 hdmi_init_pm_clock(); 1592 hdmi_init_pm_clock();
1589 sh7372_pm_init(); 1593 sh7372_pm_init();
1590 pm_clk_add(&fsi_device.dev, "spu2"); 1594 pm_clk_add(&fsi_device.dev, "spu2");
1595 pm_clk_add(&hdmi_lcdc_device.dev, "hdmi");
1591} 1596}
1592 1597
1593static void __init mackerel_timer_init(void) 1598static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 6b1619a65dba..66975921e646 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -503,16 +503,17 @@ static struct clk *late_main_clks[] = {
503 &sh7372_fsidivb_clk, 503 &sh7372_fsidivb_clk,
504}; 504};
505 505
506enum { MSTP001, 506enum { MSTP001, MSTP000,
507 MSTP131, MSTP130, 507 MSTP131, MSTP130,
508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
509 MSTP118, MSTP117, MSTP116, MSTP113, 509 MSTP118, MSTP117, MSTP116, MSTP113,
510 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
511 MSTP223, 511 MSTP223,
512 MSTP218, MSTP217, MSTP216, 512 MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
513 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 513 MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
514 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 514 MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
516 MSTP405, MSTP404, MSTP403, MSTP400,
516 MSTP_NR }; 517 MSTP_NR };
517 518
518#define MSTP(_parent, _reg, _bit, _flags) \ 519#define MSTP(_parent, _reg, _bit, _flags) \
@@ -520,6 +521,7 @@ enum { MSTP001,
520 521
521static struct clk mstp_clks[MSTP_NR] = { 522static struct clk mstp_clks[MSTP_NR] = {
522 [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */ 523 [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */
524 [MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */
523 [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */ 525 [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */
524 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ 526 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
525 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ 527 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
@@ -538,14 +540,16 @@ static struct clk mstp_clks[MSTP_NR] = {
538 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ 540 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
539 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ 541 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
540 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ 542 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
543 [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
544 [MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */
541 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ 545 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
542 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ 546 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
547 [MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */
543 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ 548 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
544 [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ 549 [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
545 [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ 550 [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
546 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ 551 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
547 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ 552 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
548 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
549 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ 553 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
550 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 554 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
551 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ 555 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
@@ -557,8 +561,12 @@ static struct clk mstp_clks[MSTP_NR] = {
557 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */ 561 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
558 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */ 562 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
559 [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */ 563 [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */
564 [MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */
560 [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */ 565 [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */
566 [MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */
567 [MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */
561 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ 568 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
569 [MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */
562}; 570};
563 571
564static struct clk_lookup lookups[] = { 572static struct clk_lookup lookups[] = {
@@ -609,6 +617,7 @@ static struct clk_lookup lookups[] = {
609 617
610 /* MSTP32 clocks */ 618 /* MSTP32 clocks */
611 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */ 619 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
620 CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */
612 CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */ 621 CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */
613 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ 622 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
614 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ 623 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
@@ -629,14 +638,16 @@ static struct clk_lookup lookups[] = {
629 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */ 638 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
630 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */ 639 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
631 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */ 640 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
641 CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */
642 CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */
632 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 643 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
633 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ 644 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
645 CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */
634 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 646 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
635 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ 647 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
636 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ 648 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
637 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ 649 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
638 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ 650 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
639 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
640 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */ 651 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
641 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 652 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
642 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ 653 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
@@ -650,11 +661,17 @@ static struct clk_lookup lookups[] = {
650 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */ 661 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
651 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */ 662 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
652 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */ 663 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
664 CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */
653 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */ 665 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
654 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */ 666 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
655 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */ 667 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
668 CLKDEV_DEV_ID("sh_cmt.4", &mstp_clks[MSTP405]), /* CMT4 */
669 CLKDEV_DEV_ID("sh_cmt.3", &mstp_clks[MSTP404]), /* CMT3 */
656 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 670 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
671 CLKDEV_DEV_ID("sh_cmt.2", &mstp_clks[MSTP400]), /* CMT2 */
657 672
673 CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1",
674 &div6_reparent_clks[DIV6_HDMI]),
658 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 675 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
659 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), 676 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
660 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), 677 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 6db2ccabc2bf..61a846bb30f2 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -365,7 +365,7 @@ void __init sh73a0_clock_init(void)
365 __raw_writel(0x108, SD2CKCR); 365 __raw_writel(0x108, SD2CKCR);
366 366
367 /* detect main clock parent */ 367 /* detect main clock parent */
368 switch ((__raw_readl(CKSCR) >> 24) & 0x03) { 368 switch ((__raw_readl(CKSCR) >> 28) & 0x03) {
369 case 0: 369 case 0:
370 main_clk.parent = &sh73a0_extal1_clk; 370 main_clk.parent = &sh73a0_extal1_clk;
371 break; 371 break;
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index ce595cee86cd..24e63a85e669 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -459,6 +459,10 @@ enum {
459 SHDMA_SLAVE_SDHI2_TX, 459 SHDMA_SLAVE_SDHI2_TX,
460 SHDMA_SLAVE_MMCIF_RX, 460 SHDMA_SLAVE_MMCIF_RX,
461 SHDMA_SLAVE_MMCIF_TX, 461 SHDMA_SLAVE_MMCIF_TX,
462 SHDMA_SLAVE_USB0_TX,
463 SHDMA_SLAVE_USB0_RX,
464 SHDMA_SLAVE_USB1_TX,
465 SHDMA_SLAVE_USB1_RX,
462}; 466};
463 467
464extern struct clk sh7372_extal1_clk; 468extern struct clk sh7372_extal1_clk;
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 3b28743c77eb..739315e30eb9 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -379,7 +379,7 @@ enum {
379 /* BBIF2 */ 379 /* BBIF2 */
380 VPU, 380 VPU,
381 TSIF1, 381 TSIF1,
382 _3DG_SGX530, 382 /* 3DG */
383 _2DDMAC, 383 _2DDMAC,
384 IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2, 384 IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
385 IPMMU_IPMMUR, IPMMU_IPMMUR2, 385 IPMMU_IPMMUR, IPMMU_IPMMUR2,
@@ -436,7 +436,7 @@ static struct intc_vect intcs_vectors[] = {
436 /* BBIF2 */ 436 /* BBIF2 */
437 INTCS_VECT(VPU, 0x980), 437 INTCS_VECT(VPU, 0x980),
438 INTCS_VECT(TSIF1, 0x9a0), 438 INTCS_VECT(TSIF1, 0x9a0),
439 INTCS_VECT(_3DG_SGX530, 0x9e0), 439 /* 3DG */
440 INTCS_VECT(_2DDMAC, 0xa00), 440 INTCS_VECT(_2DDMAC, 0xa00),
441 INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0), 441 INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
442 INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0), 442 INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),
@@ -521,7 +521,7 @@ static struct intc_mask_reg intcs_mask_registers[] = {
521 RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } }, 521 RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
522 { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */ 522 { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
523 { 0, 0, MSIOF, 0, 523 { 0, 0, MSIOF, 0,
524 _3DG_SGX530, 0, 0, 0 } }, 524 0, 0, 0, 0 } },
525 { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */ 525 { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
526 { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0, 526 { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
527 0, 0, 0, 0 } }, 527 0, 0, 0, 0 } },
@@ -561,7 +561,6 @@ static struct intc_prio_reg intcs_prio_registers[] = {
561 TMU_TUNI2, TSIF1 } }, 561 TMU_TUNI2, TSIF1 } },
562 { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } }, 562 { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } },
563 { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } }, 563 { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } },
564 { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX530, 0, 0 } },
565 { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } }, 564 { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } },
566 { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } }, 565 { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } },
567 { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } }, 566 { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 79f0413d8725..2d9b1b1a2538 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -169,35 +169,35 @@ static struct platform_device scif6_device = {
169}; 169};
170 170
171/* CMT */ 171/* CMT */
172static struct sh_timer_config cmt10_platform_data = { 172static struct sh_timer_config cmt2_platform_data = {
173 .name = "CMT10", 173 .name = "CMT2",
174 .channel_offset = 0x10, 174 .channel_offset = 0x40,
175 .timer_bit = 0, 175 .timer_bit = 5,
176 .clockevent_rating = 125, 176 .clockevent_rating = 125,
177 .clocksource_rating = 125, 177 .clocksource_rating = 125,
178}; 178};
179 179
180static struct resource cmt10_resources[] = { 180static struct resource cmt2_resources[] = {
181 [0] = { 181 [0] = {
182 .name = "CMT10", 182 .name = "CMT2",
183 .start = 0xe6138010, 183 .start = 0xe6130040,
184 .end = 0xe613801b, 184 .end = 0xe613004b,
185 .flags = IORESOURCE_MEM, 185 .flags = IORESOURCE_MEM,
186 }, 186 },
187 [1] = { 187 [1] = {
188 .start = evt2irq(0x0b00), /* CMT1_CMT10 */ 188 .start = evt2irq(0x0b80), /* CMT2 */
189 .flags = IORESOURCE_IRQ, 189 .flags = IORESOURCE_IRQ,
190 }, 190 },
191}; 191};
192 192
193static struct platform_device cmt10_device = { 193static struct platform_device cmt2_device = {
194 .name = "sh_cmt", 194 .name = "sh_cmt",
195 .id = 10, 195 .id = 2,
196 .dev = { 196 .dev = {
197 .platform_data = &cmt10_platform_data, 197 .platform_data = &cmt2_platform_data,
198 }, 198 },
199 .resource = cmt10_resources, 199 .resource = cmt2_resources,
200 .num_resources = ARRAY_SIZE(cmt10_resources), 200 .num_resources = ARRAY_SIZE(cmt2_resources),
201}; 201};
202 202
203/* TMU */ 203/* TMU */
@@ -602,6 +602,150 @@ static struct platform_device dma2_device = {
602 }, 602 },
603}; 603};
604 604
605/*
606 * USB-DMAC
607 */
608
609unsigned int usbts_shift[] = {3, 4, 5};
610
611enum {
612 XMIT_SZ_8BYTE = 0,
613 XMIT_SZ_16BYTE = 1,
614 XMIT_SZ_32BYTE = 2,
615};
616
617#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
618
619static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
620 {
621 .offset = 0,
622 }, {
623 .offset = 0x20,
624 },
625};
626
627/* USB DMAC0 */
628static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
629 {
630 .slave_id = SHDMA_SLAVE_USB0_TX,
631 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
632 }, {
633 .slave_id = SHDMA_SLAVE_USB0_RX,
634 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
635 },
636};
637
638static struct sh_dmae_pdata usb_dma0_platform_data = {
639 .slave = sh7372_usb_dmae0_slaves,
640 .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves),
641 .channel = sh7372_usb_dmae_channels,
642 .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
643 .ts_low_shift = 6,
644 .ts_low_mask = 0xc0,
645 .ts_high_shift = 0,
646 .ts_high_mask = 0,
647 .ts_shift = usbts_shift,
648 .ts_shift_num = ARRAY_SIZE(usbts_shift),
649 .dmaor_init = DMAOR_DME,
650 .chcr_offset = 0x14,
651 .chcr_ie_bit = 1 << 5,
652 .dmaor_is_32bit = 1,
653 .needs_tend_set = 1,
654 .no_dmars = 1,
655};
656
657static struct resource sh7372_usb_dmae0_resources[] = {
658 {
659 /* Channel registers and DMAOR */
660 .start = 0xe68a0020,
661 .end = 0xe68a0064 - 1,
662 .flags = IORESOURCE_MEM,
663 },
664 {
665 /* VCR/SWR/DMICR */
666 .start = 0xe68a0000,
667 .end = 0xe68a0014 - 1,
668 .flags = IORESOURCE_MEM,
669 },
670 {
671 /* IRQ for channels */
672 .start = evt2irq(0x0a00),
673 .end = evt2irq(0x0a00),
674 .flags = IORESOURCE_IRQ,
675 },
676};
677
678static struct platform_device usb_dma0_device = {
679 .name = "sh-dma-engine",
680 .id = 3,
681 .resource = sh7372_usb_dmae0_resources,
682 .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources),
683 .dev = {
684 .platform_data = &usb_dma0_platform_data,
685 },
686};
687
688/* USB DMAC1 */
689static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
690 {
691 .slave_id = SHDMA_SLAVE_USB1_TX,
692 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
693 }, {
694 .slave_id = SHDMA_SLAVE_USB1_RX,
695 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
696 },
697};
698
699static struct sh_dmae_pdata usb_dma1_platform_data = {
700 .slave = sh7372_usb_dmae1_slaves,
701 .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves),
702 .channel = sh7372_usb_dmae_channels,
703 .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
704 .ts_low_shift = 6,
705 .ts_low_mask = 0xc0,
706 .ts_high_shift = 0,
707 .ts_high_mask = 0,
708 .ts_shift = usbts_shift,
709 .ts_shift_num = ARRAY_SIZE(usbts_shift),
710 .dmaor_init = DMAOR_DME,
711 .chcr_offset = 0x14,
712 .chcr_ie_bit = 1 << 5,
713 .dmaor_is_32bit = 1,
714 .needs_tend_set = 1,
715 .no_dmars = 1,
716};
717
718static struct resource sh7372_usb_dmae1_resources[] = {
719 {
720 /* Channel registers and DMAOR */
721 .start = 0xe68c0020,
722 .end = 0xe68c0064 - 1,
723 .flags = IORESOURCE_MEM,
724 },
725 {
726 /* VCR/SWR/DMICR */
727 .start = 0xe68c0000,
728 .end = 0xe68c0014 - 1,
729 .flags = IORESOURCE_MEM,
730 },
731 {
732 /* IRQ for channels */
733 .start = evt2irq(0x1d00),
734 .end = evt2irq(0x1d00),
735 .flags = IORESOURCE_IRQ,
736 },
737};
738
739static struct platform_device usb_dma1_device = {
740 .name = "sh-dma-engine",
741 .id = 4,
742 .resource = sh7372_usb_dmae1_resources,
743 .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources),
744 .dev = {
745 .platform_data = &usb_dma1_platform_data,
746 },
747};
748
605/* VPU */ 749/* VPU */
606static struct uio_info vpu_platform_data = { 750static struct uio_info vpu_platform_data = {
607 .name = "VPU5HG", 751 .name = "VPU5HG",
@@ -818,7 +962,7 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
818 &scif4_device, 962 &scif4_device,
819 &scif5_device, 963 &scif5_device,
820 &scif6_device, 964 &scif6_device,
821 &cmt10_device, 965 &cmt2_device,
822 &tmu00_device, 966 &tmu00_device,
823 &tmu01_device, 967 &tmu01_device,
824}; 968};
@@ -829,6 +973,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
829 &dma0_device, 973 &dma0_device,
830 &dma1_device, 974 &dma1_device,
831 &dma2_device, 975 &dma2_device,
976 &usb_dma0_device,
977 &usb_dma1_device,
832 &vpu_device, 978 &vpu_device,
833 &veu0_device, 979 &veu0_device,
834 &veu1_device, 980 &veu1_device,
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 9e6b93b1a043..d0d267a8d3f9 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -318,6 +318,10 @@ static struct clk v2m_sp804_clk = {
318 .rate = 1000000, 318 .rate = 1000000,
319}; 319};
320 320
321static struct clk v2m_ref_clk = {
322 .rate = 32768,
323};
324
321static struct clk dummy_apb_pclk; 325static struct clk dummy_apb_pclk;
322 326
323static struct clk_lookup v2m_lookups[] = { 327static struct clk_lookup v2m_lookups[] = {
@@ -348,6 +352,9 @@ static struct clk_lookup v2m_lookups[] = {
348 }, { /* CLCD */ 352 }, { /* CLCD */
349 .dev_id = "mb:clcd", 353 .dev_id = "mb:clcd",
350 .clk = &osc1_clk, 354 .clk = &osc1_clk,
355 }, { /* SP805 WDT */
356 .dev_id = "mb:wdt",
357 .clk = &v2m_ref_clk,
351 }, { /* SP804 timers */ 358 }, { /* SP804 timers */
352 .dev_id = "sp804", 359 .dev_id = "sp804",
353 .con_id = "v2m-timer0", 360 .con_id = "v2m-timer0",
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 52162d59407a..2cbf68ef0e83 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -17,7 +17,7 @@
17 cmp \tmp, # 0x5600 @ Is it ldrsb? 17 cmp \tmp, # 0x5600 @ Is it ldrsb?
18 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes 18 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
19 tst \tmp, #1 << 11 @ L = 0 -> write 19 tst \tmp, #1 << 11 @ L = 0 -> write
20 orreq \psr, \psr, #1 << 11 @ yes. 20 orreq \fsr, \fsr, #1 << 11 @ yes.
21 b do_DataAbort 21 b do_DataAbort
22not_thumb: 22not_thumb:
23 .endm 23 .endm
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2b..9ecfdb511951 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -277,6 +277,25 @@ static void l2x0_disable(void)
277 spin_unlock_irqrestore(&l2x0_lock, flags); 277 spin_unlock_irqrestore(&l2x0_lock, flags);
278} 278}
279 279
280static void __init l2x0_unlock(__u32 cache_id)
281{
282 int lockregs;
283 int i;
284
285 if (cache_id == L2X0_CACHE_ID_PART_L310)
286 lockregs = 8;
287 else
288 /* L210 and unknown types */
289 lockregs = 1;
290
291 for (i = 0; i < lockregs; i++) {
292 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
293 i * L2X0_LOCKDOWN_STRIDE);
294 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
295 i * L2X0_LOCKDOWN_STRIDE);
296 }
297}
298
280void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 299void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
281{ 300{
282 __u32 aux; 301 __u32 aux;
@@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
328 * accessing the below registers will fault. 347 * accessing the below registers will fault.
329 */ 348 */
330 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 349 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
350 /* Make sure that I&D is not locked down when starting */
351 l2x0_unlock(cache_id);
331 352
332 /* l2x0 controller is disabled */ 353 /* l2x0 controller is disabled */
333 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); 354 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 91bca355cd31..cc7e2d8be9aa 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
298#ifdef CONFIG_HAVE_ARCH_PFN_VALID 298#ifdef CONFIG_HAVE_ARCH_PFN_VALID
299int pfn_valid(unsigned long pfn) 299int pfn_valid(unsigned long pfn)
300{ 300{
301 return memblock_is_memory(pfn << PAGE_SHIFT); 301 return memblock_is_memory(__pfn_to_phys(pfn));
302} 302}
303EXPORT_SYMBOL(pfn_valid); 303EXPORT_SYMBOL(pfn_valid);
304#endif 304#endif
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 92bd102e3982..2e6849b41f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
379 379
380/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 380/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
381.globl cpu_arm920_suspend_size 381.globl cpu_arm920_suspend_size
382.equ cpu_arm920_suspend_size, 4 * 3 382.equ cpu_arm920_suspend_size, 4 * 4
383#ifdef CONFIG_PM_SLEEP 383#ifdef CONFIG_PM_SLEEP
384ENTRY(cpu_arm920_do_suspend) 384ENTRY(cpu_arm920_do_suspend)
385 stmfd sp!, {r4 - r7, lr} 385 stmfd sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 2bbcf053dffd..cd8f79c3a282 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
394 394
395/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 395/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
396.globl cpu_arm926_suspend_size 396.globl cpu_arm926_suspend_size
397.equ cpu_arm926_suspend_size, 4 * 3 397.equ cpu_arm926_suspend_size, 4 * 4
398#ifdef CONFIG_PM_SLEEP 398#ifdef CONFIG_PM_SLEEP
399ENTRY(cpu_arm926_do_suspend) 399ENTRY(cpu_arm926_do_suspend)
400 stmfd sp!, {r4 - r7, lr} 400 stmfd sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 07219c2ae114..69e7f2ef7384 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
182 182
183ENTRY(cpu_sa1100_do_resume) 183ENTRY(cpu_sa1100_do_resume)
184 ldmia r0, {r4 - r7} @ load cp regs 184 ldmia r0, {r4 - r7} @ load cp regs
185 mov r1, #0 185 mov ip, #0
186 mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs 186 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
187 mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache 187 mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache
188 mcr p15, 0, r1, c9, c0, 0 @ invalidate RB 188 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
189 mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB 189 mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
190 190
191 mcr p15, 0, r4, c3, c0, 0 @ domain ID 191 mcr p15, 0, r4, c3, c0, 0 @ domain ID
192 mcr p15, 0, r5, c2, c0, 0 @ translation table base addr 192 mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 219138d2f158..a923aa0fd00d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -223,6 +223,22 @@ __v6_setup:
223 mrc p15, 0, r0, c1, c0, 0 @ read control register 223 mrc p15, 0, r0, c1, c0, 0 @ read control register
224 bic r0, r0, r5 @ clear bits them 224 bic r0, r0, r5 @ clear bits them
225 orr r0, r0, r6 @ set them 225 orr r0, r0, r6 @ set them
226#ifdef CONFIG_ARM_ERRATA_364296
227 /*
228 * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
229 * corruption with hit-under-miss enabled). The conditional code below
230 * (setting the undocumented bit 31 in the auxiliary control register
231 * and the FI bit in the control register) disables hit-under-miss
232 * without putting the processor into full low interrupt latency mode.
233 */
234 ldr r6, =0x4107b362 @ id for ARM1136 r0p2
235 mrc p15, 0, r5, c0, c0, 0 @ get processor id
236 teq r5, r6 @ check for the faulty core
237 mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg
238 orreq r5, r5, #(1 << 31) @ set the undocumented bit 31
239 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
240 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
241#endif
226 mov pc, lr @ return to head.S:__ret 242 mov pc, lr @ return to head.S:__ret
227 243
228 /* 244 /*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a30e78542ccf..9049c0764db2 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
66ENTRY(cpu_v7_reset) 66ENTRY(cpu_v7_reset)
67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
68 bic r1, r1, #0x1 @ ...............m 68 bic r1, r1, #0x1 @ ...............m
69 THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
69 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 70 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
70 isb 71 isb
71 mov pc, r0 72 mov pc, r0
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
247 mcr p15, 0, r7, c2, c0, 0 @ TTB 0 248 mcr p15, 0, r7, c2, c0, 0 @ TTB 0
248 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 249 mcr p15, 0, r8, c2, c0, 1 @ TTB 1
249 mcr p15, 0, ip, c2, c0, 2 @ TTB control register 250 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
250 mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register 251 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
252 teq r4, r10 @ Is it already set?
253 mcrne p15, 0, r10, c1, c0, 1 @ No, so write it
251 mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control 254 mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
252 ldr r4, =PRRR @ PRRR 255 ldr r4, =PRRR @ PRRR
253 ldr r5, =NMRR @ NMRR 256 ldr r5, =NMRR @ NMRR
254 mcr p15, 0, r4, c10, c2, 0 @ write PRRR 257 mcr p15, 0, r4, c10, c2, 0 @ write PRRR
255 mcr p15, 0, r5, c10, c2, 1 @ write NMRR 258 mcr p15, 0, r5, c10, c2, 1 @ write NMRR
256 isb 259 isb
260 dsb
257 mov r0, r9 @ control register 261 mov r0, r9 @ control register
258 mov r2, r7, lsr #14 @ get TTB0 base 262 mov r2, r7, lsr #14 @ get TTB0 base
259 mov r2, r2, lsl #14 263 mov r2, r2, lsl #14
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 28c72a2006a1..755e1bf22681 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
406 .align 406 .align
407 407
408.globl cpu_xsc3_suspend_size 408.globl cpu_xsc3_suspend_size
409.equ cpu_xsc3_suspend_size, 4 * 8 409.equ cpu_xsc3_suspend_size, 4 * 7
410#ifdef CONFIG_PM_SLEEP 410#ifdef CONFIG_PM_SLEEP
411ENTRY(cpu_xsc3_do_suspend) 411ENTRY(cpu_xsc3_do_suspend)
412 stmfd sp!, {r4 - r10, lr} 412 stmfd sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
418 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg 418 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg
419 mrc p15, 0, r10, c1, c0, 0 @ control reg 419 mrc p15, 0, r10, c1, c0, 0 @ control reg
420 bic r4, r4, #2 @ clear frequency change bit 420 bic r4, r4, #2 @ clear frequency change bit
421 stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs 421 stmia r0, {r4 - r10} @ store cp regs
422 ldmia sp!, {r4 - r10, pc} 422 ldmia sp!, {r4 - r10, pc}
423ENDPROC(cpu_xsc3_do_suspend) 423ENDPROC(cpu_xsc3_do_suspend)
424 424
425ENTRY(cpu_xsc3_do_resume) 425ENTRY(cpu_xsc3_do_resume)
426 ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs 426 ldmia r0, {r4 - r10} @ load cp regs
427 mov ip, #0 427 mov ip, #0
428 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB 428 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
429 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer 429 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index b6b409744954..02609eee0562 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -615,6 +615,9 @@ static int _od_resume_noirq(struct device *dev)
615 615
616 return pm_generic_resume_noirq(dev); 616 return pm_generic_resume_noirq(dev);
617} 617}
618#else
619#define _od_suspend_noirq NULL
620#define _od_resume_noirq NULL
618#endif 621#endif
619 622
620static struct dev_pm_domain omap_device_pm_domain = { 623static struct dev_pm_domain omap_device_pm_domain = {
@@ -622,7 +625,8 @@ static struct dev_pm_domain omap_device_pm_domain = {
622 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 625 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
623 _od_runtime_idle) 626 _od_runtime_idle)
624 USE_PLATFORM_PM_SLEEP_OPS 627 USE_PLATFORM_PM_SLEEP_OPS
625 SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) 628 .suspend_noirq = _od_suspend_noirq,
629 .resume_noirq = _od_resume_noirq,
626 } 630 }
627}; 631};
628 632
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 02af235298e2..5f84a3f13ef9 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -192,7 +192,7 @@ unsigned long s5p_spdif_get_rate(struct clk *clk)
192 if (IS_ERR(pclk)) 192 if (IS_ERR(pclk))
193 return -EINVAL; 193 return -EINVAL;
194 194
195 rate = pclk->ops->get_rate(clk); 195 rate = pclk->ops->get_rate(pclk);
196 clk_put(pclk); 196 clk_put(pclk);
197 197
198 return rate; 198 return rate;
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 327ab9f662e8..f71078ef6bb5 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -23,6 +23,8 @@
23#include <plat/gpio-core.h> 23#include <plat/gpio-core.h>
24#include <plat/gpio-cfg.h> 24#include <plat/gpio-cfg.h>
25 25
26#include <asm/mach/irq.h>
27
26#define GPIO_BASE(chip) (((unsigned long)(chip)->base) & 0xFFFFF000u) 28#define GPIO_BASE(chip) (((unsigned long)(chip)->base) & 0xFFFFF000u)
27 29
28#define CON_OFFSET 0x700 30#define CON_OFFSET 0x700
@@ -81,6 +83,9 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
81 int group, pend_offset, mask_offset; 83 int group, pend_offset, mask_offset;
82 unsigned int pend, mask; 84 unsigned int pend, mask;
83 85
86 struct irq_chip *chip = irq_get_chip(irq);
87 chained_irq_enter(chip, desc);
88
84 for (group = 0; group < bank->nr_groups; group++) { 89 for (group = 0; group < bank->nr_groups; group++) {
85 struct s3c_gpio_chip *chip = bank->chips[group]; 90 struct s3c_gpio_chip *chip = bank->chips[group];
86 if (!chip) 91 if (!chip)
@@ -102,6 +107,7 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
102 pend &= ~BIT(offset); 107 pend &= ~BIT(offset);
103 } 108 }
104 } 109 }
110 chained_irq_exit(chip, desc);
105} 111}
106 112
107static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) 113static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 302c42670bd1..3b4451979d1b 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -64,6 +64,17 @@ static LIST_HEAD(clocks);
64 */ 64 */
65DEFINE_SPINLOCK(clocks_lock); 65DEFINE_SPINLOCK(clocks_lock);
66 66
67/* Global watchdog clock used by arch_wdt_reset() callback */
68struct clk *s3c2410_wdtclk;
69static int __init s3c_wdt_reset_init(void)
70{
71 s3c2410_wdtclk = clk_get(NULL, "watchdog");
72 if (IS_ERR(s3c2410_wdtclk))
73 printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
74 return 0;
75}
76arch_initcall(s3c_wdt_reset_init);
77
67/* enable and disable calls for use with the clk struct */ 78/* enable and disable calls for use with the clk struct */
68 79
69static int clk_null_enable(struct clk *clk, int enable) 80static int clk_null_enable(struct clk *clk, int enable)
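
The clock.c addition above resolves the watchdog clock once at arch_initcall time and caches it in a global, so later users (including the reset helper in watchdog-reset.h further down) do not have to call clk_get() themselves. A minimal sketch of the pattern with illustrative names; normalising the error value to NULL is a sketch choice, not something the patch itself does:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    struct clk *my_wdtclk;                          /* NULL until the initcall has run */

    static int __init my_wdtclk_init(void)
    {
            my_wdtclk = clk_get(NULL, "watchdog");
            if (IS_ERR(my_wdtclk)) {
                    pr_warn("%s: cannot get watchdog clock\n", __func__);
                    my_wdtclk = NULL;               /* let users test the pointer directly */
            }
            return 0;
    }
    arch_initcall(my_wdtclk_init);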
diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/plat-samsung/include/plat/backlight.h
index 51d8da846a62..ad530c78fe8c 100644
--- a/arch/arm/plat-samsung/include/plat/backlight.h
+++ b/arch/arm/plat-samsung/include/plat/backlight.h
@@ -20,7 +20,7 @@ struct samsung_bl_gpio_info {
20 int func; 20 int func;
21}; 21};
22 22
23extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, 23extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
24 struct platform_pwm_backlight_data *bl_data); 24 struct platform_pwm_backlight_data *bl_data);
25 25
26#endif /* __ASM_PLAT_BACKLIGHT_H */ 26#endif /* __ASM_PLAT_BACKLIGHT_H */
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 87d5b38a86fb..73c66d4d10fa 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -9,6 +9,9 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10*/ 10*/
11 11
12#ifndef __ASM_PLAT_CLOCK_H
13#define __ASM_PLAT_CLOCK_H __FILE__
14
12#include <linux/spinlock.h> 15#include <linux/spinlock.h>
13#include <linux/clkdev.h> 16#include <linux/clkdev.h>
14 17
@@ -121,3 +124,8 @@ extern int s3c64xx_sclk_ctrl(struct clk *clk, int enable);
121 124
122extern void s3c_pwmclk_init(void); 125extern void s3c_pwmclk_init(void);
123 126
127/* Global watchdog clock used by arch_wdt_reset() callback */
128
129extern struct clk *s3c2410_wdtclk;
130
131#endif /* __ASM_PLAT_CLOCK_H */
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index 54b762acb5a0..40dbb2b0ae22 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11*/ 11*/
12 12
13#include <plat/clock.h>
13#include <plat/regs-watchdog.h> 14#include <plat/regs-watchdog.h>
14#include <mach/map.h> 15#include <mach/map.h>
15 16
@@ -19,17 +20,12 @@
19 20
20static inline void arch_wdt_reset(void) 21static inline void arch_wdt_reset(void)
21{ 22{
22 struct clk *wdtclk;
23
24 printk("arch_reset: attempting watchdog reset\n"); 23 printk("arch_reset: attempting watchdog reset\n");
25 24
26 __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */ 25 __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
27 26
28 wdtclk = clk_get(NULL, "watchdog"); 27 if (s3c2410_wdtclk)
29 if (!IS_ERR(wdtclk)) { 28 clk_enable(s3c2410_wdtclk);
30 clk_enable(wdtclk);
31 } else
32 printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
33 29
34 /* put initial values into count and data */ 30 /* put initial values into count and data */
35 __raw_writel(0x80, S3C2410_WTCNT); 31 __raw_writel(0x80, S3C2410_WTCNT);
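
The watchdog-reset.h hunk above drops the clk_get() lookup from the reset path and uses the clock cached by the initcall instead, so the helper can run from restart/panic context without doing a fresh clock lookup. A sketch of the resulting sequence; the final write that actually arms the watchdog sits below this excerpt and is only hinted at here:

    static inline void my_wdt_reset(void)
    {
            __raw_writel(0, S3C2410_WTCON);         /* stop the watchdog before reprogramming */

            if (my_wdtclk)                          /* cached at arch_initcall time, see the sketch above */
                    clk_enable(my_wdtclk);

            __raw_writel(0x80, S3C2410_WTCNT);      /* very short count ... */
            __raw_writel(0x80, S3C2410_WTDAT);
            /* ... then re-enable the watchdog with reset-on-expiry so the SoC reboots */
    }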
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index f714d060370d..51583cd30164 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -22,9 +22,14 @@
22#include <plat/irq-vic-timer.h> 22#include <plat/irq-vic-timer.h>
23#include <plat/regs-timer.h> 23#include <plat/regs-timer.h>
24 24
25#include <asm/mach/irq.h>
26
25static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc) 27static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
26{ 28{
29 struct irq_chip *chip = irq_get_chip(irq);
30 chained_irq_enter(chip, desc);
27 generic_handle_irq((int)desc->irq_data.handler_data); 31 generic_handle_irq((int)desc->irq_data.handler_data);
32 chained_irq_exit(chip, desc);
28} 33}
29 34
30/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */ 35/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fff68d0d521b..62cc8f981171 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -351,7 +351,7 @@ centro MACH_CENTRO CENTRO 1944
351nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955 351nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
352omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967 352omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
353cpuat9260 MACH_CPUAT9260 CPUAT9260 1973 353cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
354eukrea_cpuimx27 MACH_CPUIMX27 CPUIMX27 1975 354eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
355acs5k MACH_ACS5K ACS5K 1982 355acs5k MACH_ACS5K ACS5K 1982
356snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987 356snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
357dsm320 MACH_DSM320 DSM320 1988 357dsm320 MACH_DSM320 DSM320 1988
@@ -476,8 +476,8 @@ cns3420vb MACH_CNS3420VB CNS3420VB 2776
476omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791 476omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
477ti8168evm MACH_TI8168EVM TI8168EVM 2800 477ti8168evm MACH_TI8168EVM TI8168EVM 2800
478teton_bga MACH_TETON_BGA TETON_BGA 2816 478teton_bga MACH_TETON_BGA TETON_BGA 2816
479eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25 EUKREA_CPUIMX25 2820 479eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25SD EUKREA_CPUIMX25SD 2820
480eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35 EUKREA_CPUIMX35 2821 480eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
481eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822 481eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
482eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823 482eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
483smdkc210 MACH_SMDKC210 SMDKC210 2838 483smdkc210 MACH_SMDKC210 SMDKC210 2838
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index c7fd394d28a4..6eba53530d1c 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -158,7 +158,7 @@ sys_call_table:
158 .long sys_sched_rr_get_interval 158 .long sys_sched_rr_get_interval
159 .long sys_nanosleep 159 .long sys_nanosleep
160 .long sys_poll 160 .long sys_poll
161 .long sys_nfsservctl /* 145 */ 161 .long sys_ni_syscall /* 145 was nfsservctl */
162 .long sys_setresgid 162 .long sys_setresgid
163 .long sys_getresgid 163 .long sys_getresgid
164 .long sys_prctl 164 .long sys_prctl
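
This and the following syscall-table hunks all retire nfsservctl the same way: the slot keeps its number (145 here on avr32, other values elsewhere), so every later syscall keeps its ABI position, and the entry is pointed at sys_ni_syscall. The number is deliberately not reused, which is why the comments keep naming the old syscall. For reference, the stub those slots now resolve to is essentially just:

    /* kernel/sys_ni.c, in essence: the catch-all for removed or unimplemented syscalls */
    asmlinkage long sys_ni_syscall(void)
    {
            return -ENOSYS;                 /* userspace sees errno == ENOSYS */
    }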
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 225d311c9701..e4137297b790 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1543,7 +1543,7 @@ ENTRY(_sys_call_table)
1543 .long _sys_ni_syscall /* for vm86 */ 1543 .long _sys_ni_syscall /* for vm86 */
1544 .long _sys_ni_syscall /* old "query_module" */ 1544 .long _sys_ni_syscall /* old "query_module" */
1545 .long _sys_ni_syscall /* sys_poll */ 1545 .long _sys_ni_syscall /* sys_poll */
1546 .long _sys_nfsservctl 1546 .long _sys_ni_syscall /* old nfsservctl */
1547 .long _sys_setresgid /* setresgid16 */ /* 170 */ 1547 .long _sys_setresgid /* setresgid16 */ /* 170 */
1548 .long _sys_getresgid /* getresgid16 */ 1548 .long _sys_getresgid /* getresgid16 */
1549 .long _sys_prctl 1549 .long _sys_prctl
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 1161883eb582..592fbe9dfb62 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -771,7 +771,7 @@ sys_call_table:
771 .long sys_ni_syscall /* sys_vm86 */ 771 .long sys_ni_syscall /* sys_vm86 */
772 .long sys_ni_syscall /* Old sys_query_module */ 772 .long sys_ni_syscall /* Old sys_query_module */
773 .long sys_poll 773 .long sys_poll
774 .long sys_nfsservctl 774 .long sys_ni_syscall /* old nfsservctl */
775 .long sys_setresgid16 /* 170 */ 775 .long sys_setresgid16 /* 170 */
776 .long sys_getresgid16 776 .long sys_getresgid16
777 .long sys_prctl 777 .long sys_prctl
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 84fed7e91ada..c3ea4694fbaf 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -714,7 +714,7 @@ sys_call_table:
714 .long sys_ni_syscall /* sys_vm86 */ 714 .long sys_ni_syscall /* sys_vm86 */
715 .long sys_ni_syscall /* Old sys_query_module */ 715 .long sys_ni_syscall /* Old sys_query_module */
716 .long sys_poll 716 .long sys_poll
717 .long sys_nfsservctl 717 .long sys_ni_syscall /* Old nfsservctl */
718 .long sys_setresgid16 /* 170 */ 718 .long sys_setresgid16 /* 170 */
719 .long sys_getresgid16 719 .long sys_getresgid16
720 .long sys_prctl 720 .long sys_prctl
diff --git a/arch/cris/include/asm/serial.h b/arch/cris/include/asm/serial.h
new file mode 100644
index 000000000000..af7535a955fb
--- /dev/null
+++ b/arch/cris/include/asm/serial.h
@@ -0,0 +1,9 @@
1#ifndef _ASM_SERIAL_H
2#define _ASM_SERIAL_H
3
4/*
5 * This assumes you have a 1.8432 MHz clock for your UART.
6 */
7#define BASE_BAUD (1843200 / 16)
8
9#endif /* _ASM_SERIAL_H */
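
The new cris asm/serial.h follows the convention used by other architectures: BASE_BAUD is the UART input clock divided by the 16x oversampling factor, so the 1.8432 MHz crystal assumed in the header gives the familiar 115200 maximum standard rate. As a quick check:

    #define UART_CLK_HZ   1843200                  /* 1.8432 MHz crystal assumed by the header */
    #define BASE_BAUD     (UART_CLK_HZ / 16)       /* 1843200 / 16 = 115200 */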
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 017d6d7b784f..5ba23f715ea5 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1358,7 +1358,7 @@ sys_call_table:
1358 .long sys_ni_syscall /* for vm86 */ 1358 .long sys_ni_syscall /* for vm86 */
1359 .long sys_ni_syscall /* Old sys_query_module */ 1359 .long sys_ni_syscall /* Old sys_query_module */
1360 .long sys_poll 1360 .long sys_poll
1361 .long sys_nfsservctl 1361 .long sys_ni_syscall /* Old nfsservctl */
1362 .long sys_setresgid16 /* 170 */ 1362 .long sys_setresgid16 /* 170 */
1363 .long sys_getresgid16 1363 .long sys_getresgid16
1364 .long sys_prctl 1364 .long sys_prctl
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index f4b2e67bcc34..4be2ea2fbe26 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -183,7 +183,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
183 .long SYMBOL_NAME(sys_ni_syscall) /* for vm86 */ 183 .long SYMBOL_NAME(sys_ni_syscall) /* for vm86 */
184 .long SYMBOL_NAME(sys_ni_syscall) /* sys_query_module */ 184 .long SYMBOL_NAME(sys_ni_syscall) /* sys_query_module */
185 .long SYMBOL_NAME(sys_poll) 185 .long SYMBOL_NAME(sys_poll)
186 .long SYMBOL_NAME(sys_nfsservctl) 186 .long SYMBOL_NAME(sys_ni_syscall) /* old nfsservctl */
187 .long SYMBOL_NAME(sys_setresgid16) /* 170 */ 187 .long SYMBOL_NAME(sys_setresgid16) /* 170 */
188 .long SYMBOL_NAME(sys_getresgid16) 188 .long SYMBOL_NAME(sys_getresgid16)
189 .long SYMBOL_NAME(sys_prctl) 189 .long SYMBOL_NAME(sys_prctl)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 124854714958..3ff7785b3beb 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -162,7 +162,6 @@ config IA64_GENERIC
162 select ACPI_NUMA 162 select ACPI_NUMA
163 select SWIOTLB 163 select SWIOTLB
164 select PCI_MSI 164 select PCI_MSI
165 select DMAR
166 help 165 help
167 This selects the system type of your hardware. A "generic" kernel 166 This selects the system type of your hardware. A "generic" kernel
168 will run on any supported IA-64 system. However, if you configure 167 will run on any supported IA-64 system. However, if you configure
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 1d7bca0a396d..0e5cd1405e0e 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,3 +234,4 @@ CONFIG_CRYPTO_MD5=y
234# CONFIG_CRYPTO_ANSI_CPRNG is not set 234# CONFIG_CRYPTO_ANSI_CPRNG is not set
235CONFIG_CRC_T10DIF=y 235CONFIG_CRC_T10DIF=y
236CONFIG_MISC_DEVICES=y 236CONFIG_MISC_DEVICES=y
237CONFIG_DMAR=y
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 97dd2abdeb1a..198c753d1006 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1614,7 +1614,7 @@ sys_call_table:
1614 data8 sys_sched_get_priority_min 1614 data8 sys_sched_get_priority_min
1615 data8 sys_sched_rr_get_interval 1615 data8 sys_sched_rr_get_interval
1616 data8 sys_nanosleep 1616 data8 sys_nanosleep
1617 data8 sys_nfsservctl 1617 data8 sys_ni_syscall // old nfsservctl
1618 data8 sys_prctl // 1170 1618 data8 sys_prctl // 1170
1619 data8 sys_getpagesize 1619 data8 sys_getpagesize
1620 data8 sys_mmap2 1620 data8 sys_mmap2
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 528f2e6ad064..f365c19795ef 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -168,7 +168,7 @@ ENTRY(sys_call_table)
168 .long sys_tas /* vm86 syscall holder */ 168 .long sys_tas /* vm86 syscall holder */
169 .long sys_ni_syscall /* query_module syscall holder */ 169 .long sys_ni_syscall /* query_module syscall holder */
170 .long sys_poll 170 .long sys_poll
171 .long sys_nfsservctl 171 .long sys_ni_syscall /* was nfsservctl */
172 .long sys_setresgid /* 170 */ 172 .long sys_setresgid /* 170 */
173 .long sys_getresgid 173 .long sys_getresgid
174 .long sys_prctl 174 .long sys_prctl
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 31d5570d6567..89f201434b5a 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -162,7 +162,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
162 pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ 162 pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
163}) 163})
164#define page_to_pfn(_page) ({ \ 164#define page_to_pfn(_page) ({ \
165 struct page *__p = (_page); \ 165 const struct page *__p = (_page); \
166 struct pglist_data *pgdat; \ 166 struct pglist_data *pgdat; \
167 pgdat = &pg_data_map[page_to_nid(__p)]; \ 167 pgdat = &pg_data_map[page_to_nid(__p)]; \
168 ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ 168 ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 00d1452f9571..c468f2edaa85 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -189,7 +189,7 @@ ENTRY(sys_call_table)
189 .long sys_getpagesize 189 .long sys_getpagesize
190 .long sys_ni_syscall /* old "query_module" */ 190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll 191 .long sys_poll
192 .long sys_nfsservctl 192 .long sys_ni_syscall /* old nfsservctl */
193 .long sys_setresgid16 /* 170 */ 193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16 194 .long sys_getresgid16
195 .long sys_prctl 195 .long sys_prctl
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index d915a122c865..8789daa2a346 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -173,7 +173,7 @@ ENTRY(sys_call_table)
173 .long sys_ni_syscall /* sys_vm86 */ 173 .long sys_ni_syscall /* sys_vm86 */
174 .long sys_ni_syscall /* Old sys_query_module */ 174 .long sys_ni_syscall /* Old sys_query_module */
175 .long sys_poll 175 .long sys_poll
176 .long sys_nfsservctl 176 .long sys_ni_syscall /* old nfsservctl */
177 .long sys_setresgid /* 170 */ 177 .long sys_setresgid /* 170 */
178 .long sys_getresgid 178 .long sys_getresgid
179 .long sys_prctl 179 .long sys_prctl
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e521420a45a5..865bc7a6f5a1 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -424,7 +424,7 @@ einval: li v0, -ENOSYS
424 sys sys_getresuid 3 424 sys sys_getresuid 3
425 sys sys_ni_syscall 0 /* was sys_query_module */ 425 sys sys_ni_syscall 0 /* was sys_query_module */
426 sys sys_poll 3 426 sys sys_poll 3
427 sys sys_nfsservctl 3 427 sys sys_ni_syscall 0 /* was nfsservctl */
428 sys sys_setresgid 3 /* 4190 */ 428 sys sys_setresgid 3 /* 4190 */
429 sys sys_getresgid 3 429 sys sys_getresgid 3
430 sys sys_prctl 5 430 sys sys_prctl 5
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 85874d6a8a70..fb7334bea731 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -299,7 +299,7 @@ sys_call_table:
299 PTR sys_ni_syscall /* 5170, was get_kernel_syms */ 299 PTR sys_ni_syscall /* 5170, was get_kernel_syms */
300 PTR sys_ni_syscall /* was query_module */ 300 PTR sys_ni_syscall /* was query_module */
301 PTR sys_quotactl 301 PTR sys_quotactl
302 PTR sys_nfsservctl 302 PTR sys_ni_syscall /* was nfsservctl */
303 PTR sys_ni_syscall /* res. for getpmsg */ 303 PTR sys_ni_syscall /* res. for getpmsg */
304 PTR sys_ni_syscall /* 5175 for putpmsg */ 304 PTR sys_ni_syscall /* 5175 for putpmsg */
305 PTR sys_ni_syscall /* res. for afs_syscall */ 305 PTR sys_ni_syscall /* res. for afs_syscall */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index b85842fc87ae..f9296e894e46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -294,7 +294,7 @@ EXPORT(sysn32_call_table)
294 PTR sys_ni_syscall /* 6170, was get_kernel_syms */ 294 PTR sys_ni_syscall /* 6170, was get_kernel_syms */
295 PTR sys_ni_syscall /* was query_module */ 295 PTR sys_ni_syscall /* was query_module */
296 PTR sys_quotactl 296 PTR sys_quotactl
297 PTR compat_sys_nfsservctl 297 PTR sys_ni_syscall /* was nfsservctl */
298 PTR sys_ni_syscall /* res. for getpmsg */ 298 PTR sys_ni_syscall /* res. for getpmsg */
299 PTR sys_ni_syscall /* 6175 for putpmsg */ 299 PTR sys_ni_syscall /* 6175 for putpmsg */
300 PTR sys_ni_syscall /* res. for afs_syscall */ 300 PTR sys_ni_syscall /* res. for afs_syscall */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 46c4763edf21..4d7c9827706f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -392,7 +392,7 @@ sys_call_table:
392 PTR sys_getresuid 392 PTR sys_getresuid
393 PTR sys_ni_syscall /* was query_module */ 393 PTR sys_ni_syscall /* was query_module */
394 PTR sys_poll 394 PTR sys_poll
395 PTR compat_sys_nfsservctl 395 PTR sys_ni_syscall /* was nfsservctl */
396 PTR sys_setresgid /* 4190 */ 396 PTR sys_setresgid /* 4190 */
397 PTR sys_getresgid 397 PTR sys_getresgid
398 PTR sys_prctl 398 PTR sys_prctl
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index ae435e1d5669..3e3620d9fc45 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -589,7 +589,7 @@ ENTRY(sys_call_table)
589 .long sys_ni_syscall /* vm86 */ 589 .long sys_ni_syscall /* vm86 */
590 .long sys_ni_syscall /* Old sys_query_module */ 590 .long sys_ni_syscall /* Old sys_query_module */
591 .long sys_poll 591 .long sys_poll
592 .long sys_nfsservctl 592 .long sys_ni_syscall /* was nfsservctl */
593 .long sys_setresgid16 /* 170 */ 593 .long sys_setresgid16 /* 170 */
594 .long sys_getresgid16 594 .long sys_getresgid16
595 .long sys_prctl 595 .long sys_prctl
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 052f877b52a5..60b472233900 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -31,7 +31,6 @@
31 31
32#define DMA_ERROR_CODE (~(dma_addr_t)0x0) 32#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
33 33
34int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
35 34
36#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 35#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
37#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 36#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
47void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, 46void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
48 size_t size, enum dma_data_direction dir, 47 size_t size, enum dma_data_direction dir,
49 struct dma_attrs *attrs); 48 struct dma_attrs *attrs);
49int or1k_map_sg(struct device *dev, struct scatterlist *sg,
50 int nents, enum dma_data_direction dir,
51 struct dma_attrs *attrs);
52void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
53 int nents, enum dma_data_direction dir,
54 struct dma_attrs *attrs);
50void or1k_sync_single_for_cpu(struct device *dev, 55void or1k_sync_single_for_cpu(struct device *dev,
51 dma_addr_t dma_handle, size_t size, 56 dma_addr_t dma_handle, size_t size,
52 enum dma_data_direction dir); 57 enum dma_data_direction dir);
@@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
98 debug_dma_unmap_page(dev, addr, size, dir, true); 103 debug_dma_unmap_page(dev, addr, size, dir, true);
99} 104}
100 105
106static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
107 int nents, enum dma_data_direction dir)
108{
109 int i, ents;
110 struct scatterlist *s;
111
112 for_each_sg(sg, s, nents, i)
113 kmemcheck_mark_initialized(sg_virt(s), s->length);
114 BUG_ON(!valid_dma_direction(dir));
115 ents = or1k_map_sg(dev, sg, nents, dir, NULL);
116 debug_dma_map_sg(dev, sg, nents, ents, dir);
117
118 return ents;
119}
120
121static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
122 int nents, enum dma_data_direction dir)
123{
124 BUG_ON(!valid_dma_direction(dir));
125 debug_dma_unmap_sg(dev, sg, nents, dir);
126 or1k_unmap_sg(dev, sg, nents, dir, NULL);
127}
128
129static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
130 size_t offset, size_t size,
131 enum dma_data_direction dir)
132{
133 dma_addr_t addr;
134
135 kmemcheck_mark_initialized(page_address(page) + offset, size);
136 BUG_ON(!valid_dma_direction(dir));
137 addr = or1k_map_page(dev, page, offset, size, dir, NULL);
138 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
139
140 return addr;
141}
142
143static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
144 size_t size, enum dma_data_direction dir)
145{
146 BUG_ON(!valid_dma_direction(dir));
147 or1k_unmap_page(dev, addr, size, dir, NULL);
148 debug_dma_unmap_page(dev, addr, size, dir, true);
149}
150
101static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, 151static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
102 size_t size, 152 size_t size,
103 enum dma_data_direction dir) 153 enum dma_data_direction dir)
@@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev,
119static inline int dma_supported(struct device *dev, u64 dma_mask) 169static inline int dma_supported(struct device *dev, u64 dma_mask)
120{ 170{
121 /* Support 32 bit DMA mask exclusively */ 171 /* Support 32 bit DMA mask exclusively */
122 return dma_mask == 0xffffffffULL; 172 return dma_mask == DMA_BIT_MASK(32);
173}
174
175static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
176{
177 return 0;
123} 178}
124 179
125static inline int dma_set_mask(struct device *dev, u64 dma_mask) 180static inline int dma_set_mask(struct device *dev, u64 dma_mask)
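
The dma-mapping.h hunk above gives openrisc the scatter-gather half of the streaming DMA API (dma_map_sg()/dma_unmap_sg()) alongside the page-based map/unmap wrappers, backed by the or1k_map_sg()/or1k_unmap_sg() implementations added in kernel/dma.c below. A hedged sketch of how a driver consumes that API; program_hw_descriptor() is a hypothetical stand-in for whatever the hardware actually needs:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int my_submit(struct device *dev, struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i, mapped;

            mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
            if (!mapped)
                    return -ENOMEM;                 /* mapping failed */

            for_each_sg(sgl, sg, mapped, i)
                    program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));     /* hypothetical */

            /* ... wait for the transfer to complete ... */

            dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* always unmap with the original nents */
            return 0;
    }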
diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h
index 54a5c50132e3..b79c2b19afbe 100644
--- a/arch/openrisc/include/asm/sigcontext.h
+++ b/arch/openrisc/include/asm/sigcontext.h
@@ -23,16 +23,11 @@
23 23
24/* This struct is saved by setup_frame in signal.c, to keep the current 24/* This struct is saved by setup_frame in signal.c, to keep the current
25 context while a signal handler is executed. It's restored by sys_sigreturn. 25 context while a signal handler is executed. It's restored by sys_sigreturn.
26
27 To keep things simple, we use pt_regs here even though normally you just
28 specify the list of regs to save. Then we can use copy_from_user on the
29 entire regs instead of a bunch of get_user's as well...
30*/ 26*/
31 27
32struct sigcontext { 28struct sigcontext {
33 struct pt_regs regs; /* needs to be first */ 29 struct user_regs_struct regs; /* needs to be first */
34 unsigned long oldmask; 30 unsigned long oldmask;
35 unsigned long usp; /* usp before stacking this gunk on it */
36}; 31};
37 32
38#endif /* __ASM_OPENRISC_SIGCONTEXT_H */ 33#endif /* __ASM_OPENRISC_SIGCONTEXT_H */
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 968d3ee477e3..f1c8ee2895d0 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
154 /* Nothing special to do here... */ 154 /* Nothing special to do here... */
155} 155}
156 156
157int or1k_map_sg(struct device *dev, struct scatterlist *sg,
158 int nents, enum dma_data_direction dir,
159 struct dma_attrs *attrs)
160{
161 struct scatterlist *s;
162 int i;
163
164 for_each_sg(sg, s, nents, i) {
165 s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
166 s->length, dir, NULL);
167 }
168
169 return nents;
170}
171
172void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
173 int nents, enum dma_data_direction dir,
174 struct dma_attrs *attrs)
175{
176 struct scatterlist *s;
177 int i;
178
179 for_each_sg(sg, s, nents, i) {
180 or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
181 }
182}
183
157void or1k_sync_single_for_cpu(struct device *dev, 184void or1k_sync_single_for_cpu(struct device *dev,
158 dma_addr_t dma_handle, size_t size, 185 dma_addr_t dma_handle, size_t size,
159 enum dma_data_direction dir) 186 enum dma_data_direction dir)
@@ -187,5 +214,4 @@ static int __init dma_init(void)
187 214
188 return 0; 215 return 0;
189} 216}
190
191fs_initcall(dma_init); 217fs_initcall(dma_init);
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 5f759c76834e..95207ab0c99e 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -52,31 +52,25 @@ struct rt_sigframe {
52static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) 52static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
53{ 53{
54 unsigned int err = 0; 54 unsigned int err = 0;
55 unsigned long old_usp;
56 55
57 /* Always make any pending restarted system call return -EINTR */ 56 /* Always make any pending restarted system call return -EINTR */
58 current_thread_info()->restart_block.fn = do_no_restart_syscall; 57 current_thread_info()->restart_block.fn = do_no_restart_syscall;
59 58
60 /* restore the regs from &sc->regs (same as sc, since regs is first) 59 /*
60 * Restore the regs from &sc->regs.
61 * (sc is already checked for VERIFY_READ since the sigframe was 61 * (sc is already checked for VERIFY_READ since the sigframe was
62 * checked in sys_sigreturn previously) 62 * checked in sys_sigreturn previously)
63 */ 63 */
64 64 if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
65 if (__copy_from_user(regs, sc, sizeof(struct pt_regs))) 65 goto badframe;
66 if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
67 goto badframe;
68 if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
66 goto badframe; 69 goto badframe;
67 70
68 /* make sure the SM-bit is cleared so user-mode cannot fool us */ 71 /* make sure the SM-bit is cleared so user-mode cannot fool us */
69 regs->sr &= ~SPR_SR_SM; 72 regs->sr &= ~SPR_SR_SM;
70 73
71 /* restore the old USP as it was before we stacked the sc etc.
72 * (we cannot just pop the sigcontext since we aligned the sp and
73 * stuff after pushing it)
74 */
75
76 err |= __get_user(old_usp, &sc->usp);
77
78 regs->sp = old_usp;
79
80 /* TODO: the other ports use regs->orig_XX to disable syscall checks 74 /* TODO: the other ports use regs->orig_XX to disable syscall checks
81 * after this completes, but we don't use that mechanism. maybe we can 75 * after this completes, but we don't use that mechanism. maybe we can
82 * use it now ? 76 * use it now ?
@@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
137 unsigned long mask) 131 unsigned long mask)
138{ 132{
139 int err = 0; 133 int err = 0;
140 unsigned long usp = regs->sp;
141 134
142 /* copy the regs. they are first in sc so we can use sc directly */ 135 /* copy the regs */
143 136
144 err |= __copy_to_user(sc, regs, sizeof(struct pt_regs)); 137 err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
138 err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
139 err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
145 140
146 /* then some other stuff */ 141 /* then some other stuff */
147 142
148 err |= __put_user(mask, &sc->oldmask); 143 err |= __put_user(mask, &sc->oldmask);
149 144
150 err |= __put_user(usp, &sc->usp);
151
152 return err; 145 return err;
153} 146}
154 147
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e66366fd2abc..3735abd7f8f6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -259,7 +259,7 @@
259 ENTRY_SAME(ni_syscall) /* query_module */ 259 ENTRY_SAME(ni_syscall) /* query_module */
260 ENTRY_SAME(poll) 260 ENTRY_SAME(poll)
261 /* structs contain pointers and an in_addr... */ 261 /* structs contain pointers and an in_addr... */
262 ENTRY_COMP(nfsservctl) 262 ENTRY_SAME(ni_syscall) /* was nfsservctl */
263 ENTRY_SAME(setresgid) /* 170 */ 263 ENTRY_SAME(setresgid) /* 170 */
264 ENTRY_SAME(getresgid) 264 ENTRY_SAME(getresgid)
265 ENTRY_SAME(prctl) 265 ENTRY_SAME(prctl)
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index bfa96aa8f2ca..d9b776740a67 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -387,7 +387,7 @@
387 #size-cells = <1>; 387 #size-cells = <1>;
388 compatible = "cfi-flash"; 388 compatible = "cfi-flash";
389 reg = <0x0 0x0 0x02000000>; 389 reg = <0x0 0x0 0x02000000>;
390 bank-width = <1>; 390 bank-width = <2>;
391 device-width = <1>; 391 device-width = <1>;
392 partition@0 { 392 partition@0 {
393 label = "ramdisk"; 393 label = "ramdisk";
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index 980ff8f61fd4..3ff5a81c709f 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -171,3 +171,4 @@ CONFIG_CRYPTO_SHA256=y
171CONFIG_CRYPTO_SHA512=y 171CONFIG_CRYPTO_SHA512=y
172CONFIG_CRYPTO_AES=y 172CONFIG_CRYPTO_AES=y
173# CONFIG_CRYPTO_ANSI_CPRNG is not set 173# CONFIG_CRYPTO_ANSI_CPRNG is not set
174CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 10562a5c65b9..4311d02a3bfd 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -185,3 +185,4 @@ CONFIG_CRYPTO_SHA256=y
185CONFIG_CRYPTO_SHA512=y 185CONFIG_CRYPTO_SHA512=y
186CONFIG_CRYPTO_AES=y 186CONFIG_CRYPTO_AES=y
187# CONFIG_CRYPTO_ANSI_CPRNG is not set 187# CONFIG_CRYPTO_ANSI_CPRNG is not set
188CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index d32283555b53..c92c204a204b 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -100,5 +100,8 @@ CONFIG_DEBUG_INFO=y
100CONFIG_SYSCTL_SYSCALL_CHECK=y 100CONFIG_SYSCTL_SYSCALL_CHECK=y
101CONFIG_VIRQ_DEBUG=y 101CONFIG_VIRQ_DEBUG=y
102CONFIG_CRYPTO_PCBC=m 102CONFIG_CRYPTO_PCBC=m
103CONFIG_CRYPTO_SHA256=y
104CONFIG_CRYPTO_SHA512=y
105CONFIG_CRYPTO_AES=y
103# CONFIG_CRYPTO_ANSI_CPRNG is not set 106# CONFIG_CRYPTO_ANSI_CPRNG is not set
104CONFIG_CRYPTO_DEV_TALITOS=y 107CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index fcd85d2c72dc..a3467bfb7671 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -139,6 +139,7 @@ CONFIG_SND=y
139CONFIG_SND_INTEL8X0=y 139CONFIG_SND_INTEL8X0=y
140# CONFIG_SND_PPC is not set 140# CONFIG_SND_PPC is not set
141# CONFIG_SND_USB is not set 141# CONFIG_SND_USB is not set
142CONFIG_SND_SOC=y
142CONFIG_HID_A4TECH=y 143CONFIG_HID_A4TECH=y
143CONFIG_HID_APPLE=y 144CONFIG_HID_APPLE=y
144CONFIG_HID_BELKIN=y 145CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 908c941fc24c..9693f6ed3da0 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -140,6 +140,7 @@ CONFIG_SND=y
140CONFIG_SND_INTEL8X0=y 140CONFIG_SND_INTEL8X0=y
141# CONFIG_SND_PPC is not set 141# CONFIG_SND_PPC is not set
142# CONFIG_SND_USB is not set 142# CONFIG_SND_USB is not set
143CONFIG_SND_SOC=y
143CONFIG_HID_A4TECH=y 144CONFIG_HID_A4TECH=y
144CONFIG_HID_APPLE=y 145CONFIG_HID_APPLE=y
145CONFIG_HID_BELKIN=y 146CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f6736b7da463..fa0d27a400de 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -171,7 +171,7 @@ SYSCALL_SPU(setresuid)
171SYSCALL_SPU(getresuid) 171SYSCALL_SPU(getresuid)
172SYSCALL(ni_syscall) 172SYSCALL(ni_syscall)
173SYSCALL_SPU(poll) 173SYSCALL_SPU(poll)
174COMPAT_SYS(nfsservctl) 174SYSCALL(ni_syscall)
175SYSCALL_SPU(setresgid) 175SYSCALL_SPU(setresgid)
176SYSCALL_SPU(getresgid) 176SYSCALL_SPU(getresgid)
177COMPAT_SYS_SPU(prctl) 177COMPAT_SYS_SPU(prctl)
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 2de8551df40f..c65f75aa7ff7 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -54,6 +54,7 @@
54#define ODSR_CLEAR 0x1c00 54#define ODSR_CLEAR 0x1c00
55#define LTLEECSR_ENABLE_ALL 0xFFC000FC 55#define LTLEECSR_ENABLE_ALL 0xFFC000FC
56#define ESCSR_CLEAR 0x07120204 56#define ESCSR_CLEAR 0x07120204
57#define IECSR_CLEAR 0x80000000
57 58
58#define RIO_PORT1_EDCSR 0x0640 59#define RIO_PORT1_EDCSR 0x0640
59#define RIO_PORT2_EDCSR 0x0680 60#define RIO_PORT2_EDCSR 0x0680
@@ -1089,11 +1090,11 @@ static void port_error_handler(struct rio_mport *port, int offset)
1089 1090
1090 if (offset == 0) { 1091 if (offset == 0) {
1091 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); 1092 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
1092 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); 1093 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR);
1093 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); 1094 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
1094 } else { 1095 } else {
1095 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); 1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); 1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR);
1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); 1098 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
1098 } 1099 }
1099} 1100}
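
The fsl_rio change above is a write-1-to-clear fix: writing 0 to the port IECSR leaves the latched error bit set, so the error handler now writes the bit itself (IECSR_CLEAR, 0x80000000) to clear it. A generic sketch of the w1c idiom; MY_ERR_W1C and the register pointer are illustrative, not the RapidIO register layout:

    #include <linux/types.h>
    #include <asm/io.h>                     /* in_be32()/out_be32(), as used by the driver above */

    #define MY_ERR_W1C 0x80000000u          /* hypothetical latched-error bit with w1c semantics */

    static void clear_latched_error(u32 __iomem *my_status)
    {
            u32 v = in_be32(my_status);

            if (v & MY_ERR_W1C)
                    out_be32(my_status, MY_ERR_W1C);        /* writing 1 clears; writing 0 is a no-op */
    }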
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 08ab9aa6a0d5..7526db6bf501 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -665,12 +665,6 @@ ENTRY(sys32_poll_wrapper)
665 lgfr %r4,%r4 # long 665 lgfr %r4,%r4 # long
666 jg sys_poll # branch to system call 666 jg sys_poll # branch to system call
667 667
668ENTRY(compat_sys_nfsservctl_wrapper)
669 lgfr %r2,%r2 # int
670 llgtr %r3,%r3 # struct compat_nfsctl_arg*
671 llgtr %r4,%r4 # union compat_nfsctl_res*
672 jg compat_sys_nfsservctl # branch to system call
673
674ENTRY(sys32_setresgid16_wrapper) 668ENTRY(sys32_setresgid16_wrapper)
675 llgfr %r2,%r2 # __kernel_old_gid_emu31_t 669 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
676 llgfr %r3,%r3 # __kernel_old_gid_emu31_t 670 llgfr %r3,%r3 # __kernel_old_gid_emu31_t
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 068f8465c4ee..f297456dba7a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -396,17 +396,19 @@ static __init void detect_machine_facilities(void)
396static __init void rescue_initrd(void) 396static __init void rescue_initrd(void)
397{ 397{
398#ifdef CONFIG_BLK_DEV_INITRD 398#ifdef CONFIG_BLK_DEV_INITRD
399 unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
399 /* 400 /*
400 * Move the initrd right behind the bss section in case it starts 401 * Just like in case of IPL from VM reader we make sure there is a
401 * within the bss section. So we don't overwrite it when the bss 402 * gap of 4MB between end of kernel and start of initrd.
402 * section gets cleared. 403 * That way we can also be sure that saving an NSS will succeed,
404 * which however only requires different segments.
403 */ 405 */
404 if (!INITRD_START || !INITRD_SIZE) 406 if (!INITRD_START || !INITRD_SIZE)
405 return; 407 return;
406 if (INITRD_START >= (unsigned long) __bss_stop) 408 if (INITRD_START >= min_initrd_addr)
407 return; 409 return;
408 memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE); 410 memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
409 INITRD_START = (unsigned long) __bss_stop; 411 INITRD_START = min_initrd_addr;
410#endif 412#endif
411} 413}
412 414
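
The rescue_initrd() change above enforces a 4 MiB gap between the end of the kernel image and the start of the initrd instead of merely pushing it past __bss_stop. Collapsed into a single condition, the logic is (a restatement of the hunk, not new behaviour):

    unsigned long min_addr = (unsigned long) _end + (4UL << 20);   /* end of kernel + 4 MiB (4194304 bytes) */

    if (INITRD_START && INITRD_SIZE && INITRD_START < min_addr) {
            memmove((void *) min_addr, (void *) INITRD_START, INITRD_SIZE);
            INITRD_START = min_addr;
    }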
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 04361d5a4279..48c710206366 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1220,7 +1220,7 @@ static int __init reipl_fcp_init(void)
1220 /* sysfs: create fcp kset for mixing attr group and bin attrs */ 1220 /* sysfs: create fcp kset for mixing attr group and bin attrs */
1221 reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, 1221 reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
1222 &reipl_kset->kobj); 1222 &reipl_kset->kobj);
1223 if (!reipl_kset) { 1223 if (!reipl_fcp_kset) {
1224 free_page((unsigned long) reipl_block_fcp); 1224 free_page((unsigned long) reipl_block_fcp);
1225 return -ENOMEM; 1225 return -ENOMEM;
1226 } 1226 }
@@ -1618,7 +1618,8 @@ static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
1618 1618
1619static void stop_run(struct shutdown_trigger *trigger) 1619static void stop_run(struct shutdown_trigger *trigger)
1620{ 1620{
1621 if (strcmp(trigger->name, ON_PANIC_STR) == 0) 1621 if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
1622 strcmp(trigger->name, ON_RESTART_STR) == 0)
1622 disabled_wait((unsigned long) __builtin_return_address(0)); 1623 disabled_wait((unsigned long) __builtin_return_address(0));
1623 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 1624 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
1624 cpu_relax(); 1625 cpu_relax();
@@ -1717,7 +1718,7 @@ static void do_panic(void)
1717/* on restart */ 1718/* on restart */
1718 1719
1719static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR, 1720static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
1720 &reipl_action}; 1721 &stop_action};
1721 1722
1722static ssize_t on_restart_show(struct kobject *kobj, 1723static ssize_t on_restart_show(struct kobject *kobj,
1723 struct kobj_attribute *attr, char *page) 1724 struct kobj_attribute *attr, char *page)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 6ee39ef8fe4a..73eb08c874fb 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -177,7 +177,7 @@ SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old get
177NI_SYSCALL /* for vm86 */ 177NI_SYSCALL /* for vm86 */
178NI_SYSCALL /* old sys_query_module */ 178NI_SYSCALL /* old sys_query_module */
179SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper) 179SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
180SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) 180NI_SYSCALL /* old nfsservctl */
181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ 181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ 182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) 183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index b97baf81a87b..2d3679b2447f 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
123struct perf_event; 123struct perf_event;
124struct perf_sample_data; 124struct perf_sample_data;
125 125
126extern void ptrace_triggered(struct perf_event *bp, int nmi, 126extern void ptrace_triggered(struct perf_event *bp,
127 struct perf_sample_data *data, struct pt_regs *regs); 127 struct perf_sample_data *data, struct pt_regs *regs);
128 128
129#define task_pt_regs(task) \ 129#define task_pt_regs(task) \
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index e915deafac89..05559295d2ca 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -15,6 +15,7 @@
15#include <linux/serial_sci.h> 15#include <linux/serial_sci.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/dma-mapping.h>
18#include <linux/sh_timer.h> 19#include <linux/sh_timer.h>
19#include <linux/sh_dma.h> 20#include <linux/sh_dma.h>
20 21
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 32114e0941ae..db4ecd731a00 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <asm/smp.h> 23#include <asm/smp.h>
24 24
25static void (*pm_idle)(void); 25void (*pm_idle)(void);
26 26
27static int hlt_counter; 27static int hlt_counter;
28 28
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 39b051de4c7c..293e39c59c00 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -185,7 +185,7 @@ ENTRY(sys_call_table)
185 .long sys_ni_syscall /* vm86 */ 185 .long sys_ni_syscall /* vm86 */
186 .long sys_ni_syscall /* old "query_module" */ 186 .long sys_ni_syscall /* old "query_module" */
187 .long sys_poll 187 .long sys_poll
188 .long sys_nfsservctl 188 .long sys_ni_syscall /* was nfsservctl */
189 .long sys_setresgid16 /* 170 */ 189 .long sys_setresgid16 /* 170 */
190 .long sys_getresgid16 190 .long sys_getresgid16
191 .long sys_prctl 191 .long sys_prctl
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 089c4d825d08..ceb34b94afa9 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -189,7 +189,7 @@ sys_call_table:
189 .long sys_ni_syscall /* vm86 */ 189 .long sys_ni_syscall /* vm86 */
190 .long sys_ni_syscall /* old "query_module" */ 190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll 191 .long sys_poll
192 .long sys_nfsservctl 192 .long sys_ni_syscall /* was nfsservctl */
193 .long sys_setresgid16 /* 170 */ 193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16 194 .long sys_getresgid16
195 .long sys_prctl 195 .long sys_prctl
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index d9006f8ffc14..7bbef95c9d1b 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -316,6 +316,35 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
316 break; 316 break;
317 } 317 }
318 break; 318 break;
319
320 case 9: /* mov.w @(disp,PC),Rn */
321 srcu = (unsigned char __user *)regs->pc;
322 srcu += 4;
323 srcu += (instruction & 0x00FF) << 1;
324 dst = (unsigned char *)rn;
325 *(unsigned long *)dst = 0;
326
327#if !defined(__LITTLE_ENDIAN__)
328 dst += 2;
329#endif
330
331 if (ma->from(dst, srcu, 2))
332 goto fetch_fault;
333 sign_extend(2, dst);
334 ret = 0;
335 break;
336
337 case 0xd: /* mov.l @(disp,PC),Rn */
338 srcu = (unsigned char __user *)(regs->pc & ~0x3);
339 srcu += 4;
340 srcu += (instruction & 0x00FF) << 2;
341 dst = (unsigned char *)rn;
342 *(unsigned long *)dst = 0;
343
344 if (ma->from(dst, srcu, 4))
345 goto fetch_fault;
346 ret = 0;
347 break;
319 } 348 }
320 return ret; 349 return ret;
321 350
@@ -466,6 +495,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
466 case 0x0500: /* mov.w @(disp,Rm),R0 */ 495 case 0x0500: /* mov.w @(disp,Rm),R0 */
467 goto simple; 496 goto simple;
468 case 0x0B00: /* bf lab - no delayslot*/ 497 case 0x0B00: /* bf lab - no delayslot*/
498 ret = 0;
469 break; 499 break;
470 case 0x0F00: /* bf/s lab */ 500 case 0x0F00: /* bf/s lab */
471 ret = handle_delayslot(regs, instruction, ma); 501 ret = handle_delayslot(regs, instruction, ma);
@@ -479,6 +509,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
479 } 509 }
480 break; 510 break;
481 case 0x0900: /* bt lab - no delayslot */ 511 case 0x0900: /* bt lab - no delayslot */
512 ret = 0;
482 break; 513 break;
483 case 0x0D00: /* bt/s lab */ 514 case 0x0D00: /* bt/s lab */
484 ret = handle_delayslot(regs, instruction, ma); 515 ret = handle_delayslot(regs, instruction, ma);
@@ -494,6 +525,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
494 } 525 }
495 break; 526 break;
496 527
528 case 0x9000: /* mov.w @(disp,Rm),Rn */
529 goto simple;
530
497 case 0xA000: /* bra label */ 531 case 0xA000: /* bra label */
498 ret = handle_delayslot(regs, instruction, ma); 532 ret = handle_delayslot(regs, instruction, ma);
499 if (ret==0) 533 if (ret==0)
@@ -507,6 +541,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
507 regs->pc += SH_PC_12BIT_OFFSET(instruction); 541 regs->pc += SH_PC_12BIT_OFFSET(instruction);
508 } 542 }
509 break; 543 break;
544
545 case 0xD000: /* mov.l @(disp,Rm),Rn */
546 goto simple;
510 } 547 }
511 return ret; 548 return ret;
512 549
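
The two new decode cases in the unaligned-access handler above cover the SH PC-relative loads. Their effective-address arithmetic, pulled out of the hunk for clarity (disp is the instruction's low byte):

    /* mov.w @(disp,PC),Rn : EA = PC + 4 + disp*2 */
    unsigned long ea_movw = regs->pc + 4 + ((instruction & 0x00FF) << 1);

    /* mov.l @(disp,PC),Rn : EA = (PC & ~3) + 4 + disp*4  (the PC is longword-aligned first) */
    unsigned long ea_movl = (regs->pc & ~0x3UL) + 4 + ((instruction & 0x00FF) << 2);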
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 42c67beadcae..1a6f20d4e7e6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -55,6 +55,7 @@ config SPARC64
55 select PERF_USE_VMALLOC 55 select PERF_USE_VMALLOC
56 select IRQ_PREFLOW_FASTEOI 56 select IRQ_PREFLOW_FASTEOI
57 select ARCH_HAVE_NMI_SAFE_CMPXCHG 57 select ARCH_HAVE_NMI_SAFE_CMPXCHG
58 select HAVE_C_RECORDMCOUNT
58 59
59config ARCH_DEFCONFIG 60config ARCH_DEFCONFIG
60 string 61 string
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h
index a1607d180354..69914d748130 100644
--- a/arch/sparc/include/asm/sigcontext.h
+++ b/arch/sparc/include/asm/sigcontext.h
@@ -45,6 +45,19 @@ typedef struct {
45 int si_mask; 45 int si_mask;
46} __siginfo32_t; 46} __siginfo32_t;
47 47
48#define __SIGC_MAXWIN 7
49
50typedef struct {
51 unsigned long locals[8];
52 unsigned long ins[8];
53} __siginfo_reg_window;
54
55typedef struct {
56 int wsaved;
57 __siginfo_reg_window reg_window[__SIGC_MAXWIN];
58 unsigned long rwbuf_stkptrs[__SIGC_MAXWIN];
59} __siginfo_rwin_t;
60
48#ifdef CONFIG_SPARC64 61#ifdef CONFIG_SPARC64
49typedef struct { 62typedef struct {
50 unsigned int si_float_regs [64]; 63 unsigned int si_float_regs [64];
@@ -73,6 +86,7 @@ struct sigcontext {
73 unsigned long ss_size; 86 unsigned long ss_size;
74 } sigc_stack; 87 } sigc_stack;
75 unsigned long sigc_mask; 88 unsigned long sigc_mask;
89 __siginfo_rwin_t * sigc_rwin_save;
76}; 90};
77 91
78#else 92#else
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 5f5b8bf3f50d..bcc98fc35281 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
131 *(volatile __u32 *)&lp->lock = ~0U; 131 *(volatile __u32 *)&lp->lock = ~0U;
132} 132}
133 133
134static void inline arch_write_unlock(arch_rwlock_t *lock)
135{
136 __asm__ __volatile__(
137" st %%g0, [%0]"
138 : /* no outputs */
139 : "r" (lock)
140 : "memory");
141}
142
134static inline int arch_write_trylock(arch_rwlock_t *rw) 143static inline int arch_write_trylock(arch_rwlock_t *rw)
135{ 144{
136 unsigned int val; 145 unsigned int val;
@@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
175 res; \ 184 res; \
176}) 185})
177 186
178#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
179
180#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 187#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
181#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) 188#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
182#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) 189#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 073936a8b275..968917694978 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -210,14 +210,8 @@ static int inline arch_write_trylock(arch_rwlock_t *lock)
210 return result; 210 return result;
211} 211}
212 212
213#define arch_read_lock(p) arch_read_lock(p)
214#define arch_read_lock_flags(p, f) arch_read_lock(p) 213#define arch_read_lock_flags(p, f) arch_read_lock(p)
215#define arch_read_trylock(p) arch_read_trylock(p)
216#define arch_read_unlock(p) arch_read_unlock(p)
217#define arch_write_lock(p) arch_write_lock(p)
218#define arch_write_lock_flags(p, f) arch_write_lock(p) 214#define arch_write_lock_flags(p, f) arch_write_lock(p)
219#define arch_write_unlock(p) arch_write_unlock(p)
220#define arch_write_trylock(p) arch_write_trylock(p)
221 215
222#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 216#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define arch_write_can_lock(rw) (!(rw)->lock) 217#define arch_write_can_lock(rw) (!(rw)->lock)
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index b90b4a1d070a..cb85458f89d2 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o
32 32
33obj-y += process_$(BITS).o 33obj-y += process_$(BITS).o
34obj-y += signal_$(BITS).o 34obj-y += signal_$(BITS).o
35obj-y += sigutil_$(BITS).o
35obj-$(CONFIG_SPARC32) += ioport.o 36obj-$(CONFIG_SPARC32) += ioport.o
36obj-y += setup_$(BITS).o 37obj-y += setup_$(BITS).o
37obj-y += idprom.o 38obj-y += idprom.o
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index 100b9c204e78..42851122bbd9 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -88,7 +88,7 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
88#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu) 88#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
89 89
90/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */ 90/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
91#define SUN4D_IPI_IRQ 14 91#define SUN4D_IPI_IRQ 13
92 92
93extern void sun4d_ipi_interrupt(void); 93extern void sun4d_ipi_interrupt(void);
94 94
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index a19f04195478..1aaf8c180be5 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -352,8 +352,8 @@ int __init pcic_probe(void)
352 strcpy(pbm->prom_name, namebuf); 352 strcpy(pbm->prom_name, namebuf);
353 353
354 { 354 {
355 extern volatile int t_nmi[1]; 355 extern volatile int t_nmi[4];
356 extern int pcic_nmi_trap_patch[1]; 356 extern int pcic_nmi_trap_patch[4];
357 357
358 t_nmi[0] = pcic_nmi_trap_patch[0]; 358 t_nmi[0] = pcic_nmi_trap_patch[0];
359 t_nmi[1] = pcic_nmi_trap_patch[1]; 359 t_nmi[1] = pcic_nmi_trap_patch[1];
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3e9daea1653d..3c5bb784214f 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -440,8 +440,14 @@ static void __init init_sparc64_elf_hwcap(void)
440 cap |= AV_SPARC_VIS; 440 cap |= AV_SPARC_VIS;
441 if (tlb_type == cheetah || tlb_type == cheetah_plus) 441 if (tlb_type == cheetah || tlb_type == cheetah_plus)
442 cap |= AV_SPARC_VIS | AV_SPARC_VIS2; 442 cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
443 if (tlb_type == cheetah_plus) 443 if (tlb_type == cheetah_plus) {
444 cap |= AV_SPARC_POPC; 444 unsigned long impl, ver;
445
446 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
447 impl = ((ver >> 32) & 0xffff);
448 if (impl == PANTHER_IMPL)
449 cap |= AV_SPARC_POPC;
450 }
445 if (tlb_type == hypervisor) { 451 if (tlb_type == hypervisor) {
446 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) 452 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
447 cap |= AV_SPARC_ASI_BLK_INIT; 453 cap |= AV_SPARC_ASI_BLK_INIT;
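
The setup_64.c change above stops advertising AV_SPARC_POPC for every cheetah_plus CPU and instead checks the implementation field of the %ver register against PANTHER_IMPL. A small sketch of that decode, assuming the usual SPARC V9 VER layout with the implementation number in bits 47:32:

    static inline unsigned long sparc64_ver_impl(void)
    {
            unsigned long ver;

            __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
            return (ver >> 32) & 0xffff;            /* implementation field */
    }

    /* usage: if (sparc64_ver_impl() == PANTHER_IMPL) cap |= AV_SPARC_POPC; */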
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 75fad425e249..1ba95aff5d59 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -29,6 +29,8 @@
29#include <asm/visasm.h> 29#include <asm/visasm.h>
30#include <asm/compat_signal.h> 30#include <asm/compat_signal.h>
31 31
32#include "sigutil.h"
33
32#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
33 35
34/* This magic should be in g_upper[0] for all upper parts 36/* This magic should be in g_upper[0] for all upper parts
@@ -44,14 +46,14 @@ typedef struct {
44struct signal_frame32 { 46struct signal_frame32 {
45 struct sparc_stackf32 ss; 47 struct sparc_stackf32 ss;
46 __siginfo32_t info; 48 __siginfo32_t info;
47 /* __siginfo_fpu32_t * */ u32 fpu_save; 49 /* __siginfo_fpu_t * */ u32 fpu_save;
48 unsigned int insns[2]; 50 unsigned int insns[2];
49 unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; 51 unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
50 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ 52 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
51 /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ 53 /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
52 siginfo_extra_v8plus_t v8plus; 54 siginfo_extra_v8plus_t v8plus;
53 __siginfo_fpu_t fpu_state; 55 /* __siginfo_rwin_t * */u32 rwin_save;
54}; 56} __attribute__((aligned(8)));
55 57
56typedef struct compat_siginfo{ 58typedef struct compat_siginfo{
57 int si_signo; 59 int si_signo;
@@ -110,18 +112,14 @@ struct rt_signal_frame32 {
110 compat_siginfo_t info; 112 compat_siginfo_t info;
111 struct pt_regs32 regs; 113 struct pt_regs32 regs;
112 compat_sigset_t mask; 114 compat_sigset_t mask;
113 /* __siginfo_fpu32_t * */ u32 fpu_save; 115 /* __siginfo_fpu_t * */ u32 fpu_save;
114 unsigned int insns[2]; 116 unsigned int insns[2];
115 stack_t32 stack; 117 stack_t32 stack;
116 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ 118 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
117 /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ 119 /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
118 siginfo_extra_v8plus_t v8plus; 120 siginfo_extra_v8plus_t v8plus;
119 __siginfo_fpu_t fpu_state; 121 /* __siginfo_rwin_t * */u32 rwin_save;
120}; 122} __attribute__((aligned(8)));
121
122/* Align macros */
123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
125 123
126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 124int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
127{ 125{
@@ -192,30 +190,13 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
192 return 0; 190 return 0;
193} 191}
194 192
195static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
196{
197 unsigned long *fpregs = current_thread_info()->fpregs;
198 unsigned long fprs;
199 int err;
200
201 err = __get_user(fprs, &fpu->si_fprs);
202 fprs_write(0);
203 regs->tstate &= ~TSTATE_PEF;
204 if (fprs & FPRS_DL)
205 err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
206 if (fprs & FPRS_DU)
207 err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
208 err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
209 err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
210 current_thread_info()->fpsaved[0] |= fprs;
211 return err;
212}
213
214void do_sigreturn32(struct pt_regs *regs) 193void do_sigreturn32(struct pt_regs *regs)
215{ 194{
216 struct signal_frame32 __user *sf; 195 struct signal_frame32 __user *sf;
196 compat_uptr_t fpu_save;
197 compat_uptr_t rwin_save;
217 unsigned int psr; 198 unsigned int psr;
218 unsigned pc, npc, fpu_save; 199 unsigned pc, npc;
219 sigset_t set; 200 sigset_t set;
220 unsigned seta[_COMPAT_NSIG_WORDS]; 201 unsigned seta[_COMPAT_NSIG_WORDS];
221 int err, i; 202 int err, i;
@@ -273,8 +254,13 @@ void do_sigreturn32(struct pt_regs *regs)
273 pt_regs_clear_syscall(regs); 254 pt_regs_clear_syscall(regs);
274 255
275 err |= __get_user(fpu_save, &sf->fpu_save); 256 err |= __get_user(fpu_save, &sf->fpu_save);
276 if (fpu_save) 257 if (!err && fpu_save)
277 err |= restore_fpu_state32(regs, &sf->fpu_state); 258 err |= restore_fpu_state(regs, compat_ptr(fpu_save));
259 err |= __get_user(rwin_save, &sf->rwin_save);
260 if (!err && rwin_save) {
261 if (restore_rwin_state(compat_ptr(rwin_save)))
262 goto segv;
263 }
278 err |= __get_user(seta[0], &sf->info.si_mask); 264 err |= __get_user(seta[0], &sf->info.si_mask);
279 err |= copy_from_user(seta+1, &sf->extramask, 265 err |= copy_from_user(seta+1, &sf->extramask,
280 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); 266 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
@@ -300,7 +286,9 @@ segv:
300asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) 286asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
301{ 287{
302 struct rt_signal_frame32 __user *sf; 288 struct rt_signal_frame32 __user *sf;
303 unsigned int psr, pc, npc, fpu_save, u_ss_sp; 289 unsigned int psr, pc, npc, u_ss_sp;
290 compat_uptr_t fpu_save;
291 compat_uptr_t rwin_save;
304 mm_segment_t old_fs; 292 mm_segment_t old_fs;
305 sigset_t set; 293 sigset_t set;
306 compat_sigset_t seta; 294 compat_sigset_t seta;
@@ -359,8 +347,8 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
359 pt_regs_clear_syscall(regs); 347 pt_regs_clear_syscall(regs);
360 348
361 err |= __get_user(fpu_save, &sf->fpu_save); 349 err |= __get_user(fpu_save, &sf->fpu_save);
362 if (fpu_save) 350 if (!err && fpu_save)
363 err |= restore_fpu_state32(regs, &sf->fpu_state); 351 err |= restore_fpu_state(regs, compat_ptr(fpu_save));
364 err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); 352 err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
365 err |= __get_user(u_ss_sp, &sf->stack.ss_sp); 353 err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
366 st.ss_sp = compat_ptr(u_ss_sp); 354 st.ss_sp = compat_ptr(u_ss_sp);
@@ -376,6 +364,12 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
376 do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); 364 do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
377 set_fs(old_fs); 365 set_fs(old_fs);
378 366
367 err |= __get_user(rwin_save, &sf->rwin_save);
368 if (!err && rwin_save) {
369 if (restore_rwin_state(compat_ptr(rwin_save)))
370 goto segv;
371 }
372
379 switch (_NSIG_WORDS) { 373 switch (_NSIG_WORDS) {
380 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); 374 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
381 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); 375 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
@@ -433,26 +427,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
433 return (void __user *) sp; 427 return (void __user *) sp;
434} 428}
435 429
436static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
437{
438 unsigned long *fpregs = current_thread_info()->fpregs;
439 unsigned long fprs;
440 int err = 0;
441
442 fprs = current_thread_info()->fpsaved[0];
443 if (fprs & FPRS_DL)
444 err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
445 (sizeof(unsigned int) * 32));
446 if (fprs & FPRS_DU)
447 err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
448 (sizeof(unsigned int) * 32));
449 err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
450 err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
451 err |= __put_user(fprs, &fpu->si_fprs);
452
453 return err;
454}
455
456/* The I-cache flush instruction only works in the primary ASI, which 430/* The I-cache flush instruction only works in the primary ASI, which
457 * right now is the nucleus, aka. kernel space. 431 * right now is the nucleus, aka. kernel space.
458 * 432 *
@@ -515,18 +489,23 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
515 int signo, sigset_t *oldset) 489 int signo, sigset_t *oldset)
516{ 490{
517 struct signal_frame32 __user *sf; 491 struct signal_frame32 __user *sf;
492 int i, err, wsaved;
493 void __user *tail;
518 int sigframe_size; 494 int sigframe_size;
519 u32 psr; 495 u32 psr;
520 int i, err;
521 unsigned int seta[_COMPAT_NSIG_WORDS]; 496 unsigned int seta[_COMPAT_NSIG_WORDS];
522 497
523 /* 1. Make sure everything is clean */ 498 /* 1. Make sure everything is clean */
524 synchronize_user_stack(); 499 synchronize_user_stack();
525 save_and_clear_fpu(); 500 save_and_clear_fpu();
526 501
527 sigframe_size = SF_ALIGNEDSZ; 502 wsaved = get_thread_wsaved();
528 if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) 503
529 sigframe_size -= sizeof(__siginfo_fpu_t); 504 sigframe_size = sizeof(*sf);
505 if (current_thread_info()->fpsaved[0] & FPRS_FEF)
506 sigframe_size += sizeof(__siginfo_fpu_t);
507 if (wsaved)
508 sigframe_size += sizeof(__siginfo_rwin_t);
530 509
531 sf = (struct signal_frame32 __user *) 510 sf = (struct signal_frame32 __user *)
532 get_sigframe(&ka->sa, regs, sigframe_size); 511 get_sigframe(&ka->sa, regs, sigframe_size);
@@ -534,8 +513,7 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
534 if (invalid_frame_pointer(sf, sigframe_size)) 513 if (invalid_frame_pointer(sf, sigframe_size))
535 goto sigill; 514 goto sigill;
536 515
537 if (get_thread_wsaved() != 0) 516 tail = (sf + 1);
538 goto sigill;
539 517
540 /* 2. Save the current process state */ 518 /* 2. Save the current process state */
541 if (test_thread_flag(TIF_32BIT)) { 519 if (test_thread_flag(TIF_32BIT)) {
@@ -560,11 +538,22 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
560 &sf->v8plus.asi); 538 &sf->v8plus.asi);
561 539
562 if (psr & PSR_EF) { 540 if (psr & PSR_EF) {
563 err |= save_fpu_state32(regs, &sf->fpu_state); 541 __siginfo_fpu_t __user *fp = tail;
564 err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); 542 tail += sizeof(*fp);
543 err |= save_fpu_state(regs, fp);
544 err |= __put_user((u64)fp, &sf->fpu_save);
565 } else { 545 } else {
566 err |= __put_user(0, &sf->fpu_save); 546 err |= __put_user(0, &sf->fpu_save);
567 } 547 }
548 if (wsaved) {
549 __siginfo_rwin_t __user *rwp = tail;
550 tail += sizeof(*rwp);
551 err |= save_rwin_state(wsaved, rwp);
552 err |= __put_user((u64)rwp, &sf->rwin_save);
553 set_thread_wsaved(0);
554 } else {
555 err |= __put_user(0, &sf->rwin_save);
556 }
568 557
569 switch (_NSIG_WORDS) { 558 switch (_NSIG_WORDS) {
570 case 4: seta[7] = (oldset->sig[3] >> 32); 559 case 4: seta[7] = (oldset->sig[3] >> 32);
@@ -580,10 +569,21 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
580 err |= __copy_to_user(sf->extramask, seta + 1, 569 err |= __copy_to_user(sf->extramask, seta + 1,
581 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); 570 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
582 571
583 err |= copy_in_user((u32 __user *)sf, 572 if (!wsaved) {
584 (u32 __user *)(regs->u_regs[UREG_FP]), 573 err |= copy_in_user((u32 __user *)sf,
585 sizeof(struct reg_window32)); 574 (u32 __user *)(regs->u_regs[UREG_FP]),
586 575 sizeof(struct reg_window32));
576 } else {
577 struct reg_window *rp;
578
579 rp = &current_thread_info()->reg_window[wsaved - 1];
580 for (i = 0; i < 8; i++)
581 err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
582 for (i = 0; i < 6; i++)
583 err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
584 err |= __put_user(rp->ins[6], &sf->ss.fp);
585 err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
586 }
587 if (err) 587 if (err)
588 goto sigsegv; 588 goto sigsegv;
589 589
@@ -613,7 +613,6 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
613 err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/ 613 err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
614 if (err) 614 if (err)
615 goto sigsegv; 615 goto sigsegv;
616
617 flush_signal_insns(address); 616 flush_signal_insns(address);
618 } 617 }
619 return 0; 618 return 0;
@@ -632,18 +631,23 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
632 siginfo_t *info) 631 siginfo_t *info)
633{ 632{
634 struct rt_signal_frame32 __user *sf; 633 struct rt_signal_frame32 __user *sf;
634 int i, err, wsaved;
635 void __user *tail;
635 int sigframe_size; 636 int sigframe_size;
636 u32 psr; 637 u32 psr;
637 int i, err;
638 compat_sigset_t seta; 638 compat_sigset_t seta;
639 639
640 /* 1. Make sure everything is clean */ 640 /* 1. Make sure everything is clean */
641 synchronize_user_stack(); 641 synchronize_user_stack();
642 save_and_clear_fpu(); 642 save_and_clear_fpu();
643 643
644 sigframe_size = RT_ALIGNEDSZ; 644 wsaved = get_thread_wsaved();
645 if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) 645
646 sigframe_size -= sizeof(__siginfo_fpu_t); 646 sigframe_size = sizeof(*sf);
647 if (current_thread_info()->fpsaved[0] & FPRS_FEF)
648 sigframe_size += sizeof(__siginfo_fpu_t);
649 if (wsaved)
650 sigframe_size += sizeof(__siginfo_rwin_t);
647 651
648 sf = (struct rt_signal_frame32 __user *) 652 sf = (struct rt_signal_frame32 __user *)
649 get_sigframe(&ka->sa, regs, sigframe_size); 653 get_sigframe(&ka->sa, regs, sigframe_size);
@@ -651,8 +655,7 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
651 if (invalid_frame_pointer(sf, sigframe_size)) 655 if (invalid_frame_pointer(sf, sigframe_size))
652 goto sigill; 656 goto sigill;
653 657
654 if (get_thread_wsaved() != 0) 658 tail = (sf + 1);
655 goto sigill;
656 659
657 /* 2. Save the current process state */ 660 /* 2. Save the current process state */
658 if (test_thread_flag(TIF_32BIT)) { 661 if (test_thread_flag(TIF_32BIT)) {
@@ -677,11 +680,22 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
677 &sf->v8plus.asi); 680 &sf->v8plus.asi);
678 681
679 if (psr & PSR_EF) { 682 if (psr & PSR_EF) {
680 err |= save_fpu_state32(regs, &sf->fpu_state); 683 __siginfo_fpu_t __user *fp = tail;
681 err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); 684 tail += sizeof(*fp);
685 err |= save_fpu_state(regs, fp);
686 err |= __put_user((u64)fp, &sf->fpu_save);
682 } else { 687 } else {
683 err |= __put_user(0, &sf->fpu_save); 688 err |= __put_user(0, &sf->fpu_save);
684 } 689 }
690 if (wsaved) {
691 __siginfo_rwin_t __user *rwp = tail;
692 tail += sizeof(*rwp);
693 err |= save_rwin_state(wsaved, rwp);
694 err |= __put_user((u64)rwp, &sf->rwin_save);
695 set_thread_wsaved(0);
696 } else {
697 err |= __put_user(0, &sf->rwin_save);
698 }
685 699
686 /* Update the siginfo structure. */ 700 /* Update the siginfo structure. */
687 err |= copy_siginfo_to_user32(&sf->info, info); 701 err |= copy_siginfo_to_user32(&sf->info, info);
@@ -703,9 +717,21 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
703 } 717 }
704 err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); 718 err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
705 719
706 err |= copy_in_user((u32 __user *)sf, 720 if (!wsaved) {
707 (u32 __user *)(regs->u_regs[UREG_FP]), 721 err |= copy_in_user((u32 __user *)sf,
708 sizeof(struct reg_window32)); 722 (u32 __user *)(regs->u_regs[UREG_FP]),
723 sizeof(struct reg_window32));
724 } else {
725 struct reg_window *rp;
726
727 rp = &current_thread_info()->reg_window[wsaved - 1];
728 for (i = 0; i < 8; i++)
729 err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
730 for (i = 0; i < 6; i++)
731 err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
732 err |= __put_user(rp->ins[6], &sf->ss.fp);
733 err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
734 }
709 if (err) 735 if (err)
710 goto sigsegv; 736 goto sigsegv;
711 737
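Note on the hunk above (editorial, not part of the patch): setup_frame32()/setup_rt_frame32() stop embedding __siginfo_fpu_t in the frame and instead append optional FPU and register-window blobs after the fixed structure, storing their user addresses in fpu_save and rwin_save; sigframe_size becomes the fixed size plus whichever optional parts are present, and a "tail" cursor starting at sf + 1 hands out space for each one. The stand-alone C sketch below illustrates only that layout technique; all struct names, fields and sizes are invented and it is not the kernel code.

/*
 * Stand-alone sketch (NOT the kernel code) of the layout technique used
 * above: a fixed header followed by optional records, each reached
 * through a pointer stored in the header and carved out of the space
 * after the header via a moving "tail" cursor.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fpu_part  { unsigned long regs[32]; };
struct rwin_part { unsigned long window[16]; };

struct frame {
	unsigned long     fixed_state[8];
	struct fpu_part  *fpu_save;	/* NULL when the part is absent */
	struct rwin_part *rwin_save;	/* NULL when the part is absent */
};

int main(void)
{
	int have_fpu = 1, have_rwin = 0;

	/* Size the allocation the way sigframe_size is computed above:
	 * the fixed struct plus each optional part that is present. */
	size_t size = sizeof(struct frame);
	if (have_fpu)
		size += sizeof(struct fpu_part);
	if (have_rwin)
		size += sizeof(struct rwin_part);

	struct frame *sf = calloc(1, size);
	if (!sf)
		return 1;

	/* "tail" starts just past the fixed header (tail = sf + 1 above)
	 * and is bumped as each optional record is laid down. */
	char *tail = (char *)(sf + 1);

	if (have_fpu) {
		sf->fpu_save = (struct fpu_part *)tail;
		tail += sizeof(struct fpu_part);
		memset(sf->fpu_save, 0, sizeof(*sf->fpu_save));
	}
	if (have_rwin) {
		sf->rwin_save = (struct rwin_part *)tail;
		tail += sizeof(struct rwin_part);
	}

	printf("frame is %zu bytes, fpu %s, rwin %s\n", size,
	       sf->fpu_save ? "present" : "absent",
	       sf->rwin_save ? "present" : "absent");
	free(sf);
	return 0;
}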
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 5e5c5fd03783..04ede8f04add 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -26,6 +26,8 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/cacheflush.h> /* flush_sig_insns */ 27#include <asm/cacheflush.h> /* flush_sig_insns */
28 28
29#include "sigutil.h"
30
29#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
30 32
31extern void fpsave(unsigned long *fpregs, unsigned long *fsr, 33extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
@@ -39,8 +41,8 @@ struct signal_frame {
39 unsigned long insns[2] __attribute__ ((aligned (8))); 41 unsigned long insns[2] __attribute__ ((aligned (8)));
40 unsigned int extramask[_NSIG_WORDS - 1]; 42 unsigned int extramask[_NSIG_WORDS - 1];
41 unsigned int extra_size; /* Should be 0 */ 43 unsigned int extra_size; /* Should be 0 */
42 __siginfo_fpu_t fpu_state; 44 __siginfo_rwin_t __user *rwin_save;
43}; 45} __attribute__((aligned(8)));
44 46
45struct rt_signal_frame { 47struct rt_signal_frame {
46 struct sparc_stackf ss; 48 struct sparc_stackf ss;
@@ -51,8 +53,8 @@ struct rt_signal_frame {
51 unsigned int insns[2]; 53 unsigned int insns[2];
52 stack_t stack; 54 stack_t stack;
53 unsigned int extra_size; /* Should be 0 */ 55 unsigned int extra_size; /* Should be 0 */
54 __siginfo_fpu_t fpu_state; 56 __siginfo_rwin_t __user *rwin_save;
55}; 57} __attribute__((aligned(8)));
56 58
57/* Align macros */ 59/* Align macros */
58#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) 60#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
@@ -79,43 +81,13 @@ asmlinkage int sys_sigsuspend(old_sigset_t set)
79 return _sigpause_common(set); 81 return _sigpause_common(set);
80} 82}
81 83
82static inline int
83restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
84{
85 int err;
86#ifdef CONFIG_SMP
87 if (test_tsk_thread_flag(current, TIF_USEDFPU))
88 regs->psr &= ~PSR_EF;
89#else
90 if (current == last_task_used_math) {
91 last_task_used_math = NULL;
92 regs->psr &= ~PSR_EF;
93 }
94#endif
95 set_used_math();
96 clear_tsk_thread_flag(current, TIF_USEDFPU);
97
98 if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
99 return -EFAULT;
100
101 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
102 (sizeof(unsigned long) * 32));
103 err |= __get_user(current->thread.fsr, &fpu->si_fsr);
104 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
105 if (current->thread.fpqdepth != 0)
106 err |= __copy_from_user(&current->thread.fpqueue[0],
107 &fpu->si_fpqueue[0],
108 ((sizeof(unsigned long) +
109 (sizeof(unsigned long *)))*16));
110 return err;
111}
112
113asmlinkage void do_sigreturn(struct pt_regs *regs) 84asmlinkage void do_sigreturn(struct pt_regs *regs)
114{ 85{
115 struct signal_frame __user *sf; 86 struct signal_frame __user *sf;
116 unsigned long up_psr, pc, npc; 87 unsigned long up_psr, pc, npc;
117 sigset_t set; 88 sigset_t set;
118 __siginfo_fpu_t __user *fpu_save; 89 __siginfo_fpu_t __user *fpu_save;
90 __siginfo_rwin_t __user *rwin_save;
119 int err; 91 int err;
120 92
121 /* Always make any pending restarted system calls return -EINTR */ 93 /* Always make any pending restarted system calls return -EINTR */
@@ -150,9 +122,11 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
150 pt_regs_clear_syscall(regs); 122 pt_regs_clear_syscall(regs);
151 123
152 err |= __get_user(fpu_save, &sf->fpu_save); 124 err |= __get_user(fpu_save, &sf->fpu_save);
153
154 if (fpu_save) 125 if (fpu_save)
155 err |= restore_fpu_state(regs, fpu_save); 126 err |= restore_fpu_state(regs, fpu_save);
127 err |= __get_user(rwin_save, &sf->rwin_save);
128 if (rwin_save)
129 err |= restore_rwin_state(rwin_save);
156 130
157 /* This is pretty much atomic, no amount locking would prevent 131 /* This is pretty much atomic, no amount locking would prevent
158 * the races which exist anyways. 132 * the races which exist anyways.
@@ -180,6 +154,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
180 struct rt_signal_frame __user *sf; 154 struct rt_signal_frame __user *sf;
181 unsigned int psr, pc, npc; 155 unsigned int psr, pc, npc;
182 __siginfo_fpu_t __user *fpu_save; 156 __siginfo_fpu_t __user *fpu_save;
157 __siginfo_rwin_t __user *rwin_save;
183 mm_segment_t old_fs; 158 mm_segment_t old_fs;
184 sigset_t set; 159 sigset_t set;
185 stack_t st; 160 stack_t st;
@@ -207,8 +182,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
207 pt_regs_clear_syscall(regs); 182 pt_regs_clear_syscall(regs);
208 183
209 err |= __get_user(fpu_save, &sf->fpu_save); 184 err |= __get_user(fpu_save, &sf->fpu_save);
210 185 if (!err && fpu_save)
211 if (fpu_save)
212 err |= restore_fpu_state(regs, fpu_save); 186 err |= restore_fpu_state(regs, fpu_save);
213 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); 187 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
214 188
@@ -228,6 +202,12 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
228 do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf); 202 do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
229 set_fs(old_fs); 203 set_fs(old_fs);
230 204
205 err |= __get_user(rwin_save, &sf->rwin_save);
206 if (!err && rwin_save) {
207 if (restore_rwin_state(rwin_save))
208 goto segv;
209 }
210
231 sigdelsetmask(&set, ~_BLOCKABLE); 211 sigdelsetmask(&set, ~_BLOCKABLE);
232 spin_lock_irq(&current->sighand->siglock); 212 spin_lock_irq(&current->sighand->siglock);
233 current->blocked = set; 213 current->blocked = set;
@@ -280,53 +260,23 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
280 return (void __user *) sp; 260 return (void __user *) sp;
281} 261}
282 262
283static inline int
284save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
285{
286 int err = 0;
287#ifdef CONFIG_SMP
288 if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
289 put_psr(get_psr() | PSR_EF);
290 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
291 &current->thread.fpqueue[0], &current->thread.fpqdepth);
292 regs->psr &= ~(PSR_EF);
293 clear_tsk_thread_flag(current, TIF_USEDFPU);
294 }
295#else
296 if (current == last_task_used_math) {
297 put_psr(get_psr() | PSR_EF);
298 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
299 &current->thread.fpqueue[0], &current->thread.fpqdepth);
300 last_task_used_math = NULL;
301 regs->psr &= ~(PSR_EF);
302 }
303#endif
304 err |= __copy_to_user(&fpu->si_float_regs[0],
305 &current->thread.float_regs[0],
306 (sizeof(unsigned long) * 32));
307 err |= __put_user(current->thread.fsr, &fpu->si_fsr);
308 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
309 if (current->thread.fpqdepth != 0)
310 err |= __copy_to_user(&fpu->si_fpqueue[0],
311 &current->thread.fpqueue[0],
312 ((sizeof(unsigned long) +
313 (sizeof(unsigned long *)))*16));
314 clear_used_math();
315 return err;
316}
317
318static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, 263static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
319 int signo, sigset_t *oldset) 264 int signo, sigset_t *oldset)
320{ 265{
321 struct signal_frame __user *sf; 266 struct signal_frame __user *sf;
322 int sigframe_size, err; 267 int sigframe_size, err, wsaved;
268 void __user *tail;
323 269
324 /* 1. Make sure everything is clean */ 270 /* 1. Make sure everything is clean */
325 synchronize_user_stack(); 271 synchronize_user_stack();
326 272
327 sigframe_size = SF_ALIGNEDSZ; 273 wsaved = current_thread_info()->w_saved;
328 if (!used_math()) 274
329 sigframe_size -= sizeof(__siginfo_fpu_t); 275 sigframe_size = sizeof(*sf);
276 if (used_math())
277 sigframe_size += sizeof(__siginfo_fpu_t);
278 if (wsaved)
279 sigframe_size += sizeof(__siginfo_rwin_t);
330 280
331 sf = (struct signal_frame __user *) 281 sf = (struct signal_frame __user *)
332 get_sigframe(&ka->sa, regs, sigframe_size); 282 get_sigframe(&ka->sa, regs, sigframe_size);
@@ -334,8 +284,7 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
334 if (invalid_frame_pointer(sf, sigframe_size)) 284 if (invalid_frame_pointer(sf, sigframe_size))
335 goto sigill_and_return; 285 goto sigill_and_return;
336 286
337 if (current_thread_info()->w_saved != 0) 287 tail = sf + 1;
338 goto sigill_and_return;
339 288
340 /* 2. Save the current process state */ 289 /* 2. Save the current process state */
341 err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); 290 err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));
@@ -343,17 +292,34 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
343 err |= __put_user(0, &sf->extra_size); 292 err |= __put_user(0, &sf->extra_size);
344 293
345 if (used_math()) { 294 if (used_math()) {
346 err |= save_fpu_state(regs, &sf->fpu_state); 295 __siginfo_fpu_t __user *fp = tail;
347 err |= __put_user(&sf->fpu_state, &sf->fpu_save); 296 tail += sizeof(*fp);
297 err |= save_fpu_state(regs, fp);
298 err |= __put_user(fp, &sf->fpu_save);
348 } else { 299 } else {
349 err |= __put_user(0, &sf->fpu_save); 300 err |= __put_user(0, &sf->fpu_save);
350 } 301 }
302 if (wsaved) {
303 __siginfo_rwin_t __user *rwp = tail;
304 tail += sizeof(*rwp);
305 err |= save_rwin_state(wsaved, rwp);
306 err |= __put_user(rwp, &sf->rwin_save);
307 } else {
308 err |= __put_user(0, &sf->rwin_save);
309 }
351 310
352 err |= __put_user(oldset->sig[0], &sf->info.si_mask); 311 err |= __put_user(oldset->sig[0], &sf->info.si_mask);
353 err |= __copy_to_user(sf->extramask, &oldset->sig[1], 312 err |= __copy_to_user(sf->extramask, &oldset->sig[1],
354 (_NSIG_WORDS - 1) * sizeof(unsigned int)); 313 (_NSIG_WORDS - 1) * sizeof(unsigned int));
355 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], 314 if (!wsaved) {
356 sizeof(struct reg_window32)); 315 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
316 sizeof(struct reg_window32));
317 } else {
318 struct reg_window32 *rp;
319
320 rp = &current_thread_info()->reg_window[wsaved - 1];
321 err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
322 }
357 if (err) 323 if (err)
358 goto sigsegv; 324 goto sigsegv;
359 325
@@ -399,21 +365,24 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
399 int signo, sigset_t *oldset, siginfo_t *info) 365 int signo, sigset_t *oldset, siginfo_t *info)
400{ 366{
401 struct rt_signal_frame __user *sf; 367 struct rt_signal_frame __user *sf;
402 int sigframe_size; 368 int sigframe_size, wsaved;
369 void __user *tail;
403 unsigned int psr; 370 unsigned int psr;
404 int err; 371 int err;
405 372
406 synchronize_user_stack(); 373 synchronize_user_stack();
407 sigframe_size = RT_ALIGNEDSZ; 374 wsaved = current_thread_info()->w_saved;
408 if (!used_math()) 375 sigframe_size = sizeof(*sf);
409 sigframe_size -= sizeof(__siginfo_fpu_t); 376 if (used_math())
377 sigframe_size += sizeof(__siginfo_fpu_t);
378 if (wsaved)
379 sigframe_size += sizeof(__siginfo_rwin_t);
410 sf = (struct rt_signal_frame __user *) 380 sf = (struct rt_signal_frame __user *)
411 get_sigframe(&ka->sa, regs, sigframe_size); 381 get_sigframe(&ka->sa, regs, sigframe_size);
412 if (invalid_frame_pointer(sf, sigframe_size)) 382 if (invalid_frame_pointer(sf, sigframe_size))
413 goto sigill; 383 goto sigill;
414 if (current_thread_info()->w_saved != 0)
415 goto sigill;
416 384
385 tail = sf + 1;
417 err = __put_user(regs->pc, &sf->regs.pc); 386 err = __put_user(regs->pc, &sf->regs.pc);
418 err |= __put_user(regs->npc, &sf->regs.npc); 387 err |= __put_user(regs->npc, &sf->regs.npc);
419 err |= __put_user(regs->y, &sf->regs.y); 388 err |= __put_user(regs->y, &sf->regs.y);
@@ -425,11 +394,21 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
425 err |= __put_user(0, &sf->extra_size); 394 err |= __put_user(0, &sf->extra_size);
426 395
427 if (psr & PSR_EF) { 396 if (psr & PSR_EF) {
428 err |= save_fpu_state(regs, &sf->fpu_state); 397 __siginfo_fpu_t *fp = tail;
429 err |= __put_user(&sf->fpu_state, &sf->fpu_save); 398 tail += sizeof(*fp);
399 err |= save_fpu_state(regs, fp);
400 err |= __put_user(fp, &sf->fpu_save);
430 } else { 401 } else {
431 err |= __put_user(0, &sf->fpu_save); 402 err |= __put_user(0, &sf->fpu_save);
432 } 403 }
404 if (wsaved) {
405 __siginfo_rwin_t *rwp = tail;
406 tail += sizeof(*rwp);
407 err |= save_rwin_state(wsaved, rwp);
408 err |= __put_user(rwp, &sf->rwin_save);
409 } else {
410 err |= __put_user(0, &sf->rwin_save);
411 }
433 err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); 412 err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
434 413
435 /* Setup sigaltstack */ 414 /* Setup sigaltstack */
@@ -437,8 +416,15 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
437 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); 416 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
438 err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); 417 err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
439 418
440 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], 419 if (!wsaved) {
441 sizeof(struct reg_window32)); 420 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
421 sizeof(struct reg_window32));
422 } else {
423 struct reg_window32 *rp;
424
425 rp = &current_thread_info()->reg_window[wsaved - 1];
426 err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
427 }
442 428
443 err |= copy_siginfo_to_user(&sf->info, info); 429 err |= copy_siginfo_to_user(&sf->info, info);
444 430
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 006fe4515886..47509df3b893 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -34,6 +34,7 @@
34 34
35#include "entry.h" 35#include "entry.h"
36#include "systbls.h" 36#include "systbls.h"
37#include "sigutil.h"
37 38
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 40
@@ -236,7 +237,7 @@ struct rt_signal_frame {
236 __siginfo_fpu_t __user *fpu_save; 237 __siginfo_fpu_t __user *fpu_save;
237 stack_t stack; 238 stack_t stack;
238 sigset_t mask; 239 sigset_t mask;
239 __siginfo_fpu_t fpu_state; 240 __siginfo_rwin_t *rwin_save;
240}; 241};
241 242
242static long _sigpause_common(old_sigset_t set) 243static long _sigpause_common(old_sigset_t set)
@@ -266,33 +267,12 @@ asmlinkage long sys_sigsuspend(old_sigset_t set)
266 return _sigpause_common(set); 267 return _sigpause_common(set);
267} 268}
268 269
269static inline int
270restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
271{
272 unsigned long *fpregs = current_thread_info()->fpregs;
273 unsigned long fprs;
274 int err;
275
276 err = __get_user(fprs, &fpu->si_fprs);
277 fprs_write(0);
278 regs->tstate &= ~TSTATE_PEF;
279 if (fprs & FPRS_DL)
280 err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
281 (sizeof(unsigned int) * 32));
282 if (fprs & FPRS_DU)
283 err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
284 (sizeof(unsigned int) * 32));
285 err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
286 err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
287 current_thread_info()->fpsaved[0] |= fprs;
288 return err;
289}
290
291void do_rt_sigreturn(struct pt_regs *regs) 270void do_rt_sigreturn(struct pt_regs *regs)
292{ 271{
293 struct rt_signal_frame __user *sf; 272 struct rt_signal_frame __user *sf;
294 unsigned long tpc, tnpc, tstate; 273 unsigned long tpc, tnpc, tstate;
295 __siginfo_fpu_t __user *fpu_save; 274 __siginfo_fpu_t __user *fpu_save;
275 __siginfo_rwin_t __user *rwin_save;
296 sigset_t set; 276 sigset_t set;
297 int err; 277 int err;
298 278
@@ -325,8 +305,8 @@ void do_rt_sigreturn(struct pt_regs *regs)
325 regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); 305 regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
326 306
327 err |= __get_user(fpu_save, &sf->fpu_save); 307 err |= __get_user(fpu_save, &sf->fpu_save);
328 if (fpu_save) 308 if (!err && fpu_save)
329 err |= restore_fpu_state(regs, &sf->fpu_state); 309 err |= restore_fpu_state(regs, fpu_save);
330 310
331 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); 311 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
332 err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); 312 err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
@@ -334,6 +314,12 @@ void do_rt_sigreturn(struct pt_regs *regs)
334 if (err) 314 if (err)
335 goto segv; 315 goto segv;
336 316
317 err |= __get_user(rwin_save, &sf->rwin_save);
318 if (!err && rwin_save) {
319 if (restore_rwin_state(rwin_save))
320 goto segv;
321 }
322
337 regs->tpc = tpc; 323 regs->tpc = tpc;
338 regs->tnpc = tnpc; 324 regs->tnpc = tnpc;
339 325
@@ -351,34 +337,13 @@ segv:
351} 337}
352 338
353/* Checks if the fp is valid */ 339/* Checks if the fp is valid */
354static int invalid_frame_pointer(void __user *fp, int fplen) 340static int invalid_frame_pointer(void __user *fp)
355{ 341{
356 if (((unsigned long) fp) & 15) 342 if (((unsigned long) fp) & 15)
357 return 1; 343 return 1;
358 return 0; 344 return 0;
359} 345}
360 346
361static inline int
362save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
363{
364 unsigned long *fpregs = current_thread_info()->fpregs;
365 unsigned long fprs;
366 int err = 0;
367
368 fprs = current_thread_info()->fpsaved[0];
369 if (fprs & FPRS_DL)
370 err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
371 (sizeof(unsigned int) * 32));
372 if (fprs & FPRS_DU)
373 err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
374 (sizeof(unsigned int) * 32));
375 err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
376 err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
377 err |= __put_user(fprs, &fpu->si_fprs);
378
379 return err;
380}
381
382static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) 347static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
383{ 348{
384 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; 349 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
@@ -414,34 +379,48 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
414 int signo, sigset_t *oldset, siginfo_t *info) 379 int signo, sigset_t *oldset, siginfo_t *info)
415{ 380{
416 struct rt_signal_frame __user *sf; 381 struct rt_signal_frame __user *sf;
417 int sigframe_size, err; 382 int wsaved, err, sf_size;
383 void __user *tail;
418 384
419 /* 1. Make sure everything is clean */ 385 /* 1. Make sure everything is clean */
420 synchronize_user_stack(); 386 synchronize_user_stack();
421 save_and_clear_fpu(); 387 save_and_clear_fpu();
422 388
423 sigframe_size = sizeof(struct rt_signal_frame); 389 wsaved = get_thread_wsaved();
424 if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
425 sigframe_size -= sizeof(__siginfo_fpu_t);
426 390
391 sf_size = sizeof(struct rt_signal_frame);
392 if (current_thread_info()->fpsaved[0] & FPRS_FEF)
393 sf_size += sizeof(__siginfo_fpu_t);
394 if (wsaved)
395 sf_size += sizeof(__siginfo_rwin_t);
427 sf = (struct rt_signal_frame __user *) 396 sf = (struct rt_signal_frame __user *)
428 get_sigframe(ka, regs, sigframe_size); 397 get_sigframe(ka, regs, sf_size);
429
430 if (invalid_frame_pointer (sf, sigframe_size))
431 goto sigill;
432 398
433 if (get_thread_wsaved() != 0) 399 if (invalid_frame_pointer (sf))
434 goto sigill; 400 goto sigill;
435 401
402 tail = (sf + 1);
403
436 /* 2. Save the current process state */ 404 /* 2. Save the current process state */
437 err = copy_to_user(&sf->regs, regs, sizeof (*regs)); 405 err = copy_to_user(&sf->regs, regs, sizeof (*regs));
438 406
439 if (current_thread_info()->fpsaved[0] & FPRS_FEF) { 407 if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
440 err |= save_fpu_state(regs, &sf->fpu_state); 408 __siginfo_fpu_t __user *fpu_save = tail;
441 err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); 409 tail += sizeof(__siginfo_fpu_t);
410 err |= save_fpu_state(regs, fpu_save);
411 err |= __put_user((u64)fpu_save, &sf->fpu_save);
442 } else { 412 } else {
443 err |= __put_user(0, &sf->fpu_save); 413 err |= __put_user(0, &sf->fpu_save);
444 } 414 }
415 if (wsaved) {
416 __siginfo_rwin_t __user *rwin_save = tail;
417 tail += sizeof(__siginfo_rwin_t);
418 err |= save_rwin_state(wsaved, rwin_save);
419 err |= __put_user((u64)rwin_save, &sf->rwin_save);
420 set_thread_wsaved(0);
421 } else {
422 err |= __put_user(0, &sf->rwin_save);
423 }
445 424
446 /* Setup sigaltstack */ 425 /* Setup sigaltstack */
447 err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); 426 err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
@@ -450,10 +429,17 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
450 429
451 err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); 430 err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
452 431
453 err |= copy_in_user((u64 __user *)sf, 432 if (!wsaved) {
454 (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS), 433 err |= copy_in_user((u64 __user *)sf,
455 sizeof(struct reg_window)); 434 (u64 __user *)(regs->u_regs[UREG_FP] +
435 STACK_BIAS),
436 sizeof(struct reg_window));
437 } else {
438 struct reg_window *rp;
456 439
440 rp = &current_thread_info()->reg_window[wsaved - 1];
441 err |= copy_to_user(sf, rp, sizeof(struct reg_window));
442 }
457 if (info) 443 if (info)
458 err |= copy_siginfo_to_user(&sf->info, info); 444 err |= copy_siginfo_to_user(&sf->info, info);
459 else { 445 else {
diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h
new file mode 100644
index 000000000000..d223aa432bb6
--- /dev/null
+++ b/arch/sparc/kernel/sigutil.h
@@ -0,0 +1,9 @@
1#ifndef _SIGUTIL_H
2#define _SIGUTIL_H
3
4int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu);
5int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu);
6int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin);
7int restore_rwin_state(__siginfo_rwin_t __user *rp);
8
9#endif /* _SIGUTIL_H */
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
new file mode 100644
index 000000000000..35c7897b009a
--- /dev/null
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -0,0 +1,120 @@
1#include <linux/kernel.h>
2#include <linux/types.h>
3#include <linux/thread_info.h>
4#include <linux/uaccess.h>
5#include <linux/sched.h>
6
7#include <asm/sigcontext.h>
8#include <asm/fpumacro.h>
9#include <asm/ptrace.h>
10
11#include "sigutil.h"
12
13int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
14{
15 int err = 0;
16#ifdef CONFIG_SMP
17 if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
18 put_psr(get_psr() | PSR_EF);
19 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
20 &current->thread.fpqueue[0], &current->thread.fpqdepth);
21 regs->psr &= ~(PSR_EF);
22 clear_tsk_thread_flag(current, TIF_USEDFPU);
23 }
24#else
25 if (current == last_task_used_math) {
26 put_psr(get_psr() | PSR_EF);
27 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
28 &current->thread.fpqueue[0], &current->thread.fpqdepth);
29 last_task_used_math = NULL;
30 regs->psr &= ~(PSR_EF);
31 }
32#endif
33 err |= __copy_to_user(&fpu->si_float_regs[0],
34 &current->thread.float_regs[0],
35 (sizeof(unsigned long) * 32));
36 err |= __put_user(current->thread.fsr, &fpu->si_fsr);
37 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
38 if (current->thread.fpqdepth != 0)
39 err |= __copy_to_user(&fpu->si_fpqueue[0],
40 &current->thread.fpqueue[0],
41 ((sizeof(unsigned long) +
42 (sizeof(unsigned long *)))*16));
43 clear_used_math();
44 return err;
45}
46
47int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
48{
49 int err;
50#ifdef CONFIG_SMP
51 if (test_tsk_thread_flag(current, TIF_USEDFPU))
52 regs->psr &= ~PSR_EF;
53#else
54 if (current == last_task_used_math) {
55 last_task_used_math = NULL;
56 regs->psr &= ~PSR_EF;
57 }
58#endif
59 set_used_math();
60 clear_tsk_thread_flag(current, TIF_USEDFPU);
61
62 if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
63 return -EFAULT;
64
65 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
66 (sizeof(unsigned long) * 32));
67 err |= __get_user(current->thread.fsr, &fpu->si_fsr);
68 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
69 if (current->thread.fpqdepth != 0)
70 err |= __copy_from_user(&current->thread.fpqueue[0],
71 &fpu->si_fpqueue[0],
72 ((sizeof(unsigned long) +
73 (sizeof(unsigned long *)))*16));
74 return err;
75}
76
77int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
78{
79 int i, err = __put_user(wsaved, &rwin->wsaved);
80
81 for (i = 0; i < wsaved; i++) {
82 struct reg_window32 *rp;
83 unsigned long fp;
84
85 rp = &current_thread_info()->reg_window[i];
86 fp = current_thread_info()->rwbuf_stkptrs[i];
87 err |= copy_to_user(&rwin->reg_window[i], rp,
88 sizeof(struct reg_window32));
89 err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
90 }
91 return err;
92}
93
94int restore_rwin_state(__siginfo_rwin_t __user *rp)
95{
96 struct thread_info *t = current_thread_info();
97 int i, wsaved, err;
98
99 __get_user(wsaved, &rp->wsaved);
100 if (wsaved > NSWINS)
101 return -EFAULT;
102
103 err = 0;
104 for (i = 0; i < wsaved; i++) {
105 err |= copy_from_user(&t->reg_window[i],
106 &rp->reg_window[i],
107 sizeof(struct reg_window32));
108 err |= __get_user(t->rwbuf_stkptrs[i],
109 &rp->rwbuf_stkptrs[i]);
110 }
111 if (err)
112 return err;
113
114 t->w_saved = wsaved;
115 synchronize_user_stack();
116 if (t->w_saved)
117 return -EFAULT;
118 return 0;
119
120}
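Note (editorial, not part of the patch): restore_rwin_state() above treats the window count read back from userspace as untrusted — it is bounded against NSWINS before any windows are copied into thread_info, and the restore fails outright if windows remain unflushed after synchronize_user_stack(). Below is a minimal user-space sketch of the same validate-then-copy idea; the names are invented and nothing here is kernel code.

/*
 * Illustrative only: reject an untrusted element count that exceeds the
 * fixed capacity (NSWINS in the kernel) before copying anything.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS 8			/* stands in for NSWINS */

struct window { unsigned long regs[16]; };

static struct window slots[NSLOTS];

int restore_windows(const struct window *src, int count)
{
	/* mirrors: if (wsaved > NSWINS) return -EFAULT; */
	if (count < 0 || count > NSLOTS)
		return -EFAULT;
	memcpy(slots, src, (size_t)count * sizeof(struct window));
	return 0;
}

int main(void)
{
	struct window w[2];

	memset(w, 0, sizeof(w));
	printf("good count: %d, oversized count: %d\n",
	       restore_windows(w, 2), restore_windows(w, NSLOTS + 1));
	return 0;
}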
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
new file mode 100644
index 000000000000..e7dc508c38eb
--- /dev/null
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -0,0 +1,93 @@
1#include <linux/kernel.h>
2#include <linux/types.h>
3#include <linux/thread_info.h>
4#include <linux/uaccess.h>
5
6#include <asm/sigcontext.h>
7#include <asm/fpumacro.h>
8#include <asm/ptrace.h>
9
10#include "sigutil.h"
11
12int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
13{
14 unsigned long *fpregs = current_thread_info()->fpregs;
15 unsigned long fprs;
16 int err = 0;
17
18 fprs = current_thread_info()->fpsaved[0];
19 if (fprs & FPRS_DL)
20 err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
21 (sizeof(unsigned int) * 32));
22 if (fprs & FPRS_DU)
23 err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
24 (sizeof(unsigned int) * 32));
25 err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
26 err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
27 err |= __put_user(fprs, &fpu->si_fprs);
28
29 return err;
30}
31
32int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
33{
34 unsigned long *fpregs = current_thread_info()->fpregs;
35 unsigned long fprs;
36 int err;
37
38 err = __get_user(fprs, &fpu->si_fprs);
39 fprs_write(0);
40 regs->tstate &= ~TSTATE_PEF;
41 if (fprs & FPRS_DL)
42 err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
43 (sizeof(unsigned int) * 32));
44 if (fprs & FPRS_DU)
45 err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
46 (sizeof(unsigned int) * 32));
47 err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
48 err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
49 current_thread_info()->fpsaved[0] |= fprs;
50 return err;
51}
52
53int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
54{
55 int i, err = __put_user(wsaved, &rwin->wsaved);
56
57 for (i = 0; i < wsaved; i++) {
58 struct reg_window *rp = &current_thread_info()->reg_window[i];
59 unsigned long fp = current_thread_info()->rwbuf_stkptrs[i];
60
61 err |= copy_to_user(&rwin->reg_window[i], rp,
62 sizeof(struct reg_window));
63 err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
64 }
65 return err;
66}
67
68int restore_rwin_state(__siginfo_rwin_t __user *rp)
69{
70 struct thread_info *t = current_thread_info();
71 int i, wsaved, err;
72
73 __get_user(wsaved, &rp->wsaved);
74 if (wsaved > NSWINS)
75 return -EFAULT;
76
77 err = 0;
78 for (i = 0; i < wsaved; i++) {
79 err |= copy_from_user(&t->reg_window[i],
80 &rp->reg_window[i],
81 sizeof(struct reg_window));
82 err |= __get_user(t->rwbuf_stkptrs[i],
83 &rp->rwbuf_stkptrs[i]);
84 }
85 if (err)
86 return err;
87
88 set_thread_wsaved(wsaved);
89 synchronize_user_stack();
90 if (get_thread_wsaved())
91 return -EFAULT;
92 return 0;
93}
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 44e5faf1ad5f..d97f3eb72e06 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -81,7 +81,6 @@ SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
81SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5) 81SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
82SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1) 82SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
83SIGN1(sys32_mlockall, sys_mlockall, %o0) 83SIGN1(sys32_mlockall, sys_mlockall, %o0)
84SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
85SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1) 84SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
86SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1) 85SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
87SIGN1(sys32_io_submit, compat_sys_io_submit, %o1) 86SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6e492d59f6b1..09d8ec454450 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -67,7 +67,7 @@ sys_call_table:
67/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 67/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
68/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 68/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
69/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 69/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
70/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 70/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
71/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 71/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
72/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 72/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
73/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy 73/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f566518483b5..edbec45d4688 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -68,7 +68,7 @@ sys_call_table32:
68 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall 68 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
69/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler 69/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
70 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep 70 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
71/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl 71/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall
72 .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep 72 .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
73/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun 73/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
74 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy 74 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
@@ -145,7 +145,7 @@ sys_call_table:
145 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 145 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
146/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 146/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
147 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 147 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
148/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 148/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
149 .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 149 .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
150/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 150/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
151 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy 151 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
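Note (editorial, not part of the patch): with the table slots above switched from sys_nfsservctl/sys32_nfsservctl to the "unimplemented" stubs, syscall 254 on sparc now simply reports that it is not implemented. The small probe below shows what a caller observes; 254 is the sparc slot changed above, so on another architecture this merely exercises whatever syscall happens to live at that number.

/*
 * Illustrative probe of an unimplemented syscall slot: the raw call
 * fails and errno is set to ENOSYS.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long ret = syscall(254);

	if (ret == -1 && errno == ENOSYS)
		printf("syscall 254: not implemented (ENOSYS)\n");
	else
		printf("syscall 254 returned %ld (errno %d)\n", ret, errno);
	return 0;
}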
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index d31ecf346b4e..21bebe63df66 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -10,6 +10,10 @@ config CMPXCHG_LOCAL
10 bool 10 bool
11 default n 11 default n
12 12
13config CMPXCHG_DOUBLE
14 bool
15 default n
16
13source "arch/x86/Kconfig.cpu" 17source "arch/x86/Kconfig.cpu"
14 18
15endmenu 19endmenu
diff --git a/arch/um/Makefile b/arch/um/Makefile
index fab8121d2b32..c0f712cc7c5f 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH)
41KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \ 41KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
42 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \ 42 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
43 -Din6addr_loopback=kernel_in6addr_loopback \ 43 -Din6addr_loopback=kernel_in6addr_loopback \
44 -Din6addr_any=kernel_in6addr_any 44 -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
45 45
46KBUILD_AFLAGS += $(ARCH_INCLUDE) 46KBUILD_AFLAGS += $(ARCH_INCLUDE)
47 47
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index d51c404239a8..364c8a15c4c3 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
399 * is done under a spinlock. Checking whether the device is in use is 399 * is done under a spinlock. Checking whether the device is in use is
400 * line->tty->count > 1, also under the spinlock. 400 * line->tty->count > 1, also under the spinlock.
401 * 401 *
402 * tty->count serves to decide whether the device should be enabled or 402 * line->count serves to decide whether the device should be enabled or
403 * disabled on the host. If it's equal to 1, then we are doing the 403 * disabled on the host. If it's equal to 0, then we are doing the
404 * first open or last close. Otherwise, open and close just return. 404 * first open or last close. Otherwise, open and close just return.
405 */ 405 */
406 406
@@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty)
414 goto out_unlock; 414 goto out_unlock;
415 415
416 err = 0; 416 err = 0;
417 if (tty->count > 1) 417 if (line->count++)
418 goto out_unlock; 418 goto out_unlock;
419 419
420 spin_unlock(&line->count_lock); 420 BUG_ON(tty->driver_data);
421
422 tty->driver_data = line; 421 tty->driver_data = line;
423 line->tty = tty; 422 line->tty = tty;
424 423
424 spin_unlock(&line->count_lock);
425 err = enable_chan(line); 425 err = enable_chan(line);
426 if (err) 426 if (err) /* line_close() will be called by our caller */
427 return err; 427 return err;
428 428
429 INIT_DELAYED_WORK(&line->task, line_timer_cb); 429 INIT_DELAYED_WORK(&line->task, line_timer_cb);
@@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty)
436 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 436 chan_window_size(&line->chan_list, &tty->winsize.ws_row,
437 &tty->winsize.ws_col); 437 &tty->winsize.ws_col);
438 438
439 return err; 439 return 0;
440 440
441out_unlock: 441out_unlock:
442 spin_unlock(&line->count_lock); 442 spin_unlock(&line->count_lock);
@@ -460,17 +460,16 @@ void line_close(struct tty_struct *tty, struct file * filp)
460 flush_buffer(line); 460 flush_buffer(line);
461 461
462 spin_lock(&line->count_lock); 462 spin_lock(&line->count_lock);
463 if (!line->valid) 463 BUG_ON(!line->valid);
464 goto out_unlock;
465 464
466 if (tty->count > 1) 465 if (--line->count)
467 goto out_unlock; 466 goto out_unlock;
468 467
469 spin_unlock(&line->count_lock);
470
471 line->tty = NULL; 468 line->tty = NULL;
472 tty->driver_data = NULL; 469 tty->driver_data = NULL;
473 470
471 spin_unlock(&line->count_lock);
472
474 if (line->sigio) { 473 if (line->sigio) {
475 unregister_winch(tty); 474 unregister_winch(tty);
476 line->sigio = 0; 475 line->sigio = 0;
@@ -498,7 +497,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
498 497
499 spin_lock(&line->count_lock); 498 spin_lock(&line->count_lock);
500 499
501 if (line->tty != NULL) { 500 if (line->count) {
502 *error_out = "Device is already open"; 501 *error_out = "Device is already open";
503 goto out; 502 goto out;
504 } 503 }
@@ -722,41 +721,53 @@ struct winch {
722 int pid; 721 int pid;
723 struct tty_struct *tty; 722 struct tty_struct *tty;
724 unsigned long stack; 723 unsigned long stack;
724 struct work_struct work;
725}; 725};
726 726
727static void free_winch(struct winch *winch, int free_irq_ok) 727static void __free_winch(struct work_struct *work)
728{ 728{
729 if (free_irq_ok) 729 struct winch *winch = container_of(work, struct winch, work);
730 free_irq(WINCH_IRQ, winch); 730 free_irq(WINCH_IRQ, winch);
731
732 list_del(&winch->list);
733 731
734 if (winch->pid != -1) 732 if (winch->pid != -1)
735 os_kill_process(winch->pid, 1); 733 os_kill_process(winch->pid, 1);
736 if (winch->fd != -1)
737 os_close_file(winch->fd);
738 if (winch->stack != 0) 734 if (winch->stack != 0)
739 free_stack(winch->stack, 0); 735 free_stack(winch->stack, 0);
740 kfree(winch); 736 kfree(winch);
741} 737}
742 738
739static void free_winch(struct winch *winch)
740{
741 int fd = winch->fd;
742 winch->fd = -1;
743 if (fd != -1)
744 os_close_file(fd);
745 list_del(&winch->list);
746 __free_winch(&winch->work);
747}
748
743static irqreturn_t winch_interrupt(int irq, void *data) 749static irqreturn_t winch_interrupt(int irq, void *data)
744{ 750{
745 struct winch *winch = data; 751 struct winch *winch = data;
746 struct tty_struct *tty; 752 struct tty_struct *tty;
747 struct line *line; 753 struct line *line;
754 int fd = winch->fd;
748 int err; 755 int err;
749 char c; 756 char c;
750 757
751 if (winch->fd != -1) { 758 if (fd != -1) {
752 err = generic_read(winch->fd, &c, NULL); 759 err = generic_read(fd, &c, NULL);
753 if (err < 0) { 760 if (err < 0) {
754 if (err != -EAGAIN) { 761 if (err != -EAGAIN) {
762 winch->fd = -1;
763 list_del(&winch->list);
764 os_close_file(fd);
755 printk(KERN_ERR "winch_interrupt : " 765 printk(KERN_ERR "winch_interrupt : "
756 "read failed, errno = %d\n", -err); 766 "read failed, errno = %d\n", -err);
757 printk(KERN_ERR "fd %d is losing SIGWINCH " 767 printk(KERN_ERR "fd %d is losing SIGWINCH "
758 "support\n", winch->tty_fd); 768 "support\n", winch->tty_fd);
759 free_winch(winch, 0); 769 INIT_WORK(&winch->work, __free_winch);
770 schedule_work(&winch->work);
760 return IRQ_HANDLED; 771 return IRQ_HANDLED;
761 } 772 }
762 goto out; 773 goto out;
@@ -828,7 +839,7 @@ static void unregister_winch(struct tty_struct *tty)
828 list_for_each_safe(ele, next, &winch_handlers) { 839 list_for_each_safe(ele, next, &winch_handlers) {
829 winch = list_entry(ele, struct winch, list); 840 winch = list_entry(ele, struct winch, list);
830 if (winch->tty == tty) { 841 if (winch->tty == tty) {
831 free_winch(winch, 1); 842 free_winch(winch);
832 break; 843 break;
833 } 844 }
834 } 845 }
@@ -844,7 +855,7 @@ static void winch_cleanup(void)
844 855
845 list_for_each_safe(ele, next, &winch_handlers) { 856 list_for_each_safe(ele, next, &winch_handlers) {
846 winch = list_entry(ele, struct winch, list); 857 winch = list_entry(ele, struct winch, list);
847 free_winch(winch, 1); 858 free_winch(winch);
848 } 859 }
849 860
850 spin_unlock(&winch_handler_lock); 861 spin_unlock(&winch_handler_lock);
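Note (editorial, not part of the patch): the line-driver rework above replaces tests on tty->count with a driver-private line->count taken under count_lock, so the host channel is enabled only when the count goes 0 -> 1 on open and torn down only when it drops back to 0 on close; the winch teardown from interrupt context is likewise deferred to a workqueue so free_irq() is never called from its own handler. The toy model below mirrors just the first-open/last-close counting; names are invented and the spinlock is reduced to a comment.

/*
 * Toy model of the line->count handling: a private open count decides
 * when the backing channel is really enabled and really shut down.
 */
#include <stdio.h>

struct line {
	unsigned long count;
	int enabled;
};

static int line_open(struct line *line)
{
	/* count_lock would be held here */
	if (line->count++)		/* not the first opener */
		return 0;
	line->enabled = 1;		/* first open: enable the host side */
	return 0;
}

static void line_close(struct line *line)
{
	/* count_lock would be held here */
	if (--line->count)		/* other openers remain */
		return;
	line->enabled = 0;		/* last close: shut the host side down */
}

int main(void)
{
	struct line l = { 0, 0 };

	line_open(&l);
	line_open(&l);
	line_close(&l);
	printf("after one close:  enabled=%d count=%lu\n", l.enabled, l.count);
	line_close(&l);
	printf("after last close: enabled=%d count=%lu\n", l.enabled, l.count);
	return 0;
}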
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 8ac7146c237f..2e1de5728604 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -123,6 +123,7 @@ static int xterm_open(int input, int output, int primary, void *d,
123 err = -errno; 123 err = -errno;
124 printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", 124 printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n",
125 errno); 125 errno);
126 close(fd);
126 return err; 127 return err;
127 } 128 }
128 close(fd); 129 close(fd);
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index ae084ad1a3a0..1a7d2757fe05 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -42,10 +42,6 @@ extern long subarch_ptrace(struct task_struct *child, long request,
42 unsigned long addr, unsigned long data); 42 unsigned long addr, unsigned long data);
43extern unsigned long getreg(struct task_struct *child, int regno); 43extern unsigned long getreg(struct task_struct *child, int regno);
44extern int putreg(struct task_struct *child, int regno, unsigned long value); 44extern int putreg(struct task_struct *child, int regno, unsigned long value);
45extern int get_fpregs(struct user_i387_struct __user *buf,
46 struct task_struct *child);
47extern int set_fpregs(struct user_i387_struct __user *buf,
48 struct task_struct *child);
49 45
50extern int arch_copy_tls(struct task_struct *new); 46extern int arch_copy_tls(struct task_struct *new);
51extern void clear_flushed_tls(struct task_struct *task); 47extern void clear_flushed_tls(struct task_struct *task);
diff --git a/arch/um/include/shared/line.h b/arch/um/include/shared/line.h
index 72f4f25af247..63df3ca02ac2 100644
--- a/arch/um/include/shared/line.h
+++ b/arch/um/include/shared/line.h
@@ -33,6 +33,7 @@ struct line_driver {
33struct line { 33struct line {
34 struct tty_struct *tty; 34 struct tty_struct *tty;
35 spinlock_t count_lock; 35 spinlock_t count_lock;
36 unsigned long count;
36 int valid; 37 int valid;
37 38
38 char *init_str; 39 char *init_str;
diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h
index b0b4589e0ebc..f1e0aa56c52a 100644
--- a/arch/um/include/shared/registers.h
+++ b/arch/um/include/shared/registers.h
@@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
16extern int save_registers(int pid, struct uml_pt_regs *regs); 16extern int save_registers(int pid, struct uml_pt_regs *regs);
17extern int restore_registers(int pid, struct uml_pt_regs *regs); 17extern int restore_registers(int pid, struct uml_pt_regs *regs);
18extern int init_registers(int pid); 18extern int init_registers(int pid);
19extern void get_safe_registers(unsigned long *regs); 19extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);
20extern unsigned long get_thread_reg(int reg, jmp_buf *buf); 20extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
21extern int get_fp_registers(int pid, unsigned long *regs); 21extern int get_fp_registers(int pid, unsigned long *regs);
22extern int put_fp_registers(int pid, unsigned long *regs); 22extern int put_fp_registers(int pid, unsigned long *regs);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index fab4371184f6..21c1ae7c3d75 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
202 arch_copy_thread(&current->thread.arch, &p->thread.arch); 202 arch_copy_thread(&current->thread.arch, &p->thread.arch);
203 } 203 }
204 else { 204 else {
205 get_safe_registers(p->thread.regs.regs.gp); 205 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
206 p->thread.request.u.thread = current->thread.request.u.thread; 206 p->thread.request.u.thread = current->thread.request.u.thread;
207 handler = new_thread_handler; 207 handler = new_thread_handler;
208 } 208 }
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 701b672c1122..c9da32b0c707 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -50,23 +50,11 @@ long arch_ptrace(struct task_struct *child, long request,
50 void __user *vp = p; 50 void __user *vp = p;
51 51
52 switch (request) { 52 switch (request) {
53 /* read word at location addr. */
54 case PTRACE_PEEKTEXT:
55 case PTRACE_PEEKDATA:
56 ret = generic_ptrace_peekdata(child, addr, data);
57 break;
58
59 /* read the word at location addr in the USER area. */ 53 /* read the word at location addr in the USER area. */
60 case PTRACE_PEEKUSR: 54 case PTRACE_PEEKUSR:
61 ret = peek_user(child, addr, data); 55 ret = peek_user(child, addr, data);
62 break; 56 break;
63 57
64 /* write the word at location addr. */
65 case PTRACE_POKETEXT:
66 case PTRACE_POKEDATA:
67 ret = generic_ptrace_pokedata(child, addr, data);
68 break;
69
70 /* write the word at location addr in the USER area */ 58 /* write the word at location addr in the USER area */
71 case PTRACE_POKEUSR: 59 case PTRACE_POKEUSR:
72 ret = poke_user(child, addr, data); 60 ret = poke_user(child, addr, data);
@@ -107,16 +95,6 @@ long arch_ptrace(struct task_struct *child, long request,
107 break; 95 break;
108 } 96 }
109#endif 97#endif
110#ifdef PTRACE_GETFPREGS
111 case PTRACE_GETFPREGS: /* Get the child FPU state. */
112 ret = get_fpregs(vp, child);
113 break;
114#endif
115#ifdef PTRACE_SETFPREGS
116 case PTRACE_SETFPREGS: /* Set the child FPU state. */
117 ret = set_fpregs(vp, child);
118 break;
119#endif
120 case PTRACE_GET_THREAD_AREA: 98 case PTRACE_GET_THREAD_AREA:
121 ret = ptrace_get_thread_area(child, addr, vp); 99 ret = ptrace_get_thread_area(child, addr, vp);
122 break; 100 break;
@@ -154,12 +132,6 @@ long arch_ptrace(struct task_struct *child, long request,
154 break; 132 break;
155 } 133 }
156#endif 134#endif
157#ifdef PTRACE_ARCH_PRCTL
158 case PTRACE_ARCH_PRCTL:
159 /* XXX Calls ptrace on the host - needs some SMP thinking */
160 ret = arch_prctl(child, data, (void __user *) addr);
161 break;
162#endif
163 default: 135 default:
164 ret = ptrace_request(child, request, addr, data); 136 ret = ptrace_request(child, request, addr, data);
165 if (ret == -EIO) 137 if (ret == -EIO)
diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c
index 830fe6a1518a..b866b9e3bef9 100644
--- a/arch/um/os-Linux/registers.c
+++ b/arch/um/os-Linux/registers.c
@@ -8,6 +8,8 @@
8#include <string.h> 8#include <string.h>
9#include <sys/ptrace.h> 9#include <sys/ptrace.h>
10#include "sysdep/ptrace.h" 10#include "sysdep/ptrace.h"
11#include "sysdep/ptrace_user.h"
12#include "registers.h"
11 13
12int save_registers(int pid, struct uml_pt_regs *regs) 14int save_registers(int pid, struct uml_pt_regs *regs)
13{ 15{
@@ -32,6 +34,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs)
32/* This is set once at boot time and not changed thereafter */ 34/* This is set once at boot time and not changed thereafter */
33 35
34static unsigned long exec_regs[MAX_REG_NR]; 36static unsigned long exec_regs[MAX_REG_NR];
37static unsigned long exec_fp_regs[FP_SIZE];
35 38
36int init_registers(int pid) 39int init_registers(int pid)
37{ 40{
@@ -42,10 +45,14 @@ int init_registers(int pid)
42 return -errno; 45 return -errno;
43 46
44 arch_init_registers(pid); 47 arch_init_registers(pid);
48 get_fp_registers(pid, exec_fp_regs);
45 return 0; 49 return 0;
46} 50}
47 51
48void get_safe_registers(unsigned long *regs) 52void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
49{ 53{
50 memcpy(regs, exec_regs, sizeof(exec_regs)); 54 memcpy(regs, exec_regs, sizeof(exec_regs));
55
56 if (fp_regs)
57 memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));
51} 58}
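
Reviewer note: the UML register hunks above follow one pattern: the host's FP register image is captured once, next to the general-purpose image, when init_registers() runs, and get_safe_registers() now hands out a copy of both (callers such as init_syscall_regs() that only need the GP set pass NULL). A small standalone sketch of that interface, with made-up array sizes:

#include <stdio.h>
#include <string.h>

#define MAX_REG_NR 32   /* hypothetical sizes, for illustration only */
#define FP_SIZE    64

static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[FP_SIZE];

/* copy of the boot-time register images; fp_regs may be NULL for callers
 * that only need the general-purpose set */
static void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
{
    memcpy(regs, exec_regs, sizeof(exec_regs));
    if (fp_regs)
        memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));
}

int main(void)
{
    unsigned long gp[MAX_REG_NR], fp[FP_SIZE];

    get_safe_registers(gp, fp);    /* new thread that needs FP state too */
    get_safe_registers(gp, NULL);  /* caller that only wants GP registers */
    printf("copied %zu gp and %zu fp words\n",
           sizeof(exec_regs) / sizeof(exec_regs[0]),
           sizeof(exec_fp_regs) / sizeof(exec_fp_regs[0]));
    return 0;
}
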
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index d261f170d120..e771398be5f3 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR];
39 39
40static int __init init_syscall_regs(void) 40static int __init init_syscall_regs(void)
41{ 41{
42 get_safe_registers(syscall_regs); 42 get_safe_registers(syscall_regs, NULL);
43 syscall_regs[REGS_IP_INDEX] = STUB_CODE + 43 syscall_regs[REGS_IP_INDEX] = STUB_CODE +
44 ((unsigned long) &batch_syscall_stub - 44 ((unsigned long) &batch_syscall_stub -
45 (unsigned long) &__syscall_stub_start); 45 (unsigned long) &__syscall_stub_start);
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d6e0a2234b86..dee0e8cf8ad0 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -373,6 +373,9 @@ void userspace(struct uml_pt_regs *regs)
373 if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) 373 if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
374 fatal_sigsegv(); 374 fatal_sigsegv();
375 375
376 if (put_fp_registers(pid, regs->fp))
377 fatal_sigsegv();
378
376 /* Now we set local_using_sysemu to be used for one loop */ 379 /* Now we set local_using_sysemu to be used for one loop */
377 local_using_sysemu = get_using_sysemu(); 380 local_using_sysemu = get_using_sysemu();
378 381
@@ -399,6 +402,12 @@ void userspace(struct uml_pt_regs *regs)
399 fatal_sigsegv(); 402 fatal_sigsegv();
400 } 403 }
401 404
405 if (get_fp_registers(pid, regs->fp)) {
406 printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
407 "errno = %d\n", errno);
408 fatal_sigsegv();
409 }
410
402 UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */ 411 UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
403 412
404 if (WIFSTOPPED(status)) { 413 if (WIFSTOPPED(status)) {
@@ -457,10 +466,11 @@ void userspace(struct uml_pt_regs *regs)
457} 466}
458 467
459static unsigned long thread_regs[MAX_REG_NR]; 468static unsigned long thread_regs[MAX_REG_NR];
469static unsigned long thread_fp_regs[FP_SIZE];
460 470
461static int __init init_thread_regs(void) 471static int __init init_thread_regs(void)
462{ 472{
463 get_safe_registers(thread_regs); 473 get_safe_registers(thread_regs, thread_fp_regs);
464 /* Set parent's instruction pointer to start of clone-stub */ 474 /* Set parent's instruction pointer to start of clone-stub */
465 thread_regs[REGS_IP_INDEX] = STUB_CODE + 475 thread_regs[REGS_IP_INDEX] = STUB_CODE +
466 (unsigned long) stub_clone_handler - 476 (unsigned long) stub_clone_handler -
@@ -503,6 +513,13 @@ int copy_context_skas0(unsigned long new_stack, int pid)
503 return err; 513 return err;
504 } 514 }
505 515
516 err = put_fp_registers(pid, thread_fp_regs);
517 if (err < 0) {
518 printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
519 "failed, pid = %d, err = %d\n", pid, err);
520 return err;
521 }
522
506 /* set a well known return code for detection of child write failure */ 523 /* set a well known return code for detection of child write failure */
507 child_data->err = 12345678; 524 child_data->err = 12345678;
508 525
diff --git a/arch/um/sys-i386/asm/ptrace.h b/arch/um/sys-i386/asm/ptrace.h
index 0273e4d09af7..5d2a59112537 100644
--- a/arch/um/sys-i386/asm/ptrace.h
+++ b/arch/um/sys-i386/asm/ptrace.h
@@ -42,11 +42,6 @@
42 */ 42 */
43struct user_desc; 43struct user_desc;
44 44
45extern int get_fpxregs(struct user_fxsr_struct __user *buf,
46 struct task_struct *child);
47extern int set_fpxregs(struct user_fxsr_struct __user *buf,
48 struct task_struct *tsk);
49
50extern int ptrace_get_thread_area(struct task_struct *child, int idx, 45extern int ptrace_get_thread_area(struct task_struct *child, int idx,
51 struct user_desc __user *user_desc); 46 struct user_desc __user *user_desc);
52 47
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index d23b2d3ea384..3375c2717851 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data)
145 return put_user(tmp, (unsigned long __user *) data); 145 return put_user(tmp, (unsigned long __user *) data);
146} 146}
147 147
148int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
149{ 149{
150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
151 struct user_i387_struct fpregs; 151 struct user_i387_struct fpregs;
@@ -161,7 +161,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
161 return n; 161 return n;
162} 162}
163 163
164int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 164static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
165{ 165{
166 int n, cpu = ((struct thread_info *) child->stack)->cpu; 166 int n, cpu = ((struct thread_info *) child->stack)->cpu;
167 struct user_i387_struct fpregs; 167 struct user_i387_struct fpregs;
@@ -174,7 +174,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
174 (unsigned long *) &fpregs); 174 (unsigned long *) &fpregs);
175} 175}
176 176
177int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 177static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
178{ 178{
179 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 179 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
180 struct user_fxsr_struct fpregs; 180 struct user_fxsr_struct fpregs;
@@ -190,7 +190,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
190 return n; 190 return n;
191} 191}
192 192
193int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 193static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
194{ 194{
195 int n, cpu = ((struct thread_info *) child->stack)->cpu; 195 int n, cpu = ((struct thread_info *) child->stack)->cpu;
196 struct user_fxsr_struct fpregs; 196 struct user_fxsr_struct fpregs;
@@ -206,5 +206,23 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
206long subarch_ptrace(struct task_struct *child, long request, 206long subarch_ptrace(struct task_struct *child, long request,
207 unsigned long addr, unsigned long data) 207 unsigned long addr, unsigned long data)
208{ 208{
209 return -EIO; 209 int ret = -EIO;
210 void __user *datap = (void __user *) data;
211 switch (request) {
212 case PTRACE_GETFPREGS: /* Get the child FPU state. */
213 ret = get_fpregs(datap, child);
214 break;
215 case PTRACE_SETFPREGS: /* Set the child FPU state. */
216 ret = set_fpregs(datap, child);
217 break;
218 case PTRACE_GETFPXREGS: /* Get the child FPU state. */
219 ret = get_fpxregs(datap, child);
220 break;
221 case PTRACE_SETFPXREGS: /* Set the child FPU state. */
222 ret = set_fpxregs(datap, child);
223 break;
224 default:
225 ret = -EIO;
226 }
227 return ret;
210} 228}
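
Reviewer note: with the PEEKTEXT/PEEKDATA and FP cases gone from the UML arch_ptrace() switch, the generic ptrace_request() handles the common requests and the per-arch subarch_ptrace() picks up what is left, which is where the FP regset requests now live (i386 gains the fpx variants, x86_64 keeps PTRACE_ARCH_PRCTL). A toy standalone model of that layered dispatch, with invented request numbers:

#include <errno.h>
#include <stdio.h>

enum { REQ_PEEKDATA = 1, REQ_GETFPREGS = 2, REQ_UNKNOWN = 99 };

static long subarch_ptrace(int request)
{
    switch (request) {
    case REQ_GETFPREGS:
        return 0;           /* handled by the per-arch code */
    default:
        return -EIO;
    }
}

static long generic_ptrace(int request)
{
    if (request == REQ_PEEKDATA)
        return 0;           /* generic_ptrace_peekdata() and friends */
    return -EIO;
}

static long arch_ptrace(int request)
{
    long ret = generic_ptrace(request);

    if (ret == -EIO)
        ret = subarch_ptrace(request);  /* fall back to the subarch handler */
    return ret;
}

int main(void)
{
    printf("%ld %ld %ld\n", arch_ptrace(REQ_PEEKDATA),
           arch_ptrace(REQ_GETFPREGS), arch_ptrace(REQ_UNKNOWN));
    return 0;
}
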
diff --git a/arch/um/sys-i386/shared/sysdep/ptrace.h b/arch/um/sys-i386/shared/sysdep/ptrace.h
index d50e62e07070..c398a5076111 100644
--- a/arch/um/sys-i386/shared/sysdep/ptrace.h
+++ b/arch/um/sys-i386/shared/sysdep/ptrace.h
@@ -53,6 +53,7 @@ extern int sysemu_supported;
53 53
54struct uml_pt_regs { 54struct uml_pt_regs {
55 unsigned long gp[MAX_REG_NR]; 55 unsigned long gp[MAX_REG_NR];
56 unsigned long fp[HOST_FPX_SIZE];
56 struct faultinfo faultinfo; 57 struct faultinfo faultinfo;
57 long syscall; 58 long syscall;
58 int is_user; 59 int is_user;
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index f43613643cdb..4005506834fd 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -145,7 +145,7 @@ int is_syscall(unsigned long addr)
145 return instr == 0x050f; 145 return instr == 0x050f;
146} 146}
147 147
148int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
149{ 149{
150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
151 long fpregs[HOST_FP_SIZE]; 151 long fpregs[HOST_FP_SIZE];
@@ -162,7 +162,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
162 return n; 162 return n;
163} 163}
164 164
165int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 165static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
166{ 166{
167 int n, cpu = ((struct thread_info *) child->stack)->cpu; 167 int n, cpu = ((struct thread_info *) child->stack)->cpu;
168 long fpregs[HOST_FP_SIZE]; 168 long fpregs[HOST_FP_SIZE];
@@ -182,12 +182,16 @@ long subarch_ptrace(struct task_struct *child, long request,
182 void __user *datap = (void __user *) data; 182 void __user *datap = (void __user *) data;
183 183
184 switch (request) { 184 switch (request) {
185 case PTRACE_GETFPXREGS: /* Get the child FPU state. */ 185 case PTRACE_GETFPREGS: /* Get the child FPU state. */
186 ret = get_fpregs(datap, child); 186 ret = get_fpregs(datap, child);
187 break; 187 break;
188 case PTRACE_SETFPXREGS: /* Set the child FPU state. */ 188 case PTRACE_SETFPREGS: /* Set the child FPU state. */
189 ret = set_fpregs(datap, child); 189 ret = set_fpregs(datap, child);
190 break; 190 break;
191 case PTRACE_ARCH_PRCTL:
192 /* XXX Calls ptrace on the host - needs some SMP thinking */
193 ret = arch_prctl(child, data, (void __user *) addr);
194 break;
191 } 195 }
192 196
193 return ret; 197 return ret;
diff --git a/arch/um/sys-x86_64/shared/sysdep/ptrace.h b/arch/um/sys-x86_64/shared/sysdep/ptrace.h
index fdba5457947a..8ee8f8e12af1 100644
--- a/arch/um/sys-x86_64/shared/sysdep/ptrace.h
+++ b/arch/um/sys-x86_64/shared/sysdep/ptrace.h
@@ -85,6 +85,7 @@
85 85
86struct uml_pt_regs { 86struct uml_pt_regs {
87 unsigned long gp[MAX_REG_NR]; 87 unsigned long gp[MAX_REG_NR];
88 unsigned long fp[HOST_FP_SIZE];
88 struct faultinfo faultinfo; 89 struct faultinfo faultinfo;
89 long syscall; 90 long syscall;
90 int is_user; 91 int is_user;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index a0e866d233ee..54edb207ff3a 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -672,7 +672,7 @@ ia32_sys_call_table:
672 .quad sys32_vm86_warning /* vm86 */ 672 .quad sys32_vm86_warning /* vm86 */
673 .quad quiet_ni_syscall /* query_module */ 673 .quad quiet_ni_syscall /* query_module */
674 .quad sys_poll 674 .quad sys_poll
675 .quad compat_sys_nfsservctl 675 .quad quiet_ni_syscall /* old nfsservctl */
676 .quad sys_setresgid16 /* 170 */ 676 .quad sys_setresgid16 /* 170 */
677 .quad sys_getresgid16 677 .quad sys_getresgid16
678 .quad sys_prctl 678 .quad sys_prctl
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 4554cc6fb96a..091508b533b4 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -16,7 +16,6 @@
16#endif 16#endif
17 17
18.macro altinstruction_entry orig alt feature orig_len alt_len 18.macro altinstruction_entry orig alt feature orig_len alt_len
19 .align 8
20 .long \orig - . 19 .long \orig - .
21 .long \alt - . 20 .long \alt - .
22 .word \feature 21 .word \feature
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 23fb6d79f209..37ad100a2210 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -48,9 +48,6 @@ struct alt_instr {
48 u16 cpuid; /* cpuid bit set for replacement */ 48 u16 cpuid; /* cpuid bit set for replacement */
49 u8 instrlen; /* length of original instruction */ 49 u8 instrlen; /* length of original instruction */
50 u8 replacementlen; /* length of new instruction, <= instrlen */ 50 u8 replacementlen; /* length of new instruction, <= instrlen */
51#ifdef CONFIG_X86_64
52 u32 pad2;
53#endif
54}; 51};
55 52
56extern void alternative_instructions(void); 53extern void alternative_instructions(void);
@@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
83 \ 80 \
84 "661:\n\t" oldinstr "\n662:\n" \ 81 "661:\n\t" oldinstr "\n662:\n" \
85 ".section .altinstructions,\"a\"\n" \ 82 ".section .altinstructions,\"a\"\n" \
86 _ASM_ALIGN "\n" \
87 " .long 661b - .\n" /* label */ \ 83 " .long 661b - .\n" /* label */ \
88 " .long 663f - .\n" /* new instruction */ \ 84 " .long 663f - .\n" /* new instruction */ \
89 " .word " __stringify(feature) "\n" /* feature bit */ \ 85 " .word " __stringify(feature) "\n" /* feature bit */ \
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 4258aac99a6e..88b23a43f340 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
332 asm goto("1: jmp %l[t_no]\n" 332 asm goto("1: jmp %l[t_no]\n"
333 "2:\n" 333 "2:\n"
334 ".section .altinstructions,\"a\"\n" 334 ".section .altinstructions,\"a\"\n"
335 _ASM_ALIGN "\n"
336 " .long 1b - .\n" 335 " .long 1b - .\n"
337 " .long 0\n" /* no replacement */ 336 " .long 0\n" /* no replacement */
338 " .word %P0\n" /* feature bit */ 337 " .word %P0\n" /* feature bit */
@@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
350 asm volatile("1: movb $0,%0\n" 349 asm volatile("1: movb $0,%0\n"
351 "2:\n" 350 "2:\n"
352 ".section .altinstructions,\"a\"\n" 351 ".section .altinstructions,\"a\"\n"
353 _ASM_ALIGN "\n"
354 " .long 1b - .\n" 352 " .long 1b - .\n"
355 " .long 3f - .\n" 353 " .long 3f - .\n"
356 " .word %P1\n" /* feature bit */ 354 " .word %P1\n" /* feature bit */
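
Reviewer note: these hunks drop the alignment directives (and the x86-64 pad field) from the .altinstructions entries, which presumably keeps the section a dense array of fixed-size records: two self-relative 32-bit offsets plus the feature and length fields. A standalone sketch of such an entry layout and how a self-relative offset is resolved (the struct below is an illustrative mirror, not the kernel's definition):

#include <stdint.h>
#include <stdio.h>

/* illustrative mirror of a packed altinstructions entry, 12 bytes total */
struct alt_entry {
    int32_t  instr_offset;    /* original instruction, relative to this field */
    int32_t  repl_offset;     /* replacement, relative to this field */
    uint16_t cpuid;           /* feature bit */
    uint8_t  instrlen;
    uint8_t  replacementlen;
};

/* a self-relative offset is resolved against the address of the field itself */
static void *resolve(const int32_t *field)
{
    return (void *)((uintptr_t)field + *field);
}

int main(void)
{
    printf("entry size without padding: %zu bytes\n", sizeof(struct alt_entry));
    (void)resolve;
    return 0;
}
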
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index a518c0a45044..c59cc97fe6c1 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
45#elif defined(__x86_64__) 45#elif defined(__x86_64__)
46 __asm__ ( 46 __asm__ (
47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" 47 "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
48 : [lo]"=a"(product), 48 : [lo]"=a"(product),
49 [hi]"=d"(tmp) 49 [hi]"=d"(tmp)
50 : "0"(delta), 50 : "0"(delta),
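
Reviewer note: pvclock_scale_delta() computes (delta * mul_frac) >> 32; writing the 64-bit multiply as mulq makes the operand size explicit so the assembler cannot emit the 32-bit form. A portable model of the same arithmetic (assumes a compiler with __int128, e.g. 64-bit gcc or clang):

#include <stdint.h>
#include <stdio.h>

/* portable model of pvclock_scale_delta(): (delta * mul_frac) >> 32 */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac)
{
    return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
    /* mul_frac = 0x80000000 scales by one half, so this prints 500 */
    printf("%llu\n", (unsigned long long)scale_delta(1000, 0x80000000u));
    return 0;
}
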
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index d92641cc7acc..201040573444 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -414,7 +414,7 @@ __SYSCALL(__NR_query_module, sys_ni_syscall)
414__SYSCALL(__NR_quotactl, sys_quotactl) 414__SYSCALL(__NR_quotactl, sys_quotactl)
415 415
416#define __NR_nfsservctl 180 416#define __NR_nfsservctl 180
417__SYSCALL(__NR_nfsservctl, sys_nfsservctl) 417__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
418 418
419/* reserved for LiS/STREAMS */ 419/* reserved for LiS/STREAMS */
420#define __NR_getpmsg 181 420#define __NR_getpmsg 181
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 64a619d47d34..7ff4669580cf 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -39,7 +39,7 @@ typedef struct xpaddr {
39 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) 39 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
40 40
41extern unsigned long *machine_to_phys_mapping; 41extern unsigned long *machine_to_phys_mapping;
42extern unsigned int machine_to_phys_order; 42extern unsigned long machine_to_phys_nr;
43 43
44extern unsigned long get_phys_to_machine(unsigned long pfn); 44extern unsigned long get_phys_to_machine(unsigned long pfn);
45extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 45extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
@@ -87,7 +87,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
87 if (xen_feature(XENFEAT_auto_translated_physmap)) 87 if (xen_feature(XENFEAT_auto_translated_physmap))
88 return mfn; 88 return mfn;
89 89
90 if (unlikely((mfn >> machine_to_phys_order) != 0)) { 90 if (unlikely(mfn >= machine_to_phys_nr)) {
91 pfn = ~0; 91 pfn = ~0;
92 goto try_override; 92 goto try_override;
93 } 93 }
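
Reviewer note: the Xen machine-to-physical table need not hold a power-of-two number of entries, so the bounds check in mfn_to_pfn() now compares against the entry count (machine_to_phys_nr) instead of shifting by an order derived with fls(). A small illustration of why the order-based check was looser, with a made-up table size:

#include <stdbool.h>
#include <stdio.h>

/* made-up table size: 1,500,000 entries, not a power of two */
static unsigned long machine_to_phys_nr = 1500000UL;

static bool mfn_in_range_new(unsigned long mfn)
{
    return mfn < machine_to_phys_nr;       /* exact bound */
}

static bool mfn_in_range_old(unsigned long mfn, unsigned int order)
{
    return (mfn >> order) == 0;            /* bound rounded up to 2^order */
}

int main(void)
{
    /* fls(1500000 - 1) == 21, so the old check accepted anything below
     * 2^21 = 2,097,152, including MFNs past the end of the table */
    printf("mfn 1600000: new=%d old=%d\n",
           mfn_in_range_new(1600000UL), mfn_in_range_old(1600000UL, 21));
    return 0;
}
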
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index adc66c3a1fef..34b18594e724 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -207,7 +207,6 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
207 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 207 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
208 APIC_DM_INIT; 208 APIC_DM_INIT;
209 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 209 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
210 mdelay(10);
211 210
212 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 211 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
213 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 212 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 08119a37e53c..6b96110bb0c3 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -149,7 +149,6 @@ struct set_mtrr_data {
149 */ 149 */
150static int mtrr_rendezvous_handler(void *info) 150static int mtrr_rendezvous_handler(void *info)
151{ 151{
152#ifdef CONFIG_SMP
153 struct set_mtrr_data *data = info; 152 struct set_mtrr_data *data = info;
154 153
155 /* 154 /*
@@ -171,7 +170,6 @@ static int mtrr_rendezvous_handler(void *info)
171 } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { 170 } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
172 mtrr_if->set_all(); 171 mtrr_if->set_all();
173 } 172 }
174#endif
175 return 0; 173 return 0;
176} 174}
177 175
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6..cfa62ec090ec 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1900 1900
1901 perf_callchain_store(entry, regs->ip); 1901 perf_callchain_store(entry, regs->ip);
1902 1902
1903 if (!current->mm)
1904 return;
1905
1903 if (perf_callchain_user32(regs, entry)) 1906 if (perf_callchain_user32(regs, entry))
1904 return; 1907 return;
1905 1908
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 5c1a91974918..f3f6f5344001 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -54,6 +54,7 @@
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
55#include <asm/irq_vectors.h> 55#include <asm/irq_vectors.h>
56#include <asm/cpufeature.h> 56#include <asm/cpufeature.h>
57#include <asm/alternative-asm.h>
57 58
58/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ 59/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
59#include <linux/elf-em.h> 60#include <linux/elf-em.h>
@@ -873,12 +874,7 @@ ENTRY(simd_coprocessor_error)
873661: pushl_cfi $do_general_protection 874661: pushl_cfi $do_general_protection
874662: 875662:
875.section .altinstructions,"a" 876.section .altinstructions,"a"
876 .balign 4 877 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
877 .long 661b
878 .long 663f
879 .word X86_FEATURE_XMM
880 .byte 662b-661b
881 .byte 664f-663f
882.previous 878.previous
883.section .altinstr_replacement,"ax" 879.section .altinstr_replacement,"ax"
884663: pushl $do_simd_coprocessor_error 880663: pushl $do_simd_coprocessor_error
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index fbb0a045a1a2..bc19be332bc9 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -168,7 +168,7 @@ ENTRY(sys_call_table)
168 .long ptregs_vm86 168 .long ptregs_vm86
169 .long sys_ni_syscall /* Old sys_query_module */ 169 .long sys_ni_syscall /* Old sys_query_module */
170 .long sys_poll 170 .long sys_poll
171 .long sys_nfsservctl 171 .long sys_ni_syscall /* Old nfsservctl */
172 .long sys_setresgid16 /* 170 */ 172 .long sys_setresgid16 /* 170 */
173 .long sys_getresgid16 173 .long sys_getresgid16
174 .long sys_prctl 174 .long sys_prctl
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 988724b236b6..ff5790d8e990 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,6 +22,8 @@ config KVM
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 # for device assignment: 23 # for device assignment:
24 depends on PCI 24 depends on PCI
25 # for TASKSTATS/TASK_DELAY_ACCT:
26 depends on NET
25 select PREEMPT_NOTIFIERS 27 select PREEMPT_NOTIFIERS
26 select MMU_NOTIFIER 28 select MMU_NOTIFIER
27 select ANON_INODES 29 select ANON_INODES
@@ -31,6 +33,7 @@ config KVM
31 select KVM_ASYNC_PF 33 select KVM_ASYNC_PF
32 select USER_RETURN_NOTIFIER 34 select USER_RETURN_NOTIFIER
33 select KVM_MMIO 35 select KVM_MMIO
36 select TASKSTATS
34 select TASK_DELAY_ACCT 37 select TASK_DELAY_ACCT
35 ---help--- 38 ---help---
36 Support hosting fully virtualized guest machines using hardware 39 Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 247aae3dc008..0d17c8c50acd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -17,6 +17,7 @@
17#include <asm/traps.h> /* dotraplinkage, ... */ 17#include <asm/traps.h> /* dotraplinkage, ... */
18#include <asm/pgalloc.h> /* pgd_*(), ... */ 18#include <asm/pgalloc.h> /* pgd_*(), ... */
19#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 19#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20#include <asm/vsyscall.h>
20 21
21/* 22/*
22 * Page fault error code bits: 23 * Page fault error code bits:
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index ae3cb23cd89b..039d91315bc5 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -360,6 +360,20 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
360 } 360 }
361 } 361 }
362 362
363 /* After the PCI-E bus has been walked and all devices discovered,
364 * configure any settings of the fabric that might be necessary.
365 */
366 if (bus) {
367 struct pci_bus *child;
368 list_for_each_entry(child, &bus->children, node) {
369 struct pci_dev *self = child->self;
370 if (!self)
371 continue;
372
373 pcie_bus_configure_settings(child, self->pcie_mpss);
374 }
375 }
376
363 if (!bus) 377 if (!bus)
364 kfree(sd); 378 kfree(sd);
365 379
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 7000e74b3087..58425adc22c6 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -689,7 +689,9 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
689 irq_attr.trigger = 1; 689 irq_attr.trigger = 1;
690 irq_attr.polarity = 1; 690 irq_attr.polarity = 1;
691 io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); 691 io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
692 } 692 } else
693 pentry->irq = 0; /* No irq */
694
693 switch (pentry->type) { 695 switch (pentry->type) {
694 case SFI_DEV_TYPE_IPC: 696 case SFI_DEV_TYPE_IPC:
695 /* ID as IRQ is a hack that will go away */ 697 /* ID as IRQ is a hack that will go away */
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 8b9940e78e2f..7cce722667b8 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -161,13 +161,13 @@ restart:
161 if (inbuf && inlen) { 161 if (inbuf && inlen) {
162 /* write data to EC */ 162 /* write data to EC */
163 for (i = 0; i < inlen; i++) { 163 for (i = 0; i < inlen; i++) {
164 pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]);
165 outb(inbuf[i], 0x68);
164 if (wait_on_ibf(0x6c, 0)) { 166 if (wait_on_ibf(0x6c, 0)) {
165 printk(KERN_ERR "olpc-ec: timeout waiting for" 167 printk(KERN_ERR "olpc-ec: timeout waiting for"
166 " EC accept data!\n"); 168 " EC accept data!\n");
167 goto err; 169 goto err;
168 } 170 }
169 pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]);
170 outb(inbuf[i], 0x68);
171 } 171 }
172 } 172 }
173 if (outbuf && outlen) { 173 if (outbuf && outlen) {
diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S
index e2800affa754..e354bceee0e0 100644
--- a/arch/x86/vdso/vdso32/sysenter.S
+++ b/arch/x86/vdso/vdso32/sysenter.S
@@ -43,7 +43,7 @@ __kernel_vsyscall:
43 .space 7,0x90 43 .space 7,0x90
44 44
45 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ 45 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
46 jmp .Lenter_kernel 46 int $0x80
47 /* 16: System call normal return point is here! */ 47 /* 16: System call normal return point is here! */
48VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */ 48VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */
49 pop %ebp 49 pop %ebp
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3326204e251f..add2c2d729ce 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -15,7 +15,7 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o 16 p2m.o
17 17
18obj-$(CONFIG_FTRACE) += trace.o 18obj-$(CONFIG_EVENT_TRACING) += trace.o
19 19
20obj-$(CONFIG_SMP) += smp.o 20obj-$(CONFIG_SMP) += smp.o
21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o 21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e2345af01af0..2d69617950f7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type);
77 77
78unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; 78unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
79EXPORT_SYMBOL(machine_to_phys_mapping); 79EXPORT_SYMBOL(machine_to_phys_mapping);
80unsigned int machine_to_phys_order; 80unsigned long machine_to_phys_nr;
81EXPORT_SYMBOL(machine_to_phys_order); 81EXPORT_SYMBOL(machine_to_phys_nr);
82 82
83struct start_info *xen_start_info; 83struct start_info *xen_start_info;
84EXPORT_SYMBOL_GPL(xen_start_info); 84EXPORT_SYMBOL_GPL(xen_start_info);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 8cce339db5e7..3dd53f997b11 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1713,15 +1713,17 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1713void __init xen_setup_machphys_mapping(void) 1713void __init xen_setup_machphys_mapping(void)
1714{ 1714{
1715 struct xen_machphys_mapping mapping; 1715 struct xen_machphys_mapping mapping;
1716 unsigned long machine_to_phys_nr_ents;
1717 1716
1718 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { 1717 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1719 machine_to_phys_mapping = (unsigned long *)mapping.v_start; 1718 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1720 machine_to_phys_nr_ents = mapping.max_mfn + 1; 1719 machine_to_phys_nr = mapping.max_mfn + 1;
1721 } else { 1720 } else {
1722 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; 1721 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1723 } 1722 }
1724 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); 1723#ifdef CONFIG_X86_32
1724 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1725 < machine_to_phys_mapping);
1726#endif
1725} 1727}
1726 1728
1727#ifdef CONFIG_X86_64 1729#ifdef CONFIG_X86_64
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index df118a825f39..46d6d21dbdbe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
184 PFN_UP(start_pci), PFN_DOWN(last)); 184 PFN_UP(start_pci), PFN_DOWN(last));
185 return identity; 185 return identity;
186} 186}
187
188static unsigned long __init xen_get_max_pages(void)
189{
190 unsigned long max_pages = MAX_DOMAIN_PAGES;
191 domid_t domid = DOMID_SELF;
192 int ret;
193
194 ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
195 if (ret > 0)
196 max_pages = ret;
197 return min(max_pages, MAX_DOMAIN_PAGES);
198}
199
187/** 200/**
188 * machine_specific_memory_setup - Hook for machine specific memory setup. 201 * machine_specific_memory_setup - Hook for machine specific memory setup.
189 **/ 202 **/
@@ -292,6 +305,14 @@ char * __init xen_memory_setup(void)
292 305
293 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 306 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
294 307
308 extra_limit = xen_get_max_pages();
309 if (max_pfn + extra_pages > extra_limit) {
310 if (extra_limit > max_pfn)
311 extra_pages = extra_limit - max_pfn;
312 else
313 extra_pages = 0;
314 }
315
295 extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); 316 extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
296 317
297 /* 318 /*
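
Reviewer note: xen_get_max_pages() asks the hypervisor for the domain's maximum reservation, and that value is used to clamp extra_pages so a domain with a fixed memory ceiling does not claim balloon space it can never populate. The clamp itself is simple; a standalone sketch with made-up page counts:

#include <stdio.h>

/* clamp applied to the extra (balloon) pages; numbers below are invented */
static unsigned long clamp_extra(unsigned long max_pfn, unsigned long extra_pages,
                                 unsigned long extra_limit)
{
    if (max_pfn + extra_pages > extra_limit)
        extra_pages = (extra_limit > max_pfn) ? extra_limit - max_pfn : 0;
    return extra_pages;
}

int main(void)
{
    /* domain limited to 1,048,576 pages (4 GiB): the extra memory is trimmed */
    printf("%lu\n", clamp_extra(900000UL, 300000UL, 1048576UL));
    return 0;
}
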
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index b4533a86d7e4..041d4fe9dfe4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -32,6 +32,7 @@
32#include <xen/page.h> 32#include <xen/page.h>
33#include <xen/events.h> 33#include <xen/events.h>
34 34
35#include <xen/hvc-console.h>
35#include "xen-ops.h" 36#include "xen-ops.h"
36#include "mmu.h" 37#include "mmu.h"
37 38
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
207 unsigned cpu; 208 unsigned cpu;
208 unsigned int i; 209 unsigned int i;
209 210
211 if (skip_ioapic_setup) {
212 char *m = (max_cpus == 0) ?
213 "The nosmp parameter is incompatible with Xen; " \
214 "use Xen dom0_max_vcpus=1 parameter" :
215 "The noapic parameter is incompatible with Xen";
216
217 xen_raw_printk(m);
218 panic(m);
219 }
210 xen_init_lock_cpu(0); 220 xen_init_lock_cpu(0);
211 221
212 smp_store_cpu_info(0); 222 smp_store_cpu_info(0);
@@ -521,10 +531,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
521 native_smp_prepare_cpus(max_cpus); 531 native_smp_prepare_cpus(max_cpus);
522 WARN_ON(xen_smp_intr_init(0)); 532 WARN_ON(xen_smp_intr_init(0));
523 533
524 if (!xen_have_vector_callback)
525 return;
526 xen_init_lock_cpu(0); 534 xen_init_lock_cpu(0);
527 xen_init_spinlocks();
528} 535}
529 536
530static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) 537static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
@@ -546,6 +553,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
546 553
547void __init xen_hvm_smp_init(void) 554void __init xen_hvm_smp_init(void)
548{ 555{
556 if (!xen_have_vector_callback)
557 return;
549 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; 558 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
550 smp_ops.smp_send_reschedule = xen_smp_send_reschedule; 559 smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
551 smp_ops.cpu_up = xen_hvm_cpu_up; 560 smp_ops.cpu_up = xen_hvm_cpu_up;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 5158c505bef9..163b4679556e 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void)
168 struct pvclock_vcpu_time_info *src; 168 struct pvclock_vcpu_time_info *src;
169 cycle_t ret; 169 cycle_t ret;
170 170
171 src = &get_cpu_var(xen_vcpu)->time; 171 preempt_disable_notrace();
172 src = &__get_cpu_var(xen_vcpu)->time;
172 ret = pvclock_clocksource_read(src); 173 ret = pvclock_clocksource_read(src);
173 put_cpu_var(xen_vcpu); 174 preempt_enable_notrace();
174 return ret; 175 return ret;
175} 176}
176 177
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 22a2093b5862..b040b0e518ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -113,11 +113,13 @@ xen_iret_start_crit:
113 113
114 /* 114 /*
115 * If there's something pending, mask events again so we can 115 * If there's something pending, mask events again so we can
116 * jump back into xen_hypervisor_callback 116 * jump back into xen_hypervisor_callback. Otherwise do not
117 * touch XEN_vcpu_info_mask.
117 */ 118 */
118 sete XEN_vcpu_info_mask(%eax) 119 jne 1f
120 movb $1, XEN_vcpu_info_mask(%eax)
119 121
120 popl %eax 1221: popl %eax
121 123
122 /* 124 /*
123 * From this point on the registers are restored and the stack 125 * From this point on the registers are restored and the stack
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index a6f934f37f1a..798ee6d285a1 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -455,7 +455,7 @@ __SYSCALL(203, sys_reboot, 3)
455#define __NR_quotactl 204 455#define __NR_quotactl 204
456__SYSCALL(204, sys_quotactl, 4) 456__SYSCALL(204, sys_quotactl, 4)
457#define __NR_nfsservctl 205 457#define __NR_nfsservctl 205
458__SYSCALL(205, sys_nfsservctl, 3) 458__SYSCALL(205, sys_ni_syscall, 0)
459#define __NR__sysctl 206 459#define __NR__sysctl 206
460__SYSCALL(206, sys_sysctl, 1) 460__SYSCALL(206, sys_sysctl, 1)
461#define __NR_bdflush 207 461#define __NR_bdflush 207
diff --git a/block/Kconfig b/block/Kconfig
index 60be1e0455da..e97934eececa 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -65,6 +65,16 @@ config BLK_DEV_BSG
65 65
66 If unsure, say Y. 66 If unsure, say Y.
67 67
68config BLK_DEV_BSGLIB
69 bool "Block layer SG support v4 helper lib"
70 default n
71 select BLK_DEV_BSG
72 help
73 Subsystems will normally enable this if needed. Users will not
74 normally need to manually enable this.
75
76 If unsure, say N.
77
68config BLK_DEV_INTEGRITY 78config BLK_DEV_INTEGRITY
69 bool "Block layer data integrity support" 79 bool "Block layer data integrity support"
70 ---help--- 80 ---help---
diff --git a/block/Makefile b/block/Makefile
index 0fec4b3fab51..514c6e4f427a 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
8 blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o 8 blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
9 9
10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o 10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
11obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
11obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o 12obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
12obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o 13obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
13obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o 14obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bcaf16ee6ad1..b596e54ddd71 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
785{ 785{
786 char *s[4], *p, *major_s = NULL, *minor_s = NULL; 786 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
787 int ret; 787 int ret;
788 unsigned long major, minor, temp; 788 unsigned long major, minor;
789 int i = 0; 789 int i = 0;
790 dev_t dev; 790 dev_t dev;
791 u64 bps, iops; 791 u64 temp;
792 792
793 memset(s, 0, sizeof(s)); 793 memset(s, 0, sizeof(s));
794 794
@@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
826 826
827 dev = MKDEV(major, minor); 827 dev = MKDEV(major, minor);
828 828
829 ret = blkio_check_dev_num(dev); 829 ret = strict_strtoull(s[1], 10, &temp);
830 if (ret) 830 if (ret)
831 return ret; 831 return -EINVAL;
832 832
833 newpn->dev = dev; 833 /* For rule removal, do not check for device presence. */
834 if (temp) {
835 ret = blkio_check_dev_num(dev);
836 if (ret)
837 return ret;
838 }
834 839
835 if (s[1] == NULL) 840 newpn->dev = dev;
836 return -EINVAL;
837 841
838 switch (plid) { 842 switch (plid) {
839 case BLKIO_POLICY_PROP: 843 case BLKIO_POLICY_PROP:
840 ret = strict_strtoul(s[1], 10, &temp); 844 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
841 if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) || 845 temp > BLKIO_WEIGHT_MAX)
842 temp > BLKIO_WEIGHT_MAX)
843 return -EINVAL; 846 return -EINVAL;
844 847
845 newpn->plid = plid; 848 newpn->plid = plid;
@@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
850 switch(fileid) { 853 switch(fileid) {
851 case BLKIO_THROTL_read_bps_device: 854 case BLKIO_THROTL_read_bps_device:
852 case BLKIO_THROTL_write_bps_device: 855 case BLKIO_THROTL_write_bps_device:
853 ret = strict_strtoull(s[1], 10, &bps);
854 if (ret)
855 return -EINVAL;
856
857 newpn->plid = plid; 856 newpn->plid = plid;
858 newpn->fileid = fileid; 857 newpn->fileid = fileid;
859 newpn->val.bps = bps; 858 newpn->val.bps = temp;
860 break; 859 break;
861 case BLKIO_THROTL_read_iops_device: 860 case BLKIO_THROTL_read_iops_device:
862 case BLKIO_THROTL_write_iops_device: 861 case BLKIO_THROTL_write_iops_device:
863 ret = strict_strtoull(s[1], 10, &iops); 862 if (temp > THROTL_IOPS_MAX)
864 if (ret)
865 return -EINVAL;
866
867 if (iops > THROTL_IOPS_MAX)
868 return -EINVAL; 863 return -EINVAL;
869 864
870 newpn->plid = plid; 865 newpn->plid = plid;
871 newpn->fileid = fileid; 866 newpn->fileid = fileid;
872 newpn->val.iops = (unsigned int)iops; 867 newpn->val.iops = (unsigned int)temp;
873 break; 868 break;
874 } 869 }
875 break; 870 break;
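
Reviewer note: the blkio rule parser now reads the numeric value before looking at the device, so writing 0 (rule removal) still works after the device has disappeared; only non-zero limits require the device to be present. A rough standalone sketch of that ordering (strict_strtoull is modelled with strtoull, and the device check is a stand-in returning -ENODEV):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_rule(const char *val_str, int device_exists)
{
    char *end;
    unsigned long long temp;

    errno = 0;
    temp = strtoull(val_str, &end, 10);    /* parse the value first */
    if (errno || *end != '\0')
        return -EINVAL;
    if (temp && !device_exists)
        return -ENODEV;                    /* only enforce presence for real limits */
    return 0;
}

int main(void)
{
    /* removing a rule ("0") succeeds even without the device; a limit does not */
    printf("%d %d\n", parse_rule("0", 0), parse_rule("1048576", 0));
    return 0;
}
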
diff --git a/block/blk-core.c b/block/blk-core.c
index b627558c461f..b2ed78afd9f0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1167,7 +1167,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1167 * true if merge was successful, otherwise false. 1167 * true if merge was successful, otherwise false.
1168 */ 1168 */
1169static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, 1169static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
1170 struct bio *bio) 1170 struct bio *bio, unsigned int *request_count)
1171{ 1171{
1172 struct blk_plug *plug; 1172 struct blk_plug *plug;
1173 struct request *rq; 1173 struct request *rq;
@@ -1176,10 +1176,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
1176 plug = tsk->plug; 1176 plug = tsk->plug;
1177 if (!plug) 1177 if (!plug)
1178 goto out; 1178 goto out;
1179 *request_count = 0;
1179 1180
1180 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1181 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1181 int el_ret; 1182 int el_ret;
1182 1183
1184 (*request_count)++;
1185
1183 if (rq->q != q) 1186 if (rq->q != q)
1184 continue; 1187 continue;
1185 1188
@@ -1219,6 +1222,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1219 struct blk_plug *plug; 1222 struct blk_plug *plug;
1220 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1223 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1221 struct request *req; 1224 struct request *req;
1225 unsigned int request_count = 0;
1222 1226
1223 /* 1227 /*
1224 * low level driver can indicate that it wants pages above a 1228 * low level driver can indicate that it wants pages above a
@@ -1237,7 +1241,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1237 * Check if we can merge with the plugged list before grabbing 1241 * Check if we can merge with the plugged list before grabbing
1238 * any locks. 1242 * any locks.
1239 */ 1243 */
1240 if (attempt_plug_merge(current, q, bio)) 1244 if (attempt_plug_merge(current, q, bio, &request_count))
1241 goto out; 1245 goto out;
1242 1246
1243 spin_lock_irq(q->queue_lock); 1247 spin_lock_irq(q->queue_lock);
@@ -1302,11 +1306,10 @@ get_rq:
1302 if (__rq->q != q) 1306 if (__rq->q != q)
1303 plug->should_sort = 1; 1307 plug->should_sort = 1;
1304 } 1308 }
1309 if (request_count >= BLK_MAX_REQUEST_COUNT)
1310 blk_flush_plug_list(plug, false);
1305 list_add_tail(&req->queuelist, &plug->list); 1311 list_add_tail(&req->queuelist, &plug->list);
1306 plug->count++;
1307 drive_stat_acct(req, 1); 1312 drive_stat_acct(req, 1);
1308 if (plug->count >= BLK_MAX_REQUEST_COUNT)
1309 blk_flush_plug_list(plug, false);
1310 } else { 1313 } else {
1311 spin_lock_irq(q->queue_lock); 1314 spin_lock_irq(q->queue_lock);
1312 add_acct_request(q, req, where); 1315 add_acct_request(q, req, where);
@@ -1702,6 +1705,7 @@ EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1702int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1705int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1703{ 1706{
1704 unsigned long flags; 1707 unsigned long flags;
1708 int where = ELEVATOR_INSERT_BACK;
1705 1709
1706 if (blk_rq_check_limits(q, rq)) 1710 if (blk_rq_check_limits(q, rq))
1707 return -EIO; 1711 return -EIO;
@@ -1718,7 +1722,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1718 */ 1722 */
1719 BUG_ON(blk_queued_rq(rq)); 1723 BUG_ON(blk_queued_rq(rq));
1720 1724
1721 add_acct_request(q, rq, ELEVATOR_INSERT_BACK); 1725 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1726 where = ELEVATOR_INSERT_FLUSH;
1727
1728 add_acct_request(q, rq, where);
1722 spin_unlock_irqrestore(q->queue_lock, flags); 1729 spin_unlock_irqrestore(q->queue_lock, flags);
1723 1730
1724 return 0; 1731 return 0;
@@ -2275,7 +2282,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
2275 * %false - we are done with this request 2282 * %false - we are done with this request
2276 * %true - still buffers pending for this request 2283 * %true - still buffers pending for this request
2277 **/ 2284 **/
2278static bool __blk_end_bidi_request(struct request *rq, int error, 2285bool __blk_end_bidi_request(struct request *rq, int error,
2279 unsigned int nr_bytes, unsigned int bidi_bytes) 2286 unsigned int nr_bytes, unsigned int bidi_bytes)
2280{ 2287{
2281 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2288 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
@@ -2630,7 +2637,6 @@ void blk_start_plug(struct blk_plug *plug)
2630 INIT_LIST_HEAD(&plug->list); 2637 INIT_LIST_HEAD(&plug->list);
2631 INIT_LIST_HEAD(&plug->cb_list); 2638 INIT_LIST_HEAD(&plug->cb_list);
2632 plug->should_sort = 0; 2639 plug->should_sort = 0;
2633 plug->count = 0;
2634 2640
2635 /* 2641 /*
2636 * If this is a nested plug, don't actually assign it. It will be 2642 * If this is a nested plug, don't actually assign it. It will be
@@ -2714,7 +2720,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2714 return; 2720 return;
2715 2721
2716 list_splice_init(&plug->list, &list); 2722 list_splice_init(&plug->list, &list);
2717 plug->count = 0;
2718 2723
2719 if (plug->should_sort) { 2724 if (plug->should_sort) {
2720 list_sort(NULL, &list, plug_rq_cmp); 2725 list_sort(NULL, &list, plug_rq_cmp);
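
Reviewer note: attempt_plug_merge() already walks every request on the plug list, so the list length is now derived during that scan through *request_count instead of keeping a separate plug->count in sync; __make_request() then flushes the plug before queueing the new request once the count reaches BLK_MAX_REQUEST_COUNT. A toy model of counting while scanning (the limit value below is only illustrative):

#include <stdio.h>

#define MAX_PLUGGED 16    /* illustrative stand-in for BLK_MAX_REQUEST_COUNT */

/* walk the plugged requests looking for a merge candidate and count them
 * as a side effect, instead of maintaining a separate counter */
static int scan_for_merge(int queued, unsigned int *request_count)
{
    int i;

    *request_count = 0;
    for (i = 0; i < queued; i++) {
        (*request_count)++;
        /* ...try a back/front merge against request i here... */
    }
    return 0;    /* no merge found in this toy model */
}

int main(void)
{
    unsigned int count;

    scan_for_merge(20, &count);
    if (count >= MAX_PLUGGED)
        printf("flush the plug before adding request %u\n", count + 1);
    return 0;
}
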
diff --git a/block/blk-flush.c b/block/blk-flush.c
index bb21e4c36f70..491eb30a242d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,11 +95,12 @@ static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
95{ 95{
96 unsigned int policy = 0; 96 unsigned int policy = 0;
97 97
98 if (blk_rq_sectors(rq))
99 policy |= REQ_FSEQ_DATA;
100
98 if (fflags & REQ_FLUSH) { 101 if (fflags & REQ_FLUSH) {
99 if (rq->cmd_flags & REQ_FLUSH) 102 if (rq->cmd_flags & REQ_FLUSH)
100 policy |= REQ_FSEQ_PREFLUSH; 103 policy |= REQ_FSEQ_PREFLUSH;
101 if (blk_rq_sectors(rq))
102 policy |= REQ_FSEQ_DATA;
103 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) 104 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
104 policy |= REQ_FSEQ_POSTFLUSH; 105 policy |= REQ_FSEQ_POSTFLUSH;
105 } 106 }
@@ -122,7 +123,7 @@ static void blk_flush_restore_request(struct request *rq)
122 123
123 /* make @rq a normal request */ 124 /* make @rq a normal request */
124 rq->cmd_flags &= ~REQ_FLUSH_SEQ; 125 rq->cmd_flags &= ~REQ_FLUSH_SEQ;
125 rq->end_io = NULL; 126 rq->end_io = rq->flush.saved_end_io;
126} 127}
127 128
128/** 129/**
@@ -300,9 +301,6 @@ void blk_insert_flush(struct request *rq)
300 unsigned int fflags = q->flush_flags; /* may change, cache */ 301 unsigned int fflags = q->flush_flags; /* may change, cache */
301 unsigned int policy = blk_flush_policy(fflags, rq); 302 unsigned int policy = blk_flush_policy(fflags, rq);
302 303
303 BUG_ON(rq->end_io);
304 BUG_ON(!rq->bio || rq->bio != rq->biotail);
305
306 /* 304 /*
307 * @policy now records what operations need to be done. Adjust 305 * @policy now records what operations need to be done. Adjust
308 * REQ_FLUSH and FUA for the driver. 306 * REQ_FLUSH and FUA for the driver.
@@ -312,6 +310,19 @@ void blk_insert_flush(struct request *rq)
312 rq->cmd_flags &= ~REQ_FUA; 310 rq->cmd_flags &= ~REQ_FUA;
313 311
314 /* 312 /*
313 * An empty flush handed down from a stacking driver may
314 * translate into nothing if the underlying device does not
315 * advertise a write-back cache. In this case, simply
316 * complete the request.
317 */
318 if (!policy) {
319 __blk_end_bidi_request(rq, 0, 0, 0);
320 return;
321 }
322
323 BUG_ON(!rq->bio || rq->bio != rq->biotail);
324
325 /*
315 * If there's data but flush is not necessary, the request can be 326 * If there's data but flush is not necessary, the request can be
316 * processed directly without going through flush machinery. Queue 327 * processed directly without going through flush machinery. Queue
317 * for normal execution. 328 * for normal execution.
@@ -319,6 +330,7 @@ void blk_insert_flush(struct request *rq)
319 if ((policy & REQ_FSEQ_DATA) && 330 if ((policy & REQ_FSEQ_DATA) &&
320 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { 331 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
321 list_add_tail(&rq->queuelist, &q->queue_head); 332 list_add_tail(&rq->queuelist, &q->queue_head);
333 blk_run_queue_async(q);
322 return; 334 return;
323 } 335 }
324 336
@@ -329,6 +341,7 @@ void blk_insert_flush(struct request *rq)
329 memset(&rq->flush, 0, sizeof(rq->flush)); 341 memset(&rq->flush, 0, sizeof(rq->flush));
330 INIT_LIST_HEAD(&rq->flush.list); 342 INIT_LIST_HEAD(&rq->flush.list);
331 rq->cmd_flags |= REQ_FLUSH_SEQ; 343 rq->cmd_flags |= REQ_FLUSH_SEQ;
344 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
332 rq->end_io = flush_data_end_io; 345 rq->end_io = flush_data_end_io;
333 346
334 blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); 347 blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
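
Reviewer note: moving the REQ_FSEQ_DATA test outside the fflags check means an empty flush aimed at a queue without a write-back cache reduces to policy == 0, and blk_insert_flush() can complete it immediately with __blk_end_bidi_request() instead of hitting the old BUG_ON; saved_end_io likewise lets requests from stacking drivers keep their completion handler across the flush sequence. A standalone sketch of the reworked policy calculation (the bit values are stand-ins, not the kernel's):

#include <stdio.h>

#define FSEQ_PREFLUSH  (1 << 0)
#define FSEQ_DATA      (1 << 1)
#define FSEQ_POSTFLUSH (1 << 2)

/* DATA is set whenever the request carries sectors, independent of whether
 * the queue supports FLUSH/FUA at all */
static unsigned int flush_policy(int q_has_flush, int q_has_fua,
                                 int rq_has_data, int rq_flush, int rq_fua)
{
    unsigned int policy = 0;

    if (rq_has_data)
        policy |= FSEQ_DATA;
    if (q_has_flush) {
        if (rq_flush)
            policy |= FSEQ_PREFLUSH;
        if (!q_has_fua && rq_fua)
            policy |= FSEQ_POSTFLUSH;
    }
    return policy;
}

int main(void)
{
    /* empty flush on a queue with no write-back cache: nothing left to do */
    printf("policy = %u\n", flush_policy(0, 0, 0, 1, 0));
    return 0;
}
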
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 475fab809a80..1366a89d8e66 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)
115 /* 115 /*
116 * Select completion CPU 116 * Select completion CPU
117 */ 117 */
118 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) { 118 if (req->cpu != -1) {
119 ccpu = req->cpu; 119 ccpu = req->cpu;
120 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) { 120 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
121 ccpu = blk_cpu_to_group(ccpu); 121 ccpu = blk_cpu_to_group(ccpu);
@@ -124,6 +124,14 @@ void __blk_complete_request(struct request *req)
124 } else 124 } else
125 ccpu = cpu; 125 ccpu = cpu;
126 126
127 /*
128 * If current CPU and requested CPU are in the same group, running
129 * softirq in current CPU. One might concern this is just like
130 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
131 * running in interrupt handler, and currently I/O controller doesn't
132 * support multiple interrupts, so current CPU is unique actually. This
133 * avoids IPI sending from current CPU to the first CPU of a group.
134 */
127 if (ccpu == cpu || ccpu == group_cpu) { 135 if (ccpu == cpu || ccpu == group_cpu) {
128 struct list_head *list; 136 struct list_head *list;
129do_local: 137do_local:
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0ee17b5e7fb6..e681805cdb47 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
258 258
259 ret = queue_var_store(&val, page, count); 259 ret = queue_var_store(&val, page, count);
260 spin_lock_irq(q->queue_lock); 260 spin_lock_irq(q->queue_lock);
261 if (val) { 261 if (val == 2) {
262 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 262 queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
263 if (val == 2) 263 queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
264 queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); 264 } else if (val == 1) {
265 } else { 265 queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
266 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
267 } else if (val == 0) {
266 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); 268 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
267 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); 269 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
268 } 270 }
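
Reviewer note: together with the blk-softirq change, the rq_affinity sysfs value now has three distinct meanings: 0 leaves completion wherever the interrupt lands, 1 completes in the submitting CPU's group, and 2 (QUEUE_FLAG_SAME_FORCE) forces completion on the exact submitting CPU. A small sketch of the value-to-flag mapping (the flag bit values here are made up):

#include <stdio.h>

enum { SAME_COMP = 1 << 0, SAME_FORCE = 1 << 1 };

/* 0, 1 and 2 map to distinct flag combinations instead of the old
 * "anything non-zero sets SAME_COMP" behaviour */
static unsigned int rq_affinity_flags(unsigned long val)
{
    if (val == 2)
        return SAME_COMP | SAME_FORCE;   /* force the exact submitting CPU */
    if (val == 1)
        return SAME_COMP;                /* any CPU in the submitter's group */
    return 0;                            /* complete wherever the IRQ lands */
}

int main(void)
{
    printf("%u %u %u\n", rq_affinity_flags(0),
           rq_affinity_flags(1), rq_affinity_flags(2));
    return 0;
}
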
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f6a794120505..a19f58c6fc3a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -746,7 +746,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
746static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) 746static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
747{ 747{
748 bool rw = bio_data_dir(bio); 748 bool rw = bio_data_dir(bio);
749 bool sync = bio->bi_rw & REQ_SYNC; 749 bool sync = rw_is_sync(bio->bi_rw);
750 750
751 /* Charge the bio to the group */ 751 /* Charge the bio to the group */
752 tg->bytes_disp[rw] += bio->bi_size; 752 tg->bytes_disp[rw] += bio->bi_size;
@@ -1150,7 +1150,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1150 1150
1151 if (tg_no_rule_group(tg, rw)) { 1151 if (tg_no_rule_group(tg, rw)) {
1152 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, 1152 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1153 rw, bio->bi_rw & REQ_SYNC); 1153 rw, rw_is_sync(bio->bi_rw));
1154 rcu_read_unlock(); 1154 rcu_read_unlock();
1155 return 0; 1155 return 0;
1156 } 1156 }
diff --git a/block/blk.h b/block/blk.h
index d6586287adc9..20b900a377c9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,8 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
17 struct bio *bio); 17 struct bio *bio);
18void blk_dequeue_request(struct request *rq); 18void blk_dequeue_request(struct request *rq);
19void __blk_queue_free_tags(struct request_queue *q); 19void __blk_queue_free_tags(struct request_queue *q);
20bool __blk_end_bidi_request(struct request *rq, int error,
21 unsigned int nr_bytes, unsigned int bidi_bytes);
20 22
21void blk_rq_timed_out_timer(unsigned long data); 23void blk_rq_timed_out_timer(unsigned long data);
22void blk_delete_timer(struct request *); 24void blk_delete_timer(struct request *);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
new file mode 100644
index 000000000000..6690e6e41037
--- /dev/null
+++ b/block/bsg-lib.c
@@ -0,0 +1,298 @@
1/*
2 * BSG helper library
3 *
4 * Copyright (C) 2008 James Smart, Emulex Corporation
5 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2011 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23#include <linux/slab.h>
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/scatterlist.h>
27#include <linux/bsg-lib.h>
28#include <linux/module.h>
29#include <scsi/scsi_cmnd.h>
30
31/**
32 * bsg_destroy_job - routine to teardown/delete a bsg job
33 * @job: bsg_job that is to be torn down
34 */
35static void bsg_destroy_job(struct bsg_job *job)
36{
37 put_device(job->dev); /* release reference for the request */
38
39 kfree(job->request_payload.sg_list);
40 kfree(job->reply_payload.sg_list);
41 kfree(job);
42}
43
44/**
45 * bsg_job_done - completion routine for bsg requests
46 * @job: bsg_job that is complete
47 * @result: job reply result
48 * @reply_payload_rcv_len: length of payload recvd
49 *
50 * The LLD should call this when the bsg job has completed.
51 */
52void bsg_job_done(struct bsg_job *job, int result,
53 unsigned int reply_payload_rcv_len)
54{
55 struct request *req = job->req;
56 struct request *rsp = req->next_rq;
57 int err;
58
59 err = job->req->errors = result;
60 if (err < 0)
61 /* we're only returning the result field in the reply */
62 job->req->sense_len = sizeof(u32);
63 else
64 job->req->sense_len = job->reply_len;
65 /* we assume all request payload was transferred, residual == 0 */
66 req->resid_len = 0;
67
68 if (rsp) {
69 WARN_ON(reply_payload_rcv_len > rsp->resid_len);
70
71 /* set reply (bidi) residual */
72 rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
73 }
74 blk_complete_request(req);
75}
76EXPORT_SYMBOL_GPL(bsg_job_done);
77
78/**
79 * bsg_softirq_done - softirq done routine for destroying the bsg requests
80 * @rq: BSG request that holds the job to be destroyed
81 */
82static void bsg_softirq_done(struct request *rq)
83{
84 struct bsg_job *job = rq->special;
85
86 blk_end_request_all(rq, rq->errors);
87 bsg_destroy_job(job);
88}
89
90static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
91{
92 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
93
94 BUG_ON(!req->nr_phys_segments);
95
96 buf->sg_list = kzalloc(sz, GFP_KERNEL);
97 if (!buf->sg_list)
98 return -ENOMEM;
99 sg_init_table(buf->sg_list, req->nr_phys_segments);
100 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
101 buf->payload_len = blk_rq_bytes(req);
102 return 0;
103}
104
105/**
106 * bsg_create_job - create the bsg_job structure for the bsg request
107 * @dev: device that is being sent the bsg request
108 * @req: BSG request that needs a job structure
109 */
110static int bsg_create_job(struct device *dev, struct request *req)
111{
112 struct request *rsp = req->next_rq;
113 struct request_queue *q = req->q;
114 struct bsg_job *job;
115 int ret;
116
117 BUG_ON(req->special);
118
119 job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
120 if (!job)
121 return -ENOMEM;
122
123 req->special = job;
124 job->req = req;
125 if (q->bsg_job_size)
126 job->dd_data = (void *)&job[1];
127 job->request = req->cmd;
128 job->request_len = req->cmd_len;
129 job->reply = req->sense;
130 job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
131 * allocated */
132 if (req->bio) {
133 ret = bsg_map_buffer(&job->request_payload, req);
134 if (ret)
135 goto failjob_rls_job;
136 }
137 if (rsp && rsp->bio) {
138 ret = bsg_map_buffer(&job->reply_payload, rsp);
139 if (ret)
140 goto failjob_rls_rqst_payload;
141 }
142 job->dev = dev;
143 /* take a reference for the request */
144 get_device(job->dev);
145 return 0;
146
147failjob_rls_rqst_payload:
148 kfree(job->request_payload.sg_list);
149failjob_rls_job:
150 kfree(job);
151 return -ENOMEM;
152}
153
154/*
155 * bsg_goose_queue - restart queue in case it was stopped
156 * @q: request q to be restarted
157 */
158void bsg_goose_queue(struct request_queue *q)
159{
160 if (!q)
161 return;
162
163 blk_run_queue_async(q);
164}
165EXPORT_SYMBOL_GPL(bsg_goose_queue);
166
167/**
168 * bsg_request_fn - generic handler for bsg requests
169 * @q: request queue to manage
170 *
171 * On error the create_bsg_job function should return a -Exyz error value
172 * that will be stored in req->errors.
173 *
174 * Drivers/subsys should pass this to the queue init function.
175 */
176void bsg_request_fn(struct request_queue *q)
177{
178 struct device *dev = q->queuedata;
179 struct request *req;
180 struct bsg_job *job;
181 int ret;
182
183 if (!get_device(dev))
184 return;
185
186 while (1) {
187 req = blk_fetch_request(q);
188 if (!req)
189 break;
190 spin_unlock_irq(q->queue_lock);
191
192 ret = bsg_create_job(dev, req);
193 if (ret) {
194 req->errors = ret;
195 blk_end_request_all(req, ret);
196 spin_lock_irq(q->queue_lock);
197 continue;
198 }
199
200 job = req->special;
201 ret = q->bsg_job_fn(job);
202 spin_lock_irq(q->queue_lock);
203 if (ret)
204 break;
205 }
206
207 spin_unlock_irq(q->queue_lock);
208 put_device(dev);
209 spin_lock_irq(q->queue_lock);
210}
211EXPORT_SYMBOL_GPL(bsg_request_fn);
212
213/**
214 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
215 * @dev: device to attach bsg device to
216 * @q: request queue setup by caller
217 * @name: name to give the bsg device
218 * @job_fn: bsg job handler
219 * @dd_job_size: size of LLD data needed for each job
220 *
221 * The caller should have set up the request queue with bsg_request_fn
222 * as the request_fn.
223 */
224int bsg_setup_queue(struct device *dev, struct request_queue *q,
225 char *name, bsg_job_fn *job_fn, int dd_job_size)
226{
227 int ret;
228
229 q->queuedata = dev;
230 q->bsg_job_size = dd_job_size;
231 q->bsg_job_fn = job_fn;
232 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
233 blk_queue_softirq_done(q, bsg_softirq_done);
234 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
235
236 ret = bsg_register_queue(q, dev, name, NULL);
237 if (ret) {
238 printk(KERN_ERR "%s: bsg interface failed to "
239 "initialize - register queue\n", dev->kobj.name);
240 return ret;
241 }
242
243 return 0;
244}
245EXPORT_SYMBOL_GPL(bsg_setup_queue);
246
247/**
248 * bsg_remove_queue - Deletes the bsg dev from the q
249 * @q: the request_queue that is to be torn down.
250 *
251 * Notes:
252 * Before unregistering the queue, empty any requests that are blocked
253 */
254void bsg_remove_queue(struct request_queue *q)
255{
256 struct request *req; /* block request */
257 int counts; /* totals for request_list count and starved */
258
259 if (!q)
260 return;
261
262 /* Stop taking in new requests */
263 spin_lock_irq(q->queue_lock);
264 blk_stop_queue(q);
265
266 /* drain all requests in the queue */
267 while (1) {
268 /* need the lock to fetch a request
269 * this may fetch the same request as the previous pass
270 */
271 req = blk_fetch_request(q);
272 /* save requests in use and starved */
273 counts = q->rq.count[0] + q->rq.count[1] +
274 q->rq.starved[0] + q->rq.starved[1];
275 spin_unlock_irq(q->queue_lock);
276 /* any requests still outstanding? */
277 if (counts == 0)
278 break;
279
280 /* This may be the same req as the previous iteration,
281 * always send the blk_end_request_all after a prefetch.
282 * It is not okay to not end the request because the
283 * prefetch started the request.
284 */
285 if (req) {
286 /* return -ENXIO to indicate that this queue is
287 * going away
288 */
289 req->errors = -ENXIO;
290 blk_end_request_all(req, -ENXIO);
291 }
292
293 msleep(200); /* allow bsg to possibly finish */
294 spin_lock_irq(q->queue_lock);
295 }
296 bsg_unregister_queue(q);
297}
298EXPORT_SYMBOL_GPL(bsg_remove_queue);
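
The bsg helper library added above is meant to be driven by an LLD or transport class: the driver sets up a request queue whose request_fn is bsg_request_fn, registers it with bsg_setup_queue(), handles each fetched job in its bsg_job_fn callback, and completes the job with bsg_job_done(). A minimal sketch of such a consumer follows; the "example_*" names, the per-job data layout, and the use of blk_init_queue() with a NULL lock are illustrative assumptions, not part of the patch.

/* Sketch only: hypothetical LLD glue for the new bsg helper library. */
#include <linux/blkdev.h>
#include <linux/bsg-lib.h>

struct example_job_data {
	int state;			/* driver-private per-job state (assumed) */
};

/* Invoked via bsg_request_fn() for every fetched bsg request. */
static int example_bsg_job_fn(struct bsg_job *job)
{
	struct example_job_data *data = job->dd_data;

	data->state = 1;
	/* ... hand job->request and job->request_payload to the hardware ... */

	/* Pretend the command completed immediately with no reply payload. */
	bsg_job_done(job, 0, 0);
	return 0;
}

static int example_bsg_attach(struct device *dev)
{
	struct request_queue *q;

	q = blk_init_queue(bsg_request_fn, NULL);
	if (!q)
		return -ENOMEM;

	return bsg_setup_queue(dev, q, "example_bsg", example_bsg_job_fn,
			       sizeof(struct example_job_data));
}
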
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1f96ad6254f1..16ace89613bc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -130,6 +130,8 @@ struct cfq_queue {
130 unsigned long slice_end; 130 unsigned long slice_end;
131 long slice_resid; 131 long slice_resid;
132 132
133 /* pending priority requests */
134 int prio_pending;
133 /* number of requests that are on the dispatch list or inside driver */ 135 /* number of requests that are on the dispatch list or inside driver */
134 int dispatched; 136 int dispatched;
135 137
@@ -682,6 +684,9 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
682 if (rq_is_sync(rq1) != rq_is_sync(rq2)) 684 if (rq_is_sync(rq1) != rq_is_sync(rq2))
683 return rq_is_sync(rq1) ? rq1 : rq2; 685 return rq_is_sync(rq1) ? rq1 : rq2;
684 686
687 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
688 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
689
685 s1 = blk_rq_pos(rq1); 690 s1 = blk_rq_pos(rq1);
686 s2 = blk_rq_pos(rq2); 691 s2 = blk_rq_pos(rq2);
687 692
@@ -1209,6 +1214,9 @@ static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1209 1214
1210 hlist_del_init(&cfqg->cfqd_node); 1215 hlist_del_init(&cfqg->cfqd_node);
1211 1216
1217 BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1218 cfqd->nr_blkcg_linked_grps--;
1219
1212 /* 1220 /*
1213 * Put the reference taken at the time of creation so that when all 1221 * Put the reference taken at the time of creation so that when all
1214 * queues are gone, group can be destroyed. 1222 * queues are gone, group can be destroyed.
@@ -1604,6 +1612,10 @@ static void cfq_remove_request(struct request *rq)
1604 cfqq->cfqd->rq_queued--; 1612 cfqq->cfqd->rq_queued--;
1605 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1613 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1606 rq_data_dir(rq), rq_is_sync(rq)); 1614 rq_data_dir(rq), rq_is_sync(rq));
1615 if (rq->cmd_flags & REQ_PRIO) {
1616 WARN_ON(!cfqq->prio_pending);
1617 cfqq->prio_pending--;
1618 }
1607} 1619}
1608 1620
1609static int cfq_merge(struct request_queue *q, struct request **req, 1621static int cfq_merge(struct request_queue *q, struct request **req,
@@ -3357,6 +3369,13 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3357 return true; 3369 return true;
3358 3370
3359 /* 3371 /*
3372 * So both queues are sync. Let the new request get disk time if
3373 * it's a metadata request and the current queue is doing regular IO.
3374 */
3375 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3376 return true;
3377
3378 /*
3360 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 3379 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3361 */ 3380 */
3362 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 3381 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
@@ -3420,6 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3420 struct cfq_io_context *cic = RQ_CIC(rq); 3439 struct cfq_io_context *cic = RQ_CIC(rq);
3421 3440
3422 cfqd->rq_queued++; 3441 cfqd->rq_queued++;
3442 if (rq->cmd_flags & REQ_PRIO)
3443 cfqq->prio_pending++;
3423 3444
3424 cfq_update_io_thinktime(cfqd, cfqq, cic); 3445 cfq_update_io_thinktime(cfqd, cfqq, cic);
3425 cfq_update_io_seektime(cfqd, cfqq, rq); 3446 cfq_update_io_seektime(cfqd, cfqq, rq);
diff --git a/block/genhd.c b/block/genhd.c
index 5cb51c55f6d8..e2f67902dd02 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1146,17 +1146,17 @@ static int diskstats_show(struct seq_file *seqf, void *v)
1146 cpu = part_stat_lock(); 1146 cpu = part_stat_lock();
1147 part_round_stats(cpu, hd); 1147 part_round_stats(cpu, hd);
1148 part_stat_unlock(); 1148 part_stat_unlock();
1149 seq_printf(seqf, "%4d %7d %s %lu %lu %llu " 1149 seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
1150 "%u %lu %lu %llu %u %u %u %u\n", 1150 "%u %lu %lu %lu %u %u %u %u\n",
1151 MAJOR(part_devt(hd)), MINOR(part_devt(hd)), 1151 MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
1152 disk_name(gp, hd->partno, buf), 1152 disk_name(gp, hd->partno, buf),
1153 part_stat_read(hd, ios[READ]), 1153 part_stat_read(hd, ios[READ]),
1154 part_stat_read(hd, merges[READ]), 1154 part_stat_read(hd, merges[READ]),
1155 (unsigned long long)part_stat_read(hd, sectors[READ]), 1155 part_stat_read(hd, sectors[READ]),
1156 jiffies_to_msecs(part_stat_read(hd, ticks[READ])), 1156 jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
1157 part_stat_read(hd, ios[WRITE]), 1157 part_stat_read(hd, ios[WRITE]),
1158 part_stat_read(hd, merges[WRITE]), 1158 part_stat_read(hd, merges[WRITE]),
1159 (unsigned long long)part_stat_read(hd, sectors[WRITE]), 1159 part_stat_read(hd, sectors[WRITE]),
1160 jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), 1160 jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
1161 part_in_flight(hd), 1161 part_in_flight(hd),
1162 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1162 jiffies_to_msecs(part_stat_read(hd, io_ticks)),
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index bc533dde16c4..f895a244ca7e 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -121,7 +121,7 @@
121 121
122/* Maximum sleep allowed via Sleep() operator */ 122/* Maximum sleep allowed via Sleep() operator */
123 123
124#define ACPI_MAX_SLEEP 20000 /* Two seconds */ 124#define ACPI_MAX_SLEEP 2000 /* Two seconds */
125 125
126/****************************************************************************** 126/******************************************************************************
127 * 127 *
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c34aa51af4ee..e3f47872ec22 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
13 bool "APEI Generic Hardware Error Source" 13 bool "APEI Generic Hardware Error Source"
14 depends on ACPI_APEI && X86 14 depends on ACPI_APEI && X86
15 select ACPI_HED 15 select ACPI_HED
16 select IRQ_WORK
16 select LLIST 17 select LLIST
17 select GENERIC_ALLOCATOR 18 select GENERIC_ALLOCATOR
18 help 19 help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8041248fce9b..61540360d5ce 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
618 }; 618 };
619 619
620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
621 capbuf[OSC_SUPPORT_TYPE] = 0; 621 capbuf[OSC_SUPPORT_TYPE] = 1;
622 capbuf[OSC_CONTROL_TYPE] = 0; 622 capbuf[OSC_CONTROL_TYPE] = 0;
623 623
624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) 624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ca3e6be44a04..5987e0ba8c2d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -468,6 +468,15 @@ config PATA_ICSIDE
468 interface card. This is not required for ICS partition support. 468 interface card. This is not required for ICS partition support.
469 If you are unsure, say N to this. 469 If you are unsure, say N to this.
470 470
471config PATA_IMX
472 tristate "PATA support for Freescale iMX"
473 depends on ARCH_MXC
474 help
475 This option enables support for the PATA host available on Freescale
476 iMX SoCs.
477
478 If unsure, say N.
479
471config PATA_IT8213 480config PATA_IT8213
472 tristate "IT8213 PATA support (Experimental)" 481 tristate "IT8213 PATA support (Experimental)"
473 depends on PCI && EXPERIMENTAL 482 depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1aa051..9550d691fd19 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o 48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
51obj-$(CONFIG_PATA_IMX) += pata_imx.o
51obj-$(CONFIG_PATA_IT8213) += pata_it8213.o 52obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
52obj-$(CONFIG_PATA_IT821X) += pata_it821x.o 53obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
53obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 54obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 000000000000..ca9d9caedfa3
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
1/*
2 * Freescale iMX PATA driver
3 *
4 * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
5 *
6 * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 *
12 * TODO:
13 * - dmaengine support
14 * - check if timing stuff needed
15 */
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <scsi/scsi_host.h>
21#include <linux/ata.h>
22#include <linux/libata.h>
23#include <linux/platform_device.h>
24#include <linux/clk.h>
25
26#define DRV_NAME "pata_imx"
27
28#define PATA_IMX_ATA_CONTROL 0x24
29#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
30#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
31#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
32#define PATA_IMX_ATA_INT_EN 0x2C
33#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
34#define PATA_IMX_DRIVE_DATA 0xA0
35#define PATA_IMX_DRIVE_CONTROL 0xD8
36
37struct pata_imx_priv {
38 struct clk *clk;
39 /* timings/interrupt/control regs */
40 u8 *host_regs;
41 u32 ata_ctl;
42};
43
44static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
45{
46 struct ata_device *dev;
47 struct ata_port *ap = link->ap;
48 struct pata_imx_priv *priv = ap->host->private_data;
49 u32 val;
50
51 ata_for_each_dev(dev, link, ENABLED) {
52 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
53 dev->xfer_shift = ATA_SHIFT_PIO;
54 dev->flags |= ATA_DFLAG_PIO;
55
56 val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
57 if (ata_pio_need_iordy(dev))
58 val |= PATA_IMX_ATA_CTRL_IORDY_EN;
59 else
60 val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
61 __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
62
63 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
64 }
65 return 0;
66}
67
68static struct scsi_host_template pata_imx_sht = {
69 ATA_PIO_SHT(DRV_NAME),
70};
71
72static struct ata_port_operations pata_imx_port_ops = {
73 .inherits = &ata_sff_port_ops,
74 .sff_data_xfer = ata_sff_data_xfer_noirq,
75 .cable_detect = ata_cable_unknown,
76 .set_mode = pata_imx_set_mode,
77};
78
79static void pata_imx_setup_port(struct ata_ioports *ioaddr)
80{
81 /* Fixup the port shift for platforms that need it */
82 ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
83 ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
84 ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
85 ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
86 ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
87 ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
88 ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
89 ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
90 ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
91 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
92}
93
94static int __devinit pata_imx_probe(struct platform_device *pdev)
95{
96 struct ata_host *host;
97 struct ata_port *ap;
98 struct pata_imx_priv *priv;
99 int irq = 0;
100 struct resource *io_res;
101
102 io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
103 if (io_res == NULL)
104 return -EINVAL;
105
106 irq = platform_get_irq(pdev, 0);
107 if (irq <= 0)
108 return -EINVAL;
109
110 priv = devm_kzalloc(&pdev->dev,
111 sizeof(struct pata_imx_priv), GFP_KERNEL);
112 if (!priv)
113 return -ENOMEM;
114
115 priv->clk = clk_get(&pdev->dev, NULL);
116 if (IS_ERR(priv->clk)) {
117 dev_err(&pdev->dev, "Failed to get clock\n");
118 return PTR_ERR(priv->clk);
119 }
120
121 clk_enable(priv->clk);
122
123 host = ata_host_alloc(&pdev->dev, 1);
124 if (!host)
125 goto free_priv;
126
127 host->private_data = priv;
128 ap = host->ports[0];
129
130 ap->ops = &pata_imx_port_ops;
131 ap->pio_mask = ATA_PIO0;
132 ap->flags |= ATA_FLAG_SLAVE_POSS;
133
134 priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
135 resource_size(io_res));
136 if (!priv->host_regs) {
137 dev_err(&pdev->dev, "failed to map IO/CTL base\n");
138 goto free_priv;
139 }
140
141 ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
142 ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
143
144 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
145
146 pata_imx_setup_port(&ap->ioaddr);
147
148 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
149 (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
150 (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
151
152 /* deassert resets */
153 __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
154 PATA_IMX_ATA_CTRL_ATA_RST_B,
155 priv->host_regs + PATA_IMX_ATA_CONTROL);
156 /* enable interrupts */
157 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
158 priv->host_regs + PATA_IMX_ATA_INT_EN);
159
160 /* activate */
161 return ata_host_activate(host, irq, ata_sff_interrupt, 0,
162 &pata_imx_sht);
163
164free_priv:
165 clk_disable(priv->clk);
166 clk_put(priv->clk);
167 return -ENOMEM;
168}
169
170static int __devexit pata_imx_remove(struct platform_device *pdev)
171{
172 struct ata_host *host = dev_get_drvdata(&pdev->dev);
173 struct pata_imx_priv *priv = host->private_data;
174
175 ata_host_detach(host);
176
177 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
178
179 clk_disable(priv->clk);
180 clk_put(priv->clk);
181
182 return 0;
183}
184
185#ifdef CONFIG_PM
186static int pata_imx_suspend(struct device *dev)
187{
188 struct ata_host *host = dev_get_drvdata(dev);
189 struct pata_imx_priv *priv = host->private_data;
190 int ret;
191
192 ret = ata_host_suspend(host, PMSG_SUSPEND);
193 if (!ret) {
194 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
195 priv->ata_ctl =
196 __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
197 clk_disable(priv->clk);
198 }
199
200 return ret;
201}
202
203static int pata_imx_resume(struct device *dev)
204{
205 struct ata_host *host = dev_get_drvdata(dev);
206 struct pata_imx_priv *priv = host->private_data;
207
208 clk_enable(priv->clk);
209
210 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
211
212 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
213 priv->host_regs + PATA_IMX_ATA_INT_EN);
214
215 ata_host_resume(host);
216
217 return 0;
218}
219
220static const struct dev_pm_ops pata_imx_pm_ops = {
221 .suspend = pata_imx_suspend,
222 .resume = pata_imx_resume,
223};
224#endif
225
226static struct platform_driver pata_imx_driver = {
227 .probe = pata_imx_probe,
228 .remove = __devexit_p(pata_imx_remove),
229 .driver = {
230 .name = DRV_NAME,
231 .owner = THIS_MODULE,
232#ifdef CONFIG_PM
233 .pm = &pata_imx_pm_ops,
234#endif
235 },
236};
237
238static int __init pata_imx_init(void)
239{
240 return platform_driver_register(&pata_imx_driver);
241}
242
243static void __exit pata_imx_exit(void)
244{
245 platform_driver_unregister(&pata_imx_driver);
246}
247module_init(pata_imx_init);
248module_exit(pata_imx_exit);
249
250MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
251MODULE_DESCRIPTION("low-level driver for iMX PATA");
252MODULE_LICENSE("GPL");
253MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 65e4be6be220..8e9f5048a10a 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
124 { NULL } 124 { NULL }
125}; 125};
126 126
127static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
128 {
129 .ident = "AVERATEC 3200",
130 .matches = {
131 DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
132 DMI_MATCH(DMI_BOARD_NAME, "3200"),
133 },
134 },
135 { }
136};
137
127struct via_port { 138struct via_port {
128 u8 cached_device; 139 u8 cached_device;
129}; 140};
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
355 mask &= ~ ATA_MASK_UDMA; 366 mask &= ~ ATA_MASK_UDMA;
356 } 367 }
357 } 368 }
369
370 if (dev->class == ATA_DEV_ATAPI &&
371 dmi_check_system(no_atapi_dma_dmi_table)) {
372 ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
373 mask &= ATA_MASK_PIO;
374 }
375
358 return mask; 376 return mask;
359} 377}
360 378
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0a9a774a7e1e..5c4237452f50 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", 1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1330 __func__); 1330 __func__);
1331 err = -ENOMEM; 1331 err = -ENOMEM;
1332 goto CLEANUP; 1332 goto CLEANUP_ALLOC;
1333 } 1333 }
1334 } 1334 }
1335 1335
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
1349 /* Clear any error bits before libata starts issuing commands */ 1349 /* Clear any error bits before libata starts issuing commands */
1350 clear_serror(); 1350 clear_serror();
1351 ap->private_data = hsdevp; 1351 ap->private_data = hsdevp;
1352 dev_dbg(ap->dev, "%s: done\n", __func__);
1353 return 0;
1352 1354
1355CLEANUP_ALLOC:
1356 kfree(hsdevp);
1353CLEANUP: 1357CLEANUP:
1354 if (err) { 1358 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
1355 sata_dwc_port_stop(ap);
1356 dev_dbg(ap->dev, "%s: fail\n", __func__);
1357 } else {
1358 dev_dbg(ap->dev, "%s: done\n", __func__);
1359 }
1360
1361 return err; 1359 return err;
1362} 1360}
1363 1361
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 98c1d780f552..9dfb40b8c2c9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
438 u8 status; 438 u8 status;
439 439
440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { 440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
441 u32 serror; 441 u32 serror = 0xffffffff;
442 442
443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those 443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
444 * controllers continue to assert IRQ as long as 444 * controllers continue to assert IRQ as long as
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index cf7a0c788052..65cd74832450 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,
397 397
398static int release_nodes(struct device *dev, struct list_head *first, 398static int release_nodes(struct device *dev, struct list_head *first,
399 struct list_head *end, unsigned long flags) 399 struct list_head *end, unsigned long flags)
400 __releases(&dev->devres_lock)
400{ 401{
401 LIST_HEAD(todo); 402 LIST_HEAD(todo);
402 int cnt; 403 int cnt;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 33e1bed68fdd..a4760e095ff5 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)
376 return err; 376 return err;
377} 377}
378 378
379static __initdata DECLARE_COMPLETION(setup_done); 379static DECLARE_COMPLETION(setup_done);
380 380
381static int handle(const char *name, mode_t mode, struct device *dev) 381static int handle(const char *name, mode_t mode, struct device *dev)
382{ 382{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bbb03e6f7255..06ed6b4e7df5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
521 if (!firmware_p) 521 if (!firmware_p)
522 return -EINVAL; 522 return -EINVAL;
523 523
524 if (WARN_ON(usermodehelper_is_disabled())) {
525 dev_err(device, "firmware: %s will not be loaded\n", name);
526 return -EBUSY;
527 }
528
529 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 524 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
530 if (!firmware) { 525 if (!firmware) {
531 dev_err(device, "%s: kmalloc(struct firmware) failed\n", 526 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
539 return 0; 534 return 0;
540 } 535 }
541 536
537 if (WARN_ON(usermodehelper_is_disabled())) {
538 dev_err(device, "firmware: %s will not be loaded\n", name);
539 retval = -EBUSY;
540 goto out;
541 }
542
542 if (uevent) 543 if (uevent)
543 dev_dbg(device, "firmware: requesting %s\n", name); 544 dev_dbg(device, "firmware: requesting %s\n", name);
544 545
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0cad9c7f6bb5..99a5272d7c2f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
33 33
34/** 34/**
35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used 35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
36 * @dev: platform device 36 * @pdev: platform device
37 * 37 *
38 * This is called before platform_device_add() such that any pdev_archdata may 38 * This is called before platform_device_add() such that any pdev_archdata may
39 * be setup before the platform_notifier is called. So if a user needs to 39 * be setup before the platform_notifier is called. So if a user needs to
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f95cfb..2c18d584066d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
19 19
20struct pm_clk_data { 20struct pm_clk_data {
21 struct list_head clock_list; 21 struct list_head clock_list;
22 struct mutex lock; 22 spinlock_t lock;
23}; 23};
24 24
25enum pce_status { 25enum pce_status {
@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
73 } 73 }
74 } 74 }
75 75
76 mutex_lock(&pcd->lock); 76 spin_lock_irq(&pcd->lock);
77 list_add_tail(&ce->node, &pcd->clock_list); 77 list_add_tail(&ce->node, &pcd->clock_list);
78 mutex_unlock(&pcd->lock); 78 spin_unlock_irq(&pcd->lock);
79 return 0; 79 return 0;
80} 80}
81 81
@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
83 * __pm_clk_remove - Destroy PM clock entry. 83 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: PM clock entry to destroy. 84 * @ce: PM clock entry to destroy.
85 * 85 *
86 * This routine must be called under the mutex protecting the PM list of clocks 86 * This routine must be called under the spinlock protecting the PM list of
87 * corresponding to the @ce's device. 87 * clocks corresponding to the @ce's device.
88 */ 88 */
89static void __pm_clk_remove(struct pm_clock_entry *ce) 89static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 90{
@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
123 if (!pcd) 123 if (!pcd)
124 return; 124 return;
125 125
126 mutex_lock(&pcd->lock); 126 spin_lock_irq(&pcd->lock);
127 127
128 list_for_each_entry(ce, &pcd->clock_list, node) { 128 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 129 if (!con_id && !ce->con_id) {
@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
137 } 137 }
138 } 138 }
139 139
140 mutex_unlock(&pcd->lock); 140 spin_unlock_irq(&pcd->lock);
141} 141}
142 142
143/** 143/**
@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
158 } 158 }
159 159
160 INIT_LIST_HEAD(&pcd->clock_list); 160 INIT_LIST_HEAD(&pcd->clock_list);
161 mutex_init(&pcd->lock); 161 spin_lock_init(&pcd->lock);
162 dev->power.subsys_data = pcd; 162 dev->power.subsys_data = pcd;
163 return 0; 163 return 0;
164} 164}
@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
181 181
182 dev->power.subsys_data = NULL; 182 dev->power.subsys_data = NULL;
183 183
184 mutex_lock(&pcd->lock); 184 spin_lock_irq(&pcd->lock);
185 185
186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_clk_remove(ce); 187 __pm_clk_remove(ce);
188 188
189 mutex_unlock(&pcd->lock); 189 spin_unlock_irq(&pcd->lock);
190 190
191 kfree(pcd); 191 kfree(pcd);
192} 192}
@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
220{ 220{
221 struct pm_clk_data *pcd = __to_pcd(dev); 221 struct pm_clk_data *pcd = __to_pcd(dev);
222 struct pm_clock_entry *ce; 222 struct pm_clock_entry *ce;
223 unsigned long flags;
223 224
224 dev_dbg(dev, "%s()\n", __func__); 225 dev_dbg(dev, "%s()\n", __func__);
225 226
226 if (!pcd) 227 if (!pcd)
227 return 0; 228 return 0;
228 229
229 mutex_lock(&pcd->lock); 230 spin_lock_irqsave(&pcd->lock, flags);
230 231
231 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 232 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
232 if (ce->status == PCE_STATUS_NONE) 233 if (ce->status == PCE_STATUS_NONE)
@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
238 } 239 }
239 } 240 }
240 241
241 mutex_unlock(&pcd->lock); 242 spin_unlock_irqrestore(&pcd->lock, flags);
242 243
243 return 0; 244 return 0;
244} 245}
@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
251{ 252{
252 struct pm_clk_data *pcd = __to_pcd(dev); 253 struct pm_clk_data *pcd = __to_pcd(dev);
253 struct pm_clock_entry *ce; 254 struct pm_clock_entry *ce;
255 unsigned long flags;
254 256
255 dev_dbg(dev, "%s()\n", __func__); 257 dev_dbg(dev, "%s()\n", __func__);
256 258
257 if (!pcd) 259 if (!pcd)
258 return 0; 260 return 0;
259 261
260 mutex_lock(&pcd->lock); 262 spin_lock_irqsave(&pcd->lock, flags);
261 263
262 list_for_each_entry(ce, &pcd->clock_list, node) { 264 list_for_each_entry(ce, &pcd->clock_list, node) {
263 if (ce->status == PCE_STATUS_NONE) 265 if (ce->status == PCE_STATUS_NONE)
@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
269 } 271 }
270 } 272 }
271 273
272 mutex_unlock(&pcd->lock); 274 spin_unlock_irqrestore(&pcd->lock, flags);
273 275
274 return 0; 276 return 0;
275} 277}
@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
344{ 346{
345 struct pm_clk_data *pcd = __to_pcd(dev); 347 struct pm_clk_data *pcd = __to_pcd(dev);
346 struct pm_clock_entry *ce; 348 struct pm_clock_entry *ce;
349 unsigned long flags;
347 350
348 dev_dbg(dev, "%s()\n", __func__); 351 dev_dbg(dev, "%s()\n", __func__);
349 352
@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
351 if (!pcd || !dev->driver) 354 if (!pcd || !dev->driver)
352 return 0; 355 return 0;
353 356
354 mutex_lock(&pcd->lock); 357 spin_lock_irqsave(&pcd->lock, flags);
355 358
356 list_for_each_entry_reverse(ce, &pcd->clock_list, node) 359 list_for_each_entry_reverse(ce, &pcd->clock_list, node)
357 clk_disable(ce->clk); 360 clk_disable(ce->clk);
358 361
359 mutex_unlock(&pcd->lock); 362 spin_unlock_irqrestore(&pcd->lock, flags);
360 363
361 return 0; 364 return 0;
362} 365}
@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
369{ 372{
370 struct pm_clk_data *pcd = __to_pcd(dev); 373 struct pm_clk_data *pcd = __to_pcd(dev);
371 struct pm_clock_entry *ce; 374 struct pm_clock_entry *ce;
375 unsigned long flags;
372 376
373 dev_dbg(dev, "%s()\n", __func__); 377 dev_dbg(dev, "%s()\n", __func__);
374 378
@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
376 if (!pcd || !dev->driver) 380 if (!pcd || !dev->driver)
377 return 0; 381 return 0;
378 382
379 mutex_lock(&pcd->lock); 383 spin_lock_irqsave(&pcd->lock, flags);
380 384
381 list_for_each_entry(ce, &pcd->clock_list, node) 385 list_for_each_entry(ce, &pcd->clock_list, node)
382 clk_enable(ce->clk); 386 clk_enable(ce->clk);
383 387
384 mutex_unlock(&pcd->lock); 388 spin_unlock_irqrestore(&pcd->lock, flags);
385 389
386 return 0; 390 return 0;
387} 391}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e18566a0fedd..1c374579407c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
460 return 0; 460 return 0;
461} 461}
462 462
463/**
464 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
465 */
466void pm_genpd_poweroff_unused(void)
467{
468 struct generic_pm_domain *genpd;
469
470 mutex_lock(&gpd_list_lock);
471
472 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
473 genpd_queue_power_off_work(genpd);
474
475 mutex_unlock(&gpd_list_lock);
476}
477
463#else 478#else
464 479
465static inline void genpd_power_off_work_fn(struct work_struct *work) {} 480static inline void genpd_power_off_work_fn(struct work_struct *work) {}
@@ -1255,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1255 list_add(&genpd->gpd_list_node, &gpd_list); 1270 list_add(&genpd->gpd_list_node, &gpd_list);
1256 mutex_unlock(&gpd_list_lock); 1271 mutex_unlock(&gpd_list_lock);
1257} 1272}
1258
1259/**
1260 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1261 */
1262void pm_genpd_poweroff_unused(void)
1263{
1264 struct generic_pm_domain *genpd;
1265
1266 mutex_lock(&gpd_list_lock);
1267
1268 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1269 genpd_queue_power_off_work(genpd);
1270
1271 mutex_unlock(&gpd_list_lock);
1272}
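
pm_genpd_poweroff_unused(), relocated above into the runtime-PM section of the file, queues a power-off work item for every registered generic PM domain so that domains left with no active devices are shut down. A platform would typically invoke it once, late in boot, after all devices have been bound to their domains; the late_initcall below is only an illustrative sketch of that pattern, not code from this patch.

/* Illustrative sketch: power off all unused PM domains once boot settles. */
static int __init example_soc_pm_late_init(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(example_soc_pm_late_init);
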
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c2231ff06cbc..c4f7a45cd2c3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
113} 113}
114EXPORT_SYMBOL_GPL(regmap_init_i2c); 114EXPORT_SYMBOL_GPL(regmap_init_i2c);
115 115
116MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4deba0621bc7..f8396945d6ed 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -13,6 +13,7 @@
13#include <linux/regmap.h> 13#include <linux/regmap.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h>
16 17
17static int regmap_spi_write(struct device *dev, const void *data, size_t count) 18static int regmap_spi_write(struct device *dev, const void *data, size_t count)
18{ 19{
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
70 return regmap_init(&spi->dev, &regmap_spi, config); 71 return regmap_init(&spi->dev, &regmap_spi, config);
71} 72}
72EXPORT_SYMBOL_GPL(regmap_init_spi); 73EXPORT_SYMBOL_GPL(regmap_init_spi);
74
75MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index cf3565cae93d..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); 168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
169 if (map->work_buf == NULL) { 169 if (map->work_buf == NULL) {
170 ret = -ENOMEM; 170 ret = -ENOMEM;
171 goto err_bus; 171 goto err_map;
172 } 172 }
173 173
174 return map; 174 return map;
175 175
176err_bus:
177 module_put(map->bus->owner);
178err_map: 176err_map:
179 kfree(map); 177 kfree(map);
180err: 178err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
188void regmap_exit(struct regmap *map) 186void regmap_exit(struct regmap *map)
189{ 187{
190 kfree(map->work_buf); 188 kfree(map->work_buf);
191 module_put(map->bus->owner);
192 kfree(map); 189 kfree(map);
193} 190}
194EXPORT_SYMBOL_GPL(regmap_exit); 191EXPORT_SYMBOL_GPL(regmap_exit);
@@ -317,7 +314,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
317 u8[0] |= map->bus->read_flag_mask; 314 u8[0] |= map->bus->read_flag_mask;
318 315
319 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, 316 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
320 val, map->format.val_bytes); 317 val, val_len);
321 if (ret != 0) 318 if (ret != 0)
322 return ret; 319 return ret;
323 320
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7072216a2a3f..8c09c3e547cd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
15static int bcma_bus_match(struct device *dev, struct device_driver *drv); 15static int bcma_bus_match(struct device *dev, struct device_driver *drv);
16static int bcma_device_probe(struct device *dev); 16static int bcma_device_probe(struct device *dev);
17static int bcma_device_remove(struct device *dev); 17static int bcma_device_remove(struct device *dev);
18static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
18 19
19static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) 20static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
20{ 21{
@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
49 .match = bcma_bus_match, 50 .match = bcma_bus_match,
50 .probe = bcma_device_probe, 51 .probe = bcma_device_probe,
51 .remove = bcma_device_remove, 52 .remove = bcma_device_remove,
53 .uevent = bcma_device_uevent,
52 .dev_attrs = bcma_device_attrs, 54 .dev_attrs = bcma_device_attrs,
53}; 55};
54 56
@@ -295,6 +297,16 @@ static int bcma_device_remove(struct device *dev)
295 return 0; 297 return 0;
296} 298}
297 299
300static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
301{
302 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
303
304 return add_uevent_var(env,
305 "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
306 core->id.manuf, core->id.id,
307 core->id.rev, core->id.class);
308}
309
298static int __init bcma_modinit(void) 310static int __init bcma_modinit(void)
299{ 311{
300 int err; 312 int err;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4e18d3..6f07ec1c2f58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
256 256
257 Most users will answer N here. 257 Most users will answer N here.
258 258
259config BLK_DEV_LOOP_MIN_COUNT
260 int "Number of loop devices to pre-create at init time"
261 depends on BLK_DEV_LOOP
262 default 8
263 help
264 Static number of loop devices to be unconditionally pre-created
265 at init time.
266
267 This default value can be overridden on the kernel command
268 line or with the module parameter loop.max_loop.
269
270 The historic default is 8. If a late 2011 version of losetup(8)
271 is used, it can be set to 0, since needed loop devices can be
272 dynamically allocated with the /dev/loop-control interface.
273
259config BLK_DEV_CRYPTOLOOP 274config BLK_DEV_CRYPTOLOOP
260 tristate "Cryptoloop Support" 275 tristate "Cryptoloop Support"
261 select CRYPTO 276 select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
471 in another domain which drives the actual block device. 486 in another domain which drives the actual block device.
472 487
473config XEN_BLKDEV_BACKEND 488config XEN_BLKDEV_BACKEND
474 tristate "Block-device backend driver" 489 tristate "Xen block-device backend driver"
475 depends on XEN_BACKEND 490 depends on XEN_BACKEND
476 help 491 help
477 The block-device backend driver allows the kernel to export its 492 The block-device backend driver allows the kernel to export its
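
The /dev/loop-control interface mentioned in the new BLK_DEV_LOOP_MIN_COUNT help text is the misc character device added by the loop.c changes further down; it accepts LOOP_CTL_ADD, LOOP_CTL_REMOVE and LOOP_CTL_GET_FREE ioctls. A small user-space sketch of asking the kernel for a free loop device is shown below; it assumes a <linux/loop.h> recent enough to define the LOOP_CTL_* constants.

/* User-space sketch: allocate a free loop device via /dev/loop-control. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>		/* LOOP_CTL_GET_FREE */

int main(void)
{
	int ctl, nr;

	ctl = open("/dev/loop-control", O_RDWR);
	if (ctl < 0) {
		perror("open /dev/loop-control");
		return 1;
	}

	nr = ioctl(ctl, LOOP_CTL_GET_FREE);	/* returns a free device number */
	if (nr < 0) {
		perror("LOOP_CTL_GET_FREE");
		close(ctl);
		return 1;
	}

	printf("allocated /dev/loop%d\n", nr);
	close(ctl);
	return 0;
}
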
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 515bcd948a43..0feab261e295 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1829 1829
1830 /* silently ignore cpu mask on UP kernel */ 1830 /* silently ignore cpu mask on UP kernel */
1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1832 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1832 err = bitmap_parse(sc.cpu_mask, 32,
1833 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1833 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1834 if (err) { 1834 if (err) {
1835 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1835 dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
1836 retcode = ERR_CPU_MASK_PARSE; 1836 retcode = ERR_CPU_MASK_PARSE;
1837 goto fail; 1837 goto fail;
1838 } 1838 }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f418676..9955a53733b2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
4250 use_virtual_dma = can_use_virtual_dma & 1; 4250 use_virtual_dma = can_use_virtual_dma & 1;
4251 fdc_state[0].address = FDC1; 4251 fdc_state[0].address = FDC1;
4252 if (fdc_state[0].address == -1) { 4252 if (fdc_state[0].address == -1) {
4253 del_timer(&fd_timeout); 4253 del_timer_sync(&fd_timeout);
4254 err = -ENODEV; 4254 err = -ENODEV;
4255 goto out_unreg_region; 4255 goto out_unreg_region;
4256 } 4256 }
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
4261 fdc = 0; /* reset fdc in case of unexpected interrupt */ 4261 fdc = 0; /* reset fdc in case of unexpected interrupt */
4262 err = floppy_grab_irq_and_dma(); 4262 err = floppy_grab_irq_and_dma();
4263 if (err) { 4263 if (err) {
4264 del_timer(&fd_timeout); 4264 del_timer_sync(&fd_timeout);
4265 err = -EBUSY; 4265 err = -EBUSY;
4266 goto out_unreg_region; 4266 goto out_unreg_region;
4267 } 4267 }
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
4318 user_reset_fdc(-1, FD_RESET_ALWAYS, false); 4318 user_reset_fdc(-1, FD_RESET_ALWAYS, false);
4319 } 4319 }
4320 fdc = 0; 4320 fdc = 0;
4321 del_timer(&fd_timeout); 4321 del_timer_sync(&fd_timeout);
4322 current_drive = 0; 4322 current_drive = 0;
4323 initialized = true; 4323 initialized = true;
4324 if (have_no_fdc) { 4324 if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
4368 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4368 unregister_blkdev(FLOPPY_MAJOR, "fd");
4369out_put_disk: 4369out_put_disk:
4370 while (dr--) { 4370 while (dr--) {
4371 del_timer(&motor_off_timer[dr]); 4371 del_timer_sync(&motor_off_timer[dr]);
4372 if (disks[dr]->queue) 4372 if (disks[dr]->queue)
4373 blk_cleanup_queue(disks[dr]->queue); 4373 blk_cleanup_queue(disks[dr]->queue);
4374 put_disk(disks[dr]); 4374 put_disk(disks[dr]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212b..4720c7ade0ae 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,11 +75,11 @@
75#include <linux/kthread.h> 75#include <linux/kthread.h>
76#include <linux/splice.h> 76#include <linux/splice.h>
77#include <linux/sysfs.h> 77#include <linux/sysfs.h>
78 78#include <linux/miscdevice.h>
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static LIST_HEAD(loop_devices); 81static DEFINE_IDR(loop_index_idr);
82static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_index_mutex);
83 83
84static int max_part; 84static int max_part;
85static int part_shift; 85static int part_shift;
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file)
722static ssize_t loop_attr_show(struct device *dev, char *page, 722static ssize_t loop_attr_show(struct device *dev, char *page,
723 ssize_t (*callback)(struct loop_device *, char *)) 723 ssize_t (*callback)(struct loop_device *, char *))
724{ 724{
725 struct loop_device *l, *lo = NULL; 725 struct gendisk *disk = dev_to_disk(dev);
726 726 struct loop_device *lo = disk->private_data;
727 mutex_lock(&loop_devices_mutex);
728 list_for_each_entry(l, &loop_devices, lo_list)
729 if (disk_to_dev(l->lo_disk) == dev) {
730 lo = l;
731 break;
732 }
733 mutex_unlock(&loop_devices_mutex);
734 727
735 return lo ? callback(lo, page) : -EIO; 728 return callback(lo, page);
736} 729}
737 730
738#define LOOP_ATTR_RO(_name) \ 731#define LOOP_ATTR_RO(_name) \
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
750 ssize_t ret; 743 ssize_t ret;
751 char *p = NULL; 744 char *p = NULL;
752 745
753 mutex_lock(&lo->lo_ctl_mutex); 746 spin_lock_irq(&lo->lo_lock);
754 if (lo->lo_backing_file) 747 if (lo->lo_backing_file)
755 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); 748 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
756 mutex_unlock(&lo->lo_ctl_mutex); 749 spin_unlock_irq(&lo->lo_lock);
757 750
758 if (IS_ERR_OR_NULL(p)) 751 if (IS_ERR_OR_NULL(p))
759 ret = PTR_ERR(p); 752 ret = PTR_ERR(p);
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
1007 1000
1008 kthread_stop(lo->lo_thread); 1001 kthread_stop(lo->lo_thread);
1009 1002
1003 spin_lock_irq(&lo->lo_lock);
1010 lo->lo_backing_file = NULL; 1004 lo->lo_backing_file = NULL;
1005 spin_unlock_irq(&lo->lo_lock);
1011 1006
1012 loop_release_xfer(lo); 1007 loop_release_xfer(lo);
1013 lo->transfer = NULL; 1008 lo->transfer = NULL;
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1485 1480
1486static int lo_open(struct block_device *bdev, fmode_t mode) 1481static int lo_open(struct block_device *bdev, fmode_t mode)
1487{ 1482{
1488 struct loop_device *lo = bdev->bd_disk->private_data; 1483 struct loop_device *lo;
1484 int err = 0;
1485
1486 mutex_lock(&loop_index_mutex);
1487 lo = bdev->bd_disk->private_data;
1488 if (!lo) {
1489 err = -ENXIO;
1490 goto out;
1491 }
1489 1492
1490 mutex_lock(&lo->lo_ctl_mutex); 1493 mutex_lock(&lo->lo_ctl_mutex);
1491 lo->lo_refcnt++; 1494 lo->lo_refcnt++;
1492 mutex_unlock(&lo->lo_ctl_mutex); 1495 mutex_unlock(&lo->lo_ctl_mutex);
1493 1496out:
1494 return 0; 1497 mutex_unlock(&loop_index_mutex);
1498 return err;
1495} 1499}
1496 1500
1497static int lo_release(struct gendisk *disk, fmode_t mode) 1501static int lo_release(struct gendisk *disk, fmode_t mode)
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs)
1557 return 0; 1561 return 0;
1558} 1562}
1559 1563
1564static int unregister_transfer_cb(int id, void *ptr, void *data)
1565{
1566 struct loop_device *lo = ptr;
1567 struct loop_func_table *xfer = data;
1568
1569 mutex_lock(&lo->lo_ctl_mutex);
1570 if (lo->lo_encryption == xfer)
1571 loop_release_xfer(lo);
1572 mutex_unlock(&lo->lo_ctl_mutex);
1573 return 0;
1574}
1575
1560int loop_unregister_transfer(int number) 1576int loop_unregister_transfer(int number)
1561{ 1577{
1562 unsigned int n = number; 1578 unsigned int n = number;
1563 struct loop_device *lo;
1564 struct loop_func_table *xfer; 1579 struct loop_func_table *xfer;
1565 1580
1566 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) 1581 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1567 return -EINVAL; 1582 return -EINVAL;
1568 1583
1569 xfer_funcs[n] = NULL; 1584 xfer_funcs[n] = NULL;
1570 1585 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1571 list_for_each_entry(lo, &loop_devices, lo_list) {
1572 mutex_lock(&lo->lo_ctl_mutex);
1573
1574 if (lo->lo_encryption == xfer)
1575 loop_release_xfer(lo);
1576
1577 mutex_unlock(&lo->lo_ctl_mutex);
1578 }
1579
1580 return 0; 1586 return 0;
1581} 1587}
1582 1588
1583EXPORT_SYMBOL(loop_register_transfer); 1589EXPORT_SYMBOL(loop_register_transfer);
1584EXPORT_SYMBOL(loop_unregister_transfer); 1590EXPORT_SYMBOL(loop_unregister_transfer);
1585 1591
1586static struct loop_device *loop_alloc(int i) 1592static int loop_add(struct loop_device **l, int i)
1587{ 1593{
1588 struct loop_device *lo; 1594 struct loop_device *lo;
1589 struct gendisk *disk; 1595 struct gendisk *disk;
1596 int err;
1590 1597
1591 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1598 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1592 if (!lo) 1599 if (!lo) {
1600 err = -ENOMEM;
1593 goto out; 1601 goto out;
1602 }
1603
1604 err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
1605 if (err < 0)
1606 goto out_free_dev;
1607
1608 if (i >= 0) {
1609 int m;
1610
1611 /* create specific i in the index */
1612 err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1613 if (err >= 0 && i != m) {
1614 idr_remove(&loop_index_idr, m);
1615 err = -EEXIST;
1616 }
1617 } else if (i == -1) {
1618 int m;
1619
1620 /* get next free nr */
1621 err = idr_get_new(&loop_index_idr, lo, &m);
1622 if (err >= 0)
1623 i = m;
1624 } else {
1625 err = -EINVAL;
1626 }
1627 if (err < 0)
1628 goto out_free_dev;
1594 1629
1595 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1630 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1596 if (!lo->lo_queue) 1631 if (!lo->lo_queue)
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i)
1611 disk->private_data = lo; 1646 disk->private_data = lo;
1612 disk->queue = lo->lo_queue; 1647 disk->queue = lo->lo_queue;
1613 sprintf(disk->disk_name, "loop%d", i); 1648 sprintf(disk->disk_name, "loop%d", i);
1614 return lo; 1649 add_disk(disk);
1650 *l = lo;
1651 return lo->lo_number;
1615 1652
1616out_free_queue: 1653out_free_queue:
1617 blk_cleanup_queue(lo->lo_queue); 1654 blk_cleanup_queue(lo->lo_queue);
1618out_free_dev: 1655out_free_dev:
1619 kfree(lo); 1656 kfree(lo);
1620out: 1657out:
1621 return NULL; 1658 return err;
1622} 1659}
1623 1660
1624static void loop_free(struct loop_device *lo) 1661static void loop_remove(struct loop_device *lo)
1625{ 1662{
1663 del_gendisk(lo->lo_disk);
1626 blk_cleanup_queue(lo->lo_queue); 1664 blk_cleanup_queue(lo->lo_queue);
1627 put_disk(lo->lo_disk); 1665 put_disk(lo->lo_disk);
1628 list_del(&lo->lo_list);
1629 kfree(lo); 1666 kfree(lo);
1630} 1667}
1631 1668
1632static struct loop_device *loop_init_one(int i) 1669static int find_free_cb(int id, void *ptr, void *data)
1670{
1671 struct loop_device *lo = ptr;
1672 struct loop_device **l = data;
1673
1674 if (lo->lo_state == Lo_unbound) {
1675 *l = lo;
1676 return 1;
1677 }
1678 return 0;
1679}
1680
1681static int loop_lookup(struct loop_device **l, int i)
1633{ 1682{
1634 struct loop_device *lo; 1683 struct loop_device *lo;
1684 int ret = -ENODEV;
1635 1685
1636 list_for_each_entry(lo, &loop_devices, lo_list) { 1686 if (i < 0) {
1637 if (lo->lo_number == i) 1687 int err;
1638 return lo; 1688
1689 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1690 if (err == 1) {
1691 *l = lo;
1692 ret = lo->lo_number;
1693 }
1694 goto out;
1639 } 1695 }
1640 1696
1641 lo = loop_alloc(i); 1697 /* lookup and return a specific i */
1698 lo = idr_find(&loop_index_idr, i);
1642 if (lo) { 1699 if (lo) {
1643 add_disk(lo->lo_disk); 1700 *l = lo;
1644 list_add_tail(&lo->lo_list, &loop_devices); 1701 ret = lo->lo_number;
1645 } 1702 }
1646 return lo; 1703out:
1647} 1704 return ret;
1648
1649static void loop_del_one(struct loop_device *lo)
1650{
1651 del_gendisk(lo->lo_disk);
1652 loop_free(lo);
1653} 1705}
1654 1706
1655static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1707static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1656{ 1708{
1657 struct loop_device *lo; 1709 struct loop_device *lo;
1658 struct kobject *kobj; 1710 struct kobject *kobj;
1711 int err;
1659 1712
1660 mutex_lock(&loop_devices_mutex); 1713 mutex_lock(&loop_index_mutex);
1661 lo = loop_init_one(MINOR(dev) >> part_shift); 1714 err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); 1715 if (err < 0)
1663 mutex_unlock(&loop_devices_mutex); 1716 err = loop_add(&lo, MINOR(dev) >> part_shift);
1717 if (err < 0)
1718 kobj = ERR_PTR(err);
1719 else
1720 kobj = get_disk(lo->lo_disk);
1721 mutex_unlock(&loop_index_mutex);
1664 1722
1665 *part = 0; 1723 *part = 0;
1666 return kobj; 1724 return kobj;
1667} 1725}
1668 1726
1727static long loop_control_ioctl(struct file *file, unsigned int cmd,
1728 unsigned long parm)
1729{
1730 struct loop_device *lo;
1731 int ret = -ENOSYS;
1732
1733 mutex_lock(&loop_index_mutex);
1734 switch (cmd) {
1735 case LOOP_CTL_ADD:
1736 ret = loop_lookup(&lo, parm);
1737 if (ret >= 0) {
1738 ret = -EEXIST;
1739 break;
1740 }
1741 ret = loop_add(&lo, parm);
1742 break;
1743 case LOOP_CTL_REMOVE:
1744 ret = loop_lookup(&lo, parm);
1745 if (ret < 0)
1746 break;
1747 mutex_lock(&lo->lo_ctl_mutex);
1748 if (lo->lo_state != Lo_unbound) {
1749 ret = -EBUSY;
1750 mutex_unlock(&lo->lo_ctl_mutex);
1751 break;
1752 }
1753 if (lo->lo_refcnt > 0) {
1754 ret = -EBUSY;
1755 mutex_unlock(&lo->lo_ctl_mutex);
1756 break;
1757 }
1758 lo->lo_disk->private_data = NULL;
1759 mutex_unlock(&lo->lo_ctl_mutex);
1760 idr_remove(&loop_index_idr, lo->lo_number);
1761 loop_remove(lo);
1762 break;
1763 case LOOP_CTL_GET_FREE:
1764 ret = loop_lookup(&lo, -1);
1765 if (ret >= 0)
1766 break;
1767 ret = loop_add(&lo, -1);
1768 }
1769 mutex_unlock(&loop_index_mutex);
1770
1771 return ret;
1772}
1773
1774static const struct file_operations loop_ctl_fops = {
1775 .open = nonseekable_open,
1776 .unlocked_ioctl = loop_control_ioctl,
1777 .compat_ioctl = loop_control_ioctl,
1778 .owner = THIS_MODULE,
1779 .llseek = noop_llseek,
1780};
1781
1782static struct miscdevice loop_misc = {
1783 .minor = LOOP_CTRL_MINOR,
1784 .name = "loop-control",
1785 .fops = &loop_ctl_fops,
1786};
1787
1788MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1789MODULE_ALIAS("devname:loop-control");
1790
1669static int __init loop_init(void) 1791static int __init loop_init(void)
1670{ 1792{
1671 int i, nr; 1793 int i, nr;
1672 unsigned long range; 1794 unsigned long range;
1673 struct loop_device *lo, *next; 1795 struct loop_device *lo;
1796 int err;
1674 1797
1675 /* 1798 err = misc_register(&loop_misc);
1676 * loop module now has a feature to instantiate underlying device 1799 if (err < 0)
1677 * structure on-demand, provided that there is an access dev node. 1800 return err;
1678 * However, this will not work well with user space tool that doesn't
1679 * know about such "feature". In order to not break any existing
1680 * tool, we do the following:
1681 *
1682 * (1) if max_loop is specified, create that many upfront, and this
1683 * also becomes a hard limit.
1684 * (2) if max_loop is not specified, create 8 loop device on module
1685 * load, user can further extend loop device by create dev node
1686 * themselves and have kernel automatically instantiate actual
1687 * device on-demand.
1688 */
1689 1801
1690 part_shift = 0; 1802 part_shift = 0;
1691 if (max_part > 0) { 1803 if (max_part > 0) {
@@ -1708,57 +1820,60 @@ static int __init loop_init(void)
1708 if (max_loop > 1UL << (MINORBITS - part_shift)) 1820 if (max_loop > 1UL << (MINORBITS - part_shift))
1709 return -EINVAL; 1821 return -EINVAL;
1710 1822
1823 /*
1824 * If max_loop is specified, create that many devices upfront.
1825 * This also becomes a hard limit. If max_loop is not specified,
1826 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1827 * init time. Loop devices can be requested on-demand with the
1828 * /dev/loop-control interface, or be instantiated by accessing
1829 * a 'dead' device node.
1830 */
1711 if (max_loop) { 1831 if (max_loop) {
1712 nr = max_loop; 1832 nr = max_loop;
1713 range = max_loop << part_shift; 1833 range = max_loop << part_shift;
1714 } else { 1834 } else {
1715 nr = 8; 1835 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1716 range = 1UL << MINORBITS; 1836 range = 1UL << MINORBITS;
1717 } 1837 }
1718 1838
1719 if (register_blkdev(LOOP_MAJOR, "loop")) 1839 if (register_blkdev(LOOP_MAJOR, "loop"))
1720 return -EIO; 1840 return -EIO;
1721 1841
1722 for (i = 0; i < nr; i++) {
1723 lo = loop_alloc(i);
1724 if (!lo)
1725 goto Enomem;
1726 list_add_tail(&lo->lo_list, &loop_devices);
1727 }
1728
1729 /* point of no return */
1730
1731 list_for_each_entry(lo, &loop_devices, lo_list)
1732 add_disk(lo->lo_disk);
1733
1734 blk_register_region(MKDEV(LOOP_MAJOR, 0), range, 1842 blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1735 THIS_MODULE, loop_probe, NULL, NULL); 1843 THIS_MODULE, loop_probe, NULL, NULL);
1736 1844
1845 /* pre-create number of devices given by config or max_loop */
1846 mutex_lock(&loop_index_mutex);
1847 for (i = 0; i < nr; i++)
1848 loop_add(&lo, i);
1849 mutex_unlock(&loop_index_mutex);
1850
1737 printk(KERN_INFO "loop: module loaded\n"); 1851 printk(KERN_INFO "loop: module loaded\n");
1738 return 0; 1852 return 0;
1853}
1739 1854
1740Enomem: 1855static int loop_exit_cb(int id, void *ptr, void *data)
1741 printk(KERN_INFO "loop: out of memory\n"); 1856{
1742 1857 struct loop_device *lo = ptr;
1743 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1744 loop_free(lo);
1745 1858
1746 unregister_blkdev(LOOP_MAJOR, "loop"); 1859 loop_remove(lo);
1747 return -ENOMEM; 1860 return 0;
1748} 1861}
1749 1862
1750static void __exit loop_exit(void) 1863static void __exit loop_exit(void)
1751{ 1864{
1752 unsigned long range; 1865 unsigned long range;
1753 struct loop_device *lo, *next;
1754 1866
1755 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 1867 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1756 1868
1757 list_for_each_entry_safe(lo, next, &loop_devices, lo_list) 1869 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1758 loop_del_one(lo); 1870 idr_remove_all(&loop_index_idr);
1871 idr_destroy(&loop_index_idr);
1759 1872
1760 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1873 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1761 unregister_blkdev(LOOP_MAJOR, "loop"); 1874 unregister_blkdev(LOOP_MAJOR, "loop");
1875
1876 misc_deregister(&loop_misc);
1762} 1877}
1763 1878
1764module_init(loop_init); 1879module_init(loop_init);
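The loop.c hunks above add a /dev/loop-control misc device (the MODULE_ALIAS("devname:loop-control") line lets devtmpfs/udev create the node automatically) driven by the LOOP_CTL_ADD, LOOP_CTL_REMOVE and LOOP_CTL_GET_FREE ioctls. A minimal userspace sketch of how the new interface might be exercised, assuming the LOOP_CTL_* constants from the matching <linux/loop.h> change (not part of this hunk):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int nr, ctl = open("/dev/loop-control", O_RDWR);

	if (ctl < 0) {
		perror("open /dev/loop-control");
		return 1;
	}

	/* Ask for the first unbound device; per the hunk above the kernel
	 * allocates a new one on demand if none is free. */
	nr = ioctl(ctl, LOOP_CTL_GET_FREE);
	if (nr < 0)
		perror("LOOP_CTL_GET_FREE");
	else
		printf("first free loop device: /dev/loop%d\n", nr);

	/* Explicitly add /dev/loop8; fails with EEXIST if it already exists. */
	if (ioctl(ctl, LOOP_CTL_ADD, 8) < 0)
		perror("LOOP_CTL_ADD");

	close(ctl);
	return 0;
}
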
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 773bfa792777..ae3e167e17ad 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] =
1184 { 1184 {
1185 .compatible = "swim3" 1185 .compatible = "swim3"
1186 }, 1186 },
1187 { /* end of list */ }
1187}; 1188};
1188 1189
1189static struct macio_driver swim3_driver = 1190static struct macio_driver swim3_driver =
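The added { /* end of list */ } entry in the swim3 hunk is not cosmetic: of_device_id tables are walked until an all-zero sentinel entry is reached, so without the terminator the match loop can read past the end of the array.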
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9e40b283a468..00c57c90e2d6 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -46,7 +46,7 @@
46 46
47#define DRV_PFX "xen-blkback:" 47#define DRV_PFX "xen-blkback:"
48#define DPRINTK(fmt, args...) \ 48#define DPRINTK(fmt, args...) \
49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ 49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
50 __func__, __LINE__, ##args) 50 __func__, __LINE__, ##args)
51 51
52 52
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3f129b45451a..5fd2010f7d2b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
590 590
591 /* 591 /*
592 * Enforce precondition before potential leak point. 592 * Enforce precondition before potential leak point.
593 * blkif_disconnect() is idempotent. 593 * xen_blkif_disconnect() is idempotent.
594 */ 594 */
595 xen_blkif_disconnect(be->blkif); 595 xen_blkif_disconnect(be->blkif);
596 596
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
601 break; 601 break;
602 602
603 case XenbusStateClosing: 603 case XenbusStateClosing:
604 xen_blkif_disconnect(be->blkif);
605 xenbus_switch_state(dev, XenbusStateClosing); 604 xenbus_switch_state(dev, XenbusStateClosing);
606 break; 605 break;
607 606
608 case XenbusStateClosed: 607 case XenbusStateClosed:
608 xen_blkif_disconnect(be->blkif);
609 xenbus_switch_state(dev, XenbusStateClosed); 609 xenbus_switch_state(dev, XenbusStateClosed);
610 if (xenbus_dev_is_online(dev)) 610 if (xenbus_dev_is_online(dev))
611 break; 611 break;
612 /* fall through if not online */ 612 /* fall through if not online */
613 case XenbusStateUnknown: 613 case XenbusStateUnknown:
614 /* implies blkif_disconnect() via blkback_remove() */ 614 /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
615 device_unregister(&dev->dev); 615 device_unregister(&dev->dev);
616 break; 616 break;
617 617
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9cef917..9ea8c2576c70 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock);
123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) 123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
124#define EMULATED_HD_DISK_MINOR_OFFSET (0) 124#define EMULATED_HD_DISK_MINOR_OFFSET (0)
125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) 125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
126#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) 126#define EMULATED_SD_DISK_MINOR_OFFSET (0)
127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) 127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
128 128
129#define DEV_NAME "xvd" /* name in /dev */ 129#define DEV_NAME "xvd" /* name in /dev */
130 130
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
529 minor = BLKIF_MINOR_EXT(info->vdevice); 529 minor = BLKIF_MINOR_EXT(info->vdevice);
530 nr_parts = PARTS_PER_EXT_DISK; 530 nr_parts = PARTS_PER_EXT_DISK;
531 offset = minor / nr_parts; 531 offset = minor / nr_parts;
532 if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) 532 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " 533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
534 "emulated IDE disks,\n\t choose an xvd device name" 534 "emulated IDE disks,\n\t choose an xvd device name"
535 "from xvde on\n", info->vdevice); 535 "from xvde on\n", info->vdevice);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a5854735bb2e..db7cb8111fbe 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
63 /* Atheros AR3011 with sflash firmware*/ 63 /* Atheros AR3011 with sflash firmware*/
64 { USB_DEVICE(0x0CF3, 0x3002) }, 64 { USB_DEVICE(0x0CF3, 0x3002) },
65 { USB_DEVICE(0x13d3, 0x3304) }, 65 { USB_DEVICE(0x13d3, 0x3304) },
66 { USB_DEVICE(0x0930, 0x0215) },
66 67
67 /* Atheros AR9285 Malbec with sflash firmware */ 68 /* Atheros AR9285 Malbec with sflash firmware */
68 { USB_DEVICE(0x03F0, 0x311D) }, 69 { USB_DEVICE(0x03F0, 0x311D) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 91d13a9e8c65..9cbac6b445e1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
72 /* Apple MacBookAir3,1, MacBookAir3,2 */ 72 /* Apple MacBookAir3,1, MacBookAir3,2 */
73 { USB_DEVICE(0x05ac, 0x821b) }, 73 { USB_DEVICE(0x05ac, 0x821b) },
74 74
75 /* Apple MacBookAir4,1 */
76 { USB_DEVICE(0x05ac, 0x821f) },
77
75 /* Apple MacBookPro8,2 */ 78 /* Apple MacBookPro8,2 */
76 { USB_DEVICE(0x05ac, 0x821a) }, 79 { USB_DEVICE(0x05ac, 0x821a) },
77 80
81 /* Apple MacMini5,1 */
82 { USB_DEVICE(0x05ac, 0x8281) },
83
78 /* AVM BlueFRITZ! USB v2.0 */ 84 /* AVM BlueFRITZ! USB v2.0 */
79 { USB_DEVICE(0x057c, 0x3800) }, 85 { USB_DEVICE(0x057c, 0x3800) },
80 86
@@ -106,6 +112,7 @@ static struct usb_device_id blacklist_table[] = {
106 /* Atheros 3011 with sflash firmware */ 112 /* Atheros 3011 with sflash firmware */
107 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 113 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
108 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, 114 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
115 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
109 116
110 /* Atheros AR9285 Malbec with sflash firmware */ 117 /* Atheros AR9285 Malbec with sflash firmware */
111 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 118 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
@@ -256,7 +263,9 @@ static void btusb_intr_complete(struct urb *urb)
256 263
257 err = usb_submit_urb(urb, GFP_ATOMIC); 264 err = usb_submit_urb(urb, GFP_ATOMIC);
258 if (err < 0) { 265 if (err < 0) {
259 if (err != -EPERM) 266 /* -EPERM: urb is being killed;
267 * -ENODEV: device got disconnected */
268 if (err != -EPERM && err != -ENODEV)
260 BT_ERR("%s urb %p failed to resubmit (%d)", 269 BT_ERR("%s urb %p failed to resubmit (%d)",
261 hdev->name, urb, -err); 270 hdev->name, urb, -err);
262 usb_unanchor_urb(urb); 271 usb_unanchor_urb(urb);
@@ -341,7 +350,9 @@ static void btusb_bulk_complete(struct urb *urb)
341 350
342 err = usb_submit_urb(urb, GFP_ATOMIC); 351 err = usb_submit_urb(urb, GFP_ATOMIC);
343 if (err < 0) { 352 if (err < 0) {
344 if (err != -EPERM) 353 /* -EPERM: urb is being killed;
354 * -ENODEV: device got disconnected */
355 if (err != -EPERM && err != -ENODEV)
345 BT_ERR("%s urb %p failed to resubmit (%d)", 356 BT_ERR("%s urb %p failed to resubmit (%d)",
346 hdev->name, urb, -err); 357 hdev->name, urb, -err);
347 usb_unanchor_urb(urb); 358 usb_unanchor_urb(urb);
@@ -431,7 +442,9 @@ static void btusb_isoc_complete(struct urb *urb)
431 442
432 err = usb_submit_urb(urb, GFP_ATOMIC); 443 err = usb_submit_urb(urb, GFP_ATOMIC);
433 if (err < 0) { 444 if (err < 0) {
434 if (err != -EPERM) 445 /* -EPERM: urb is being killed;
446 * -ENODEV: device got disconnected */
447 if (err != -EPERM && err != -ENODEV)
435 BT_ERR("%s urb %p failed to resubmit (%d)", 448 BT_ERR("%s urb %p failed to resubmit (%d)",
436 hdev->name, urb, -err); 449 hdev->name, urb, -err);
437 usb_unanchor_urb(urb); 450 usb_unanchor_urb(urb);
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 65d27aff553a..04d353f58d71 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
125/* protocol structure registered with shared transport */ 125/* protocol structure registered with shared transport */
126static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { 126static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
127 { 127 {
128 .chnl_id = HCI_EVENT_PKT, /* HCI Events */
129 .hdr_len = sizeof(struct hci_event_hdr),
130 .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
131 .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
132 .reserve = 8,
133 },
134 {
128 .chnl_id = HCI_ACLDATA_PKT, /* ACL */ 135 .chnl_id = HCI_ACLDATA_PKT, /* ACL */
129 .hdr_len = sizeof(struct hci_acl_hdr), 136 .hdr_len = sizeof(struct hci_acl_hdr),
130 .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), 137 .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
138 .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */ 145 .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
139 .reserve = 8, 146 .reserve = 8,
140 }, 147 },
141 {
142 .chnl_id = HCI_EVENT_PKT, /* HCI Events */
143 .hdr_len = sizeof(struct hci_event_hdr),
144 .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
145 .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
146 .reserve = 8,
147 },
148}; 148};
149 149
150/* Called from HCI core to initialize the device */ 150/* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
240 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 240 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
241 return 0; 241 return 0;
242 242
243 for (i = 0; i < MAX_BT_CHNL_IDS; i++) { 243 for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
244 err = st_unregister(&ti_st_proto[i]); 244 err = st_unregister(&ti_st_proto[i]);
245 if (err) 245 if (err)
246 BT_ERR("st_unregister(%d) failed with error %d", 246 BT_ERR("st_unregister(%d) failed with error %d",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 75fb965b8f72..f997c27d79e2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
1929 goto out; 1929 goto out;
1930 1930
1931 s->manufact.len = buf[0] << 8 | buf[1]; 1931 s->manufact.len = buf[0] << 8 | buf[1];
1932 if (s->manufact.len < 0 || s->manufact.len > 2048) { 1932 if (s->manufact.len < 0) {
1933 cdinfo(CD_WARNING, "Received invalid manufacture info length" 1933 cdinfo(CD_WARNING, "Received invalid manufacture info length"
1934 " (%d)\n", s->manufact.len); 1934 " (%d)\n", s->manufact.len);
1935 ret = -EIO; 1935 ret = -EIO;
1936 } else { 1936 } else {
1937 if (s->manufact.len > 2048) {
1938 cdinfo(CD_WARNING, "Received invalid manufacture info "
1939 "length (%d): truncating to 2048\n",
1940 s->manufact.len);
1941 s->manufact.len = 2048;
1942 }
1937 memcpy(s->manufact.value, &buf[4], s->manufact.len); 1943 memcpy(s->manufact.value, &buf[4], s->manufact.len);
1938 } 1944 }
1939 1945
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65c9960..8eca55deb3a3 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)
379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { 379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), 380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
381 GFP_KERNEL); 381 GFP_KERNEL);
382 if (IS_ERR(smd_pkt_devp[i])) { 382 if (!smd_pkt_devp[i]) {
383 r = PTR_ERR(smd_pkt_devp[i]); 383 pr_err("kmalloc() failed\n");
384 pr_err("kmalloc() failed %d\n", r);
385 goto clean_cdevs; 384 goto clean_cdevs;
386 } 385 }
387 386
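The smd_pkt fix above is more than a style change: kzalloc() reports failure by returning NULL, never an ERR_PTR-encoded value, so IS_ERR() on its result is always false and the old code would have sailed past a failed allocation and dereferenced a NULL pointer later in the loop; testing !smd_pkt_devp[i] is the correct check.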
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dc7c033ef587..32a77becc098 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/clocksource.h> 30#include <linux/clocksource.h>
30#include <linux/clockchips.h> 31#include <linux/clockchips.h>
31#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
150 151
151static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 152static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
152{ 153{
153 int ret; 154 int k, ret;
154 155
155 /* enable clock */ 156 /* enable clock */
156 ret = clk_enable(p->clk); 157 ret = clk_enable(p->clk);
157 if (ret) { 158 if (ret) {
158 dev_err(&p->pdev->dev, "cannot enable clock\n"); 159 dev_err(&p->pdev->dev, "cannot enable clock\n");
159 return ret; 160 goto err0;
160 } 161 }
161 162
162 /* make sure channel is disabled */ 163 /* make sure channel is disabled */
@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
174 sh_cmt_write(p, CMCOR, 0xffffffff); 175 sh_cmt_write(p, CMCOR, 0xffffffff);
175 sh_cmt_write(p, CMCNT, 0); 176 sh_cmt_write(p, CMCNT, 0);
176 177
178 /*
179 * According to the sh73a0 user's manual, as CMCNT can be operated
180 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
181 * modifying CMCNT register; two RCLK cycles are necessary before
182 * this register is either read or any modification of the value
183 * it holds is reflected in the LSI's actual operation.
184 *
185 * While at it, we're supposed to clear out the CMCNT as of this
186 * moment, so make sure it's processed properly here. This will
187 * take RCLKx2 at maximum.
188 */
189 for (k = 0; k < 100; k++) {
190 if (!sh_cmt_read(p, CMCNT))
191 break;
192 udelay(1);
193 }
194
195 if (sh_cmt_read(p, CMCNT)) {
196 dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
197 ret = -ETIMEDOUT;
198 goto err1;
199 }
200
177 /* enable channel */ 201 /* enable channel */
178 sh_cmt_start_stop_ch(p, 1); 202 sh_cmt_start_stop_ch(p, 1);
179 return 0; 203 return 0;
204 err1:
205 /* stop clock */
206 clk_disable(p->clk);
207
208 err0:
209 return ret;
180} 210}
181 211
182static void sh_cmt_disable(struct sh_cmt_priv *p) 212static void sh_cmt_disable(struct sh_cmt_priv *p)
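As a rough sanity check on the new polling bound: with CMCNT clocked by the pseudo-32 kHz RCLK, two RCLK cycles are about 2 / 32768 s ≈ 61 µs, so the loop of up to 100 × udelay(1) (plus the register reads in between) comfortably covers the documented worst case before -ETIMEDOUT is returned.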
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 7b0603eb0129..cdc02ac8f41a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
261 pr = per_cpu(processors, cpu); 261 pr = per_cpu(processors, cpu);
262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); 262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
263 263
264 if (!pr)
265 return -ENODEV;
266
264 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); 267 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
265 if (ACPI_FAILURE(status)) 268 if (ACPI_FAILURE(status))
266 return -ENODEV; 269 return -ENODEV;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf8..467e4dcb20a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -174,8 +174,10 @@ struct d40_base;
174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
175 * transfer and call client callback. 175 * transfer and call client callback.
176 * @client: Client owned descriptor list. 176 * @client: Client owned descriptor list.
177 * @pending_queue: Submitted jobs, to be issued by issue_pending()
177 * @active: Active descriptor. 178 * @active: Active descriptor.
178 * @queue: Queued jobs. 179 * @queue: Queued jobs.
180 * @prepare_queue: Prepared jobs.
179 * @dma_cfg: The client configuration of this dma channel. 181 * @dma_cfg: The client configuration of this dma channel.
180 * @configured: whether the dma_cfg configuration is valid 182 * @configured: whether the dma_cfg configuration is valid
181 * @base: Pointer to the device instance struct. 183 * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
203 struct list_head pending_queue; 205 struct list_head pending_queue;
204 struct list_head active; 206 struct list_head active;
205 struct list_head queue; 207 struct list_head queue;
208 struct list_head prepare_queue;
206 struct stedma40_chan_cfg dma_cfg; 209 struct stedma40_chan_cfg dma_cfg;
207 bool configured; 210 bool configured;
208 struct d40_base *base; 211 struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
477 480
478 list_for_each_entry_safe(d, _d, &d40c->client, node) 481 list_for_each_entry_safe(d, _d, &d40c->client, node)
479 if (async_tx_test_ack(&d->txd)) { 482 if (async_tx_test_ack(&d->txd)) {
480 d40_pool_lli_free(d40c, d);
481 d40_desc_remove(d); 483 d40_desc_remove(d);
482 desc = d; 484 desc = d;
483 memset(desc, 0, sizeof(*desc)); 485 memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
644 return d; 646 return d;
645} 647}
646 648
649/* remove desc from current queue and add it to the pending_queue */
647static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 650static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
648{ 651{
652 d40_desc_remove(desc);
653 desc->is_in_client_list = false;
649 list_add_tail(&desc->node, &d40c->pending_queue); 654 list_add_tail(&desc->node, &d40c->pending_queue);
650} 655}
651 656
@@ -803,6 +808,7 @@ done:
803static void d40_term_all(struct d40_chan *d40c) 808static void d40_term_all(struct d40_chan *d40c)
804{ 809{
805 struct d40_desc *d40d; 810 struct d40_desc *d40d;
811 struct d40_desc *_d;
806 812
807 /* Release active descriptors */ 813 /* Release active descriptors */
808 while ((d40d = d40_first_active_get(d40c))) { 814 while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
822 d40_desc_free(d40c, d40d); 828 d40_desc_free(d40c, d40d);
823 } 829 }
824 830
831 /* Release client owned descriptors */
832 if (!list_empty(&d40c->client))
833 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
834 d40_desc_remove(d40d);
835 d40_desc_free(d40c, d40d);
836 }
837
838 /* Release descriptors in prepare queue */
839 if (!list_empty(&d40c->prepare_queue))
840 list_for_each_entry_safe(d40d, _d,
841 &d40c->prepare_queue, node) {
842 d40_desc_remove(d40d);
843 d40_desc_free(d40c, d40d);
844 }
845
825 d40c->pending_tx = 0; 846 d40c->pending_tx = 0;
826 d40c->busy = false; 847 d40c->busy = false;
827} 848}
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
1208 1229
1209 if (!d40d->cyclic) { 1230 if (!d40d->cyclic) {
1210 if (async_tx_test_ack(&d40d->txd)) { 1231 if (async_tx_test_ack(&d40d->txd)) {
1211 d40_pool_lli_free(d40c, d40d);
1212 d40_desc_remove(d40d); 1232 d40_desc_remove(d40d);
1213 d40_desc_free(d40c, d40d); 1233 d40_desc_free(d40c, d40d);
1214 } else { 1234 } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
1595 u32 event; 1615 u32 event;
1596 struct d40_phy_res *phy = d40c->phy_chan; 1616 struct d40_phy_res *phy = d40c->phy_chan;
1597 bool is_src; 1617 bool is_src;
1598 struct d40_desc *d;
1599 struct d40_desc *_d;
1600
1601 1618
1602 /* Terminate all queued and active transfers */ 1619 /* Terminate all queued and active transfers */
1603 d40_term_all(d40c); 1620 d40_term_all(d40c);
1604 1621
1605 /* Release client owned descriptors */
1606 if (!list_empty(&d40c->client))
1607 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1608 d40_pool_lli_free(d40c, d);
1609 d40_desc_remove(d);
1610 d40_desc_free(d40c, d);
1611 }
1612
1613 if (phy == NULL) { 1622 if (phy == NULL) {
1614 chan_err(d40c, "phy == null\n"); 1623 chan_err(d40c, "phy == null\n");
1615 return -EINVAL; 1624 return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1911 goto err; 1920 goto err;
1912 } 1921 }
1913 1922
1923 /*
1924 * add descriptor to the prepare queue in order to be able
1925 * to free them later in terminate_all
1926 */
1927 list_add_tail(&desc->node, &chan->prepare_queue);
1928
1914 spin_unlock_irqrestore(&chan->lock, flags); 1929 spin_unlock_irqrestore(&chan->lock, flags);
1915 1930
1916 return &desc->txd; 1931 return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2400 INIT_LIST_HEAD(&d40c->queue); 2415 INIT_LIST_HEAD(&d40c->queue);
2401 INIT_LIST_HEAD(&d40c->pending_queue); 2416 INIT_LIST_HEAD(&d40c->pending_queue);
2402 INIT_LIST_HEAD(&d40c->client); 2417 INIT_LIST_HEAD(&d40c->client);
2418 INIT_LIST_HEAD(&d40c->prepare_queue);
2403 2419
2404 tasklet_init(&d40c->tasklet, dma_tasklet, 2420 tasklet_init(&d40c->tasklet, dma_tasklet,
2405 (unsigned long) d40c); 2421 (unsigned long) d40c);
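Taken together, the ste_dma40 hunks keep each descriptor on exactly one list at a time (prepare_queue until submit, then pending_queue/queue/active, and finally the client list), so d40_term_all() can now also reclaim descriptors that were prepared but never issued as well as client-owned ones, which previously had to be cleaned up separately in d40_free_dma().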
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 04f1e7ce02b1..f6cf448d69b4 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1670 char *type, *optype, *err, *msg; 1670 char *type, *optype, *err, *msg;
1671 unsigned long error = m->status & 0x1ff0000l; 1671 unsigned long error = m->status & 0x1ff0000l;
1672 u32 optypenum = (m->status >> 4) & 0x07; 1672 u32 optypenum = (m->status >> 4) & 0x07;
1673 u32 core_err_cnt = (m->status >> 38) && 0x7fff; 1673 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1674 u32 dimm = (m->misc >> 16) & 0x3; 1674 u32 dimm = (m->misc >> 16) & 0x3;
1675 u32 channel = (m->misc >> 18) & 0x3; 1675 u32 channel = (m->misc >> 18) & 0x3;
1676 u32 syndrome = m->misc >> 32; 1676 u32 syndrome = m->misc >> 32;
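The one-character change above is a real fix: (m->status >> 38) && 0x7fff is a logical AND and can only evaluate to 0 or 1, while (m->status >> 38) & 0x7fff masks out the intended 15-bit error-count field for core_err_cnt (for example, a shifted value of 0x1234 gives 1 with && but 0x1234 with &).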
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index e6ad3bb6c1a6..4799393247c8 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
216 struct fw_cdev_event_phy_packet phy_packet; 216 struct fw_cdev_event_phy_packet phy_packet;
217}; 217};
218 218
219static inline void __user *u64_to_uptr(__u64 value) 219#ifdef CONFIG_COMPAT
220static void __user *u64_to_uptr(u64 value)
221{
222 if (is_compat_task())
223 return compat_ptr(value);
224 else
225 return (void __user *)(unsigned long)value;
226}
227
228static u64 uptr_to_u64(void __user *ptr)
229{
230 if (is_compat_task())
231 return ptr_to_compat(ptr);
232 else
233 return (u64)(unsigned long)ptr;
234}
235#else
236static inline void __user *u64_to_uptr(u64 value)
220{ 237{
221 return (void __user *)(unsigned long)value; 238 return (void __user *)(unsigned long)value;
222} 239}
223 240
224static inline __u64 uptr_to_u64(void __user *ptr) 241static inline u64 uptr_to_u64(void __user *ptr)
225{ 242{
226 return (__u64)(unsigned long)ptr; 243 return (u64)(unsigned long)ptr;
227} 244}
245#endif /* CONFIG_COMPAT */
228 246
229static int fw_device_op_open(struct inode *inode, struct file *file) 247static int fw_device_op_open(struct inode *inode, struct file *file)
230{ 248{
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 8ba7f7928f1f..f3b890da1e87 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
455static int read_rom(struct fw_device *device, 455static int read_rom(struct fw_device *device,
456 int generation, int index, u32 *data) 456 int generation, int index, u32 *data)
457{ 457{
458 int rcode; 458 u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
459 int i, rcode;
459 460
460 /* device->node_id, accessed below, must not be older than generation */ 461 /* device->node_id, accessed below, must not be older than generation */
461 smp_rmb(); 462 smp_rmb();
462 463
463 rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, 464 for (i = 10; i < 100; i += 10) {
464 device->node_id, generation, device->max_speed, 465 rcode = fw_run_transaction(device->card,
465 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, 466 TCODE_READ_QUADLET_REQUEST, device->node_id,
466 data, 4); 467 generation, device->max_speed, offset, data, 4);
468 if (rcode != RCODE_BUSY)
469 break;
470 msleep(i);
471 }
467 be32_to_cpus(data); 472 be32_to_cpus(data);
468 473
469 return rcode; 474 return rcode;
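For scale, the retry loop above makes up to nine read attempts, sleeping 10, 20, … 90 ms after each RCODE_BUSY response, so a persistently busy config ROM adds roughly 450 ms at most before the busy result is finally returned to the caller.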
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bcf792fac442..fd7170a9ad2c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,6 +290,9 @@ static const struct {
290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
291 QUIRK_CYCLE_TIMER}, 291 QUIRK_CYCLE_TIMER},
292 292
293 {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
294 QUIRK_NO_MSI},
295
293 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 296 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
294 QUIRK_CYCLE_TIMER}, 297 QUIRK_CYCLE_TIMER},
295 298
@@ -2179,8 +2182,13 @@ static int ohci_enable(struct fw_card *card,
2179 ohci_driver_name, ohci)) { 2182 ohci_driver_name, ohci)) {
2180 fw_error("Failed to allocate interrupt %d.\n", dev->irq); 2183 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2181 pci_disable_msi(dev); 2184 pci_disable_msi(dev);
2182 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2185
2183 ohci->config_rom, ohci->config_rom_bus); 2186 if (config_rom) {
2187 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2188 ohci->next_config_rom,
2189 ohci->next_config_rom_bus);
2190 ohci->next_config_rom = NULL;
2191 }
2184 return -EIO; 2192 return -EIO;
2185 } 2193 }
2186 2194
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 41841a3e3f99..17cef864506a 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
1198{ 1198{
1199 struct fw_unit *unit = fw_unit(dev); 1199 struct fw_unit *unit = fw_unit(dev);
1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); 1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1201 struct sbp2_logical_unit *lu;
1202
1203 list_for_each_entry(lu, &tgt->lu_list, link)
1204 cancel_delayed_work_sync(&lu->work);
1201 1205
1202 sbp2_target_put(tgt); 1206 sbp2_target_put(tgt);
1203 return 0; 1207 return 0;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 68810fd1a59d..aa83de9db1b9 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
420 420
421static efi_status_t gsmi_set_variable(efi_char16_t *name, 421static efi_status_t gsmi_set_variable(efi_char16_t *name,
422 efi_guid_t *vendor, 422 efi_guid_t *vendor,
423 unsigned long attr, 423 u32 attr,
424 unsigned long data_size, 424 unsigned long data_size,
425 void *data) 425 void *data)
426{ 426{
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 231714def4d2..4e24436b0f82 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
351 return 0; 351 return 0;
352} 352}
353 353
354int __devexit bgpio_remove(struct bgpio_chip *bgc) 354int bgpio_remove(struct bgpio_chip *bgc)
355{ 355{
356 int err = gpiochip_remove(&bgc->gc); 356 int err = gpiochip_remove(&bgc->gc);
357 357
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
361} 361}
362EXPORT_SYMBOL_GPL(bgpio_remove); 362EXPORT_SYMBOL_GPL(bgpio_remove);
363 363
364int __devinit bgpio_init(struct bgpio_chip *bgc, 364int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
365 struct device *dev, 365 unsigned long sz, void __iomem *dat, void __iomem *set,
366 unsigned long sz, 366 void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
367 void __iomem *dat, 367 bool big_endian)
368 void __iomem *set,
369 void __iomem *clr,
370 void __iomem *dirout,
371 void __iomem *dirin,
372 bool big_endian)
373{ 368{
374 int ret; 369 int ret;
375 370
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db18506662..fe738f05309b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
499 mutex_lock(&dev->mode_config.mutex); 499 mutex_lock(&dev->mode_config.mutex);
500 drm_mode_object_put(dev, &connector->base); 500 drm_mode_object_put(dev, &connector->base);
501 list_del(&connector->head); 501 list_del(&connector->head);
502 dev->mode_config.num_connector--;
502 mutex_unlock(&dev->mode_config.mutex); 503 mutex_unlock(&dev->mode_config.mutex);
503} 504}
504EXPORT_SYMBOL(drm_connector_cleanup); 505EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
529 mutex_lock(&dev->mode_config.mutex); 530 mutex_lock(&dev->mode_config.mutex);
530 drm_mode_object_put(dev, &encoder->base); 531 drm_mode_object_put(dev, &encoder->base);
531 list_del(&encoder->head); 532 list_del(&encoder->head);
533 dev->mode_config.num_encoder--;
532 mutex_unlock(&dev->mode_config.mutex); 534 mutex_unlock(&dev->mode_config.mutex);
533} 535}
534EXPORT_SYMBOL(drm_encoder_cleanup); 536EXPORT_SYMBOL(drm_encoder_cleanup);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac3139..f7c6854eb4dd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
256{ 256{
257 printk(KERN_ERR "panic occurred, switching back to text console\n"); 257 printk(KERN_ERR "panic occurred, switching back to text console\n");
258 return drm_fb_helper_force_kernel_mode(); 258 return drm_fb_helper_force_kernel_mode();
259 return 0;
260} 259}
261EXPORT_SYMBOL(drm_fb_helper_panic); 260EXPORT_SYMBOL(drm_fb_helper_panic);
262 261
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8ab6263e0d7..3c395a59da35 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
499 seq_printf(m, "Interrupts received: %d\n", 499 seq_printf(m, "Interrupts received: %d\n",
500 atomic_read(&dev_priv->irq_received)); 500 atomic_read(&dev_priv->irq_received));
501 for (i = 0; i < I915_NUM_RINGS; i++) { 501 for (i = 0; i < I915_NUM_RINGS; i++) {
502 if (IS_GEN6(dev)) { 502 if (IS_GEN6(dev) || IS_GEN7(dev)) {
503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
504 dev_priv->ring[i].name, 504 dev_priv->ring[i].name,
505 I915_READ_IMR(&dev_priv->ring[i])); 505 I915_READ_IMR(&dev_priv->ring[i]));
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index feb4f164fd1b..7916bd97d5c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <drm/intel-gtt.h> 38#include <drm/intel-gtt.h>
39#include <linux/backlight.h>
39 40
40/* General customization: 41/* General customization:
41 */ 42 */
@@ -690,6 +691,7 @@ typedef struct drm_i915_private {
690 int child_dev_num; 691 int child_dev_num;
691 struct child_device_config *child_dev; 692 struct child_device_config *child_dev;
692 struct drm_connector *int_lvds_connector; 693 struct drm_connector *int_lvds_connector;
694 struct drm_connector *int_edp_connector;
693 695
694 bool mchbar_need_disable; 696 bool mchbar_need_disable;
695 697
@@ -723,6 +725,8 @@ typedef struct drm_i915_private {
723 /* list of fbdev register on this device */ 725 /* list of fbdev register on this device */
724 struct intel_fbdev *fbdev; 726 struct intel_fbdev *fbdev;
725 727
728 struct backlight_device *backlight;
729
726 struct drm_property *broadcast_rgb_property; 730 struct drm_property *broadcast_rgb_property;
727 struct drm_property *force_audio_property; 731 struct drm_property *force_audio_property;
728 732
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 02f96fd0d52d..9cbb0cd8f46a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2058,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev)
2058 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2058 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2059 } 2059 }
2060 2060
2061 2061 if (drm_core_check_feature(dev, DRIVER_MODESET))
2062 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2062 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2063 else
2064 dev->driver->get_vblank_timestamp = NULL;
2063 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2065 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2064 2066
2065 if (IS_IVYBRIDGE(dev)) { 2067 if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d1331f771e2f..542453f7498c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -375,6 +375,7 @@
375# define MI_FLUSH_ENABLE (1 << 11) 375# define MI_FLUSH_ENABLE (1 << 11)
376 376
377#define GFX_MODE 0x02520 377#define GFX_MODE 0x02520
378#define GFX_MODE_GEN7 0x0229c
378#define GFX_RUN_LIST_ENABLE (1<<15) 379#define GFX_RUN_LIST_ENABLE (1<<15)
379#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) 380#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
380#define GFX_SURFACE_FAULT_ENABLE (1<<12) 381#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -382,6 +383,9 @@
382#define GFX_PSMI_GRANULARITY (1<<10) 383#define GFX_PSMI_GRANULARITY (1<<10)
383#define GFX_PPGTT_ENABLE (1<<9) 384#define GFX_PPGTT_ENABLE (1<<9)
384 385
386#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
387#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
388
385#define SCPD0 0x0209c /* 915+ only */ 389#define SCPD0 0x0209c /* 915+ only */
386#define IER 0x020a0 390#define IER 0x020a0
387#define IIR 0x020a4 391#define IIR 0x020a4
@@ -1318,6 +1322,7 @@
1318#define ADPA_PIPE_SELECT_MASK (1<<30) 1322#define ADPA_PIPE_SELECT_MASK (1<<30)
1319#define ADPA_PIPE_A_SELECT 0 1323#define ADPA_PIPE_A_SELECT 0
1320#define ADPA_PIPE_B_SELECT (1<<30) 1324#define ADPA_PIPE_B_SELECT (1<<30)
1325#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1321#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1326#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1322#define ADPA_SETS_HVPOLARITY 0 1327#define ADPA_SETS_HVPOLARITY 0
1323#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1328#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1460,6 +1465,7 @@
1460/* Selects pipe B for LVDS data. Must be set on pre-965. */ 1465/* Selects pipe B for LVDS data. Must be set on pre-965. */
1461#define LVDS_PIPEB_SELECT (1 << 30) 1466#define LVDS_PIPEB_SELECT (1 << 30)
1462#define LVDS_PIPE_MASK (1 << 30) 1467#define LVDS_PIPE_MASK (1 << 30)
1468#define LVDS_PIPE(pipe) ((pipe) << 30)
1463/* LVDS dithering flag on 965/g4x platform */ 1469/* LVDS dithering flag on 965/g4x platform */
1464#define LVDS_ENABLE_DITHER (1 << 25) 1470#define LVDS_ENABLE_DITHER (1 << 25)
1465/* LVDS sync polarity flags. Set to invert (i.e. negative) */ 1471/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1499,9 +1505,6 @@
1499#define LVDS_B0B3_POWER_DOWN (0 << 2) 1505#define LVDS_B0B3_POWER_DOWN (0 << 2)
1500#define LVDS_B0B3_POWER_UP (3 << 2) 1506#define LVDS_B0B3_POWER_UP (3 << 2)
1501 1507
1502#define LVDS_PIPE_ENABLED(V, P) \
1503 (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
1504
1505/* Video Data Island Packet control */ 1508/* Video Data Island Packet control */
1506#define VIDEO_DIP_DATA 0x61178 1509#define VIDEO_DIP_DATA 0x61178
1507#define VIDEO_DIP_CTL 0x61170 1510#define VIDEO_DIP_CTL 0x61170
@@ -3256,14 +3259,12 @@
3256#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) 3259#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3257#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3260#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3258 3261
3259#define ADPA_PIPE_ENABLED(V, P) \
3260 (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
3261
3262/* or SDVOB */ 3262/* or SDVOB */
3263#define HDMIB 0xe1140 3263#define HDMIB 0xe1140
3264#define PORT_ENABLE (1 << 31) 3264#define PORT_ENABLE (1 << 31)
3265#define TRANSCODER_A (0) 3265#define TRANSCODER_A (0)
3266#define TRANSCODER_B (1 << 30) 3266#define TRANSCODER_B (1 << 30)
3267#define TRANSCODER(pipe) ((pipe) << 30)
3267#define TRANSCODER_MASK (1 << 30) 3268#define TRANSCODER_MASK (1 << 30)
3268#define COLOR_FORMAT_8bpc (0) 3269#define COLOR_FORMAT_8bpc (0)
3269#define COLOR_FORMAT_12bpc (3 << 26) 3270#define COLOR_FORMAT_12bpc (3 << 26)
@@ -3280,9 +3281,6 @@
3280#define HSYNC_ACTIVE_HIGH (1 << 3) 3281#define HSYNC_ACTIVE_HIGH (1 << 3)
3281#define PORT_DETECTED (1 << 2) 3282#define PORT_DETECTED (1 << 2)
3282 3283
3283#define HDMI_PIPE_ENABLED(V, P) \
3284 (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
3285
3286/* PCH SDVOB multiplex with HDMIB */ 3284/* PCH SDVOB multiplex with HDMIB */
3287#define PCH_SDVOB HDMIB 3285#define PCH_SDVOB HDMIB
3288 3286
@@ -3349,6 +3347,7 @@
3349#define PORT_TRANS_B_SEL_CPT (1<<29) 3347#define PORT_TRANS_B_SEL_CPT (1<<29)
3350#define PORT_TRANS_C_SEL_CPT (2<<29) 3348#define PORT_TRANS_C_SEL_CPT (2<<29)
3351#define PORT_TRANS_SEL_MASK (3<<29) 3349#define PORT_TRANS_SEL_MASK (3<<29)
3350#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
3352 3351
3353#define TRANS_DP_CTL_A 0xe0300 3352#define TRANS_DP_CTL_A 0xe0300
3354#define TRANS_DP_CTL_B 0xe1300 3353#define TRANS_DP_CTL_B 0xe1300
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 87677d60d0df..f10742359ec9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -871,7 +871,8 @@ int i915_restore_state(struct drm_device *dev)
871 } 871 }
872 mutex_unlock(&dev->struct_mutex); 872 mutex_unlock(&dev->struct_mutex);
873 873
874 intel_init_clock_gating(dev); 874 if (drm_core_check_feature(dev, DRIVER_MODESET))
875 intel_init_clock_gating(dev);
875 876
876 if (IS_IRONLAKE_M(dev)) { 877 if (IS_IRONLAKE_M(dev)) {
877 ironlake_enable_drps(dev); 878 ironlake_enable_drps(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 35364e68a091..56a8554d9039 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
878 int pp_reg, lvds_reg; 878 int pp_reg, lvds_reg;
879 u32 val; 879 u32 val;
880 enum pipe panel_pipe = PIPE_A; 880 enum pipe panel_pipe = PIPE_A;
881 bool locked = locked; 881 bool locked = true;
882 882
883 if (HAS_PCH_SPLIT(dev_priv->dev)) { 883 if (HAS_PCH_SPLIT(dev_priv->dev)) {
884 pp_reg = PCH_PP_CONTROL; 884 pp_reg = PCH_PP_CONTROL;
@@ -980,8 +980,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
980 pipe_name(pipe)); 980 pipe_name(pipe));
981} 981}
982 982
983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, 983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
984 int reg, u32 port_sel, u32 val) 984 enum pipe pipe, u32 port_sel, u32 val)
985{ 985{
986 if ((val & DP_PORT_EN) == 0) 986 if ((val & DP_PORT_EN) == 0)
987 return false; 987 return false;
@@ -998,11 +998,58 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe,
998 return true; 998 return true;
999} 999}
1000 1000
1001static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1002 enum pipe pipe, u32 val)
1003{
1004 if ((val & PORT_ENABLE) == 0)
1005 return false;
1006
1007 if (HAS_PCH_CPT(dev_priv->dev)) {
1008 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1009 return false;
1010 } else {
1011 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1012 return false;
1013 }
1014 return true;
1015}
1016
1017static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1018 enum pipe pipe, u32 val)
1019{
1020 if ((val & LVDS_PORT_EN) == 0)
1021 return false;
1022
1023 if (HAS_PCH_CPT(dev_priv->dev)) {
1024 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1025 return false;
1026 } else {
1027 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1028 return false;
1029 }
1030 return true;
1031}
1032
1033static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1034 enum pipe pipe, u32 val)
1035{
1036 if ((val & ADPA_DAC_ENABLE) == 0)
1037 return false;
1038 if (HAS_PCH_CPT(dev_priv->dev)) {
1039 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1040 return false;
1041 } else {
1042 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1043 return false;
1044 }
1045 return true;
1046}
1047
1001static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1048static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1002 enum pipe pipe, int reg, u32 port_sel) 1049 enum pipe pipe, int reg, u32 port_sel)
1003{ 1050{
1004 u32 val = I915_READ(reg); 1051 u32 val = I915_READ(reg);
1005 WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), 1052 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1006 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1053 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1007 reg, pipe_name(pipe)); 1054 reg, pipe_name(pipe));
1008} 1055}
@@ -1011,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1011 enum pipe pipe, int reg) 1058 enum pipe pipe, int reg)
1012{ 1059{
1013 u32 val = I915_READ(reg); 1060 u32 val = I915_READ(reg);
1014 WARN(HDMI_PIPE_ENABLED(val, pipe), 1061 WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1015 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1062 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1016 reg, pipe_name(pipe)); 1063 reg, pipe_name(pipe));
1017} 1064}
@@ -1028,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1028 1075
1029 reg = PCH_ADPA; 1076 reg = PCH_ADPA;
1030 val = I915_READ(reg); 1077 val = I915_READ(reg);
1031 WARN(ADPA_PIPE_ENABLED(val, pipe), 1078 WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1032 "PCH VGA enabled on transcoder %c, should be disabled\n", 1079 "PCH VGA enabled on transcoder %c, should be disabled\n",
1033 pipe_name(pipe)); 1080 pipe_name(pipe));
1034 1081
1035 reg = PCH_LVDS; 1082 reg = PCH_LVDS;
1036 val = I915_READ(reg); 1083 val = I915_READ(reg);
1037 WARN(LVDS_PIPE_ENABLED(val, pipe), 1084 WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1038 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1085 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1039 pipe_name(pipe)); 1086 pipe_name(pipe));
1040 1087
@@ -1360,7 +1407,7 @@ static void disable_pch_dp(struct drm_i915_private *dev_priv,
1360 enum pipe pipe, int reg, u32 port_sel) 1407 enum pipe pipe, int reg, u32 port_sel)
1361{ 1408{
1362 u32 val = I915_READ(reg); 1409 u32 val = I915_READ(reg);
1363 if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { 1410 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1364 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); 1411 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1365 I915_WRITE(reg, val & ~DP_PORT_EN); 1412 I915_WRITE(reg, val & ~DP_PORT_EN);
1366 } 1413 }
@@ -1370,7 +1417,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1370 enum pipe pipe, int reg) 1417 enum pipe pipe, int reg)
1371{ 1418{
1372 u32 val = I915_READ(reg); 1419 u32 val = I915_READ(reg);
1373 if (HDMI_PIPE_ENABLED(val, pipe)) { 1420 if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1374 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1421 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1375 reg, pipe); 1422 reg, pipe);
1376 I915_WRITE(reg, val & ~PORT_ENABLE); 1423 I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1392,12 +1439,13 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1392 1439
1393 reg = PCH_ADPA; 1440 reg = PCH_ADPA;
1394 val = I915_READ(reg); 1441 val = I915_READ(reg);
1395 if (ADPA_PIPE_ENABLED(val, pipe)) 1442 if (adpa_pipe_enabled(dev_priv, pipe, val))
1396 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1443 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1397 1444
1398 reg = PCH_LVDS; 1445 reg = PCH_LVDS;
1399 val = I915_READ(reg); 1446 val = I915_READ(reg);
1400 if (LVDS_PIPE_ENABLED(val, pipe)) { 1447 if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1448 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1401 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1449 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1402 POSTING_READ(reg); 1450 POSTING_READ(reg);
1403 udelay(100); 1451 udelay(100);
@@ -5049,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5049 return ret; 5097 return ret;
5050} 5098}
5051 5099
5100static void ironlake_update_pch_refclk(struct drm_device *dev)
5101{
5102 struct drm_i915_private *dev_priv = dev->dev_private;
5103 struct drm_mode_config *mode_config = &dev->mode_config;
5104 struct drm_crtc *crtc;
5105 struct intel_encoder *encoder;
5106 struct intel_encoder *has_edp_encoder = NULL;
5107 u32 temp;
5108 bool has_lvds = false;
5109
5110 /* We need to take the global config into account */
5111 list_for_each_entry(crtc, &mode_config->crtc_list, head) {
5112 if (!crtc->enabled)
5113 continue;
5114
5115 list_for_each_entry(encoder, &mode_config->encoder_list,
5116 base.head) {
5117 if (encoder->base.crtc != crtc)
5118 continue;
5119
5120 switch (encoder->type) {
5121 case INTEL_OUTPUT_LVDS:
5122 has_lvds = true;
5123 case INTEL_OUTPUT_EDP:
5124 has_edp_encoder = encoder;
5125 break;
5126 }
5127 }
5128 }
5129
5130 /* Ironlake: try to setup display ref clock before DPLL
5131 * enabling. This is only under driver's control after
5132 * PCH B stepping, previous chipset stepping should be
5133 * ignoring this setting.
5134 */
5135 temp = I915_READ(PCH_DREF_CONTROL);
5136 /* Always enable nonspread source */
5137 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5138 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5139 temp &= ~DREF_SSC_SOURCE_MASK;
5140 temp |= DREF_SSC_SOURCE_ENABLE;
5141 I915_WRITE(PCH_DREF_CONTROL, temp);
5142
5143 POSTING_READ(PCH_DREF_CONTROL);
5144 udelay(200);
5145
5146 if (has_edp_encoder) {
5147 if (intel_panel_use_ssc(dev_priv)) {
5148 temp |= DREF_SSC1_ENABLE;
5149 I915_WRITE(PCH_DREF_CONTROL, temp);
5150
5151 POSTING_READ(PCH_DREF_CONTROL);
5152 udelay(200);
5153 }
5154 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5155
5156 /* Enable CPU source on CPU attached eDP */
5157 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5158 if (intel_panel_use_ssc(dev_priv))
5159 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5160 else
5161 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5162 } else {
5163 /* Enable SSC on PCH eDP if needed */
5164 if (intel_panel_use_ssc(dev_priv)) {
5165 DRM_ERROR("enabling SSC on PCH\n");
5166 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5167 }
5168 }
5169 I915_WRITE(PCH_DREF_CONTROL, temp);
5170 POSTING_READ(PCH_DREF_CONTROL);
5171 udelay(200);
5172 }
5173}
5174
5052static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5175static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5053 struct drm_display_mode *mode, 5176 struct drm_display_mode *mode,
5054 struct drm_display_mode *adjusted_mode, 5177 struct drm_display_mode *adjusted_mode,
@@ -5244,49 +5367,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5244 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5367 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5245 &m_n); 5368 &m_n);
5246 5369
5247 /* Ironlake: try to setup display ref clock before DPLL 5370 ironlake_update_pch_refclk(dev);
5248 * enabling. This is only under driver's control after
5249 * PCH B stepping, previous chipset stepping should be
5250 * ignoring this setting.
5251 */
5252 temp = I915_READ(PCH_DREF_CONTROL);
5253 /* Always enable nonspread source */
5254 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5255 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5256 temp &= ~DREF_SSC_SOURCE_MASK;
5257 temp |= DREF_SSC_SOURCE_ENABLE;
5258 I915_WRITE(PCH_DREF_CONTROL, temp);
5259
5260 POSTING_READ(PCH_DREF_CONTROL);
5261 udelay(200);
5262
5263 if (has_edp_encoder) {
5264 if (intel_panel_use_ssc(dev_priv)) {
5265 temp |= DREF_SSC1_ENABLE;
5266 I915_WRITE(PCH_DREF_CONTROL, temp);
5267
5268 POSTING_READ(PCH_DREF_CONTROL);
5269 udelay(200);
5270 }
5271 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5272
5273 /* Enable CPU source on CPU attached eDP */
5274 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5275 if (intel_panel_use_ssc(dev_priv))
5276 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5277 else
5278 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5279 } else {
5280 /* Enable SSC on PCH eDP if needed */
5281 if (intel_panel_use_ssc(dev_priv)) {
5282 DRM_ERROR("enabling SSC on PCH\n");
5283 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5284 }
5285 }
5286 I915_WRITE(PCH_DREF_CONTROL, temp);
5287 POSTING_READ(PCH_DREF_CONTROL);
5288 udelay(200);
5289 }
5290 5371
5291 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5372 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5292 if (has_reduced_clock) 5373 if (has_reduced_clock)
@@ -7157,8 +7238,6 @@ static void intel_setup_outputs(struct drm_device *dev)
7157 intel_encoder_clones(dev, encoder->clone_mask); 7238 intel_encoder_clones(dev, encoder->clone_mask);
7158 } 7239 }
7159 7240
7160 intel_panel_setup_backlight(dev);
7161
7162 /* disable all the possible outputs/crtcs before entering KMS mode */ 7241 /* disable all the possible outputs/crtcs before entering KMS mode */
7163 drm_helper_disable_unused_functions(dev); 7242 drm_helper_disable_unused_functions(dev);
7164} 7243}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0feae908bb37..44fef5e1c490 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1841,6 +1841,11 @@ done:
1841static void 1841static void
1842intel_dp_destroy (struct drm_connector *connector) 1842intel_dp_destroy (struct drm_connector *connector)
1843{ 1843{
1844 struct drm_device *dev = connector->dev;
1845
1846 if (intel_dpd_is_edp(dev))
1847 intel_panel_destroy_backlight(dev);
1848
1844 drm_sysfs_connector_remove(connector); 1849 drm_sysfs_connector_remove(connector);
1845 drm_connector_cleanup(connector); 1850 drm_connector_cleanup(connector);
1846 kfree(connector); 1851 kfree(connector);
@@ -2072,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2072 DRM_MODE_TYPE_PREFERRED; 2077 DRM_MODE_TYPE_PREFERRED;
2073 } 2078 }
2074 } 2079 }
2080 dev_priv->int_edp_connector = connector;
2081 intel_panel_setup_backlight(dev);
2075 } 2082 }
2076 2083
2077 intel_dp_add_properties(intel_dp, connector); 2084 intel_dp_add_properties(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7b330e76a435..0b2ee9d39980 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -297,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
297extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 297extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
298extern u32 intel_panel_get_backlight(struct drm_device *dev); 298extern u32 intel_panel_get_backlight(struct drm_device *dev);
299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
300extern void intel_panel_setup_backlight(struct drm_device *dev); 300extern int intel_panel_setup_backlight(struct drm_device *dev);
301extern void intel_panel_enable_backlight(struct drm_device *dev); 301extern void intel_panel_enable_backlight(struct drm_device *dev);
302extern void intel_panel_disable_backlight(struct drm_device *dev); 302extern void intel_panel_disable_backlight(struct drm_device *dev);
303extern void intel_panel_destroy_backlight(struct drm_device *dev);
303extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 304extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
304 305
305extern void intel_crtc_load_lut(struct drm_crtc *crtc); 306extern void intel_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2e8ddfcba40c..31da77f5c051 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
72{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev; 73 struct drm_device *dev = intel_lvds->base.base.dev;
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 ctl_reg, lvds_reg; 75 u32 ctl_reg, lvds_reg, stat_reg;
76 76
77 if (HAS_PCH_SPLIT(dev)) { 77 if (HAS_PCH_SPLIT(dev)) {
78 ctl_reg = PCH_PP_CONTROL; 78 ctl_reg = PCH_PP_CONTROL;
79 lvds_reg = PCH_LVDS; 79 lvds_reg = PCH_LVDS;
80 stat_reg = PCH_PP_STATUS;
80 } else { 81 } else {
81 ctl_reg = PP_CONTROL; 82 ctl_reg = PP_CONTROL;
82 lvds_reg = LVDS; 83 lvds_reg = LVDS;
84 stat_reg = PP_STATUS;
83 } 85 }
84 86
85 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 87 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
94 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 96 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
95 intel_lvds->pfit_control, 97 intel_lvds->pfit_control,
96 intel_lvds->pfit_pgm_ratios); 98 intel_lvds->pfit_pgm_ratios);
97 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { 99
98 DRM_ERROR("timed out waiting for panel to power off\n"); 100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
99 } else { 101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 102 intel_lvds->pfit_dirty = false;
101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
102 intel_lvds->pfit_dirty = false;
103 }
104 } 103 }
105 104
106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 105 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
107 POSTING_READ(lvds_reg); 106 POSTING_READ(lvds_reg);
107 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
108 DRM_ERROR("timed out waiting for panel to power on\n");
108 109
109 intel_panel_enable_backlight(dev); 110 intel_panel_enable_backlight(dev);
110} 111}
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
113{ 114{
114 struct drm_device *dev = intel_lvds->base.base.dev; 115 struct drm_device *dev = intel_lvds->base.base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = dev->dev_private;
116 u32 ctl_reg, lvds_reg; 117 u32 ctl_reg, lvds_reg, stat_reg;
117 118
118 if (HAS_PCH_SPLIT(dev)) { 119 if (HAS_PCH_SPLIT(dev)) {
119 ctl_reg = PCH_PP_CONTROL; 120 ctl_reg = PCH_PP_CONTROL;
120 lvds_reg = PCH_LVDS; 121 lvds_reg = PCH_LVDS;
122 stat_reg = PCH_PP_STATUS;
121 } else { 123 } else {
122 ctl_reg = PP_CONTROL; 124 ctl_reg = PP_CONTROL;
123 lvds_reg = LVDS; 125 lvds_reg = LVDS;
126 stat_reg = PP_STATUS;
124 } 127 }
125 128
126 intel_panel_disable_backlight(dev); 129 intel_panel_disable_backlight(dev);
127 130
128 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 131 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
132 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
133 DRM_ERROR("timed out waiting for panel to power off\n");
129 134
130 if (intel_lvds->pfit_control) { 135 if (intel_lvds->pfit_control) {
131 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
132 DRM_ERROR("timed out waiting for panel to power off\n");
133
134 I915_WRITE(PFIT_CONTROL, 0); 136 I915_WRITE(PFIT_CONTROL, 0);
135 intel_lvds->pfit_dirty = true; 137 intel_lvds->pfit_dirty = true;
136 } 138 }
@@ -398,53 +400,21 @@ out:
398 400
399static void intel_lvds_prepare(struct drm_encoder *encoder) 401static void intel_lvds_prepare(struct drm_encoder *encoder)
400{ 402{
401 struct drm_device *dev = encoder->dev;
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
404 404
405 /* We try to do the minimum that is necessary in order to unlock 405 /*
406 * the registers for mode setting.
407 *
408 * On Ironlake, this is quite simple as we just set the unlock key
409 * and ignore all subtleties. (This may cause some issues...)
410 *
411 * Prior to Ironlake, we must disable the pipe if we want to adjust 406 * Prior to Ironlake, we must disable the pipe if we want to adjust
412 * the panel fitter. However at all other times we can just reset 407 * the panel fitter. However at all other times we can just reset
413 * the registers regardless. 408 * the registers regardless.
414 */ 409 */
415 410 if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
416 if (HAS_PCH_SPLIT(dev)) { 411 intel_lvds_disable(intel_lvds);
417 I915_WRITE(PCH_PP_CONTROL,
418 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
419 } else if (intel_lvds->pfit_dirty) {
420 I915_WRITE(PP_CONTROL,
421 (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
422 & ~POWER_TARGET_ON);
423 } else {
424 I915_WRITE(PP_CONTROL,
425 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
426 }
427} 412}
428 413
429static void intel_lvds_commit(struct drm_encoder *encoder) 414static void intel_lvds_commit(struct drm_encoder *encoder)
430{ 415{
431 struct drm_device *dev = encoder->dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
434 417
435 /* Undo any unlocking done in prepare to prevent accidental
436 * adjustment of the registers.
437 */
438 if (HAS_PCH_SPLIT(dev)) {
439 u32 val = I915_READ(PCH_PP_CONTROL);
440 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
441 I915_WRITE(PCH_PP_CONTROL, val & 0x3);
442 } else {
443 u32 val = I915_READ(PP_CONTROL);
444 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
445 I915_WRITE(PP_CONTROL, val & 0x3);
446 }
447
448 /* Always do a full power on as we do not know what state 418 /* Always do a full power on as we do not know what state
449 * we were left in. 419 * we were left in.
450 */ 420 */
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector)
582 struct drm_device *dev = connector->dev; 552 struct drm_device *dev = connector->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = dev->dev_private;
584 554
555 intel_panel_destroy_backlight(dev);
556
585 if (dev_priv->lid_notifier.notifier_call) 557 if (dev_priv->lid_notifier.notifier_call)
586 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 558 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
587 drm_sysfs_connector_remove(connector); 559 drm_sysfs_connector_remove(connector);
@@ -1040,6 +1012,19 @@ out:
1040 pwm = I915_READ(BLC_PWM_PCH_CTL1); 1012 pwm = I915_READ(BLC_PWM_PCH_CTL1);
1041 pwm |= PWM_PCH_ENABLE; 1013 pwm |= PWM_PCH_ENABLE;
1042 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1014 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1015 /*
1016 * Unlock registers and just
1017 * leave them unlocked
1018 */
1019 I915_WRITE(PCH_PP_CONTROL,
1020 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1021 } else {
1022 /*
1023 * Unlock registers and just
1024 * leave them unlocked
1025 */
1026 I915_WRITE(PP_CONTROL,
1027 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1043 } 1028 }
1044 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1029 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1045 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1030 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1049,6 +1034,9 @@ out:
1049 /* keep the LVDS connector */ 1034 /* keep the LVDS connector */
1050 dev_priv->int_lvds_connector = connector; 1035 dev_priv->int_lvds_connector = connector;
1051 drm_sysfs_connector_add(connector); 1036 drm_sysfs_connector_add(connector);
1037
1038 intel_panel_setup_backlight(dev);
1039
1052 return true; 1040 return true;
1053 1041
1054failed: 1042failed:
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b7c5ddb564d1..b8e8158bb16e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
227 asle->aslc = asle_stat; 227 asle->aslc = asle_stat;
228} 228}
229 229
230/* Only present on Ironlake+ */
231void intel_opregion_gse_intr(struct drm_device *dev) 230void intel_opregion_gse_intr(struct drm_device *dev)
232{ 231{
233 struct drm_i915_private *dev_priv = dev->dev_private; 232 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 05f500cd9c24..a9e0c7bcd317 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -277,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
277 dev_priv->backlight_enabled = true; 277 dev_priv->backlight_enabled = true;
278} 278}
279 279
280void intel_panel_setup_backlight(struct drm_device *dev) 280static void intel_panel_init_backlight(struct drm_device *dev)
281{ 281{
282 struct drm_i915_private *dev_priv = dev->dev_private; 282 struct drm_i915_private *dev_priv = dev->dev_private;
283 283
@@ -309,3 +309,73 @@ intel_panel_detect(struct drm_device *dev)
309 309
310 return connector_status_unknown; 310 return connector_status_unknown;
311} 311}
312
313#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
314static int intel_panel_update_status(struct backlight_device *bd)
315{
316 struct drm_device *dev = bl_get_data(bd);
317 intel_panel_set_backlight(dev, bd->props.brightness);
318 return 0;
319}
320
321static int intel_panel_get_brightness(struct backlight_device *bd)
322{
323 struct drm_device *dev = bl_get_data(bd);
324 return intel_panel_get_backlight(dev);
325}
326
327static const struct backlight_ops intel_panel_bl_ops = {
328 .update_status = intel_panel_update_status,
329 .get_brightness = intel_panel_get_brightness,
330};
331
332int intel_panel_setup_backlight(struct drm_device *dev)
333{
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 struct backlight_properties props;
336 struct drm_connector *connector;
337
338 intel_panel_init_backlight(dev);
339
340 if (dev_priv->int_lvds_connector)
341 connector = dev_priv->int_lvds_connector;
342 else if (dev_priv->int_edp_connector)
343 connector = dev_priv->int_edp_connector;
344 else
345 return -ENODEV;
346
347 props.type = BACKLIGHT_RAW;
348 props.max_brightness = intel_panel_get_max_backlight(dev);
349 dev_priv->backlight =
350 backlight_device_register("intel_backlight",
351 &connector->kdev, dev,
352 &intel_panel_bl_ops, &props);
353
354 if (IS_ERR(dev_priv->backlight)) {
355 DRM_ERROR("Failed to register backlight: %ld\n",
356 PTR_ERR(dev_priv->backlight));
357 dev_priv->backlight = NULL;
358 return -ENODEV;
359 }
360 dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
361 return 0;
362}
363
364void intel_panel_destroy_backlight(struct drm_device *dev)
365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 if (dev_priv->backlight)
368 backlight_device_unregister(dev_priv->backlight);
369}
370#else
371int intel_panel_setup_backlight(struct drm_device *dev)
372{
373 intel_panel_init_backlight(dev);
374 return 0;
375}
376
377void intel_panel_destroy_backlight(struct drm_device *dev)
378{
379 return;
380}
381#endif
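
The intel_panel.c change above hinges on the CONFIG_BACKLIGHT_CLASS_DEVICE #ifdef: real register/unregister implementations when the backlight class is built in, empty stubs otherwise, so the LVDS and eDP call sites never need conditionals of their own. Below is a minimal, self-contained sketch of that stub pattern; FEATURE_BACKLIGHT and both functions are hypothetical stand-ins, not the i915 symbols.

#include <stdio.h>

#ifdef FEATURE_BACKLIGHT
/* feature compiled in: do the real work */
static int backlight_setup(void)
{
	printf("registering backlight device\n");
	return 0;
}

static void backlight_destroy(void)
{
	printf("unregistering backlight device\n");
}
#else
/* feature compiled out: no-op stubs keep the callers unconditional */
static int backlight_setup(void)
{
	return 0;
}

static void backlight_destroy(void)
{
}
#endif

int main(void)
{
	if (backlight_setup())
		return 1;
	backlight_destroy();
	return 0;
}
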
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 47b9b2777038..c30626ea9f93 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring)
290 if (IS_GEN6(dev) || IS_GEN7(dev)) 290 if (IS_GEN6(dev) || IS_GEN7(dev))
291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292 I915_WRITE(MI_MODE, mode); 292 I915_WRITE(MI_MODE, mode);
293 if (IS_GEN7(dev))
294 I915_WRITE(GFX_MODE_GEN7,
295 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
296 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
293 } 297 }
294 298
295 if (INTEL_INFO(dev)->gen >= 6) { 299 if (INTEL_INFO(dev)->gen >= 6) {
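
The GEN7 write added above uses the GFX_MODE_ENABLE/GFX_MODE_DISABLE helpers, and the MI_MODE line just before it spells out the same convention by hand: on these "masked" registers the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, avoiding a read-modify-write. A rough illustration of the arithmetic only; the helper names and bit positions below are invented, not the i915 definitions.

#include <stdint.h>
#include <stdio.h>

static uint32_t masked_enable(uint32_t bits)
{
	return (bits << 16) | bits;	/* select the bits and set them */
}

static uint32_t masked_disable(uint32_t bits)
{
	return bits << 16;		/* select the bits, leave them clear */
}

int main(void)
{
	uint32_t replay_mode = 1u << 0;			/* hypothetical bit */
	uint32_t tlb_invalidate_always = 1u << 3;	/* hypothetical bit */

	printf("enable:  0x%08x\n", masked_enable(replay_mode));
	printf("disable: 0x%08x\n", masked_disable(tlb_invalidate_always));
	return 0;
}
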
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8d02d875376d..c919cfc8f2fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
530 nouveau_gpuobj_ref(NULL, &obj); 530 nouveau_gpuobj_ref(NULL, &obj);
531 if (ret) 531 if (ret)
532 return ret; 532 return ret;
533 } else { 533 } else
534 if (USE_SEMA(dev)) {
534 /* map fence bo into channel's vm */ 535 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, 536 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma); 537 &chan->fence.vma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c444cadbf849..2706cb3d871a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
37 return -ENOMEM; 37 return -ENOMEM;
38 38
39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); 39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
40 if (!nvbe->ttm_alloced) 40 if (!nvbe->ttm_alloced) {
41 kfree(nvbe->pages);
42 nvbe->pages = NULL;
41 return -ENOMEM; 43 return -ENOMEM;
44 }
42 45
43 nvbe->nr_pages = 0; 46 nvbe->nr_pages = 0;
44 while (num_pages--) { 47 while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
126 129
127 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 130 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
128 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 131 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
129 dma_offset += NV_CTXDMA_PAGE_SIZE; 132 offset_l += NV_CTXDMA_PAGE_SIZE;
130 } 133 }
131 } 134 }
132 135
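
The nouveau_sgdma_populate() fix above restores the usual unwind-on-error rule: when the second allocation fails, the first one must be freed and its pointer cleared before returning, so later teardown does not double-free or trip over a stale pointer. A small standalone sketch of the same pattern, with plain malloc/free standing in for the kernel allocators:

#include <stdlib.h>

struct backend {
	void **pages;
	_Bool *ttm_alloced;
};

static int backend_populate(struct backend *be, unsigned long num_pages)
{
	be->pages = calloc(num_pages, sizeof(*be->pages));
	if (!be->pages)
		return -1;

	be->ttm_alloced = calloc(num_pages, sizeof(*be->ttm_alloced));
	if (!be->ttm_alloced) {
		free(be->pages);	/* undo the first allocation */
		be->pages = NULL;	/* leave the struct in a clean state */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct backend be = { 0 };

	if (backend_populate(&be, 16))
		return 1;
	free(be.ttm_alloced);
	free(be.pages);
	return 0;
}
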
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 118261d4927a..5e45398a9e2d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
781 struct drm_device *dev = crtc->dev; 781 struct drm_device *dev = crtc->dev;
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 782 struct drm_nouveau_private *dev_priv = dev->dev_private;
783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
784 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 784 struct drm_framebuffer *drm_fb;
785 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 785 struct nouveau_framebuffer *fb;
786 int arb_burst, arb_lwm; 786 int arb_burst, arb_lwm;
787 int ret; 787 int ret;
788 788
789 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
790
791 /* no fb bound */
792 if (!atomic && !crtc->fb) {
793 NV_DEBUG_KMS(dev, "No FB bound\n");
794 return 0;
795 }
796
797
789 /* If atomic, we want to switch to the fb we were passed, so 798 /* If atomic, we want to switch to the fb we were passed, so
790 * now we update pointers to do that. (We don't pin; just 799 * now we update pointers to do that. (We don't pin; just
791 * assume we're already pinned and update the base address.) 800 * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
794 drm_fb = passed_fb; 803 drm_fb = passed_fb;
795 fb = nouveau_framebuffer(passed_fb); 804 fb = nouveau_framebuffer(passed_fb);
796 } else { 805 } else {
806 drm_fb = crtc->fb;
807 fb = nouveau_framebuffer(crtc->fb);
797 /* If not atomic, we can go ahead and pin, and unpin the 808 /* If not atomic, we can go ahead and pin, and unpin the
798 * old fb we were passed. 809 * old fb we were passed.
799 */ 810 */
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 46ad59ea2185..5d989073ba6e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
519 struct drm_device *dev = nv_crtc->base.dev; 519 struct drm_device *dev = nv_crtc->base.dev;
520 struct drm_nouveau_private *dev_priv = dev->dev_private; 520 struct drm_nouveau_private *dev_priv = dev->dev_private;
521 struct nouveau_channel *evo = nv50_display(dev)->master; 521 struct nouveau_channel *evo = nv50_display(dev)->master;
522 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 522 struct drm_framebuffer *drm_fb;
523 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 523 struct nouveau_framebuffer *fb;
524 int ret; 524 int ret;
525 525
526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
527 527
528 /* no fb bound */
529 if (!atomic && !crtc->fb) {
530 NV_DEBUG_KMS(dev, "No FB bound\n");
531 return 0;
532 }
533
528 /* If atomic, we want to switch to the fb we were passed, so 534 /* If atomic, we want to switch to the fb we were passed, so
529 * now we update pointers to do that. (We don't pin; just 535 * now we update pointers to do that. (We don't pin; just
530 * assume we're already pinned and update the base address.) 536 * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
533 drm_fb = passed_fb; 539 drm_fb = passed_fb;
534 fb = nouveau_framebuffer(passed_fb); 540 fb = nouveau_framebuffer(passed_fb);
535 } else { 541 } else {
542 drm_fb = crtc->fb;
543 fb = nouveau_framebuffer(crtc->fb);
536 /* If not atomic, we can go ahead and pin, and unpin the 544 /* If not atomic, we can go ahead and pin, and unpin the
537 * old fb we were passed. 545 * old fb we were passed.
538 */ 546 */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 645b84b3d203..7ad43c6b1db7 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
613 return true; 613 return true;
614} 614}
615 615
616bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
617{
618 u8 link_status[DP_LINK_STATUS_SIZE];
619 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
620
621 if (!radeon_dp_get_link_status(radeon_connector, link_status))
622 return false;
623 if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
624 return false;
625 return true;
626}
627
616struct radeon_dp_link_train_info { 628struct radeon_dp_link_train_info {
617 struct radeon_device *rdev; 629 struct radeon_device *rdev;
618 struct drm_encoder *encoder; 630 struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14dce9f22172..e8a746712b5b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 41void evergreen_fini(struct radeon_device *rdev);
42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43 43
44void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
45{
46 u16 ctl, v;
47 int cap, err;
48
49 cap = pci_pcie_cap(rdev->pdev);
50 if (!cap)
51 return;
52
53 err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
54 if (err)
55 return;
56
57 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
58
59 /* if BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
60 * to avoid hangs or performance issues
61 */

62 if ((v == 0) || (v == 6) || (v == 7)) {
63 ctl &= ~PCI_EXP_DEVCTL_READRQ;
64 ctl |= (2 << 12);
65 pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
66 }
67}
68
44void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 69void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
45{ 70{
46 /* enable the pflip int */ 71 /* enable the pflip int */
@@ -743,7 +768,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
743 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || 768 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
744 !evergreen_check_latency_hiding(&wm) || 769 !evergreen_check_latency_hiding(&wm) ||
745 (rdev->disp_priority == 2)) { 770 (rdev->disp_priority == 2)) {
746 DRM_INFO("force priority to high\n"); 771 DRM_DEBUG_KMS("force priority to high\n");
747 priority_a_cnt |= PRIORITY_ALWAYS_ON; 772 priority_a_cnt |= PRIORITY_ALWAYS_ON;
748 priority_b_cnt |= PRIORITY_ALWAYS_ON; 773 priority_b_cnt |= PRIORITY_ALWAYS_ON;
749 } 774 }
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1357 SOFT_RESET_PA | 1382 SOFT_RESET_PA |
1358 SOFT_RESET_SH | 1383 SOFT_RESET_SH |
1359 SOFT_RESET_VGT | 1384 SOFT_RESET_VGT |
1385 SOFT_RESET_SPI |
1360 SOFT_RESET_SX)); 1386 SOFT_RESET_SX));
1361 RREG32(GRBM_SOFT_RESET); 1387 RREG32(GRBM_SOFT_RESET);
1362 mdelay(15); 1388 mdelay(15);
@@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1378 /* Initialize the ring buffer's read and write pointers */ 1404 /* Initialize the ring buffer's read and write pointers */
1379 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1405 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1380 WREG32(CP_RB_RPTR_WR, 0); 1406 WREG32(CP_RB_RPTR_WR, 0);
1381 WREG32(CP_RB_WPTR, 0); 1407 rdev->cp.wptr = 0;
1408 WREG32(CP_RB_WPTR, rdev->cp.wptr);
1382 1409
1383 /* set the wb address wether it's enabled or not */ 1410 /* set the wb address wether it's enabled or not */
1384 WREG32(CP_RB_RPTR_ADDR, 1411 WREG32(CP_RB_RPTR_ADDR,
@@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1400 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1427 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1401 1428
1402 rdev->cp.rptr = RREG32(CP_RB_RPTR); 1429 rdev->cp.rptr = RREG32(CP_RB_RPTR);
1403 rdev->cp.wptr = RREG32(CP_RB_WPTR);
1404 1430
1405 evergreen_cp_start(rdev); 1431 evergreen_cp_start(rdev);
1406 rdev->cp.ready = true; 1432 rdev->cp.ready = true;
@@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1862 1888
1863 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1889 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1864 1890
1891 evergreen_fix_pci_max_read_req_size(rdev);
1892
1865 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; 1893 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1866 1894
1867 cc_gc_shader_pipe_config |= 1895 cc_gc_shader_pipe_config |=
@@ -3143,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
3143} 3171}
3144 3172
3145int evergreen_copy_blit(struct radeon_device *rdev, 3173int evergreen_copy_blit(struct radeon_device *rdev,
3146 uint64_t src_offset, uint64_t dst_offset, 3174 uint64_t src_offset,
3147 unsigned num_pages, struct radeon_fence *fence) 3175 uint64_t dst_offset,
3176 unsigned num_gpu_pages,
3177 struct radeon_fence *fence)
3148{ 3178{
3149 int r; 3179 int r;
3150 3180
3151 mutex_lock(&rdev->r600_blit.mutex); 3181 mutex_lock(&rdev->r600_blit.mutex);
3152 rdev->r600_blit.vb_ib = NULL; 3182 rdev->r600_blit.vb_ib = NULL;
3153 r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 3183 r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
3154 if (r) { 3184 if (r) {
3155 if (rdev->r600_blit.vb_ib) 3185 if (rdev->r600_blit.vb_ib)
3156 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 3186 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
3157 mutex_unlock(&rdev->r600_blit.mutex); 3187 mutex_unlock(&rdev->r600_blit.mutex);
3158 return r; 3188 return r;
3159 } 3189 }
3160 evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 3190 evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
3161 evergreen_blit_done_copy(rdev, fence); 3191 evergreen_blit_done_copy(rdev, fence);
3162 mutex_unlock(&rdev->r600_blit.mutex); 3192 mutex_unlock(&rdev->r600_blit.mutex);
3163 return 0; 3193 return 0;
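
evergreen_fix_pci_max_read_req_size(), added above, adjusts the Max_Read_Request_Size field of the PCIe Device Control register (bits 14:12, PCI_EXP_DEVCTL_READRQ). The field encodes 128 << value bytes, encodings 6 and 7 are reserved, and the patch rewrites out-of-range values (and 0) to 2, i.e. 512 bytes. The sketch below redoes only that arithmetic with local stand-in names and performs no PCI config access.

#include <stdint.h>
#include <stdio.h>

#define DEVCTL_READRQ_MASK  0x7000	/* bits 14:12 */
#define DEVCTL_READRQ_SHIFT 12

static unsigned int readrq_bytes(uint16_t devctl)
{
	return 128u << ((devctl & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT);
}

static uint16_t readrq_fixup(uint16_t devctl)
{
	uint16_t v = (devctl & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT;

	if (v == 0 || v == 6 || v == 7) {	/* reserved or unwanted encodings */
		devctl &= ~DEVCTL_READRQ_MASK;
		devctl |= 2 << DEVCTL_READRQ_SHIFT;	/* 2 -> 512 bytes */
	}
	return devctl;
}

int main(void)
{
	uint16_t devctl = 0x6000;	/* reserved encoding 6 as an example */

	devctl = readrq_fixup(devctl);
	printf("max read request: %u bytes\n", readrq_bytes(devctl));
	return 0;
}
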
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 44c4750f4518..99fbd793c08c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
39extern void evergreen_mc_program(struct radeon_device *rdev); 39extern void evergreen_mc_program(struct radeon_device *rdev);
40extern void evergreen_irq_suspend(struct radeon_device *rdev); 40extern void evergreen_irq_suspend(struct radeon_device *rdev);
41extern int evergreen_mc_init(struct radeon_device *rdev); 41extern int evergreen_mc_init(struct radeon_device *rdev);
42extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
42 43
43#define EVERGREEN_PFP_UCODE_SIZE 1120 44#define EVERGREEN_PFP_UCODE_SIZE 1120
44#define EVERGREEN_PM4_UCODE_SIZE 1376 45#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
669 670
670 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 671 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
671 672
673 evergreen_fix_pci_max_read_req_size(rdev);
674
672 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 675 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
673 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 676 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
674 677
@@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
1159 SOFT_RESET_PA | 1162 SOFT_RESET_PA |
1160 SOFT_RESET_SH | 1163 SOFT_RESET_SH |
1161 SOFT_RESET_VGT | 1164 SOFT_RESET_VGT |
1165 SOFT_RESET_SPI |
1162 SOFT_RESET_SX)); 1166 SOFT_RESET_SX));
1163 RREG32(GRBM_SOFT_RESET); 1167 RREG32(GRBM_SOFT_RESET);
1164 mdelay(15); 1168 mdelay(15);
@@ -1183,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1183 1187
1184 /* Initialize the ring buffer's read and write pointers */ 1188 /* Initialize the ring buffer's read and write pointers */
1185 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); 1189 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
1186 WREG32(CP_RB0_WPTR, 0); 1190 rdev->cp.wptr = 0;
1191 WREG32(CP_RB0_WPTR, rdev->cp.wptr);
1187 1192
1188 /* set the wb address wether it's enabled or not */ 1193 /* set the wb address wether it's enabled or not */
1189 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 1194 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1203,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1203 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); 1208 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
1204 1209
1205 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1210 rdev->cp.rptr = RREG32(CP_RB0_RPTR);
1206 rdev->cp.wptr = RREG32(CP_RB0_WPTR);
1207 1211
1208 /* ring1 - compute only */ 1212 /* ring1 - compute only */
1209 /* Set ring buffer size */ 1213 /* Set ring buffer size */
@@ -1216,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1216 1220
1217 /* Initialize the ring buffer's read and write pointers */ 1221 /* Initialize the ring buffer's read and write pointers */
1218 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 1222 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
1219 WREG32(CP_RB1_WPTR, 0); 1223 rdev->cp1.wptr = 0;
1224 WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
1220 1225
1221 /* set the wb address wether it's enabled or not */ 1226 /* set the wb address wether it's enabled or not */
1222 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); 1227 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1228,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1228 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); 1233 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
1229 1234
1230 rdev->cp1.rptr = RREG32(CP_RB1_RPTR); 1235 rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
1231 rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
1232 1236
1233 /* ring2 - compute only */ 1237 /* ring2 - compute only */
1234 /* Set ring buffer size */ 1238 /* Set ring buffer size */
@@ -1241,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1241 1245
1242 /* Initialize the ring buffer's read and write pointers */ 1246 /* Initialize the ring buffer's read and write pointers */
1243 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 1247 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
1244 WREG32(CP_RB2_WPTR, 0); 1248 rdev->cp2.wptr = 0;
1249 WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
1245 1250
1246 /* set the wb address wether it's enabled or not */ 1251 /* set the wb address wether it's enabled or not */
1247 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); 1252 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1253,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1253 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); 1258 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
1254 1259
1255 rdev->cp2.rptr = RREG32(CP_RB2_RPTR); 1260 rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
1256 rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
1257 1261
1258 /* start the rings */ 1262 /* start the rings */
1259 cayman_cp_start(rdev); 1263 cayman_cp_start(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccdf..5b1837b4aacf 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
721int r100_copy_blit(struct radeon_device *rdev, 721int r100_copy_blit(struct radeon_device *rdev,
722 uint64_t src_offset, 722 uint64_t src_offset,
723 uint64_t dst_offset, 723 uint64_t dst_offset,
724 unsigned num_pages, 724 unsigned num_gpu_pages,
725 struct radeon_fence *fence) 725 struct radeon_fence *fence)
726{ 726{
727 uint32_t cur_pages; 727 uint32_t cur_pages;
728 uint32_t stride_bytes = PAGE_SIZE; 728 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
729 uint32_t pitch; 729 uint32_t pitch;
730 uint32_t stride_pixels; 730 uint32_t stride_pixels;
731 unsigned ndw; 731 unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
737 /* radeon pitch is /64 */ 737 /* radeon pitch is /64 */
738 pitch = stride_bytes / 64; 738 pitch = stride_bytes / 64;
739 stride_pixels = stride_bytes / 4; 739 stride_pixels = stride_bytes / 4;
740 num_loops = DIV_ROUND_UP(num_pages, 8191); 740 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
741 741
742 /* Ask for enough room for blit + flush + fence */ 742 /* Ask for enough room for blit + flush + fence */
743 ndw = 64 + (10 * num_loops); 743 ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
747 return -EINVAL; 747 return -EINVAL;
748 } 748 }
749 while (num_pages > 0) { 749 while (num_gpu_pages > 0) {
750 cur_pages = num_pages; 750 cur_pages = num_gpu_pages;
751 if (cur_pages > 8191) { 751 if (cur_pages > 8191) {
752 cur_pages = 8191; 752 cur_pages = 8191;
753 } 753 }
754 num_pages -= cur_pages; 754 num_gpu_pages -= cur_pages;
755 755
756 /* pages are in Y direction - height 756 /* pages are in Y direction - height
757 page width in X direction - width */ 757 page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
774 radeon_ring_write(rdev, 0); 774 radeon_ring_write(rdev, 0);
775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
776 radeon_ring_write(rdev, num_pages); 776 radeon_ring_write(rdev, cur_pages);
777 radeon_ring_write(rdev, num_pages); 777 radeon_ring_write(rdev, cur_pages);
778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
779 } 779 }
780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
990 /* Force read & write ptr to 0 */ 990 /* Force read & write ptr to 0 */
991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
992 WREG32(RADEON_CP_RB_RPTR_WR, 0); 992 WREG32(RADEON_CP_RB_RPTR_WR, 0);
993 WREG32(RADEON_CP_RB_WPTR, 0); 993 rdev->cp.wptr = 0;
994 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
994 995
995 /* set the wb address whether it's enabled or not */ 996 /* set the wb address whether it's enabled or not */
996 WREG32(R_00070C_CP_RB_RPTR_ADDR, 997 WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1007 WREG32(RADEON_CP_RB_CNTL, tmp); 1008 WREG32(RADEON_CP_RB_CNTL, tmp);
1008 udelay(10); 1009 udelay(10);
1009 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 1010 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
1010 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
1011 /* protect against crazy HW on resume */
1012 rdev->cp.wptr &= rdev->cp.ptr_mask;
1013 /* Set cp mode to bus mastering & enable cp*/ 1011 /* Set cp mode to bus mastering & enable cp*/
1014 WREG32(RADEON_CP_CSQ_MODE, 1012 WREG32(RADEON_CP_CSQ_MODE,
1015 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1013 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f24058300413..a1f3ba063c2d 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
84int r200_copy_dma(struct radeon_device *rdev, 84int r200_copy_dma(struct radeon_device *rdev,
85 uint64_t src_offset, 85 uint64_t src_offset,
86 uint64_t dst_offset, 86 uint64_t dst_offset,
87 unsigned num_pages, 87 unsigned num_gpu_pages,
88 struct radeon_fence *fence) 88 struct radeon_fence *fence)
89{ 89{
90 uint32_t size; 90 uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
93 int r = 0; 93 int r = 0;
94 94
95 /* radeon pitch is /64 */ 95 /* radeon pitch is /64 */
96 size = num_pages << PAGE_SHIFT; 96 size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF); 97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
98 r = radeon_ring_lock(rdev, num_loops * 4 + 64); 98 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
99 if (r) { 99 if (r) {
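
The num_pages -> num_gpu_pages renames in the radeon copy paths above (together with the new RADEON_GPU_PAGE_SHIFT) exist because the GPU always works in 4 KiB pages while the CPU PAGE_SIZE may be larger, for example 64 KiB on some architectures, so TTM's CPU-page counts have to be scaled before being handed to the blit/DMA routines. A small sketch of that conversion; the 64 KiB figure and the constant names are illustrative only.

#include <stdio.h>

#define GPU_PAGE_SIZE  4096u
#define GPU_PAGE_SHIFT 12
#define CPU_PAGE_SIZE  65536u		/* e.g. a 64 KiB PAGE_SIZE kernel */

int main(void)
{
	unsigned long num_cpu_pages = 32;
	unsigned long num_gpu_pages =
		num_cpu_pages * (CPU_PAGE_SIZE / GPU_PAGE_SIZE);
	unsigned long long bytes =
		(unsigned long long)num_gpu_pages << GPU_PAGE_SHIFT;

	printf("%lu CPU pages -> %lu GPU pages (%llu bytes)\n",
	       num_cpu_pages, num_gpu_pages, bytes);
	return 0;
}
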
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa0..720dd99163f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2209 /* Initialize the ring buffer's read and write pointers */ 2209 /* Initialize the ring buffer's read and write pointers */
2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2211 WREG32(CP_RB_RPTR_WR, 0); 2211 WREG32(CP_RB_RPTR_WR, 0);
2212 WREG32(CP_RB_WPTR, 0); 2212 rdev->cp.wptr = 0;
2213 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2213 2214
2214 /* set the wb address whether it's enabled or not */ 2215 /* set the wb address whether it's enabled or not */
2215 WREG32(CP_RB_RPTR_ADDR, 2216 WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
2231 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2232 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2232 2233
2233 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2234 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2234 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2235 2235
2236 r600_cp_start(rdev); 2236 r600_cp_start(rdev);
2237 rdev->cp.ready = true; 2237 rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2353} 2353}
2354 2354
2355int r600_copy_blit(struct radeon_device *rdev, 2355int r600_copy_blit(struct radeon_device *rdev,
2356 uint64_t src_offset, uint64_t dst_offset, 2356 uint64_t src_offset,
2357 unsigned num_pages, struct radeon_fence *fence) 2357 uint64_t dst_offset,
2358 unsigned num_gpu_pages,
2359 struct radeon_fence *fence)
2358{ 2360{
2359 int r; 2361 int r;
2360 2362
2361 mutex_lock(&rdev->r600_blit.mutex); 2363 mutex_lock(&rdev->r600_blit.mutex);
2362 rdev->r600_blit.vb_ib = NULL; 2364 rdev->r600_blit.vb_ib = NULL;
2363 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 2365 r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2364 if (r) { 2366 if (r) {
2365 if (rdev->r600_blit.vb_ib) 2367 if (rdev->r600_blit.vb_ib)
2366 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 2368 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2367 mutex_unlock(&rdev->r600_blit.mutex); 2369 mutex_unlock(&rdev->r600_blit.mutex);
2368 return r; 2370 return r;
2369 } 2371 }
2370 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 2372 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2371 r600_blit_done_copy(rdev, fence); 2373 r600_blit_done_copy(rdev, fence);
2372 mutex_unlock(&rdev->r600_blit.mutex); 2374 mutex_unlock(&rdev->r600_blit.mutex);
2373 return 0; 2375 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e2..c1e056b35b29 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
322 322
323#define RADEON_GPU_PAGE_SIZE 4096 323#define RADEON_GPU_PAGE_SIZE 4096
324#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 324#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
325#define RADEON_GPU_PAGE_SHIFT 12
325 326
326struct radeon_gart { 327struct radeon_gart {
327 dma_addr_t table_addr; 328 dma_addr_t table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
914 int (*copy_blit)(struct radeon_device *rdev, 915 int (*copy_blit)(struct radeon_device *rdev,
915 uint64_t src_offset, 916 uint64_t src_offset,
916 uint64_t dst_offset, 917 uint64_t dst_offset,
917 unsigned num_pages, 918 unsigned num_gpu_pages,
918 struct radeon_fence *fence); 919 struct radeon_fence *fence);
919 int (*copy_dma)(struct radeon_device *rdev, 920 int (*copy_dma)(struct radeon_device *rdev,
920 uint64_t src_offset, 921 uint64_t src_offset,
921 uint64_t dst_offset, 922 uint64_t dst_offset,
922 unsigned num_pages, 923 unsigned num_gpu_pages,
923 struct radeon_fence *fence); 924 struct radeon_fence *fence);
924 int (*copy)(struct radeon_device *rdev, 925 int (*copy)(struct radeon_device *rdev,
925 uint64_t src_offset, 926 uint64_t src_offset,
926 uint64_t dst_offset, 927 uint64_t dst_offset,
927 unsigned num_pages, 928 unsigned num_gpu_pages,
928 struct radeon_fence *fence); 929 struct radeon_fence *fence);
929 uint32_t (*get_engine_clock)(struct radeon_device *rdev); 930 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
930 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 931 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9a..3dedaa07aac1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
75int r100_copy_blit(struct radeon_device *rdev, 75int r100_copy_blit(struct radeon_device *rdev,
76 uint64_t src_offset, 76 uint64_t src_offset,
77 uint64_t dst_offset, 77 uint64_t dst_offset,
78 unsigned num_pages, 78 unsigned num_gpu_pages,
79 struct radeon_fence *fence); 79 struct radeon_fence *fence);
80int r100_set_surface_reg(struct radeon_device *rdev, int reg, 80int r100_set_surface_reg(struct radeon_device *rdev, int reg,
81 uint32_t tiling_flags, uint32_t pitch, 81 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
143extern int r200_copy_dma(struct radeon_device *rdev, 143extern int r200_copy_dma(struct radeon_device *rdev,
144 uint64_t src_offset, 144 uint64_t src_offset,
145 uint64_t dst_offset, 145 uint64_t dst_offset,
146 unsigned num_pages, 146 unsigned num_gpu_pages,
147 struct radeon_fence *fence); 147 struct radeon_fence *fence);
148void r200_set_safe_registers(struct radeon_device *rdev); 148void r200_set_safe_registers(struct radeon_device *rdev);
149 149
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
311int r600_ring_test(struct radeon_device *rdev); 311int r600_ring_test(struct radeon_device *rdev);
312int r600_copy_blit(struct radeon_device *rdev, 312int r600_copy_blit(struct radeon_device *rdev,
313 uint64_t src_offset, uint64_t dst_offset, 313 uint64_t src_offset, uint64_t dst_offset,
314 unsigned num_pages, struct radeon_fence *fence); 314 unsigned num_gpu_pages, struct radeon_fence *fence);
315void r600_hpd_init(struct radeon_device *rdev); 315void r600_hpd_init(struct radeon_device *rdev);
316void r600_hpd_fini(struct radeon_device *rdev); 316void r600_hpd_fini(struct radeon_device *rdev);
317bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 317bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
403void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 403void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
404int evergreen_copy_blit(struct radeon_device *rdev, 404int evergreen_copy_blit(struct radeon_device *rdev,
405 uint64_t src_offset, uint64_t dst_offset, 405 uint64_t src_offset, uint64_t dst_offset,
406 unsigned num_pages, struct radeon_fence *fence); 406 unsigned num_gpu_pages, struct radeon_fence *fence);
407void evergreen_hpd_init(struct radeon_device *rdev); 407void evergreen_hpd_init(struct radeon_device *rdev);
408void evergreen_hpd_fini(struct radeon_device *rdev); 408void evergreen_hpd_fini(struct radeon_device *rdev);
409bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 409bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index dcd0863e31ae..b6e18c8db9f5 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
219 } else { 219 } else {
220 DRM_INFO("Using generic clock info\n"); 220 DRM_INFO("Using generic clock info\n");
221 221
222 /* may need to be per card */
223 rdev->clock.max_pixel_clock = 35000;
224
222 if (rdev->flags & RADEON_IS_IGP) { 225 if (rdev->flags & RADEON_IS_IGP) {
223 p1pll->reference_freq = 1432; 226 p1pll->reference_freq = 1432;
224 p2pll->reference_freq = 1432; 227 p2pll->reference_freq = 1432;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e0138b674aca..63675241c7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
3298 rdev->pdev->subsystem_device == 0x30a4) 3298 rdev->pdev->subsystem_device == 0x30a4)
3299 return; 3299 return;
3300 3300
3301 /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
3302 * - it hangs on resume inside the dynclk 1 table.
3303 */
3304 if (rdev->family == CHIP_RS480 &&
3305 rdev->pdev->subsystem_vendor == 0x103c &&
3306 rdev->pdev->subsystem_device == 0x30ae)
3307 return;
3308
3301 /* DYN CLK 1 */ 3309 /* DYN CLK 1 */
3302 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3310 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3303 if (table) 3311 if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 6d6b5f16bc09..c4b8741dbf58 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector)
60 60
61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
62 62
63 /* powering up/down the eDP panel generates hpd events which 63 /* if the connector is already off, don't turn it back on */
64 * can interfere with modesetting. 64 if (connector->dpms != DRM_MODE_DPMS_ON)
65 */
66 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
67 return; 65 return;
68 66
69 /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ 67 /* just deal with DP (not eDP) here. */
70 if (rdev->family >= CHIP_R600) { 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 69 int saved_dpms = connector->dpms;
70
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
72 radeon_dp_needs_link_train(radeon_connector))
72 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
73 else 74 else
74 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
76 connector->dpms = saved_dpms;
75 } 77 }
76} 78}
77 79
@@ -464,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
464 (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) 466 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
465 return true; 467 return true;
466 } 468 }
469 /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
470 * (RS690M) sends data to i2c bus for an HDMI connector that
471 * is not implemented */
472 if ((dev->pdev->device == 0x791f) &&
473 (dev->pdev->subsystem_vendor == 0x1179) &&
474 (dev->pdev->subsystem_device == 0xff68)) {
475 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
476 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
477 return true;
478 }
467 479
468 /* Default: no EDID header probe required for DDC probing */ 480 /* Default: no EDID header probe required for DDC probing */
469 return false; 481 return false;
@@ -474,11 +486,19 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
474{ 486{
475 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 487 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
476 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 488 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
489 struct drm_display_mode *t, *mode;
490
491 /* If the EDID preferred mode doesn't match the native mode, use it */
492 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
493 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
494 if (mode->hdisplay != native_mode->hdisplay ||
495 mode->vdisplay != native_mode->vdisplay)
496 memcpy(native_mode, mode, sizeof(*mode));
497 }
498 }
477 499
478 /* Try to get native mode details from EDID if necessary */ 500 /* Try to get native mode details from EDID if necessary */
479 if (!native_mode->clock) { 501 if (!native_mode->clock) {
480 struct drm_display_mode *t, *mode;
481
482 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { 502 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
483 if (mode->hdisplay == native_mode->hdisplay && 503 if (mode->hdisplay == native_mode->hdisplay &&
484 mode->vdisplay == native_mode->vdisplay) { 504 mode->vdisplay == native_mode->vdisplay) {
@@ -489,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
489 } 509 }
490 } 510 }
491 } 511 }
512
492 if (!native_mode->clock) { 513 if (!native_mode->clock) {
493 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); 514 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
494 radeon_encoder->rmx_type = RMX_OFF; 515 radeon_encoder->rmx_type = RMX_OFF;
@@ -1276,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1276 if (!radeon_dig_connector->edp_on) 1297 if (!radeon_dig_connector->edp_on)
1277 atombios_set_edp_panel_power(connector, 1298 atombios_set_edp_panel_power(connector,
1278 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1299 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1279 } else { 1300 } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
1280 /* need to setup ddc on the bridge */ 1301 /* DP bridges are always DP */
1281 if (radeon_connector_encoder_is_dp_bridge(connector)) { 1302 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1303 /* get the DPCD from the bridge */
1304 radeon_dp_getdpcd(radeon_connector);
1305
1306 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1307 ret = connector_status_connected;
1308 else {
1309 /* need to setup ddc on the bridge */
1282 if (encoder) 1310 if (encoder)
1283 radeon_atom_ext_encoder_setup_ddc(encoder); 1311 radeon_atom_ext_encoder_setup_ddc(encoder);
1312 if (radeon_ddc_probe(radeon_connector,
1313 radeon_connector->requires_extended_probe))
1314 ret = connector_status_connected;
1315 }
1316
1317 if ((ret == connector_status_disconnected) &&
1318 radeon_connector->dac_load_detect) {
1319 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1320 struct drm_encoder_helper_funcs *encoder_funcs;
1321 if (encoder) {
1322 encoder_funcs = encoder->helper_private;
1323 ret = encoder_funcs->detect(encoder, connector);
1324 }
1284 } 1325 }
1326 } else {
1285 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1327 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1286 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1328 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1287 ret = connector_status_connected; 1329 ret = connector_status_connected;
@@ -1297,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1297 ret = connector_status_connected; 1339 ret = connector_status_connected;
1298 } 1340 }
1299 } 1341 }
1300
1301 if ((ret == connector_status_disconnected) &&
1302 radeon_connector->dac_load_detect) {
1303 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1304 struct drm_encoder_helper_funcs *encoder_funcs;
1305 if (encoder) {
1306 encoder_funcs = encoder->helper_private;
1307 ret = encoder_funcs->detect(encoder, connector);
1308 }
1309 }
1310 } 1342 }
1311 1343
1312 radeon_connector_update_scratch_regs(connector, ret); 1344 radeon_connector_update_scratch_regs(connector, ret);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 440e6ecccc40..b51e15725c6e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,6 +32,7 @@
32#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h> 33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
35#include <linux/efi.h>
35#include "radeon_reg.h" 36#include "radeon_reg.h"
36#include "radeon.h" 37#include "radeon.h"
37#include "atom.h" 38#include "atom.h"
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
300 mc->mc_vram_size = mc->aper_size; 301 mc->mc_vram_size = mc->aper_size;
301 } 302 }
302 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 303 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
304 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
305 mc->real_vram_size = radeon_vram_limit;
303 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 306 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
304 mc->mc_vram_size >> 20, mc->vram_start, 307 mc->mc_vram_size >> 20, mc->vram_start,
305 mc->vram_end, mc->real_vram_size >> 20); 308 mc->vram_end, mc->real_vram_size >> 20);
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
348{ 351{
349 uint32_t reg; 352 uint32_t reg;
350 353
354 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
355 return false;
356
351 /* first check CRTCs */ 357 /* first check CRTCs */
352 if (ASIC_IS_DCE41(rdev)) { 358 if (ASIC_IS_DCE41(rdev)) {
353 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 359 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1a858944e4f3..6adb3e58affd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
473 spin_lock_irqsave(&dev->event_lock, flags); 473 spin_lock_irqsave(&dev->event_lock, flags);
474 radeon_crtc->unpin_work = NULL; 474 radeon_crtc->unpin_work = NULL;
475unlock_free: 475unlock_free:
476 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
477 spin_unlock_irqrestore(&dev->event_lock, flags); 476 spin_unlock_irqrestore(&dev->event_lock, flags);
477 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
478 radeon_fence_unref(&work->fence); 478 radeon_fence_unref(&work->fence);
479 kfree(work); 479 kfree(work);
480 480
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
707 radeon_router_select_ddc_port(radeon_connector); 707 radeon_router_select_ddc_port(radeon_connector);
708 708
709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
711 radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
711 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 712 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
713
712 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 714 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
713 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 715 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
714 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 716 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
715 } 717 &dig->dp_i2c_bus->adapter);
716 if (!radeon_connector->ddc_bus) 718 else if (radeon_connector->ddc_bus && !radeon_connector->edid)
717 return -1; 719 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
718 if (!radeon_connector->edid) { 720 &radeon_connector->ddc_bus->adapter);
719 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 721 } else {
722 if (radeon_connector->ddc_bus && !radeon_connector->edid)
723 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
724 &radeon_connector->ddc_bus->adapter);
720 } 725 }
721 726
722 if (!radeon_connector->edid) { 727 if (!radeon_connector->edid) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b293487e5aa3..319d85d7e759 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev,
2323 default: 2323 default:
2324 encoder->possible_crtcs = 0x3; 2324 encoder->possible_crtcs = 0x3;
2325 break; 2325 break;
2326 case 4:
2327 encoder->possible_crtcs = 0xf;
2328 break;
2326 case 6: 2329 case 6:
2327 encoder->possible_crtcs = 0x3f; 2330 encoder->possible_crtcs = 0x3f;
2328 break; 2331 break;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d09031c03e26..68820f5f6303 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -479,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
479 struct drm_display_mode *mode); 479 struct drm_display_mode *mode);
480extern void radeon_dp_link_train(struct drm_encoder *encoder, 480extern void radeon_dp_link_train(struct drm_encoder *encoder,
481 struct drm_connector *connector); 481 struct drm_connector *connector);
482extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
482extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 483extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
483extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 484extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
484extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); 485extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dee4a0c1b4b2..602fa3541c45 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)
40 size = 1024 * 1024; 40 size = 1024 * 1024;
41 41
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffers) / test size
44 */ 44 */
45 n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - 45 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
46 rdev->cp.ring_size)) / size; 46 if (rdev->wb.wb_obj)
47 n -= RADEON_GPU_PAGE_SIZE;
48 if (rdev->ih.ring_obj)
49 n -= rdev->ih.ring_size;
50 n /= size;
47 51
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 52 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) { 53 if (!gtt_obj) {
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)
132 gtt_start++, vram_start++) { 136 gtt_start++, vram_start++) {
133 if (*vram_start != gtt_start) { 137 if (*vram_start != gtt_start) {
134 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " 138 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
135 "expected 0x%p (GTT map 0x%p-0x%p)\n", 139 "expected 0x%p (GTT/VRAM offset "
136 i, *vram_start, gtt_start, gtt_map, 140 "0x%16llx/0x%16llx)\n",
137 gtt_end); 141 i, *vram_start, gtt_start,
142 (unsigned long long)
143 (gtt_addr - rdev->mc.gtt_start +
144 (void*)gtt_start - gtt_map),
145 (unsigned long long)
146 (vram_addr - rdev->mc.vram_start +
147 (void*)gtt_start - gtt_map));
138 radeon_bo_kunmap(vram_obj); 148 radeon_bo_kunmap(vram_obj);
139 goto out_cleanup; 149 goto out_cleanup;
140 } 150 }
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)
175 gtt_start++, vram_start++) { 185 gtt_start++, vram_start++) {
176 if (*gtt_start != vram_start) { 186 if (*gtt_start != vram_start) {
177 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " 187 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
178 "expected 0x%p (VRAM map 0x%p-0x%p)\n", 188 "expected 0x%p (VRAM/GTT offset "
179 i, *gtt_start, vram_start, vram_map, 189 "0x%16llx/0x%16llx)\n",
180 vram_end); 190 i, *gtt_start, vram_start,
191 (unsigned long long)
192 (vram_addr - rdev->mc.vram_start +
193 (void*)vram_start - vram_map),
194 (unsigned long long)
195 (gtt_addr - rdev->mc.gtt_start +
196 (void*)vram_start - vram_map));
181 radeon_bo_kunmap(gtt_obj[i]); 197 radeon_bo_kunmap(gtt_obj[i]);
182 goto out_cleanup; 198 goto out_cleanup;
183 } 199 }
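
The radeon_test.c hunk above changes how the number of GTT->VRAM test moves is computed: the IB pool and CP ring are always subtracted from the GTT size, the writeback page and IH ring are subtracted only when those objects actually exist, and the division by the test size happens last. The standalone sketch below redoes that arithmetic; all constants and buffer sizes here are illustrative assumptions, not values read from real hardware.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants mirroring the hunk; real values come from the device. */
#define GPU_PAGE_SIZE   4096u
#define IB_POOL_SIZE    16u                 /* pool entries of 64 KiB each (assumed) */
#define TEST_SIZE       (1024u * 1024u)     /* one test buffer = 1 MiB               */

int main(void)
{
        uint64_t gtt_size = 256ull << 20;   /* assumed 256 MiB GTT    */
        uint64_t cp_ring  = 1ull << 20;     /* assumed 1 MiB CP ring  */
        uint64_t ih_ring  = 64ull << 10;    /* assumed 64 KiB IH ring */
        int have_wb = 1, have_ih = 1;
        uint64_t n;

        /* Same accounting as the new code: optional buffers only when present. */
        n = gtt_size - IB_POOL_SIZE * 64ull * 1024 - cp_ring;
        if (have_wb)
                n -= GPU_PAGE_SIZE;
        if (have_ih)
                n -= ih_ring;
        n /= TEST_SIZE;

        printf("number of BO move tests: %llu\n", (unsigned long long)n);
        return 0;
}
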
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 60125ddba1e9..0b5468bfaf54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
277 DRM_ERROR("Trying to move memory with CP turned off.\n"); 277 DRM_ERROR("Trying to move memory with CP turned off.\n");
278 return -EINVAL; 278 return -EINVAL;
279 } 279 }
280 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 280
281 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
282
283 r = radeon_copy(rdev, old_start, new_start,
284 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
285 fence);
281 /* FIXME: handle copy error */ 286 /* FIXME: handle copy error */
282 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 287 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
283 evict, no_wait_reserve, no_wait_gpu, new_mem); 288 evict, no_wait_reserve, no_wait_gpu, new_mem);
@@ -450,6 +455,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
450 return -EINVAL; 455 return -EINVAL;
451 mem->bus.base = rdev->mc.aper_base; 456 mem->bus.base = rdev->mc.aper_base;
452 mem->bus.is_iomem = true; 457 mem->bus.is_iomem = true;
458#ifdef __alpha__
459 /*
460 * Alpha: use bus.addr to hold the ioremap() return,
461 * so we can modify bus.base below.
462 */
463 if (mem->placement & TTM_PL_FLAG_WC)
464 mem->bus.addr =
465 ioremap_wc(mem->bus.base + mem->bus.offset,
466 mem->bus.size);
467 else
468 mem->bus.addr =
469 ioremap_nocache(mem->bus.base + mem->bus.offset,
470 mem->bus.size);
471
472 /*
473 * Alpha: Use just the bus offset plus
474 * the hose/domain memory base for bus.base.
475 * It then can be used to build PTEs for VRAM
476 * access, as done in ttm_bo_vm_fault().
477 */
478 mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
479 rdev->ddev->hose->dense_mem_base;
480#endif
453 break; 481 break;
454 default: 482 default:
455 return -EINVAL; 483 return -EINVAL;
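
The radeon_ttm.c changes above do two things: they convert the TTM page count from CPU pages to GPU pages before calling radeon_copy() (guarded by a BUILD_BUG_ON that PAGE_SIZE is a multiple of RADEON_GPU_PAGE_SIZE), and on Alpha they remap the aperture and rebase bus.base onto the hose's dense memory window. The snippet below illustrates only the page-size conversion as a standalone program; the page sizes are assumptions picked to show a case where CPU and GPU pages differ.

#include <stdio.h>

/* Assumed sizes: the GPU works in 4 KiB pages, while the CPU page size
 * may be larger (e.g. 64 KiB on some architectures). */
#define CPU_PAGE_SIZE   65536u
#define GPU_PAGE_SIZE   4096u

/* Compile-time check analogous to the BUILD_BUG_ON in the hunk: the
 * conversion below is only exact when one size divides the other evenly. */
typedef char cpu_page_is_multiple_of_gpu_page
        [(CPU_PAGE_SIZE % GPU_PAGE_SIZE) == 0 ? 1 : -1];

static unsigned long cpu_pages_to_gpu_pages(unsigned long num_pages)
{
        return num_pages * (CPU_PAGE_SIZE / GPU_PAGE_SIZE);
}

int main(void)
{
        printf("128 CPU pages -> %lu GPU pages\n", cpu_pages_to_gpu_pages(128));
        return 0;
}
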
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 56619f64b6bf..ef06194c5aa6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
353 353
354 ret = ttm_tt_set_user(bo->ttm, current, 354 ret = ttm_tt_set_user(bo->ttm, current,
355 bo->buffer_start, bo->num_pages); 355 bo->buffer_start, bo->num_pages);
356 if (unlikely(ret != 0)) 356 if (unlikely(ret != 0)) {
357 ttm_tt_destroy(bo->ttm); 357 ttm_tt_destroy(bo->ttm);
358 bo->ttm = NULL;
359 }
358 break; 360 break;
359 default: 361 default:
360 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); 362 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
390 * Create and bind a ttm if required. 392 * Create and bind a ttm if required.
391 */ 393 */
392 394
393 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { 395 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
394 ret = ttm_bo_add_ttm(bo, false); 396 if (bo->ttm == NULL) {
395 if (ret) 397 bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
396 goto out_err; 398 ret = ttm_bo_add_ttm(bo, zero);
399 if (ret)
400 goto out_err;
401 }
397 402
398 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 403 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
399 if (ret) 404 if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf408c0d0..ae3c6f5dd2b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635 if (ret) 635 if (ret)
636 return ret; 636 return ret;
637 637
638 ttm_bo_free_old_node(bo);
639 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && 638 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
640 (bo->ttm != NULL)) { 639 (bo->ttm != NULL)) {
641 ttm_tt_unbind(bo->ttm); 640 ttm_tt_unbind(bo->ttm);
642 ttm_tt_destroy(bo->ttm); 641 ttm_tt_destroy(bo->ttm);
643 bo->ttm = NULL; 642 bo->ttm = NULL;
644 } 643 }
644 ttm_bo_free_old_node(bo);
645 } else { 645 } else {
646 /** 646 /**
647 * This should help pipeline ordinary buffer moves. 647 * This should help pipeline ordinary buffer moves.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 306b15f39c9c..1130a8987125 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY
589config HID_WIIMOTE 589config HID_WIIMOTE
590 tristate "Nintendo Wii Remote support" 590 tristate "Nintendo Wii Remote support"
591 depends on BT_HIDP 591 depends on BT_HIDP
592 depends on LEDS_CLASS
592 ---help--- 593 ---help---
593 Support for the Nintendo Wii Remote bluetooth device. 594 Support for the Nintendo Wii Remote bluetooth device.
594 595
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index b85744fe8464..18b3bc646bf3 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = {
444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), 444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
446 APPLE_RDESC_JIS }, 446 APPLE_RDESC_JIS },
447 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
448 .driver_data = APPLE_HAS_FN },
449 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
450 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
451 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
452 .driver_data = APPLE_HAS_FN },
447 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 453 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
448 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 454 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
449 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), 455 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 1a5cf0c9cfca..242353df3dc4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, 1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, 1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, 1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
1343 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
1344 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
1345 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
1343 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1346 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1344 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1347 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1345 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1348 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index db63ccf21cc8..7484e1b67249 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -109,6 +109,9 @@
109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
112#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
113#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
114#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
112#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 115#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
113#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a 116#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
114#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 117#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -274,6 +277,7 @@
274#define USB_DEVICE_ID_PENPOWER 0x00f4 277#define USB_DEVICE_ID_PENPOWER 0x00f4
275 278
276#define USB_VENDOR_ID_GREENASIA 0x0e8f 279#define USB_VENDOR_ID_GREENASIA 0x0e8f
280#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
277 281
278#define USB_VENDOR_ID_GRETAGMACBETH 0x0971 282#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
279#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 283#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -576,6 +580,9 @@
576#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 580#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
577#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 581#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
578 582
583#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
584#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
585
579#define USB_VENDOR_ID_SKYCABLE 0x1223 586#define USB_VENDOR_ID_SKYCABLE 0x1223
580#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 587#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
581 588
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a421..f0fbd7bd239e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
81#define NO_TOUCHES -1 81#define NO_TOUCHES -1
82#define SINGLE_TOUCH_UP -2 82#define SINGLE_TOUCH_UP -2
83 83
 84/* Touch surface information. Dimension is in hundredths of a mm; min and max
 85 * are in device units. */
86#define MOUSE_DIMENSION_X (float)9056
87#define MOUSE_MIN_X -1100
88#define MOUSE_MAX_X 1258
89#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
90#define MOUSE_DIMENSION_Y (float)5152
91#define MOUSE_MIN_Y -1589
92#define MOUSE_MAX_Y 2047
93#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
94
95#define TRACKPAD_DIMENSION_X (float)13000
96#define TRACKPAD_MIN_X -2909
97#define TRACKPAD_MAX_X 3167
98#define TRACKPAD_RES_X \
99 ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
100#define TRACKPAD_DIMENSION_Y (float)11000
101#define TRACKPAD_MIN_Y -2456
102#define TRACKPAD_MAX_Y 2565
103#define TRACKPAD_RES_Y \
104 ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
105
84/** 106/**
85 * struct magicmouse_sc - Tracks Magic Mouse-specific data. 107 * struct magicmouse_sc - Tracks Magic Mouse-specific data.
86 * @input: Input device through which we report events. 108 * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
406 * inverse of the reported Y. 428 * inverse of the reported Y.
407 */ 429 */
408 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 430 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
409 input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 431 input_set_abs_params(input, ABS_MT_POSITION_X,
410 1358, 4, 0); 432 MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
411 input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 433 input_set_abs_params(input, ABS_MT_POSITION_Y,
412 2047, 4, 0); 434 MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
435
436 input_abs_set_res(input, ABS_MT_POSITION_X,
437 MOUSE_RES_X);
438 input_abs_set_res(input, ABS_MT_POSITION_Y,
439 MOUSE_RES_Y);
413 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 440 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
414 input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); 441 input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
415 input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); 442 TRACKPAD_MAX_X, 4, 0);
416 input_set_abs_params(input, ABS_MT_POSITION_X, -2909, 443 input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
417 3167, 4, 0); 444 TRACKPAD_MAX_Y, 4, 0);
418 input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 445 input_set_abs_params(input, ABS_MT_POSITION_X,
419 2565, 4, 0); 446 TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
447 input_set_abs_params(input, ABS_MT_POSITION_Y,
448 TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
449
450 input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
451 input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
452 input_abs_set_res(input, ABS_MT_POSITION_X,
453 TRACKPAD_RES_X);
454 input_abs_set_res(input, ABS_MT_POSITION_Y,
455 TRACKPAD_RES_Y);
420 } 456 }
421 457
422 input_set_events_per_packet(input, 60); 458 input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
501 } 537 }
502 report->size = 6; 538 report->size = 6;
503 539
540 /*
 541 * Some devices respond with 'invalid report id' when the feature
 542 * report that switches them into multitouch mode is sent.
543 *
544 * This results in -EIO from the _raw low-level transport callback,
545 * but there seems to be no other way of switching the mode.
546 * Thus the super-ugly hacky success check below.
547 */
504 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 548 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
505 HID_FEATURE_REPORT); 549 HID_FEATURE_REPORT);
506 if (ret != sizeof(feature)) { 550 if (ret != -EIO && ret != sizeof(feature)) {
507 hid_err(hdev, "unable to request touch data (%d)\n", ret); 551 hid_err(hdev, "unable to request touch data (%d)\n", ret);
508 goto err_stop_hw; 552 goto err_stop_hw;
509 } 553 }
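
The hid-magicmouse.c hunk above replaces hard-coded axis limits with named constants and, more importantly, starts reporting the axis resolution derived from them: (max - min) device units divided by the physical dimension in millimetres, where the DIMENSION macros are stored in hundredths of a millimetre (hence the division by 100). A small standalone check of that arithmetic, using the X-axis values copied from the hunk:

#include <stdio.h>

/* Values taken from the hunk; dimension is in hundredths of a millimetre,
 * min/max are in device units. */
#define MOUSE_DIMENSION_X 9056.0f
#define MOUSE_MIN_X       (-1100)
#define MOUSE_MAX_X       1258

int main(void)
{
        /* Resolution handed to input_abs_set_res() is in units per mm. */
        int res_x = (int)((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100));

        printf("Magic Mouse X resolution: %d units/mm\n", res_x);
        return 0;
}

With these numbers the touch surface reports roughly 26 units per millimetre on X.
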
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c..72ca689b6474 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
353 if (ret) { 353 if (ret) {
354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", 354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
355 ret); 355 ret);
356 /* 356 goto err_battery;
357 * battery attribute is not critical for the tablet, but if it
358 * failed then there is no need to create ac attribute
359 */
360 goto move_on;
361 } 357 }
362 358
363 wdata->ac.properties = wacom_ac_props; 359 wdata->ac.properties = wacom_ac_props;
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev,
371 if (ret) { 367 if (ret) {
372 hid_warn(hdev, 368 hid_warn(hdev,
373 "can't create ac battery attribute, err: %d\n", ret); 369 "can't create ac battery attribute, err: %d\n", ret);
374 /* 370 goto err_ac;
375 * ac attribute is not critical for the tablet, but if it
376 * failed then we don't want to battery attribute to exist
377 */
378 power_supply_unregister(&wdata->battery);
379 } 371 }
380
381move_on:
382#endif 372#endif
383 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 373 hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
384 input = hidinput->input; 374 input = hidinput->input;
385 375
376 __set_bit(INPUT_PROP_POINTER, input->propbit);
377
386 /* Basics */ 378 /* Basics */
387 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); 379 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
388 380
@@ -416,6 +408,13 @@ move_on:
416 408
417 return 0; 409 return 0;
418 410
411#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
412err_ac:
413 power_supply_unregister(&wdata->battery);
414err_battery:
415 device_remove_file(&hdev->dev, &dev_attr_speed);
416 hid_hw_stop(hdev);
417#endif
419err_free: 418err_free:
420 kfree(wdata); 419 kfree(wdata);
421 return ret; 420 return ret;
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev)
426#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 425#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
427 struct wacom_data *wdata = hid_get_drvdata(hdev); 426 struct wacom_data *wdata = hid_get_drvdata(hdev);
428#endif 427#endif
428 device_remove_file(&hdev->dev, &dev_attr_speed);
429 hid_hw_stop(hdev); 429 hid_hw_stop(hdev);
430 430
431#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 431#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index a594383ce03d..85a02e5f9fe8 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,10 +10,10 @@
10 * any later version. 10 * any later version.
11 */ 11 */
12 12
13#include <linux/atomic.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/hid.h> 14#include <linux/hid.h>
16#include <linux/input.h> 15#include <linux/input.h>
16#include <linux/leds.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include "hid-ids.h" 19#include "hid-ids.h"
@@ -33,9 +33,9 @@ struct wiimote_state {
33}; 33};
34 34
35struct wiimote_data { 35struct wiimote_data {
36 atomic_t ready;
37 struct hid_device *hdev; 36 struct hid_device *hdev;
38 struct input_dev *input; 37 struct input_dev *input;
38 struct led_classdev *leds[4];
39 39
40 spinlock_t qlock; 40 spinlock_t qlock;
41 __u8 head; 41 __u8 head;
@@ -53,8 +53,15 @@ struct wiimote_data {
53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ 53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) 54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
55 55
56/* return flag for led \num */
57#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
58
56enum wiiproto_reqs { 59enum wiiproto_reqs {
60 WIIPROTO_REQ_NULL = 0x0,
57 WIIPROTO_REQ_LED = 0x11, 61 WIIPROTO_REQ_LED = 0x11,
62 WIIPROTO_REQ_DRM = 0x12,
63 WIIPROTO_REQ_STATUS = 0x20,
64 WIIPROTO_REQ_RETURN = 0x22,
58 WIIPROTO_REQ_DRM_K = 0x30, 65 WIIPROTO_REQ_DRM_K = 0x30,
59}; 66};
60 67
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = {
87 BTN_MODE, /* WIIPROTO_KEY_HOME */ 94 BTN_MODE, /* WIIPROTO_KEY_HOME */
88}; 95};
89 96
90#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
91 dev))
92
93static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, 97static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
94 size_t count) 98 size_t count)
95{ 99{
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
192 wiimote_queue(wdata, cmd, sizeof(cmd)); 196 wiimote_queue(wdata, cmd, sizeof(cmd));
193} 197}
194 198
195#define wiifs_led_show_set(num) \ 199/*
196static ssize_t wiifs_led_show_##num(struct device *dev, \ 200 * Check what peripherals of the wiimote are currently
197 struct device_attribute *attr, char *buf) \ 201 * active and select a proper DRM that supports all of
198{ \ 202 * the requested data inputs.
199 struct wiimote_data *wdata = dev_to_wii(dev); \ 203 */
200 unsigned long flags; \ 204static __u8 select_drm(struct wiimote_data *wdata)
201 int state; \ 205{
202 \ 206 return WIIPROTO_REQ_DRM_K;
203 if (!atomic_read(&wdata->ready)) \ 207}
204 return -EBUSY; \ 208
205 \ 209static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
206 spin_lock_irqsave(&wdata->state.lock, flags); \ 210{
207 state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ 211 __u8 cmd[3];
208 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 212
209 \ 213 if (drm == WIIPROTO_REQ_NULL)
210 return sprintf(buf, "%d\n", state); \ 214 drm = select_drm(wdata);
211} \ 215
212static ssize_t wiifs_led_set_##num(struct device *dev, \ 216 cmd[0] = WIIPROTO_REQ_DRM;
213 struct device_attribute *attr, const char *buf, size_t count) \ 217 cmd[1] = 0;
214{ \ 218 cmd[2] = drm;
215 struct wiimote_data *wdata = dev_to_wii(dev); \ 219
216 int tmp = simple_strtoul(buf, NULL, 10); \ 220 wiimote_queue(wdata, cmd, sizeof(cmd));
217 unsigned long flags; \ 221}
218 __u8 state; \ 222
219 \ 223static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
220 if (!atomic_read(&wdata->ready)) \ 224{
221 return -EBUSY; \ 225 struct wiimote_data *wdata;
222 \ 226 struct device *dev = led_dev->dev->parent;
223 spin_lock_irqsave(&wdata->state.lock, flags); \ 227 int i;
224 \ 228 unsigned long flags;
225 state = wdata->state.flags; \ 229 bool value = false;
226 \ 230
227 if (tmp) \ 231 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
228 wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ 232
229 else \ 233 for (i = 0; i < 4; ++i) {
230 wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ 234 if (wdata->leds[i] == led_dev) {
231 \ 235 spin_lock_irqsave(&wdata->state.lock, flags);
232 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 236 value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
233 \ 237 spin_unlock_irqrestore(&wdata->state.lock, flags);
234 return count; \ 238 break;
235} \ 239 }
236static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ 240 }
237 wiifs_led_set_##num) 241
238 242 return value ? LED_FULL : LED_OFF;
239wiifs_led_show_set(1); 243}
240wiifs_led_show_set(2); 244
241wiifs_led_show_set(3); 245static void wiimote_leds_set(struct led_classdev *led_dev,
242wiifs_led_show_set(4); 246 enum led_brightness value)
247{
248 struct wiimote_data *wdata;
249 struct device *dev = led_dev->dev->parent;
250 int i;
251 unsigned long flags;
252 __u8 state, flag;
253
254 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
255
256 for (i = 0; i < 4; ++i) {
257 if (wdata->leds[i] == led_dev) {
258 flag = WIIPROTO_FLAG_LED(i + 1);
259 spin_lock_irqsave(&wdata->state.lock, flags);
260 state = wdata->state.flags;
261 if (value == LED_OFF)
262 wiiproto_req_leds(wdata, state & ~flag);
263 else
264 wiiproto_req_leds(wdata, state | flag);
265 spin_unlock_irqrestore(&wdata->state.lock, flags);
266 break;
267 }
268 }
269}
243 270
244static int wiimote_input_event(struct input_dev *dev, unsigned int type, 271static int wiimote_input_event(struct input_dev *dev, unsigned int type,
245 unsigned int code, int value) 272 unsigned int code, int value)
246{ 273{
274 return 0;
275}
276
277static int wiimote_input_open(struct input_dev *dev)
278{
247 struct wiimote_data *wdata = input_get_drvdata(dev); 279 struct wiimote_data *wdata = input_get_drvdata(dev);
248 280
249 if (!atomic_read(&wdata->ready)) 281 return hid_hw_open(wdata->hdev);
250 return -EBUSY; 282}
251 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
252 smp_rmb();
253 283
254 return 0; 284static void wiimote_input_close(struct input_dev *dev)
285{
286 struct wiimote_data *wdata = input_get_drvdata(dev);
287
288 hid_hw_close(wdata->hdev);
255} 289}
256 290
257static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) 291static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
281 input_sync(wdata->input); 315 input_sync(wdata->input);
282} 316}
283 317
318static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
319{
320 handler_keys(wdata, payload);
321
322 /* on status reports the drm is reset so we need to resend the drm */
323 wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
324}
325
326static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
327{
328 __u8 err = payload[3];
329 __u8 cmd = payload[2];
330
331 handler_keys(wdata, payload);
332
333 if (err)
334 hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
335 cmd);
336}
337
284struct wiiproto_handler { 338struct wiiproto_handler {
285 __u8 id; 339 __u8 id;
286 size_t size; 340 size_t size;
@@ -288,6 +342,8 @@ struct wiiproto_handler {
288}; 342};
289 343
290static struct wiiproto_handler handlers[] = { 344static struct wiiproto_handler handlers[] = {
345 { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
346 { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
291 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, 347 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
292 { .id = 0 } 348 { .id = 0 }
293}; 349};
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
300 int i; 356 int i;
301 unsigned long flags; 357 unsigned long flags;
302 358
303 if (!atomic_read(&wdata->ready))
304 return -EBUSY;
305 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
306 smp_rmb();
307
308 if (size < 1) 359 if (size < 1)
309 return -EINVAL; 360 return -EINVAL;
310 361
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
321 return 0; 372 return 0;
322} 373}
323 374
375static void wiimote_leds_destroy(struct wiimote_data *wdata)
376{
377 int i;
378 struct led_classdev *led;
379
380 for (i = 0; i < 4; ++i) {
381 if (wdata->leds[i]) {
382 led = wdata->leds[i];
383 wdata->leds[i] = NULL;
384 led_classdev_unregister(led);
385 kfree(led);
386 }
387 }
388}
389
390static int wiimote_leds_create(struct wiimote_data *wdata)
391{
392 int i, ret;
393 struct device *dev = &wdata->hdev->dev;
394 size_t namesz = strlen(dev_name(dev)) + 9;
395 struct led_classdev *led;
396 char *name;
397
398 for (i = 0; i < 4; ++i) {
399 led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
400 if (!led) {
401 ret = -ENOMEM;
402 goto err;
403 }
404 name = (void*)&led[1];
405 snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
406 led->name = name;
407 led->brightness = 0;
408 led->max_brightness = 1;
409 led->brightness_get = wiimote_leds_get;
410 led->brightness_set = wiimote_leds_set;
411
412 ret = led_classdev_register(dev, led);
413 if (ret) {
414 kfree(led);
415 goto err;
416 }
417 wdata->leds[i] = led;
418 }
419
420 return 0;
421
422err:
423 wiimote_leds_destroy(wdata);
424 return ret;
425}
426
324static struct wiimote_data *wiimote_create(struct hid_device *hdev) 427static struct wiimote_data *wiimote_create(struct hid_device *hdev)
325{ 428{
326 struct wiimote_data *wdata; 429 struct wiimote_data *wdata;
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
341 444
342 input_set_drvdata(wdata->input, wdata); 445 input_set_drvdata(wdata->input, wdata);
343 wdata->input->event = wiimote_input_event; 446 wdata->input->event = wiimote_input_event;
447 wdata->input->open = wiimote_input_open;
448 wdata->input->close = wiimote_input_close;
344 wdata->input->dev.parent = &wdata->hdev->dev; 449 wdata->input->dev.parent = &wdata->hdev->dev;
345 wdata->input->id.bustype = wdata->hdev->bus; 450 wdata->input->id.bustype = wdata->hdev->bus;
346 wdata->input->id.vendor = wdata->hdev->vendor; 451 wdata->input->id.vendor = wdata->hdev->vendor;
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
362 467
363static void wiimote_destroy(struct wiimote_data *wdata) 468static void wiimote_destroy(struct wiimote_data *wdata)
364{ 469{
470 wiimote_leds_destroy(wdata);
471
472 input_unregister_device(wdata->input);
473 cancel_work_sync(&wdata->worker);
474 hid_hw_stop(wdata->hdev);
475
365 kfree(wdata); 476 kfree(wdata);
366} 477}
367 478
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev,
377 return -ENOMEM; 488 return -ENOMEM;
378 } 489 }
379 490
380 ret = device_create_file(&hdev->dev, &dev_attr_led1);
381 if (ret)
382 goto err;
383 ret = device_create_file(&hdev->dev, &dev_attr_led2);
384 if (ret)
385 goto err;
386 ret = device_create_file(&hdev->dev, &dev_attr_led3);
387 if (ret)
388 goto err;
389 ret = device_create_file(&hdev->dev, &dev_attr_led4);
390 if (ret)
391 goto err;
392
393 ret = hid_parse(hdev); 491 ret = hid_parse(hdev);
394 if (ret) { 492 if (ret) {
395 hid_err(hdev, "HID parse failed\n"); 493 hid_err(hdev, "HID parse failed\n");
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev,
408 goto err_stop; 506 goto err_stop;
409 } 507 }
410 508
411 /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ 509 ret = wiimote_leds_create(wdata);
412 smp_wmb(); 510 if (ret)
413 atomic_set(&wdata->ready, 1); 511 goto err_free;
512
414 hid_info(hdev, "New device registered\n"); 513 hid_info(hdev, "New device registered\n");
415 514
416 /* by default set led1 after device initialization */ 515 /* by default set led1 after device initialization */
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev,
420 519
421 return 0; 520 return 0;
422 521
522err_free:
523 wiimote_destroy(wdata);
524 return ret;
525
423err_stop: 526err_stop:
424 hid_hw_stop(hdev); 527 hid_hw_stop(hdev);
425err: 528err:
426 input_free_device(wdata->input); 529 input_free_device(wdata->input);
427 device_remove_file(&hdev->dev, &dev_attr_led1); 530 kfree(wdata);
428 device_remove_file(&hdev->dev, &dev_attr_led2);
429 device_remove_file(&hdev->dev, &dev_attr_led3);
430 device_remove_file(&hdev->dev, &dev_attr_led4);
431 wiimote_destroy(wdata);
432 return ret; 531 return ret;
433} 532}
434 533
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
437 struct wiimote_data *wdata = hid_get_drvdata(hdev); 536 struct wiimote_data *wdata = hid_get_drvdata(hdev);
438 537
439 hid_info(hdev, "Device removed\n"); 538 hid_info(hdev, "Device removed\n");
440
441 device_remove_file(&hdev->dev, &dev_attr_led1);
442 device_remove_file(&hdev->dev, &dev_attr_led2);
443 device_remove_file(&hdev->dev, &dev_attr_led3);
444 device_remove_file(&hdev->dev, &dev_attr_led4);
445
446 hid_hw_stop(hdev);
447 input_unregister_device(wdata->input);
448
449 cancel_work_sync(&wdata->worker);
450 wiimote_destroy(wdata); 539 wiimote_destroy(wdata);
451} 540}
452 541
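
In the hid-wiimote.c rework above, each LED class device and its name string come from a single kzalloc(): the name is written into the memory directly after the struct (name = (void*)&led[1]), so unregistering and freeing the struct releases the name as well. The fragment below sketches that single-allocation layout in plain user-space C; struct fake_led and led_alloc() are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for struct led_classdev, just to show the layout trick:
 * one allocation holds the structure followed by its name string. */
struct fake_led {
        const char *name;
        int brightness;
};

static struct fake_led *led_alloc(const char *devname, int idx)
{
        size_t namesz = strlen(devname) + 9;    /* ":blue:pN" plus NUL */
        struct fake_led *led = calloc(1, sizeof(*led) + namesz);
        char *name;

        if (!led)
                return NULL;
        name = (char *)&led[1];                 /* memory right after the struct */
        snprintf(name, namesz, "%s:blue:p%d", devname, idx);
        led->name = name;
        return led;                             /* freeing 'led' frees the name too */
}

int main(void)
{
        struct fake_led *led = led_alloc("hid-dev", 0);

        if (led) {
                printf("registered LED '%s'\n", led->name);
                free(led);
        }
        return 0;
}
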
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 621959d5cc42..3146fdcda272 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, 47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
48 48
49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 51 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
51 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 52 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, 53 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
@@ -89,6 +90,7 @@ static const struct hid_blacklist {
89 90
90 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, 91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 92 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
93 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
92 { 0, 0 } 94 { 0, 0 }
93}; 95};
94 96
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 59d83e83da7f..411257676133 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -601,7 +601,12 @@ static int create_core_data(struct platform_data *pdata,
601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); 601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
602 if (!err) { 602 if (!err) {
603 tdata->attr_size += MAX_THRESH_ATTRS; 603 tdata->attr_size += MAX_THRESH_ATTRS;
604 tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; 604 tdata->tmin = tdata->tjmax -
605 ((eax & THERM_MASK_THRESHOLD0) >>
606 THERM_SHIFT_THRESHOLD0) * 1000;
607 tdata->ttarget = tdata->tjmax -
608 ((eax & THERM_MASK_THRESHOLD1) >>
609 THERM_SHIFT_THRESHOLD1) * 1000;
605 } 610 }
606 611
607 pdata->core_data[attr_no] = tdata; 612 pdata->core_data[attr_no] = tdata;
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index c4c40be0edbf..d22f241b6a67 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -114,7 +114,6 @@ struct i5k_amb_data {
114 void __iomem *amb_mmio; 114 void __iomem *amb_mmio;
115 struct i5k_device_attribute *attrs; 115 struct i5k_device_attribute *attrs;
116 unsigned int num_attrs; 116 unsigned int num_attrs;
117 unsigned long chipset_id;
118}; 117};
119 118
120static ssize_t show_name(struct device *dev, struct device_attribute *devattr, 119static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
444 goto out; 443 goto out;
445 } 444 }
446 445
447 data->chipset_id = devid;
448
449 res = 0; 446 res = 0;
450out: 447out:
451 pci_dev_put(pcidev); 448 pci_dev_put(pcidev);
@@ -478,23 +475,13 @@ out:
478 return res; 475 return res;
479} 476}
480 477
481static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, 478static struct {
482 unsigned long channel) 479 unsigned long err;
483{ 480 unsigned long fbd0;
484 switch (data->chipset_id) { 481} chipset_ids[] __devinitdata = {
485 case PCI_DEVICE_ID_INTEL_5000_ERR: 482 { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
486 return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; 483 { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
487 case PCI_DEVICE_ID_INTEL_5400_ERR: 484 { 0, 0 }
488 return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel;
489 default:
490 BUG();
491 }
492}
493
494static unsigned long chipset_ids[] = {
495 PCI_DEVICE_ID_INTEL_5000_ERR,
496 PCI_DEVICE_ID_INTEL_5400_ERR,
497 0
498}; 485};
499 486
500#ifdef MODULE 487#ifdef MODULE
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
510{ 497{
511 struct i5k_amb_data *data; 498 struct i5k_amb_data *data;
512 struct resource *reso; 499 struct resource *reso;
513 int i; 500 int i, res;
514 int res = -ENODEV;
515 501
516 data = kzalloc(sizeof(*data), GFP_KERNEL); 502 data = kzalloc(sizeof(*data), GFP_KERNEL);
517 if (!data) 503 if (!data)
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
520 /* Figure out where the AMB registers live */ 506 /* Figure out where the AMB registers live */
521 i = 0; 507 i = 0;
522 do { 508 do {
523 res = i5k_find_amb_registers(data, chipset_ids[i]); 509 res = i5k_find_amb_registers(data, chipset_ids[i].err);
510 if (res == 0)
511 break;
524 i++; 512 i++;
525 } while (res && chipset_ids[i]); 513 } while (chipset_ids[i].err);
526 514
527 if (res) 515 if (res)
528 goto err; 516 goto err;
529 517
530 /* Copy the DIMM presence map for the first two channels */ 518 /* Copy the DIMM presence map for the first two channels */
531 res = i5k_channel_probe(&data->amb_present[0], 519 res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);
532 i5k_channel_pci_id(data, 0));
533 if (res) 520 if (res)
534 goto err; 521 goto err;
535 522
536 /* Copy the DIMM presence map for the optional second two channels */ 523 /* Copy the DIMM presence map for the optional second two channels */
537 i5k_channel_probe(&data->amb_present[2], 524 i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);
538 i5k_channel_pci_id(data, 1));
539 525
540 /* Set up resource regions */ 526 /* Set up resource regions */
541 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); 527 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1a409c5bc9bc..c316294c48b4 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
432 aem_send_message(ipmi); 432 aem_send_message(ipmi);
433 433
434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); 434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
435 if (!res) 435 if (!res) {
436 return -ETIMEDOUT; 436 res = -ETIMEDOUT;
437 goto out;
438 }
437 439
438 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || 440 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
439 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { 441 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
440 kfree(rs_resp); 442 res = -ENOENT;
441 return -ENOENT; 443 goto out;
442 } 444 }
443 445
444 switch (size) { 446 switch (size) {
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
463 break; 465 break;
464 } 466 }
465 } 467 }
468 res = 0;
466 469
467 return 0; 470out:
471 kfree(rs_resp);
472 return res;
468} 473}
469 474
470/* Update AEM energy registers */ 475/* Update AEM energy registers */
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba..dd2d7b9620c2 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
124 124
125static inline int ADC_TO_CURR(int adc, int gain) 125static inline int ADC_TO_CURR(int adc, int gain)
126{ 126{
127 return adc * 1400000 / gain * 255; 127 return adc * 1400000 / (gain * 255);
128} 128}
129 129
130/* 130/*
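
The one-line max16065.c fix above is a pure operator-precedence bug: in adc * 1400000 / gain * 255 the division and the trailing multiplication associate left to right, so 255 ended up scaling the result instead of the divisor. A quick demonstration with arbitrary example values:

#include <stdio.h>

int main(void)
{
        int adc = 100, gain = 1;        /* arbitrary example values */

        /* '*' and '/' have equal precedence and associate left to right,
         * so without parentheses 255 multiplies the quotient. */
        long long wrong = (long long)adc * 1400000 / gain * 255;
        long long right = (long long)adc * 1400000 / (gain * 255);

        printf("without parentheses: %lld\n", wrong);   /* 35700000000 */
        printf("with parentheses:    %lld\n", right);   /*      549019 */
        return 0;
}
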
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index d7926f4336b5..eab11615dced 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -211,8 +211,7 @@ static int lookup_comp(struct ntc_data *data,
211 if (data->comp[mid].ohm <= ohm) { 211 if (data->comp[mid].ohm <= ohm) {
212 *i_low = mid; 212 *i_low = mid;
213 *i_high = mid - 1; 213 *i_high = mid - 1;
214 } 214 } else {
215 if (data->comp[mid].ohm > ohm) {
216 *i_low = mid + 1; 215 *i_low = mid + 1;
217 *i_high = mid; 216 *i_high = mid;
218 } 217 }
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index d4bc114572de..ac254fba551b 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -161,6 +161,17 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
161 return ret; 161 return ret;
162} 162}
163 163
164static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
165{
166 if (page > 1)
167 return -EINVAL;
168
169 if (page == 0)
170 return pmbus_write_byte(client, 0, value);
171
172 return 0;
173}
174
164static int lm25066_probe(struct i2c_client *client, 175static int lm25066_probe(struct i2c_client *client,
165 const struct i2c_device_id *id) 176 const struct i2c_device_id *id)
166{ 177{
@@ -204,6 +215,7 @@ static int lm25066_probe(struct i2c_client *client,
204 215
205 info->read_word_data = lm25066_read_word_data; 216 info->read_word_data = lm25066_read_word_data;
206 info->write_word_data = lm25066_write_word_data; 217 info->write_word_data = lm25066_write_word_data;
218 info->write_byte = lm25066_write_byte;
207 219
208 switch (id->driver_data) { 220 switch (id->driver_data) {
209 case lm25066: 221 case lm25066:
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 0808d986d75b..a6ae20ffef6b 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -325,6 +325,7 @@ struct pmbus_driver_info {
325 int (*read_word_data)(struct i2c_client *client, int page, int reg); 325 int (*read_word_data)(struct i2c_client *client, int page, int reg);
326 int (*write_word_data)(struct i2c_client *client, int page, int reg, 326 int (*write_word_data)(struct i2c_client *client, int page, int reg,
327 u16 word); 327 u16 word);
328 int (*write_byte)(struct i2c_client *client, int page, u8 value);
328 /* 329 /*
329 * The identify function determines supported PMBus functionality. 330 * The identify function determines supported PMBus functionality.
330 * This function is only necessary if a chip driver supports multiple 331 * This function is only necessary if a chip driver supports multiple
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 5c1b6cf31701..397fc59b5682 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -182,6 +182,24 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
182} 182}
183EXPORT_SYMBOL_GPL(pmbus_write_byte); 183EXPORT_SYMBOL_GPL(pmbus_write_byte);
184 184
185/*
186 * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
 187 * a device-specific mapping function exists and calls it if necessary.
188 */
189static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
190{
191 struct pmbus_data *data = i2c_get_clientdata(client);
192 const struct pmbus_driver_info *info = data->info;
193 int status;
194
195 if (info->write_byte) {
196 status = info->write_byte(client, page, value);
197 if (status != -ENODATA)
198 return status;
199 }
200 return pmbus_write_byte(client, page, value);
201}
202
185int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) 203int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
186{ 204{
187 int rv; 205 int rv;
@@ -281,7 +299,7 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
281 299
282static void pmbus_clear_fault_page(struct i2c_client *client, int page) 300static void pmbus_clear_fault_page(struct i2c_client *client, int page)
283{ 301{
284 pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); 302 _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
285} 303}
286 304
287void pmbus_clear_faults(struct i2c_client *client) 305void pmbus_clear_faults(struct i2c_client *client)
@@ -960,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
960struct pmbus_limit_attr { 978struct pmbus_limit_attr {
961 u16 reg; /* Limit register */ 979 u16 reg; /* Limit register */
962 bool update; /* True if register needs updates */ 980 bool update; /* True if register needs updates */
981 bool low; /* True if low limit; for limits with compare
982 functions only */
963 const char *attr; /* Attribute name */ 983 const char *attr; /* Attribute name */
964 const char *alarm; /* Alarm attribute name */ 984 const char *alarm; /* Alarm attribute name */
965 u32 sbit; /* Alarm attribute status bit */ 985 u32 sbit; /* Alarm attribute status bit */
@@ -1011,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
1011 if (attr->compare) { 1031 if (attr->compare) {
1012 pmbus_add_boolean_cmp(data, name, 1032 pmbus_add_boolean_cmp(data, name,
1013 l->alarm, index, 1033 l->alarm, index,
1014 cbase, cindex, 1034 l->low ? cindex : cbase,
1035 l->low ? cbase : cindex,
1015 attr->sbase + page, l->sbit); 1036 attr->sbase + page, l->sbit);
1016 } else { 1037 } else {
1017 pmbus_add_boolean_reg(data, name, 1038 pmbus_add_boolean_reg(data, name,
@@ -1348,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
1348static const struct pmbus_limit_attr temp_limit_attrs[] = { 1369static const struct pmbus_limit_attr temp_limit_attrs[] = {
1349 { 1370 {
1350 .reg = PMBUS_UT_WARN_LIMIT, 1371 .reg = PMBUS_UT_WARN_LIMIT,
1372 .low = true,
1351 .attr = "min", 1373 .attr = "min",
1352 .alarm = "min_alarm", 1374 .alarm = "min_alarm",
1353 .sbit = PB_TEMP_UT_WARNING, 1375 .sbit = PB_TEMP_UT_WARNING,
1354 }, { 1376 }, {
1355 .reg = PMBUS_UT_FAULT_LIMIT, 1377 .reg = PMBUS_UT_FAULT_LIMIT,
1378 .low = true,
1356 .attr = "lcrit", 1379 .attr = "lcrit",
1357 .alarm = "lcrit_alarm", 1380 .alarm = "lcrit_alarm",
1358 .sbit = PB_TEMP_UT_FAULT, 1381 .sbit = PB_TEMP_UT_FAULT,
@@ -1381,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
1381static const struct pmbus_limit_attr temp_limit_attrs23[] = { 1404static const struct pmbus_limit_attr temp_limit_attrs23[] = {
1382 { 1405 {
1383 .reg = PMBUS_UT_WARN_LIMIT, 1406 .reg = PMBUS_UT_WARN_LIMIT,
1407 .low = true,
1384 .attr = "min", 1408 .attr = "min",
1385 .alarm = "min_alarm", 1409 .alarm = "min_alarm",
1386 .sbit = PB_TEMP_UT_WARNING, 1410 .sbit = PB_TEMP_UT_WARNING,
1387 }, { 1411 }, {
1388 .reg = PMBUS_UT_FAULT_LIMIT, 1412 .reg = PMBUS_UT_FAULT_LIMIT,
1413 .low = true,
1389 .attr = "lcrit", 1414 .attr = "lcrit",
1390 .alarm = "lcrit_alarm", 1415 .alarm = "lcrit_alarm",
1391 .sbit = PB_TEMP_UT_FAULT, 1416 .sbit = PB_TEMP_UT_FAULT,
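
The pmbus changes above add an optional per-chip write_byte hook: _pmbus_write_byte() tries the hook first and only falls back to the generic pmbus_write_byte() when the hook returns -ENODATA, which is what lets the lm25066 driver silently drop byte writes (such as CLEAR_FAULTS) aimed at page 1. The user-space sketch below mirrors that dispatch pattern; the structure and function names are simplified stand-ins for the kernel ones.

#include <stdio.h>
#include <errno.h>

struct chip_info {
        /* optional per-chip override; -ENODATA means "use the generic path" */
        int (*write_byte)(int page, unsigned char value);
};

static int generic_write_byte(int page, unsigned char value)
{
        printf("generic write: page=%d value=0x%02x\n", page, value);
        return 0;
}

/* Behaves like lm25066_write_byte() in the hunk. */
static int chip_write_byte(int page, unsigned char value)
{
        if (page > 1)
                return -EINVAL;
        if (page == 0)
                return generic_write_byte(0, value);
        return 0;                       /* page 1: silently ignored */
}

static int dispatch_write_byte(const struct chip_info *info, int page,
                               unsigned char value)
{
        if (info->write_byte) {
                int status = info->write_byte(page, value);

                if (status != -ENODATA)
                        return status;
        }
        return generic_write_byte(page, value);
}

int main(void)
{
        struct chip_info info = { .write_byte = chip_write_byte };

        return dispatch_write_byte(&info, 1, 0x03);     /* ignored, returns 0 */
}
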
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..d0ddb60155c9 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
141 block_buffer[ret] = '\0'; 141 block_buffer[ret] = '\0';
142 dev_info(&client->dev, "Device ID %s\n", block_buffer); 142 dev_info(&client->dev, "Device ID %s\n", block_buffer);
143 143
144 mid = NULL; 144 for (mid = ucd9000_id; mid->name[0]; mid++) {
145 for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
146 mid = &ucd9000_id[i];
147 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 145 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
148 break; 146 break;
149 } 147 }
150 if (!mid || !strlen(mid->name)) { 148 if (!mid->name[0]) {
151 dev_err(&client->dev, "Unsupported device\n"); 149 dev_err(&client->dev, "Unsupported device\n");
152 return -ENODEV; 150 return -ENODEV;
153 } 151 }
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..c65e9da707cc 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
68 block_buffer[ret] = '\0'; 68 block_buffer[ret] = '\0';
69 dev_info(&client->dev, "Device ID %s\n", block_buffer); 69 dev_info(&client->dev, "Device ID %s\n", block_buffer);
70 70
71 mid = NULL; 71 for (mid = ucd9200_id; mid->name[0]; mid++) {
72 for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
73 mid = &ucd9200_id[i];
74 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 72 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
75 break; 73 break;
76 } 74 }
77 if (!mid || !strlen(mid->name)) { 75 if (!mid->name[0]) {
78 dev_err(&client->dev, "Unsupported device\n"); 76 dev_err(&client->dev, "Unsupported device\n");
79 return -ENODEV; 77 return -ENODEV;
80 } 78 }
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0c731ca69f15..b228e09c5d05 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -146,6 +146,7 @@ struct i2c_nmk_client {
146 * @stop: stop condition 146 * @stop: stop condition
 147 * @xfer_complete: acknowledge completion for an I2C message 147 * @xfer_complete: acknowledge completion for an I2C message
 148 * @result: controller propagated result 148 * @result: controller propagated result
149 * @regulator: pointer to i2c regulator
149 * @busy: Busy doing transfer 150 * @busy: Busy doing transfer
150 */ 151 */
151struct nmk_i2c_dev { 152struct nmk_i2c_dev {
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev)
417 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 418 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
418 dev->virtbase + I2C_IMSCR); 419 dev->virtbase + I2C_IMSCR);
419 420
420 timeout = wait_for_completion_interruptible_timeout( 421 timeout = wait_for_completion_timeout(
421 &dev->xfer_complete, dev->adap.timeout); 422 &dev->xfer_complete, dev->adap.timeout);
422 423
423 if (timeout < 0) { 424 if (timeout < 0) {
424 dev_err(&dev->pdev->dev, 425 dev_err(&dev->pdev->dev,
425 "wait_for_completion_interruptible_timeout" 426 "wait_for_completion_timeout"
426 "returned %d waiting for event\n", timeout); 427 "returned %d waiting for event\n", timeout);
427 status = timeout; 428 status = timeout;
428 } 429 }
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev)
504 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 505 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
505 dev->virtbase + I2C_IMSCR); 506 dev->virtbase + I2C_IMSCR);
506 507
507 timeout = wait_for_completion_interruptible_timeout( 508 timeout = wait_for_completion_timeout(
508 &dev->xfer_complete, dev->adap.timeout); 509 &dev->xfer_complete, dev->adap.timeout);
509 510
510 if (timeout < 0) { 511 if (timeout < 0) {
511 dev_err(&dev->pdev->dev, 512 dev_err(&dev->pdev->dev,
512 "wait_for_completion_interruptible_timeout" 513 "wait_for_completion_timeout "
513 "returned %d waiting for event\n", timeout); 514 "returned %d waiting for event\n", timeout);
514 status = timeout; 515 status = timeout;
515 } 516 }
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1a766cf74f6b..2dfb63176856 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev)
1139 return 0; 1139 return 0;
1140} 1140}
1141 1141
1142#ifdef CONFIG_SUSPEND
1143static int omap_i2c_suspend(struct device *dev)
1144{
1145 if (!pm_runtime_suspended(dev))
1146 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
1147 dev->bus->pm->runtime_suspend(dev);
1148
1149 return 0;
1150}
1151
1152static int omap_i2c_resume(struct device *dev)
1153{
1154 if (!pm_runtime_suspended(dev))
1155 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
1156 dev->bus->pm->runtime_resume(dev);
1157
1158 return 0;
1159}
1160
1161static struct dev_pm_ops omap_i2c_pm_ops = {
1162 .suspend = omap_i2c_suspend,
1163 .resume = omap_i2c_resume,
1164};
1165#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1166#else
1167#define OMAP_I2C_PM_OPS NULL
1168#endif
1169
1170static struct platform_driver omap_i2c_driver = { 1142static struct platform_driver omap_i2c_driver = {
1171 .probe = omap_i2c_probe, 1143 .probe = omap_i2c_probe,
1172 .remove = omap_i2c_remove, 1144 .remove = omap_i2c_remove,
1173 .driver = { 1145 .driver = {
1174 .name = "omap_i2c", 1146 .name = "omap_i2c",
1175 .owner = THIS_MODULE, 1147 .owner = THIS_MODULE,
1176 .pm = OMAP_I2C_PM_OPS,
1177 }, 1148 },
1178}; 1149};
1179 1150
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841..b73da6cd6f91 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
109 return -EINVAL; 109 return -EINVAL;
110 } 110 }
111 sds = kzalloc(sizeof(*sds), GFP_KERNEL); 111 sds = kzalloc(sizeof(*sds), GFP_KERNEL);
112 if (!sds) 112 if (!sds) {
113 ret = -ENOMEM;
113 goto err_mem; 114 goto err_mem;
115 }
114 116
115 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { 117 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
116 sds->pdev[i] = add_i2c_device(dev, i); 118 sds->pdev[i] = add_i2c_device(dev, i);
117 if (IS_ERR(sds->pdev[i])) { 119 if (IS_ERR(sds->pdev[i])) {
120 ret = PTR_ERR(sds->pdev[i]);
118 while (--i >= 0) 121 while (--i >= 0)
119 platform_device_unregister(sds->pdev[i]); 122 platform_device_unregister(sds->pdev[i]);
120 goto err_dev_add; 123 goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b7411978..3c94c4a81a55 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
270 270
271 /* Rounds down to not include partial word at the end of buf */ 271 /* Rounds down to not include partial word at the end of buf */
272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; 272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
273 if (words_to_transfer > tx_fifo_avail)
274 words_to_transfer = tx_fifo_avail;
275 273
276 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); 274 /* It's very common to have < 4 bytes, so optimize that case. */
277 275 if (words_to_transfer) {
278 buf += words_to_transfer * BYTES_PER_FIFO_WORD; 276 if (words_to_transfer > tx_fifo_avail)
279 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; 277 words_to_transfer = tx_fifo_avail;
280 tx_fifo_avail -= words_to_transfer; 278
279 /*
 280 * Update state before writing to FIFO. If this causes us
281 * to finish writing all bytes (AKA buf_remaining goes to 0) we
282 * have a potential for an interrupt (PACKET_XFER_COMPLETE is
283 * not maskable). We need to make sure that the isr sees
284 * buf_remaining as 0 and doesn't call us back re-entrantly.
285 */
286 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
287 tx_fifo_avail -= words_to_transfer;
288 i2c_dev->msg_buf_remaining = buf_remaining;
289 i2c_dev->msg_buf = buf +
290 words_to_transfer * BYTES_PER_FIFO_WORD;
291 barrier();
292
293 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
294
295 buf += words_to_transfer * BYTES_PER_FIFO_WORD;
296 }
281 297
282 /* 298 /*
283 * If there is a partial word at the end of buf, handle it manually to 299 * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
287 if (tx_fifo_avail > 0 && buf_remaining > 0) { 303 if (tx_fifo_avail > 0 && buf_remaining > 0) {
288 BUG_ON(buf_remaining > 3); 304 BUG_ON(buf_remaining > 3);
289 memcpy(&val, buf, buf_remaining); 305 memcpy(&val, buf, buf_remaining);
306
307 /* Again update before writing to FIFO to make sure isr sees. */
308 i2c_dev->msg_buf_remaining = 0;
309 i2c_dev->msg_buf = NULL;
310 barrier();
311
290 i2c_writel(i2c_dev, val, I2C_TX_FIFO); 312 i2c_writel(i2c_dev, val, I2C_TX_FIFO);
291 buf_remaining = 0;
292 tx_fifo_avail--;
293 } 313 }
294 314
295 BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
296 i2c_dev->msg_buf_remaining = buf_remaining;
297 i2c_dev->msg_buf = buf;
298 return 0; 315 return 0;
299} 316}
300 317
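
The reordering above matters because writing the final words into the TX FIFO can immediately raise PACKET_XFER_COMPLETE, which is not maskable; if the ISR ran before msg_buf_remaining and msg_buf were updated, it would re-enter the fill path with stale state. A condensed sketch of the ordering (struct and field names are illustrative, not the driver's):

        #include <linux/io.h>
        #include <linux/compiler.h>
        #include <linux/types.h>

        struct example_fifo_dev {
                void __iomem *fifo;     /* MMIO TX FIFO register */
                size_t remaining;       /* bytes left, read by the ISR */
                u32 *buf;               /* next data, read by the ISR */
        };

        static void example_fill_fifo(struct example_fifo_dev *dev, u32 *buf, size_t words)
        {
                /* Publish the ISR-visible bookkeeping first... */
                dev->remaining -= words * sizeof(u32);
                dev->buf = buf + words;
                barrier();              /* keep the compiler from sinking these stores */

                /* ...then do the write that may complete the transfer and fire the IRQ. */
                iowrite32_rep(dev->fifo, buf, words);
        }
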
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
411 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); 428 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
412 } 429 }
413 430
414 if ((status & I2C_INT_PACKET_XFER_COMPLETE) && 431 if (status & I2C_INT_PACKET_XFER_COMPLETE) {
415 !i2c_dev->msg_buf_remaining) 432 BUG_ON(i2c_dev->msg_buf_remaining);
416 complete(&i2c_dev->msg_complete); 433 complete(&i2c_dev->msg_complete);
434 }
417 435
418 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 436 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
419 if (i2c_dev->is_dvc) 437 if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
531 549
532static u32 tegra_i2c_func(struct i2c_adapter *adap) 550static u32 tegra_i2c_func(struct i2c_adapter *adap)
533{ 551{
534 return I2C_FUNC_I2C; 552 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
535} 553}
536 554
537static const struct i2c_algorithm tegra_i2c_algo = { 555static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
719} 737}
720#endif 738#endif
721 739
740#if defined(CONFIG_OF)
741/* Match table for of_platform binding */
742static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
743 { .compatible = "nvidia,tegra20-i2c", },
744 {},
745};
746MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
747#else
748#define tegra_i2c_of_match NULL
749#endif
750
722static struct platform_driver tegra_i2c_driver = { 751static struct platform_driver tegra_i2c_driver = {
723 .probe = tegra_i2c_probe, 752 .probe = tegra_i2c_probe,
724 .remove = tegra_i2c_remove, 753 .remove = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
729 .driver = { 758 .driver = {
730 .name = "tegra-i2c", 759 .name = "tegra-i2c",
731 .owner = THIS_MODULE, 760 .owner = THIS_MODULE,
761 .of_match_table = tegra_i2c_of_match,
732 }, 762 },
733}; 763};
734 764
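
For context, this is the shape of the device-tree binding the hunks add: a compatible-string match table compiled in only under CONFIG_OF, exported for module autoloading, and wired into the platform driver. A hedged sketch with placeholder names (example_*, "vendor,example-dev"), not additional driver code:

        #include <linux/module.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev) { return 0; }
        static int example_remove(struct platform_device *pdev) { return 0; }

        #if defined(CONFIG_OF)
        static const struct of_device_id example_of_match[] = {
                { .compatible = "vendor,example-dev", },  /* placeholder string */
                { },
        };
        MODULE_DEVICE_TABLE(of, example_of_match);
        #else
        #define example_of_match NULL
        #endif

        static struct platform_driver example_driver = {
                .probe  = example_probe,
                .remove = example_remove,
                .driver = {
                        .name           = "example-dev",
                        .owner          = THIS_MODULE,
                        .of_match_table = example_of_match,
                },
        };
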
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index aa30915c71ea..7567b6000230 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
717{ 717{
718 struct ipoib_dev_priv *priv = netdev_priv(dev); 718 struct ipoib_dev_priv *priv = netdev_priv(dev);
719 struct ipoib_neigh *neigh; 719 struct ipoib_neigh *neigh;
720 struct neighbour *n; 720 struct neighbour *n = NULL;
721 unsigned long flags; 721 unsigned long flags;
722 722
723 n = dst_get_neighbour(skb_dst(skb)); 723 if (likely(skb_dst(skb)))
724 if (likely(skb_dst(skb) && n)) { 724 n = dst_get_neighbour(skb_dst(skb));
725
726 if (likely(n)) {
725 if (unlikely(!*to_ipoib_neigh(n))) { 727 if (unlikely(!*to_ipoib_neigh(n))) {
726 ipoib_path_lookup(skb, dev); 728 ipoib_path_lookup(skb, dev);
727 return NETDEV_TX_OK; 729 return NETDEV_TX_OK;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8db008de5392..9c61b9c2c597 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
101 101
102 /* verify PDU length */ 102 /* verify PDU length */
103 datalen = ntoh24(hdr->dlength); 103 datalen = ntoh24(hdr->dlength);
104 if (datalen != rx_data_len) { 104 if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
105 printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", 105 iser_err("wrong datalen %d (hdr), %d (IB)\n",
106 datalen, rx_data_len); 106 datalen, rx_data_len);
107 rc = ISCSI_ERR_DATALEN; 107 rc = ISCSI_ERR_DATALEN;
108 goto error; 108 goto error;
109 } 109 }
110 110
111 if (datalen != rx_data_len)
112 iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
113 datalen, rx_data_len);
114
111 /* read AHS */ 115 /* read AHS */
112 ahslen = hdr->hlength * 4; 116 ahslen = hdr->hlength * 4;
113 117
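
The relaxed check above stops treating padded receives as fatal: iSCSI pads data segments to a 4-byte boundary, so the length seen on the wire may exceed the header's dlength by a few bytes but must never be shorter. A standalone restatement of the acceptance window (function name is illustrative):

        #include <stdbool.h>

        static bool pdu_data_len_ok(unsigned int hdr_dlength, unsigned int rx_len)
        {
                /* Accept hdr_dlength <= rx_len <= hdr_dlength + 4; this is the
                 * complement of the rejection test in the hunk above. */
                return hdr_dlength <= rx_len && rx_len <= hdr_dlength + 4;
        }
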
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 342cbc1bdaae..db6f3ce9f3bf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -89,7 +89,7 @@
89 } while (0) 89 } while (0)
90 90
91#define SHIFT_4K 12 91#define SHIFT_4K 12
92#define SIZE_4K (1UL << SHIFT_4K) 92#define SIZE_4K (1ULL << SHIFT_4K)
93#define MASK_4K (~(SIZE_4K-1)) 93#define MASK_4K (~(SIZE_4K-1))
94 94
95 /* support up to 512KB in one RDMA */ 95 /* support up to 512KB in one RDMA */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5745b7fe158c..f299de6b419b 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn,
412 memcpy(iser_conn->ib_conn->login_buf, task->data, 412 memcpy(iser_conn->ib_conn->login_buf, task->data,
413 task->data_count); 413 task->data_count);
414 tx_dsg->addr = iser_conn->ib_conn->login_dma; 414 tx_dsg->addr = iser_conn->ib_conn->login_dma;
415 tx_dsg->length = data_seg_len; 415 tx_dsg->length = task->data_count;
416 tx_dsg->lkey = device->mr->lkey; 416 tx_dsg->lkey = device->mr->lkey;
417 mdesc->num_sge = 2; 417 mdesc->num_sge = 2;
418 } 418 }
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 9882971827e6..358cd7ee905b 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -139,7 +139,7 @@ struct analog_port {
139#include <linux/i8253.h> 139#include <linux/i8253.h>
140 140
141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) 141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) 142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT") 143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
144static unsigned int get_time_pit(void) 144static unsigned int get_time_pit(void)
145{ 145{
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 7b404e5443ed..e34eeb8ae371 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
668MODULE_LICENSE("GPL"); 668MODULE_LICENSE("GPL");
669MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 669MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
670MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); 670MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
671MODULE_ALIAS("platform:adp5588-keys");
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8242dd190d0..aa17e024d803 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -20,6 +20,7 @@
20 * flag. 20 * flag.
21 */ 21 */
22 22
23#include <linux/module.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index f270447ba951..a5a77915c650 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -702,7 +702,7 @@ err_iounmap:
702err_free_mem_region: 702err_free_mem_region:
703 release_mem_region(res->start, resource_size(res)); 703 release_mem_region(res->start, resource_size(res));
704err_free_mem: 704err_free_mem:
705 input_free_device(kbc->idev); 705 input_free_device(input_dev);
706 kfree(kbc); 706 kfree(kbc);
707 707
708 return err; 708 return err;
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e21deb1baa8a..025417d74ca2 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (I2C bus) 2 * AD714X CapTouch Programmable Controller driver (I2C bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev)
27 27
28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); 28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
29 29
30static int ad714x_i2c_write(struct device *dev, unsigned short reg, 30static int ad714x_i2c_write(struct ad714x_chip *chip,
31 unsigned short data) 31 unsigned short reg, unsigned short data)
32{ 32{
33 struct i2c_client *client = to_i2c_client(dev); 33 struct i2c_client *client = to_i2c_client(chip->dev);
34 int ret = 0; 34 int error;
35 u8 *_reg = (u8 *)&reg; 35
36 u8 *_data = (u8 *)&data; 36 chip->xfer_buf[0] = cpu_to_be16(reg);
37 37 chip->xfer_buf[1] = cpu_to_be16(data);
38 u8 tx[4] = { 38
39 _reg[1], 39 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
40 _reg[0], 40 2 * sizeof(*chip->xfer_buf));
41 _data[1], 41 if (unlikely(error < 0)) {
42 _data[0] 42 dev_err(&client->dev, "I2C write error: %d\n", error);
43 }; 43 return error;
44 44 }
45 ret = i2c_master_send(client, tx, 4); 45
46 if (ret < 0) 46 return 0;
47 dev_err(&client->dev, "I2C write error\n");
48
49 return ret;
50} 47}
51 48
52static int ad714x_i2c_read(struct device *dev, unsigned short reg, 49static int ad714x_i2c_read(struct ad714x_chip *chip,
53 unsigned short *data) 50 unsigned short reg, unsigned short *data, size_t len)
54{ 51{
55 struct i2c_client *client = to_i2c_client(dev); 52 struct i2c_client *client = to_i2c_client(chip->dev);
56 int ret = 0; 53 int i;
57 u8 *_reg = (u8 *)&reg; 54 int error;
58 u8 *_data = (u8 *)data; 55
59 56 chip->xfer_buf[0] = cpu_to_be16(reg);
60 u8 tx[2] = { 57
61 _reg[1], 58 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
62 _reg[0] 59 sizeof(*chip->xfer_buf));
63 }; 60 if (error >= 0)
64 u8 rx[2]; 61 error = i2c_master_recv(client, (u8 *)chip->xfer_buf,
65 62 len * sizeof(*chip->xfer_buf));
66 ret = i2c_master_send(client, tx, 2); 63
67 if (ret >= 0) 64 if (unlikely(error < 0)) {
68 ret = i2c_master_recv(client, rx, 2); 65 dev_err(&client->dev, "I2C read error: %d\n", error);
69 66 return error;
70 if (unlikely(ret < 0)) {
71 dev_err(&client->dev, "I2C read error\n");
72 } else {
73 _data[0] = rx[1];
74 _data[1] = rx[0];
75 } 67 }
76 68
77 return ret; 69 for (i = 0; i < len; i++)
70 data[i] = be16_to_cpu(chip->xfer_buf[i]);
71
72 return 0;
78} 73}
79 74
80static int __devinit ad714x_i2c_probe(struct i2c_client *client, 75static int __devinit ad714x_i2c_probe(struct i2c_client *client,
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 4120dd549305..875b50811361 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (SPI bus) 2 * AD714X CapTouch Programmable Controller driver (SPI bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9#include <linux/input.h> /* BUS_I2C */ 9#include <linux/input.h> /* BUS_SPI */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/spi/spi.h> 11#include <linux/spi/spi.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev)
30 30
31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); 31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
32 32
33static int ad714x_spi_read(struct device *dev, unsigned short reg, 33static int ad714x_spi_read(struct ad714x_chip *chip,
34 unsigned short *data) 34 unsigned short reg, unsigned short *data, size_t len)
35{ 35{
36 struct spi_device *spi = to_spi_device(dev); 36 struct spi_device *spi = to_spi_device(chip->dev);
37 unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; 37 struct spi_message message;
38 struct spi_transfer xfer[2];
39 int i;
40 int error;
41
42 spi_message_init(&message);
43 memset(xfer, 0, sizeof(xfer));
44
45 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
46 AD714x_SPI_READ | reg);
47 xfer[0].tx_buf = &chip->xfer_buf[0];
48 xfer[0].len = sizeof(chip->xfer_buf[0]);
49 spi_message_add_tail(&xfer[0], &message);
50
51 xfer[1].rx_buf = &chip->xfer_buf[1];
52 xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
53 spi_message_add_tail(&xfer[1], &message);
54
55 error = spi_sync(spi, &message);
56 if (unlikely(error)) {
57 dev_err(chip->dev, "SPI read error: %d\n", error);
58 return error;
59 }
60
61 for (i = 0; i < len; i++)
62 data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);
38 63
39 return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); 64 return 0;
40} 65}
41 66
42static int ad714x_spi_write(struct device *dev, unsigned short reg, 67static int ad714x_spi_write(struct ad714x_chip *chip,
43 unsigned short data) 68 unsigned short reg, unsigned short data)
44{ 69{
45 struct spi_device *spi = to_spi_device(dev); 70 struct spi_device *spi = to_spi_device(chip->dev);
46 unsigned short tx[2] = { 71 int error;
47 AD714x_SPI_CMD_PREFIX | reg, 72
48 data 73 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
49 }; 74 chip->xfer_buf[1] = cpu_to_be16(data);
75
76 error = spi_write(spi, (u8 *)chip->xfer_buf,
77 2 * sizeof(*chip->xfer_buf));
78 if (unlikely(error)) {
79 dev_err(chip->dev, "SPI write error: %d\n", error);
80 return error;
81 }
50 82
51 return spi_write(spi, (u8 *)tx, 4); 83 return 0;
52} 84}
53 85
54static int __devinit ad714x_spi_probe(struct spi_device *spi) 86static int __devinit ad714x_spi_probe(struct spi_device *spi)
55{ 87{
56 struct ad714x_chip *chip; 88 struct ad714x_chip *chip;
89 int err;
90
91 spi->bits_per_word = 8;
92 err = spi_setup(spi);
93 if (err < 0)
94 return err;
57 95
58 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, 96 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
59 ad714x_spi_read, ad714x_spi_write); 97 ad714x_spi_read, ad714x_spi_write);
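
The SPI side now builds one spi_message from two transfers, so chip select stays asserted between the 16-bit command word and the data clocked back, and it reuses the per-chip DMA-safe buffer instead of stack storage. A condensed sketch (names illustrative; buf is assumed to hold len + 1 __be16 words):

        #include <linux/spi/spi.h>
        #include <linux/string.h>
        #include <asm/byteorder.h>

        static int example_spi_cmd_read(struct spi_device *spi, __be16 *buf,
                                        u16 cmd, u16 *out, size_t len)
        {
                struct spi_message message;
                struct spi_transfer xfer[2];
                size_t i;
                int error;

                spi_message_init(&message);
                memset(xfer, 0, sizeof(xfer));

                buf[0] = cpu_to_be16(cmd);              /* command word goes out first */
                xfer[0].tx_buf = &buf[0];
                xfer[0].len = sizeof(buf[0]);
                spi_message_add_tail(&xfer[0], &message);

                xfer[1].rx_buf = &buf[1];               /* reply lands after the command */
                xfer[1].len = len * sizeof(buf[1]);
                spi_message_add_tail(&xfer[1], &message);

                error = spi_sync(spi, &message);
                if (error)
                        return error;

                for (i = 0; i < len; i++)
                        out[i] = be16_to_cpu(buf[i + 1]);

                return 0;
        }
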
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index c3a62c42cd28..ca42c7d2a3c7 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A 2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -59,7 +59,6 @@
59#define STAGE11_AMBIENT 0x27D 59#define STAGE11_AMBIENT 0x27D
60 60
61#define PER_STAGE_REG_NUM 36 61#define PER_STAGE_REG_NUM 36
62#define STAGE_NUM 12
63#define STAGE_CFGREG_NUM 8 62#define STAGE_CFGREG_NUM 8
64#define SYS_CFGREG_NUM 8 63#define SYS_CFGREG_NUM 8
65 64
@@ -124,27 +123,6 @@ struct ad714x_driver_data {
124 * information to integrate all things which will be private data 123 * information to integrate all things which will be private data
125 * of spi/i2c device 124 * of spi/i2c device
126 */ 125 */
127struct ad714x_chip {
128 unsigned short h_state;
129 unsigned short l_state;
130 unsigned short c_state;
131 unsigned short adc_reg[STAGE_NUM];
132 unsigned short amb_reg[STAGE_NUM];
133 unsigned short sensor_val[STAGE_NUM];
134
135 struct ad714x_platform_data *hw;
136 struct ad714x_driver_data *sw;
137
138 int irq;
139 struct device *dev;
140 ad714x_read_t read;
141 ad714x_write_t write;
142
143 struct mutex mutex;
144
145 unsigned product;
146 unsigned version;
147};
148 126
149static void ad714x_use_com_int(struct ad714x_chip *ad714x, 127static void ad714x_use_com_int(struct ad714x_chip *ad714x,
150 int start_stage, int end_stage) 128 int start_stage, int end_stage)
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,
154 132
155 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 133 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
156 134
157 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 135 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
158 data |= 1 << end_stage; 136 data |= 1 << end_stage;
159 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 137 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
160 138
161 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 139 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
162 data &= ~mask; 140 data &= ~mask;
163 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 141 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
164} 142}
165 143
166static void ad714x_use_thr_int(struct ad714x_chip *ad714x, 144static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
171 149
172 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 150 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
173 151
174 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 152 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
175 data &= ~(1 << end_stage); 153 data &= ~(1 << end_stage);
176 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 154 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
177 155
178 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 156 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
179 data |= mask; 157 data |= mask;
180 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 158 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
181} 159}
182 160
183static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, 161static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
273 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; 251 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
274 int i; 252 int i;
275 253
254 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
255 &ad714x->adc_reg[hw->start_stage],
256 hw->end_stage - hw->start_stage + 1);
257
276 for (i = hw->start_stage; i <= hw->end_stage; i++) { 258 for (i = hw->start_stage; i <= hw->end_stage; i++) {
277 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 259 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
278 &ad714x->adc_reg[i]); 260 &ad714x->amb_reg[i], 1);
279 ad714x->read(ad714x->dev, 261
280 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, 262 ad714x->sensor_val[i] =
281 &ad714x->amb_reg[i]); 263 abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]);
282
283 ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
284 ad714x->amb_reg[i]);
285 } 264 }
286} 265}
287 266
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
444 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; 423 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
445 int i; 424 int i;
446 425
426 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
427 &ad714x->adc_reg[hw->start_stage],
428 hw->end_stage - hw->start_stage + 1);
429
447 for (i = hw->start_stage; i <= hw->end_stage; i++) { 430 for (i = hw->start_stage; i <= hw->end_stage; i++) {
448 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 431 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
449 &ad714x->adc_reg[i]); 432 &ad714x->amb_reg[i], 1);
450 ad714x->read(ad714x->dev,
451 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
452 &ad714x->amb_reg[i]);
453 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 433 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
454 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 434 ad714x->sensor_val[i] =
455 ad714x->amb_reg[i]; 435 ad714x->adc_reg[i] - ad714x->amb_reg[i];
456 else 436 else
457 ad714x->sensor_val[i] = 0; 437 ad714x->sensor_val[i] = 0;
458 } 438 }
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
597 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; 577 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
598 int i; 578 int i;
599 579
580 ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage,
581 &ad714x->adc_reg[hw->x_start_stage],
582 hw->x_end_stage - hw->x_start_stage + 1);
583
600 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { 584 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
601 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 585 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
602 &ad714x->adc_reg[i]); 586 &ad714x->amb_reg[i], 1);
603 ad714x->read(ad714x->dev,
604 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
605 &ad714x->amb_reg[i]);
606 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 587 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
607 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 588 ad714x->sensor_val[i] =
608 ad714x->amb_reg[i]; 589 ad714x->adc_reg[i] - ad714x->amb_reg[i];
609 else 590 else
610 ad714x->sensor_val[i] = 0; 591 ad714x->sensor_val[i] = 0;
611 } 592 }
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x)
891{ 872{
892 unsigned short data; 873 unsigned short data;
893 874
894 ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); 875 ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1);
895 switch (data & 0xFFF0) { 876 switch (data & 0xFFF0) {
896 case AD7142_PARTID: 877 case AD7142_PARTID:
897 ad714x->product = 0x7142; 878 ad714x->product = 0x7142;
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x)
940 for (i = 0; i < STAGE_NUM; i++) { 921 for (i = 0; i < STAGE_NUM; i++) {
941 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; 922 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
942 for (j = 0; j < STAGE_CFGREG_NUM; j++) 923 for (j = 0; j < STAGE_CFGREG_NUM; j++)
943 ad714x->write(ad714x->dev, reg_base + j, 924 ad714x->write(ad714x, reg_base + j,
944 ad714x->hw->stage_cfg_reg[i][j]); 925 ad714x->hw->stage_cfg_reg[i][j]);
945 } 926 }
946 927
947 for (i = 0; i < SYS_CFGREG_NUM; i++) 928 for (i = 0; i < SYS_CFGREG_NUM; i++)
948 ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, 929 ad714x->write(ad714x, AD714X_SYSCFG_REG + i,
949 ad714x->hw->sys_cfg_reg[i]); 930 ad714x->hw->sys_cfg_reg[i]);
950 for (i = 0; i < SYS_CFGREG_NUM; i++) 931 for (i = 0; i < SYS_CFGREG_NUM; i++)
951 ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, 932 ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1);
952 &data);
953 933
954 ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); 934 ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF);
955 935
956 /* clear all interrupts */ 936 /* clear all interrupts */
957 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 937 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
958 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
959 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
960} 938}
961 939
962static irqreturn_t ad714x_interrupt_thread(int irq, void *data) 940static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
966 944
967 mutex_lock(&ad714x->mutex); 945 mutex_lock(&ad714x->mutex);
968 946
969 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); 947 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
970 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
971 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
972 948
973 for (i = 0; i < ad714x->hw->button_num; i++) 949 for (i = 0; i < ad714x->hw->button_num; i++)
974 ad714x_button_state_machine(ad714x, i); 950 ad714x_button_state_machine(ad714x, i);
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x)
1245 mutex_lock(&ad714x->mutex); 1221 mutex_lock(&ad714x->mutex);
1246 1222
1247 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; 1223 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
1248 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); 1224 ad714x->write(ad714x, AD714X_PWR_CTRL, data);
1249 1225
1250 mutex_unlock(&ad714x->mutex); 1226 mutex_unlock(&ad714x->mutex);
1251 1227
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable);
1255 1231
1256int ad714x_enable(struct ad714x_chip *ad714x) 1232int ad714x_enable(struct ad714x_chip *ad714x)
1257{ 1233{
1258 unsigned short data;
1259
1260 dev_dbg(ad714x->dev, "%s enter\n", __func__); 1234 dev_dbg(ad714x->dev, "%s enter\n", __func__);
1261 1235
1262 mutex_lock(&ad714x->mutex); 1236 mutex_lock(&ad714x->mutex);
1263 1237
1264 /* resume to non-shutdown mode */ 1238 /* resume to non-shutdown mode */
1265 1239
1266 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, 1240 ad714x->write(ad714x, AD714X_PWR_CTRL,
1267 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); 1241 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
1268 1242
1269 /* make sure the interrupt output line is not low level after resume, 1243 /* make sure the interrupt output line is not low level after resume,
1270 * otherwise we will get no chance to enter falling-edge irq again 1244 * otherwise we will get no chance to enter falling-edge irq again
1271 */ 1245 */
1272 1246
1273 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 1247 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
1274 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
1275 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
1276 1248
1277 mutex_unlock(&ad714x->mutex); 1249 mutex_unlock(&ad714x->mutex);
1278 1250
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
index 45c54fb13f07..3c85455aa66d 100644
--- a/drivers/input/misc/ad714x.h
+++ b/drivers/input/misc/ad714x.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (bus interfaces) 2 * AD714X CapTouch Programmable Controller driver (bus interfaces)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -11,11 +11,40 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#define STAGE_NUM 12
15
14struct device; 16struct device;
17struct ad714x_platform_data;
18struct ad714x_driver_data;
15struct ad714x_chip; 19struct ad714x_chip;
16 20
17typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); 21typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t);
18typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); 22typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short);
23
24struct ad714x_chip {
25 unsigned short l_state;
26 unsigned short h_state;
27 unsigned short c_state;
28 unsigned short adc_reg[STAGE_NUM];
29 unsigned short amb_reg[STAGE_NUM];
30 unsigned short sensor_val[STAGE_NUM];
31
32 struct ad714x_platform_data *hw;
33 struct ad714x_driver_data *sw;
34
35 int irq;
36 struct device *dev;
37 ad714x_read_t read;
38 ad714x_write_t write;
39
40 struct mutex mutex;
41
42 unsigned product;
43 unsigned version;
44
45 __be16 xfer_buf[16] ____cacheline_aligned;
46
47};
19 48
20int ad714x_disable(struct ad714x_chip *ad714x); 49int ad714x_disable(struct ad714x_chip *ad714x);
21int ad714x_enable(struct ad714x_chip *ad714x); 50int ad714x_enable(struct ad714x_chip *ad714x);
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index b09c7d127219..ab860511f016 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
475 le16_to_cpu(dev->ctl_req->wIndex), 475 le16_to_cpu(dev->ctl_req->wIndex),
476 dev->ctl_data, 476 dev->ctl_data,
477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); 477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
478 if (error && error != EINTR) 478 if (error < 0 && error != -EINTR)
479 err("%s: usb_control_msg() failed %d", __func__, error); 479 err("%s: usb_control_msg() failed %d", __func__, error);
480} 480}
481 481
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 6c76cf792991..0794778295fc 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = {
234 { .compatible = "fsl,mma8450", }, 234 { .compatible = "fsl,mma8450", },
235 { /* sentinel */ } 235 { /* sentinel */ }
236}; 236};
237MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); 237MODULE_DEVICE_TABLE(of, mma8450_dt_ids);
238 238
239static struct i2c_driver mma8450_driver = { 239static struct i2c_driver mma8450_driver = {
240 .driver = { 240 .driver = {
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index b95fac15b2ea..f71dc728da58 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -282,7 +282,7 @@ err_free_irq:
282err_pm_set_suspended: 282err_pm_set_suspended:
283 pm_runtime_set_suspended(&client->dev); 283 pm_runtime_set_suspended(&client->dev);
284err_free_mem: 284err_free_mem:
285 input_unregister_device(idev); 285 input_free_device(idev);
286 kfree(sensor); 286 kfree(sensor);
287 return error; 287 return error;
288} 288}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 3126983c004a..5ec617e28f7e 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,18 @@
67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
70/* MacbookAir4,1 (unibody, July 2011) */
71#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
72#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
73#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
74/* MacbookAir4,2 (unibody, July 2011) */
75#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
76#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
77#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
78/* Macbook8,2 (unibody) */
79#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
80#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
81#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
70 82
71#define BCM5974_DEVICE(prod) { \ 83#define BCM5974_DEVICE(prod) { \
72 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 84 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -104,6 +116,18 @@ static const struct usb_device_id bcm5974_table[] = {
104 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), 116 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
105 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), 117 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
106 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), 118 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
119 /* MacbookAir4,1 */
120 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
121 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
122 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
123 /* MacbookAir4,2 */
124 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
125 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
126 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
127 /* MacbookPro8,2 */
128 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
129 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
130 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
107 /* Terminating entry */ 131 /* Terminating entry */
108 {} 132 {}
109}; 133};
@@ -294,6 +318,42 @@ static const struct bcm5974_config bcm5974_config_table[] = {
294 { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, 318 { DIM_X, DIM_X / SN_COORD, -4415, 5050 },
295 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } 319 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 }
296 }, 320 },
321 {
322 USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI,
323 USB_DEVICE_ID_APPLE_WELLSPRING6_ISO,
324 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
325 HAS_INTEGRATED_BUTTON,
326 0x84, sizeof(struct bt_data),
327 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
328 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
329 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
330 { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
331 { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
332 },
333 {
334 USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI,
335 USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO,
336 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
337 HAS_INTEGRATED_BUTTON,
338 0x84, sizeof(struct bt_data),
339 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
340 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
341 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
342 { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
343 { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
344 },
345 {
346 USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
347 USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
348 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
349 HAS_INTEGRATED_BUTTON,
350 0x84, sizeof(struct bt_data),
351 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
352 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
353 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
354 { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
355 { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
356 },
297 {} 357 {}
298}; 358};
299 359
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 449c0a46dbac..958b4eb6369d 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -49,6 +49,7 @@ struct hid_descriptor {
49#define USB_REQ_GET_REPORT 0x01 49#define USB_REQ_GET_REPORT 0x01
50#define USB_REQ_SET_REPORT 0x09 50#define USB_REQ_SET_REPORT 0x09
51#define WAC_HID_FEATURE_REPORT 0x03 51#define WAC_HID_FEATURE_REPORT 0x03
52#define WAC_MSG_RETRIES 5
52 53
53static int usb_get_report(struct usb_interface *intf, unsigned char type, 54static int usb_get_report(struct usb_interface *intf, unsigned char type,
54 unsigned char id, void *buf, int size) 55 unsigned char id, void *buf, int size)
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
165 report, 166 report,
166 hid_desc->wDescriptorLength, 167 hid_desc->wDescriptorLength,
167 5000); /* 5 secs */ 168 5000); /* 5 secs */
168 } while (result < 0 && limit++ < 5); 169 } while (result < 0 && limit++ < WAC_MSG_RETRIES);
169 170
170 /* No need to parse the Descriptor. It isn't an error though */ 171 /* No need to parse the Descriptor. It isn't an error though */
171 if (result < 0) 172 if (result < 0)
@@ -228,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
228 get_unaligned_le16(&report[i + 3]); 229 get_unaligned_le16(&report[i + 3]);
229 i += 4; 230 i += 4;
230 } 231 }
231 } else if (usage == WCM_DIGITIZER) {
232 /* max pressure isn't reported
233 features->pressure_max = (unsigned short)
234 (report[i+4] << 8 | report[i + 3]);
235 */
236 features->pressure_max = 255;
237 i += 4;
238 } 232 }
239 break; 233 break;
240 234
@@ -290,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
290 pen = 1; 284 pen = 1;
291 i++; 285 i++;
292 break; 286 break;
293
294 case HID_USAGE_UNDEFINED:
295 if (usage == WCM_DESKTOP && finger) /* capacity */
296 features->pressure_max =
297 get_unaligned_le16(&report[i + 3]);
298 i += 4;
299 break;
300 } 287 }
301 break; 288 break;
302 289
@@ -319,24 +306,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
319 int limit = 0, report_id = 2; 306 int limit = 0, report_id = 2;
320 int error = -ENOMEM; 307 int error = -ENOMEM;
321 308
322 rep_data = kmalloc(2, GFP_KERNEL); 309 rep_data = kmalloc(4, GFP_KERNEL);
323 if (!rep_data) 310 if (!rep_data)
324 return error; 311 return error;
325 312
326 /* ask to report tablet data if it is 2FGT Tablet PC or 313 /* ask to report tablet data if it is MT Tablet PC or
327 * not a Tablet PC */ 314 * not a Tablet PC */
328 if (features->type == TABLETPC2FG) { 315 if (features->type == TABLETPC2FG) {
329 do { 316 do {
330 rep_data[0] = 3; 317 rep_data[0] = 3;
331 rep_data[1] = 4; 318 rep_data[1] = 4;
319 rep_data[2] = 0;
320 rep_data[3] = 0;
332 report_id = 3; 321 report_id = 3;
333 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, 322 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
334 report_id, rep_data, 2); 323 report_id, rep_data, 4);
335 if (error >= 0) 324 if (error >= 0)
336 error = usb_get_report(intf, 325 error = usb_get_report(intf,
337 WAC_HID_FEATURE_REPORT, report_id, 326 WAC_HID_FEATURE_REPORT, report_id,
338 rep_data, 3); 327 rep_data, 4);
339 } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); 328 } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
340 } else if (features->type != TABLETPC) { 329 } else if (features->type != TABLETPC) {
341 do { 330 do {
342 rep_data[0] = 2; 331 rep_data[0] = 2;
@@ -347,7 +336,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
347 error = usb_get_report(intf, 336 error = usb_get_report(intf,
348 WAC_HID_FEATURE_REPORT, report_id, 337 WAC_HID_FEATURE_REPORT, report_id,
349 rep_data, 2); 338 rep_data, 2);
350 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); 339 } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
351 } 340 }
352 341
353 kfree(rep_data); 342 kfree(rep_data);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 03ebcc8b24b5..0dc97ec15c28 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
800 int i; 800 int i;
801 801
802 for (i = 0; i < 2; i++) { 802 for (i = 0; i < 2; i++) {
803 int p = data[9 * i + 2]; 803 int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
804 bool touch = p && !wacom->shared->stylus_in_proximity; 804 bool touch = data[offset + 3] & 0x80;
805 805
806 input_mt_slot(input, i);
807 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
808 /* 806 /*
809 * Touch events need to be disabled while stylus is 807 * Touch events need to be disabled while stylus is
810 * in proximity because user's hand is resting on touchpad 808 * in proximity because user's hand is resting on touchpad
811 * and sending unwanted events. User expects tablet buttons 809 * and sending unwanted events. User expects tablet buttons
812 * to continue working though. 810 * to continue working though.
813 */ 811 */
812 touch = touch && !wacom->shared->stylus_in_proximity;
813
814 input_mt_slot(input, i);
815 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
814 if (touch) { 816 if (touch) {
815 int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; 817 int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
816 int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; 818 int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
817 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { 819 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
818 x <<= 5; 820 x <<= 5;
819 y <<= 5; 821 y <<= 5;
820 } 822 }
821 input_report_abs(input, ABS_MT_PRESSURE, p);
822 input_report_abs(input, ABS_MT_POSITION_X, x); 823 input_report_abs(input, ABS_MT_POSITION_X, x);
823 input_report_abs(input, ABS_MT_POSITION_Y, y); 824 input_report_abs(input, ABS_MT_POSITION_Y, y);
824 } 825 }
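
The reshuffled loop above follows the multi-touch protocol type B sequence: select a slot, report whether a contact occupies it (after masking touches while the stylus is in proximity), and only then emit coordinates for active contacts. A bare-bones sketch of that reporting order (not the driver's code; two fixed slots assumed):

        #include <linux/input.h>
        #include <linux/input/mt.h>

        static void example_report_two_contacts(struct input_dev *input,
                                                const bool touch[2],
                                                const int x[2], const int y[2])
        {
                int i;

                for (i = 0; i < 2; i++) {
                        input_mt_slot(input, i);
                        input_mt_report_slot_state(input, MT_TOOL_FINGER, touch[i]);
                        if (touch[i]) {
                                input_report_abs(input, ABS_MT_POSITION_X, x[i]);
                                input_report_abs(input, ABS_MT_POSITION_Y, y[i]);
                        }
                }
                input_sync(input);
        }
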
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1056 features->x_fuzz, 0); 1057 features->x_fuzz, 0);
1057 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 1058 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
1058 features->y_fuzz, 0); 1059 features->y_fuzz, 0);
1059 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
1060 features->pressure_fuzz, 0);
1061 1060
1062 if (features->device_type == BTN_TOOL_PEN) { 1061 if (features->device_type == BTN_TOOL_PEN) {
1062 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
1063 features->pressure_fuzz, 0);
1064
1063 /* penabled devices have fixed resolution for each model */ 1065 /* penabled devices have fixed resolution for each model */
1064 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1066 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1065 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1067 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1098 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); 1100 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
1099 __set_bit(BTN_STYLUS, input_dev->keybit); 1101 __set_bit(BTN_STYLUS, input_dev->keybit);
1100 __set_bit(BTN_STYLUS2, input_dev->keybit); 1102 __set_bit(BTN_STYLUS2, input_dev->keybit);
1103
1104 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1101 break; 1105 break;
1102 1106
1103 case WACOM_21UX2: 1107 case WACOM_21UX2:
@@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1126 } 1130 }
1127 1131
1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1132 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1133
1134 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1135
1129 wacom_setup_cintiq(wacom_wac); 1136 wacom_setup_cintiq(wacom_wac);
1130 break; 1137 break;
1131 1138
@@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1150 /* fall through */ 1157 /* fall through */
1151 1158
1152 case INTUOS: 1159 case INTUOS:
1160 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1161
1153 wacom_setup_intuos(wacom_wac); 1162 wacom_setup_intuos(wacom_wac);
1154 break; 1163 break;
1155 1164
@@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1165 1174
1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1175 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1167 wacom_setup_intuos(wacom_wac); 1176 wacom_setup_intuos(wacom_wac);
1177
1178 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1168 break; 1179 break;
1169 1180
1170 case TABLETPC2FG: 1181 case TABLETPC2FG:
@@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1183 case TABLETPC: 1194 case TABLETPC:
1184 __clear_bit(ABS_MISC, input_dev->absbit); 1195 __clear_bit(ABS_MISC, input_dev->absbit);
1185 1196
1197 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1198
1186 if (features->device_type != BTN_TOOL_PEN) 1199 if (features->device_type != BTN_TOOL_PEN)
1187 break; /* no need to process stylus stuff */ 1200 break; /* no need to process stylus stuff */
1188 1201
1189 /* fall through */ 1202 /* fall through */
1190 1203
1191 case PL: 1204 case PL:
1192 case PTU:
1193 case DTU: 1205 case DTU:
1194 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1206 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
1207 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1195 __set_bit(BTN_STYLUS, input_dev->keybit); 1208 __set_bit(BTN_STYLUS, input_dev->keybit);
1196 __set_bit(BTN_STYLUS2, input_dev->keybit); 1209 __set_bit(BTN_STYLUS2, input_dev->keybit);
1210
1211 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1212 break;
1213
1214 case PTU:
1215 __set_bit(BTN_STYLUS2, input_dev->keybit);
1197 /* fall through */ 1216 /* fall through */
1198 1217
1199 case PENPARTNER: 1218 case PENPARTNER:
1219 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
1200 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1220 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1221 __set_bit(BTN_STYLUS, input_dev->keybit);
1222
1223 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1201 break; 1224 break;
1202 1225
1203 case BAMBOO_PT: 1226 case BAMBOO_PT:
1204 __clear_bit(ABS_MISC, input_dev->absbit); 1227 __clear_bit(ABS_MISC, input_dev->absbit);
1205 1228
1229 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1230
1206 if (features->device_type == BTN_TOOL_DOUBLETAP) { 1231 if (features->device_type == BTN_TOOL_DOUBLETAP) {
1207 __set_bit(BTN_LEFT, input_dev->keybit); 1232 __set_bit(BTN_LEFT, input_dev->keybit);
1208 __set_bit(BTN_FORWARD, input_dev->keybit); 1233 __set_bit(BTN_FORWARD, input_dev->keybit);
@@ -1460,6 +1485,9 @@ static const struct wacom_features wacom_features_0xD3 =
1460static const struct wacom_features wacom_features_0xD4 = 1485static const struct wacom_features wacom_features_0xD4 =
1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1486 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1487 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1488static const struct wacom_features wacom_features_0xD5 =
1489 { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
1490 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD6 = 1491static const struct wacom_features wacom_features_0xD6 =
1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1492 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1465 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1493 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1564,6 +1592,7 @@ const struct usb_device_id wacom_ids[] = {
1564 { USB_DEVICE_WACOM(0xD2) }, 1592 { USB_DEVICE_WACOM(0xD2) },
1565 { USB_DEVICE_WACOM(0xD3) }, 1593 { USB_DEVICE_WACOM(0xD3) },
1566 { USB_DEVICE_WACOM(0xD4) }, 1594 { USB_DEVICE_WACOM(0xD4) },
1595 { USB_DEVICE_WACOM(0xD5) },
1567 { USB_DEVICE_WACOM(0xD6) }, 1596 { USB_DEVICE_WACOM(0xD6) },
1568 { USB_DEVICE_WACOM(0xD7) }, 1597 { USB_DEVICE_WACOM(0xD7) },
1569 { USB_DEVICE_WACOM(0xD8) }, 1598 { USB_DEVICE_WACOM(0xD8) },
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index ae00604a6a81..f5d66859f232 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -244,6 +244,7 @@ struct mxt_finger {
244 int x; 244 int x;
245 int y; 245 int y;
246 int area; 246 int area;
247 int pressure;
247}; 248};
248 249
249/* Each client has this additional data */ 250/* Each client has this additional data */
@@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
536 finger[id].x); 537 finger[id].x);
537 input_report_abs(input_dev, ABS_MT_POSITION_Y, 538 input_report_abs(input_dev, ABS_MT_POSITION_Y,
538 finger[id].y); 539 finger[id].y);
540 input_report_abs(input_dev, ABS_MT_PRESSURE,
541 finger[id].pressure);
539 } else { 542 } else {
540 finger[id].status = 0; 543 finger[id].status = 0;
541 } 544 }
@@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
546 if (status != MXT_RELEASE) { 549 if (status != MXT_RELEASE) {
547 input_report_abs(input_dev, ABS_X, finger[single_id].x); 550 input_report_abs(input_dev, ABS_X, finger[single_id].x);
548 input_report_abs(input_dev, ABS_Y, finger[single_id].y); 551 input_report_abs(input_dev, ABS_Y, finger[single_id].y);
552 input_report_abs(input_dev,
553 ABS_PRESSURE, finger[single_id].pressure);
549 } 554 }
550 555
551 input_sync(input_dev); 556 input_sync(input_dev);
@@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
560 int x; 565 int x;
561 int y; 566 int y;
562 int area; 567 int area;
568 int pressure;
563 569
564 /* Check the touch is present on the screen */ 570 /* Check the touch is present on the screen */
565 if (!(status & MXT_DETECT)) { 571 if (!(status & MXT_DETECT)) {
@@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
584 y = y >> 2; 590 y = y >> 2;
585 591
586 area = message->message[4]; 592 area = message->message[4];
593 pressure = message->message[5];
587 594
588 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, 595 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
589 status & MXT_MOVE ? "moved" : "pressed", 596 status & MXT_MOVE ? "moved" : "pressed",
@@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
594 finger[id].x = x; 601 finger[id].x = x;
595 finger[id].y = y; 602 finger[id].y = y;
596 finger[id].area = area; 603 finger[id].area = area;
604 finger[id].pressure = pressure;
597 605
598 mxt_input_report(data, id); 606 mxt_input_report(data, id);
599} 607}
@@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1116 0, data->max_x, 0, 0); 1124 0, data->max_x, 0, 0);
1117 input_set_abs_params(input_dev, ABS_Y, 1125 input_set_abs_params(input_dev, ABS_Y,
1118 0, data->max_y, 0, 0); 1126 0, data->max_y, 0, 0);
1127 input_set_abs_params(input_dev, ABS_PRESSURE,
1128 0, 255, 0, 0);
1119 1129
1120 /* For multi touch */ 1130 /* For multi touch */
1121 input_mt_init_slots(input_dev, MXT_MAX_FINGER); 1131 input_mt_init_slots(input_dev, MXT_MAX_FINGER);
@@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1125 0, data->max_x, 0, 0); 1135 0, data->max_x, 0, 0);
1126 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 1136 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1127 0, data->max_y, 0, 0); 1137 0, data->max_y, 0, 0);
1138 input_set_abs_params(input_dev, ABS_MT_PRESSURE,
1139 0, 255, 0, 0);
1128 1140
1129 input_set_drvdata(input_dev, data); 1141 input_set_drvdata(input_dev, data);
1130 i2c_set_clientdata(client, data); 1142 i2c_set_clientdata(client, data);
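
The additions give userspace a pressure axis on both the single-touch and multi-touch paths: the 0..255 range is advertised once at probe time and the per-contact value (byte 5 of the touch message) is reported alongside the coordinates. A minimal sketch of the two calls involved (function names illustrative):

        #include <linux/input.h>
        #include <linux/input/mt.h>

        static void example_advertise_pressure(struct input_dev *dev)
        {
                /* 0..255 matches the one-byte value carried in the touch message */
                input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
                input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
        }

        static void example_report_pressure(struct input_dev *dev, int pressure)
        {
                input_report_abs(dev, ABS_MT_PRESSURE, pressure);  /* per contact */
                input_report_abs(dev, ABS_PRESSURE, pressure);     /* legacy axis */
        }
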
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4f2713d92791..4627fe55b401 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -9,7 +9,8 @@
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License. 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
13 */ 14 */
14 15
15/* 16/*
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 089b0a0f3d8c..0e8f63e5b36f 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/errno.h> 19#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index c14412ef4648..9941d39df43d 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); 384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
385 385
386 __set_bit(INPUT_PROP_DIRECT, dev->propbit);
387
386 /* penabled? */ 388 /* penabled? */
387 error = w8001_command(w8001, W8001_CMD_QUERY, true); 389 error = w8001_command(w8001, W8001_CMD_QUERY, true);
388 if (!error) { 390 if (!error) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462..0e4227f457af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
605 * Writes the command to the IOMMUs command buffer and informs the 605 * Writes the command to the IOMMUs command buffer and informs the
606 * hardware about the new command. 606 * hardware about the new command.
607 */ 607 */
608static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 608static int iommu_queue_command_sync(struct amd_iommu *iommu,
609 struct iommu_cmd *cmd,
610 bool sync)
609{ 611{
610 u32 left, tail, head, next_tail; 612 u32 left, tail, head, next_tail;
611 unsigned long flags; 613 unsigned long flags;
@@ -639,13 +641,18 @@ again:
639 copy_cmd_to_buffer(iommu, cmd, tail); 641 copy_cmd_to_buffer(iommu, cmd, tail);
640 642
641 /* We need to sync now to make sure all commands are processed */ 643 /* We need to sync now to make sure all commands are processed */
642 iommu->need_sync = true; 644 iommu->need_sync = sync;
643 645
644 spin_unlock_irqrestore(&iommu->lock, flags); 646 spin_unlock_irqrestore(&iommu->lock, flags);
645 647
646 return 0; 648 return 0;
647} 649}
648 650
651static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
652{
653 return iommu_queue_command_sync(iommu, cmd, true);
654}
655
649/* 656/*
650 * This function queues a completion wait command into the command 657 * This function queues a completion wait command into the command
651 * buffer of an IOMMU 658 * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
661 668
662 build_completion_wait(&cmd, (u64)&sem); 669 build_completion_wait(&cmd, (u64)&sem);
663 670
664 ret = iommu_queue_command(iommu, &cmd); 671 ret = iommu_queue_command_sync(iommu, &cmd, false);
665 if (ret) 672 if (ret)
666 return ret; 673 return ret;
667 674
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
840static void domain_flush_devices(struct protection_domain *domain) 847static void domain_flush_devices(struct protection_domain *domain)
841{ 848{
842 struct iommu_dev_data *dev_data; 849 struct iommu_dev_data *dev_data;
843 unsigned long flags;
844
845 spin_lock_irqsave(&domain->lock, flags);
846 850
847 list_for_each_entry(dev_data, &domain->dev_list, list) 851 list_for_each_entry(dev_data, &domain->dev_list, list)
848 device_flush_dte(dev_data); 852 device_flush_dte(dev_data);
849
850 spin_unlock_irqrestore(&domain->lock, flags);
851} 853}
852 854
853/**************************************************************************** 855/****************************************************************************
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..6dcc7e2d54de 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
1388 return ret; 1388 return ret;
1389 } 1389 }
1390 1390
1391 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); 1391 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1392 if (ret) 1392 if (ret)
1393 printk(KERN_ERR "IOMMU: can't request irq\n"); 1393 printk(KERN_ERR "IOMMU: can't request irq\n");
1394 return ret; 1394 return ret;
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index b9826032450b..8c00937bf7e7 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/module.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 3ebe3824662d..ea2185531f82 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -662,6 +662,11 @@ failed_unregister_led1_R:
662static void bd2802_unregister_led_classdev(struct bd2802_led *led) 662static void bd2802_unregister_led_classdev(struct bd2802_led *led)
663{ 663{
664 cancel_work_sync(&led->work); 664 cancel_work_sync(&led->work);
665 led_classdev_unregister(&led->cdev_led2b);
666 led_classdev_unregister(&led->cdev_led2g);
667 led_classdev_unregister(&led->cdev_led2r);
668 led_classdev_unregister(&led->cdev_led1b);
669 led_classdev_unregister(&led->cdev_led1g);
665 led_classdev_unregister(&led->cdev_led1r); 670 led_classdev_unregister(&led->cdev_led1r);
666} 671}
667 672
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e4ce1fd46338..bcfbd3a60eab 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index d87c9d02f786..328c64c0841c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
41 41
42 if (count == size) { 42 if (count == size) {
43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
44 led_cdev->blink_delay_on = state;
44 ret = count; 45 ret = count;
45 } 46 }
46 47
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
69 70
70 if (count == size) { 71 if (count == size) {
71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); 72 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
73 led_cdev->blink_delay_off = state;
72 ret = count; 74 ret = count;
73 } 75 }
74 76
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 0ce29b61605a..2f2da05b2ce9 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
10 10
11struct linear_private_data 11struct linear_private_data
12{ 12{
13 struct rcu_head rcu;
13 sector_t array_sectors; 14 sector_t array_sectors;
14 dev_info_t disks[0]; 15 dev_info_t disks[0];
15 struct rcu_head rcu;
16}; 16};
17 17
18 18
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5404b2295820 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
848 bio->bi_end_io = super_written; 848 bio->bi_end_io = super_written;
849 849
850 atomic_inc(&mddev->pending_writes); 850 atomic_inc(&mddev->pending_writes);
851 submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); 851 submit_bio(WRITE_FLUSH_FUA, bio);
852} 852}
853 853
854void md_super_wait(mddev_t *mddev) 854void md_super_wait(mddev_t *mddev)
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1138 ret = 0; 1138 ret = 0;
1139 } 1139 }
1140 rdev->sectors = rdev->sb_start; 1140 rdev->sectors = rdev->sb_start;
1141 /* Limit to 4TB as metadata cannot record more than that */
1142 if (rdev->sectors >= (2ULL << 32))
1143 rdev->sectors = (2ULL << 32) - 2;
1141 1144
1142 if (rdev->sectors < sb->size * 2 && sb->level > 1) 1145 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1143 /* "this cannot possibly happen" ... */ 1146 /* "this cannot possibly happen" ... */
1144 ret = -EINVAL; 1147 ret = -EINVAL;
1145 1148
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1173 mddev->clevel[0] = 0; 1176 mddev->clevel[0] = 0;
1174 mddev->layout = sb->layout; 1177 mddev->layout = sb->layout;
1175 mddev->raid_disks = sb->raid_disks; 1178 mddev->raid_disks = sb->raid_disks;
1176 mddev->dev_sectors = sb->size * 2; 1179 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1177 mddev->events = ev1; 1180 mddev->events = ev1;
1178 mddev->bitmap_info.offset = 0; 1181 mddev->bitmap_info.offset = 0;
1179 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1182 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1415 rdev->sb_start = calc_dev_sboffset(rdev); 1418 rdev->sb_start = calc_dev_sboffset(rdev);
1416 if (!num_sectors || num_sectors > rdev->sb_start) 1419 if (!num_sectors || num_sectors > rdev->sb_start)
1417 num_sectors = rdev->sb_start; 1420 num_sectors = rdev->sb_start;
1421 /* Limit to 4TB as metadata cannot record more than that.
1422 * 4TB == 2^32 KB, or 2*2^32 sectors.
1423 */
1424 if (num_sectors >= (2ULL << 32))
1425 num_sectors = (2ULL << 32) - 2;
1418 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1426 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1419 rdev->sb_page); 1427 rdev->sb_page);
1420 md_super_wait(rdev->mddev); 1428 md_super_wait(rdev->mddev);
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1738 sb->level = cpu_to_le32(mddev->level); 1746 sb->level = cpu_to_le32(mddev->level);
1739 sb->layout = cpu_to_le32(mddev->layout); 1747 sb->layout = cpu_to_le32(mddev->layout);
1740 1748
1749 if (test_bit(WriteMostly, &rdev->flags))
1750 sb->devflags |= WriteMostly1;
1751 else
1752 sb->devflags &= ~WriteMostly1;
1753
1741 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1754 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1742 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1755 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1743 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1756 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2561 int err = -EINVAL; 2574 int err = -EINVAL;
2562 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2575 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2563 md_error(rdev->mddev, rdev); 2576 md_error(rdev->mddev, rdev);
2564 err = 0; 2577 if (test_bit(Faulty, &rdev->flags))
2578 err = 0;
2579 else
2580 err = -EBUSY;
2565 } else if (cmd_match(buf, "remove")) { 2581 } else if (cmd_match(buf, "remove")) {
2566 if (rdev->raid_disk >= 0) 2582 if (rdev->raid_disk >= 0)
2567 err = -EBUSY; 2583 err = -EBUSY;
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2584 err = 0; 2600 err = 0;
2585 } else if (cmd_match(buf, "-blocked")) { 2601 } else if (cmd_match(buf, "-blocked")) {
2586 if (!test_bit(Faulty, &rdev->flags) && 2602 if (!test_bit(Faulty, &rdev->flags) &&
2587 test_bit(BlockedBadBlocks, &rdev->flags)) { 2603 rdev->badblocks.unacked_exist) {
2588 /* metadata handler doesn't understand badblocks, 2604 /* metadata handler doesn't understand badblocks,
2589 * so we need to fail the device 2605 * so we need to fail the device
2590 */ 2606 */
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5983 return -ENODEV; 5999 return -ENODEV;
5984 6000
5985 md_error(mddev, rdev); 6001 md_error(mddev, rdev);
6002 if (!test_bit(Faulty, &rdev->flags))
6003 return -EBUSY;
5986 return 0; 6004 return 0;
5987} 6005}
5988 6006
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..f4622dd8fc59 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
1099 bio_list_add(&conf->pending_bio_list, mbio); 1099 bio_list_add(&conf->pending_bio_list, mbio);
1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1100 spin_unlock_irqrestore(&conf->device_lock, flags);
1101 } 1101 }
1102 r1_bio_write_done(r1_bio); 1102 /* Mustn't call r1_bio_write_done before this next test,
1103 1103 * as it could result in the bio being freed.
1104 /* In case raid1d snuck in to freeze_array */ 1104 */
1105 wake_up(&conf->wait_barrier);
1106
1107 if (sectors_handled < (bio->bi_size >> 9)) { 1105 if (sectors_handled < (bio->bi_size >> 9)) {
1106 r1_bio_write_done(r1_bio);
1108 /* We need another r1_bio. It has already been counted 1107 /* We need another r1_bio. It has already been counted
1109 * in bio->bi_phys_segments 1108 * in bio->bi_phys_segments
1110 */ 1109 */
@@ -1117,6 +1116,11 @@ read_again:
1117 goto retry_write; 1116 goto retry_write;
1118 } 1117 }
1119 1118
1119 r1_bio_write_done(r1_bio);
1120
1121 /* In case raid1d snuck in to freeze_array */
1122 wake_up(&conf->wait_barrier);
1123
1120 if (do_sync || !bitmap || !plugged) 1124 if (do_sync || !bitmap || !plugged)
1121 md_wakeup_thread(mddev->thread); 1125 md_wakeup_thread(mddev->thread);
1122 1126
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..d7a8468ddeab 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
337 md_write_end(r10_bio->mddev); 337 md_write_end(r10_bio->mddev);
338} 338}
339 339
340static void one_write_done(r10bio_t *r10_bio)
341{
342 if (atomic_dec_and_test(&r10_bio->remaining)) {
343 if (test_bit(R10BIO_WriteError, &r10_bio->state))
344 reschedule_retry(r10_bio);
345 else {
346 close_write(r10_bio);
347 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
348 reschedule_retry(r10_bio);
349 else
350 raid_end_bio_io(r10_bio);
351 }
352 }
353}
354
340static void raid10_end_write_request(struct bio *bio, int error) 355static void raid10_end_write_request(struct bio *bio, int error)
341{ 356{
342 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
387 * Let's see if all mirrored write operations have finished 402 * Let's see if all mirrored write operations have finished
388 * already. 403 * already.
389 */ 404 */
390 if (atomic_dec_and_test(&r10_bio->remaining)) { 405 one_write_done(r10_bio);
391 if (test_bit(R10BIO_WriteError, &r10_bio->state))
392 reschedule_retry(r10_bio);
393 else {
394 close_write(r10_bio);
395 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
396 reschedule_retry(r10_bio);
397 else
398 raid_end_bio_io(r10_bio);
399 }
400 }
401 if (dec_rdev) 406 if (dec_rdev)
402 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 407 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
403} 408}
@@ -1127,20 +1132,12 @@ retry_write:
1127 spin_unlock_irqrestore(&conf->device_lock, flags); 1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1128 } 1133 }
1129 1134
1130 if (atomic_dec_and_test(&r10_bio->remaining)) { 1135 /* Don't remove the bias on 'remaining' (one_write_done) until
1131 /* This matches the end of raid10_end_write_request() */ 1136 * after checking if we need to go around again.
1132 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 1137 */
1133 r10_bio->sectors,
1134 !test_bit(R10BIO_Degraded, &r10_bio->state),
1135 0);
1136 md_write_end(mddev);
1137 raid_end_bio_io(r10_bio);
1138 }
1139
1140 /* In case raid10d snuck in to freeze_array */
1141 wake_up(&conf->wait_barrier);
1142 1138
1143 if (sectors_handled < (bio->bi_size >> 9)) { 1139 if (sectors_handled < (bio->bi_size >> 9)) {
1140 one_write_done(r10_bio);
1144 /* We need another r10_bio. It has already been counted 1141 /* We need another r10_bio. It has already been counted
1145 * in bio->bi_phys_segments. 1142 * in bio->bi_phys_segments.
1146 */ 1143 */
@@ -1154,6 +1151,10 @@ retry_write:
1154 r10_bio->state = 0; 1151 r10_bio->state = 0;
1155 goto retry_write; 1152 goto retry_write;
1156 } 1153 }
1154 one_write_done(r10_bio);
1155
1156 /* In case raid10d snuck in to freeze_array */
1157 wake_up(&conf->wait_barrier);
1157 1158
1158 if (do_sync || !mddev->bitmap || !plugged) 1159 if (do_sync || !mddev->bitmap || !plugged)
1159 md_wakeup_thread(mddev->thread); 1160 md_wakeup_thread(mddev->thread);
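The raid1 and raid10 hunks apply the same rule: the final put on the r1_bio/r10_bio (r1_bio_write_done(), or the new one_write_done() helper) may free the structure, so it is deferred until after the sectors_handled test that still reads it, and only then is the wait_barrier waker run. A self-contained illustration of that ordering rule on a toy refcounted request (all names here are invented for the demo):

    #include <stdio.h>
    #include <stdlib.h>

    struct req {                            /* stand-in for an r1_bio/r10_bio */
            int refs;
            int sectors_handled;
            int total_sectors;
    };

    static void put_req(struct req *r)
    {
            if (--r->refs == 0)
                    free(r);                /* the last put frees the object */
    }

    static void finish_write(struct req *r)
    {
            if (r->sectors_handled < r->total_sectors) {
                    /* still reading r here, so the reference must still be held */
                    printf("handled %d of %d sectors, would submit the rest\n",
                           r->sectors_handled, r->total_sectors);
            }
            put_req(r);                     /* only now may the object go away */
    }

    int main(void)
    {
            struct req *r = malloc(sizeof(*r));

            if (!r)
                    return 1;
            r->refs = 1;
            r->sectors_handled = 8;
            r->total_sectors = 16;
            finish_write(r);
            return 0;
    }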
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..43709fa6b6df 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
3336 3336
3337finish: 3337finish:
3338 /* wait for this device to become unblocked */ 3338 /* wait for this device to become unblocked */
3339 if (unlikely(s.blocked_rdev)) 3339 if (conf->mddev->external && unlikely(s.blocked_rdev))
3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341 3341
3342 if (s.handle_bad_blocks) 3342 if (s.handle_bad_blocks)
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3db89e3cb0bb..536c16c943bd 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
224static int vp7045_usb_probe(struct usb_interface *intf, 224static int vp7045_usb_probe(struct usb_interface *intf,
225 const struct usb_device_id *id) 225 const struct usb_device_id *id)
226{ 226{
227 struct dvb_usb_device *d; 227 return dvb_usb_device_init(intf, &vp7045_properties,
228 int ret = dvb_usb_device_init(intf, &vp7045_properties, 228 THIS_MODULE, NULL, adapter_nr);
229 THIS_MODULE, &d, adapter_nr);
230 if (ret)
231 return ret;
232
233 d->priv = kmalloc(20, GFP_KERNEL);
234 if (!d->priv) {
235 dvb_usb_device_exit(intf);
236 return -ENOMEM;
237 }
238
239 return ret;
240}
241
242static void vp7045_usb_disconnect(struct usb_interface *intf)
243{
244 struct dvb_usb_device *d = usb_get_intfdata(intf);
245 kfree(d->priv);
246 dvb_usb_device_exit(intf);
247} 229}
248 230
249static struct usb_device_id vp7045_usb_table [] = { 231static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
258static struct dvb_usb_device_properties vp7045_properties = { 240static struct dvb_usb_device_properties vp7045_properties = {
259 .usb_ctrl = CYPRESS_FX2, 241 .usb_ctrl = CYPRESS_FX2,
260 .firmware = "dvb-usb-vp7045-01.fw", 242 .firmware = "dvb-usb-vp7045-01.fw",
261 .size_of_priv = sizeof(u8 *), 243 .size_of_priv = 20,
262 244
263 .num_adapters = 1, 245 .num_adapters = 1,
264 .adapter = { 246 .adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
305static struct usb_driver vp7045_usb_driver = { 287static struct usb_driver vp7045_usb_driver = {
306 .name = "dvb_usb_vp7045", 288 .name = "dvb_usb_vp7045",
307 .probe = vp7045_usb_probe, 289 .probe = vp7045_usb_probe,
308 .disconnect = vp7045_usb_disconnect, 290 .disconnect = dvb_usb_device_exit,
309 .id_table = vp7045_usb_table, 291 .id_table = vp7045_usb_table,
310}; 292};
311 293
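The vp7045 conversion stops open-coding private-data management: rather than kmalloc()ing d->priv in probe and freeing it in a driver-specific disconnect, it declares .size_of_priv = 20 and lets the dvb-usb core allocate and free that buffer as part of dvb_usb_device_init() and dvb_usb_device_exit(). The resulting shape, sketched with the names used in this driver:

    static struct dvb_usb_device_properties vp7045_properties = {
            .usb_ctrl     = CYPRESS_FX2,
            .firmware     = "dvb-usb-vp7045-01.fw",
            .size_of_priv = 20,             /* core allocates d->priv of this size */
            /* ... adapters, tuner and remote setup ... */
    };

    static int vp7045_usb_probe(struct usb_interface *intf,
                                const struct usb_device_id *id)
    {
            /* no private allocation here any more */
            return dvb_usb_device_init(intf, &vp7045_properties,
                                       THIS_MODULE, NULL, adapter_nr);
    }

    static struct usb_driver vp7045_usb_driver = {
            .name       = "dvb_usb_vp7045",
            .probe      = vp7045_usb_probe,
            .disconnect = dvb_usb_device_exit,      /* core frees d->priv */
            .id_table   = vp7045_usb_table,
    };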
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index eae05b500476..144f3f55d765 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
618static void nvt_process_rx_ir_data(struct nvt_dev *nvt) 618static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
619{ 619{
620 DEFINE_IR_RAW_EVENT(rawir); 620 DEFINE_IR_RAW_EVENT(rawir);
621 unsigned int count;
622 u32 carrier; 621 u32 carrier;
623 u8 sample; 622 u8 sample;
624 int i; 623 int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
631 if (nvt->carrier_detect_enabled) 630 if (nvt->carrier_detect_enabled)
632 carrier = nvt_rx_carrier_detect(nvt); 631 carrier = nvt_rx_carrier_detect(nvt);
633 632
634 count = nvt->pkts; 633 nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
635 nvt_dbg_verbose("Processing buffer of len %d", count);
636 634
637 init_ir_raw_event(&rawir); 635 init_ir_raw_event(&rawir);
638 636
639 for (i = 0; i < count; i++) { 637 for (i = 0; i < nvt->pkts; i++) {
640 nvt->pkts--;
641 sample = nvt->buf[i]; 638 sample = nvt->buf[i];
642 639
643 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 640 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
644 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) 641 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
645 * SAMPLE_PERIOD); 642 * SAMPLE_PERIOD);
646 643
647 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 644 nvt_dbg("Storing %s with duration %d",
648 if (nvt->rawir.pulse == rawir.pulse) 645 rawir.pulse ? "pulse" : "space", rawir.duration);
649 nvt->rawir.duration += rawir.duration;
650 else {
651 nvt->rawir.duration = rawir.duration;
652 nvt->rawir.pulse = rawir.pulse;
653 }
654 continue;
655 }
656
657 rawir.duration += nvt->rawir.duration;
658 646
659 init_ir_raw_event(&nvt->rawir); 647 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
660 nvt->rawir.duration = 0;
661 nvt->rawir.pulse = rawir.pulse;
662
663 if (sample == BUF_PULSE_BIT)
664 rawir.pulse = false;
665
666 if (rawir.duration) {
667 nvt_dbg("Storing %s with duration %d",
668 rawir.pulse ? "pulse" : "space",
669 rawir.duration);
670
671 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
672 }
673 648
674 /* 649 /*
675 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE 650 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
676 * indicates end of IR signal, but new data incoming. In both 651 * indicates end of IR signal, but new data incoming. In both
677 * cases, it means we're ready to call ir_raw_event_handle 652 * cases, it means we're ready to call ir_raw_event_handle
678 */ 653 */
679 if ((sample == BUF_PULSE_BIT) && nvt->pkts) { 654 if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
680 nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); 655 nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
681 ir_raw_event_handle(nvt->rdev); 656 ir_raw_event_handle(nvt->rdev);
682 } 657 }
683 } 658 }
684 659
660 nvt->pkts = 0;
661
685 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); 662 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
686 ir_raw_event_handle(nvt->rdev); 663 ir_raw_event_handle(nvt->rdev);
687 664
688 if (nvt->pkts) {
689 nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
690 nvt->pkts = 0;
691 }
692
693 nvt_dbg_verbose("%s done", __func__); 665 nvt_dbg_verbose("%s done", __func__);
694} 666}
695 667
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1048 1020
1049 spin_lock_init(&nvt->nvt_lock); 1021 spin_lock_init(&nvt->nvt_lock);
1050 spin_lock_init(&nvt->tx.lock); 1022 spin_lock_init(&nvt->tx.lock);
1051 init_ir_raw_event(&nvt->rawir);
1052 1023
1053 ret = -EBUSY; 1024 ret = -EBUSY;
1054 /* now claim resources */ 1025 /* now claim resources */
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1241fc89a36c..0d5e0872a2ea 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -67,7 +67,6 @@ static int debug;
67struct nvt_dev { 67struct nvt_dev {
68 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
69 struct rc_dev *rdev; 69 struct rc_dev *rdev;
70 struct ir_raw_event rawir;
71 70
72 spinlock_t nvt_lock; 71 spinlock_t nvt_lock;
73 72
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 0800433b2092..18305c89083c 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
2858 case 0x60: 2858 case 0x60:
2859 PDEBUG(D_PROBE, "Sensor is a OV7660"); 2859 PDEBUG(D_PROBE, "Sensor is a OV7660");
2860 sd->sensor = SEN_OV7660; 2860 sd->sensor = SEN_OV7660;
2861 sd->invert_led = 0;
2862 break; 2861 break;
2863 default: 2862 default:
2864 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); 2863 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
3337 case BRIDGE_OV519: 3336 case BRIDGE_OV519:
3338 cam->cam_mode = ov519_vga_mode; 3337 cam->cam_mode = ov519_vga_mode;
3339 cam->nmodes = ARRAY_SIZE(ov519_vga_mode); 3338 cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
3340 sd->invert_led = !sd->invert_led;
3341 break; 3339 break;
3342 case BRIDGE_OVFX2: 3340 case BRIDGE_OVFX2:
3343 cam->cam_mode = ov519_vga_mode; 3341 cam->cam_mode = ov519_vga_mode;
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
5005/* -- module initialisation -- */ 5003/* -- module initialisation -- */
5006static const struct usb_device_id device_table[] = { 5004static const struct usb_device_id device_table[] = {
5007 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, 5005 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
5008 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, 5006 {USB_DEVICE(0x041e, 0x4052),
5009 {USB_DEVICE(0x041e, 0x405f),
5010 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5007 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5008 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
5011 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, 5009 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
5012 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, 5010 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
5013 {USB_DEVICE(0x041e, 0x4064), 5011 {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
5014 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5015 {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, 5012 {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
5016 {USB_DEVICE(0x041e, 0x4068), 5013 {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
5014 {USB_DEVICE(0x045e, 0x028c),
5017 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5015 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5018 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
5019 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, 5016 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
5020 {USB_DEVICE(0x054c, 0x0155), 5017 {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
5021 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5022 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, 5018 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
5023 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, 5019 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
5024 {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, 5020 {USB_DEVICE(0x05a9, 0x0519),
5025 {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, 5021 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5022 {USB_DEVICE(0x05a9, 0x0530),
5023 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5026 {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, 5024 {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
5027 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, 5025 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
5028 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, 5026 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 81b8a600783b..c477ad11f103 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
2386 reg_w1(gspca_dev, 0x01, 0x22); 2386 reg_w1(gspca_dev, 0x01, 0x22);
2387 msleep(100); 2387 msleep(100);
2388 reg01 = SCL_SEL_OD | S_PDN_INV; 2388 reg01 = SCL_SEL_OD | S_PDN_INV;
2389 reg17 &= MCK_SIZE_MASK; 2389 reg17 &= ~MCK_SIZE_MASK;
2390 reg17 |= 0x04; /* clock / 4 */ 2390 reg17 |= 0x04; /* clock / 4 */
2391 break; 2391 break;
2392 } 2392 }
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
2532 if (!mode) { /* if 640x480 */ 2532 if (!mode) { /* if 640x480 */
2533 reg17 &= ~MCK_SIZE_MASK; 2533 reg17 &= ~MCK_SIZE_MASK;
2534 reg17 |= 0x04; /* clock / 4 */ 2534 reg17 |= 0x04; /* clock / 4 */
2535 } else {
2536 reg01 &= ~SYS_SEL_48M; /* clk 24Mz */
2537 reg17 &= ~MCK_SIZE_MASK;
2538 reg17 |= 0x02; /* clock / 2 */
2535 } 2539 }
2536 break; 2540 break;
2537 case SENSOR_OV7630: 2541 case SENSOR_OV7630:
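The first sonixj hunk is a masking fix: reg17 &= MCK_SIZE_MASK keeps only the clock-divider field (discarding the register's other bits) instead of clearing that field before |= 0x04 selects clock/4. The usual clear-then-set idiom, shown with an illustrative mask value (the real MCK_SIZE_MASK lives in sonixj.c):

    #include <stdio.h>

    #define MCK_SIZE_MASK 0x07              /* illustrative field mask */

    int main(void)
    {
            unsigned char reg17 = 0xfb;     /* other bits set, divider field currently 0x03 */
            unsigned char wrong, right;

            wrong = reg17 & MCK_SIZE_MASK;  /* bug: wipes everything except the field */
            wrong |= 0x04;

            right = reg17 & ~MCK_SIZE_MASK; /* fix: clear only the divider field */
            right |= 0x04;                  /* then select clock / 4 */

            printf("wrong=0x%02x right=0x%02x\n", wrong, right);   /* 0x07 vs 0xfc */
            return 0;
    }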
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index e9a0e94b9995..8c70e64444e7 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
338 if (pdev->restore_factory) 338 if (pdev->restore_factory)
339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; 339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
340 340
341 if (!pdev->features & FEATURE_MOTOR_PANTILT) 341 if (!(pdev->features & FEATURE_MOTOR_PANTILT))
342 return hdl->error; 342 return hdl->error;
343 343
344 /* Motor pan / tilt / reset */ 344 /* Motor pan / tilt / reset */
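The pwc fix is pure operator precedence: ! binds tighter than &, so !pdev->features & FEATURE_MOTOR_PANTILT evaluates as (!features) & FLAG, which is always zero for any flag with bit 0 clear, so the early return for motor-less cameras never fired. A compilable illustration (the flag value is made up for the demo):

    #include <stdio.h>

    #define FEATURE_MOTOR_PANTILT 0x4       /* illustrative value */

    static int lacks_motor_buggy(unsigned int features)
    {
            return !features & FEATURE_MOTOR_PANTILT;   /* (!f) & 0x4: always 0 */
    }

    static int lacks_motor_fixed(unsigned int features)
    {
            return !(features & FEATURE_MOTOR_PANTILT); /* what was intended */
    }

    int main(void)
    {
            printf("buggy: %d %d\n", lacks_motor_buggy(0), lacks_motor_buggy(0x4)); /* 0 0 */
            printf("fixed: %d %d\n", lacks_motor_fixed(0), lacks_motor_fixed(0x4)); /* 1 0 */
            return 0;
    }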
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 85d3048c1d67..bb7f17f2a33c 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
1332 struct pci_bus *pbus = pci_find_bus(0, 0); 1332 struct pci_bus *pbus = pci_find_bus(0, 0);
1333 u8 cbyte; 1333 u8 cbyte;
1334 1334
1335 if (!pbus)
1336 return false;
1335 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, 1337 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
1336 VIACAM_SERIAL_CREG, &cbyte); 1338 VIACAM_SERIAL_CREG, &cbyte);
1337 if ((cbyte & VIACAM_SERIAL_BIT) == 0) 1339 if ((cbyte & VIACAM_SERIAL_BIT) == 0)
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5d1fca0277ef..f83103b8970d 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
135 max8997->dev = &i2c->dev; 135 max8997->dev = &i2c->dev;
136 max8997->i2c = i2c; 136 max8997->i2c = i2c;
137 max8997->type = id->driver_data; 137 max8997->type = id->driver_data;
138 max8997->irq = i2c->irq;
138 139
139 if (!pdata) 140 if (!pdata)
140 goto err; 141 goto err;
141 142
143 max8997->irq_base = pdata->irq_base;
144 max8997->ono = pdata->ono;
142 max8997->wakeup = pdata->wakeup; 145 max8997->wakeup = pdata->wakeup;
143 146
144 mutex_init(&max8997->iolock); 147 mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
152 155
153 pm_runtime_set_active(max8997->dev); 156 pm_runtime_set_active(max8997->dev);
154 157
158 max8997_irq_init(max8997);
159
155 mfd_add_devices(max8997->dev, -1, max8997_devs, 160 mfd_add_devices(max8997->dev, -1, max8997_devs,
156 ARRAY_SIZE(max8997_devs), 161 ARRAY_SIZE(max8997_devs),
157 NULL, 0); 162 NULL, 0);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29601e7d606d..86e14583a082 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
676 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF 677 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
677 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); 678 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
678 679
679 reg |= (1 << (i + 1));
680 } else 680 } else
681 continue; 681 continue;
682 682
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index 2bfad5c86cc7..a56be931551c 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
178 switch (tps65910_chip_id(tps65910)) { 178 switch (tps65910_chip_id(tps65910)) {
179 case TPS65910: 179 case TPS65910:
180 tps65910->irq_num = TPS65910_NUM_IRQ; 180 tps65910->irq_num = TPS65910_NUM_IRQ;
181 break;
181 case TPS65911: 182 case TPS65911:
182 tps65910->irq_num = TPS65911_NUM_IRQ; 183 tps65910->irq_num = TPS65911_NUM_IRQ;
184 break;
183 } 185 }
184 186
185 /* Register with genirq */ 187 /* Register with genirq */
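The tps65910 hunk adds the break statements a switch was missing: without them the TPS65910 case fell straight through, so irq_num always ended up holding the TPS65911 count. A small demonstration (the two counts are invented here; the real ones come from the driver headers):

    #include <stdio.h>

    #define TPS65910_NUM_IRQ 13             /* illustrative */
    #define TPS65911_NUM_IRQ 18             /* illustrative */

    enum chip { CHIP_TPS65910, CHIP_TPS65911 };

    static int irq_count(enum chip id, int with_break)
    {
            int num = 0;

            switch (id) {
            case CHIP_TPS65910:
                    num = TPS65910_NUM_IRQ;
                    if (with_break)
                            break;          /* the missing statement: otherwise fall through */
            case CHIP_TPS65911:
                    num = TPS65911_NUM_IRQ;
                    break;
            }
            return num;
    }

    int main(void)
    {
            printf("without break: %d, with break: %d\n",
                   irq_count(CHIP_TPS65910, 0), irq_count(CHIP_TPS65910, 1));
            return 0;                       /* prints "without break: 18, with break: 13" */
    }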
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index b5d598c3aa71..7cbf2aa9e64f 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
510 u8 ch_msb, ch_lsb; 510 u8 ch_msb, ch_lsb;
511 int ret; 511 int ret;
512 512
513 if (!req) 513 if (!req || !twl4030_madc)
514 return -EINVAL; 514 return -EINVAL;
515
515 mutex_lock(&twl4030_madc->lock); 516 mutex_lock(&twl4030_madc->lock);
516 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { 517 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
517 ret = -EINVAL; 518 ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
706 if (!madc) 707 if (!madc)
707 return -ENOMEM; 708 return -ENOMEM;
708 709
710 madc->dev = &pdev->dev;
711
709 /* 712 /*
710 * Phoenix provides 2 interrupt lines. The first one is connected to 713 * Phoenix provides 2 interrupt lines. The first one is connected to
711 * the OMAP. The other one can be connected to the other processor such 714 * the OMAP. The other one can be connected to the other processor such
diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c
index ebf99bef392f..d584f6b4d6e2 100644
--- a/drivers/mfd/wm8350-gpio.c
+++ b/drivers/mfd/wm8350-gpio.c
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
37 return ret; 37 return ret;
38} 38}
39 39
40static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 40static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
41{ 41{
42 if (db == WM8350_GPIO_DEBOUNCE_ON) 42 if (db == WM8350_GPIO_DEBOUNCE_ON)
43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
210 goto err; 210 goto err;
211 if (gpio_set_polarity(wm8350, gpio, pol)) 211 if (gpio_set_polarity(wm8350, gpio, pol))
212 goto err; 212 goto err;
213 if (gpio_set_debounce(wm8350, gpio, debounce)) 213 if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
214 goto err; 214 goto err;
215 if (gpio_set_dir(wm8350, gpio, dir)) 215 if (gpio_set_dir(wm8350, gpio, dir))
216 goto err; 216 goto err;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0a4d86c6c4a4..2d6423c2d193 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -146,6 +146,7 @@ config PHANTOM
146 146
147config INTEL_MID_PTI 147config INTEL_MID_PTI
148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" 148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
149 depends on PCI
149 default n 150 default n
150 help 151 help
151 The PTI (Parallel Trace Interface) driver directs 152 The PTI (Parallel Trace Interface) driver directs
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 54e3d05b63cc..35903154ca2e 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init);
164module_exit(ab8500_pwm_exit); 164module_exit(ab8500_pwm_exit);
165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); 165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); 166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
167MODULE_ALIAS("AB8500 PWM driver"); 167MODULE_ALIAS("platform:ab8500-pwm");
168MODULE_LICENSE("GPL v2"); 168MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index efec4139c3f6..68cd05b6d829 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
33static int __devinit cb710_pci_configure(struct pci_dev *pdev) 33static int __devinit cb710_pci_configure(struct pci_dev *pdev)
34{ 34{
35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); 35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
36 struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); 36 struct pci_dev *pdev0;
37 u32 val; 37 u32 val;
38 38
39 cb710_pci_update_config_reg(pdev, 0x48, 39 cb710_pci_update_config_reg(pdev, 0x48,
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev)
43 if (val & 0x80000000) 43 if (val & 0x80000000)
44 return 0; 44 return 0;
45 45
46 pdev0 = pci_get_slot(pdev->bus, devfn);
46 if (!pdev0) 47 if (!pdev0)
47 return -ENODEV; 48 return -ENODEV;
48 49
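pci_get_slot() returns its struct pci_dev with a reference held, which the caller must drop with pci_dev_put(). Taking that reference before the val & 0x80000000 early return meant it leaked every time the function bailed out early; the fix defers the lookup until it is known to be needed. A sketch of the resulting shape (register writes and most error handling trimmed):

    static int cb710_configure_sketch(struct pci_dev *pdev)
    {
            unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
            struct pci_dev *pdev0;
            u32 val;

            pci_read_config_dword(pdev, 0x48, &val);
            if (val & 0x80000000)
                    return 0;               /* nothing to do, no reference taken */

            pdev0 = pci_get_slot(pdev->bus, devfn);     /* takes a reference */
            if (!pdev0)
                    return -ENODEV;

            /* ... poke function 0's config space here ... */

            pci_dev_put(pdev0);             /* balance pci_get_slot() */
            return 0;
    }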
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 5325a7e70dcf..27dc0d21aafa 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client,
455 455
456fail2: 456fail2:
457 if (client->irq) 457 if (client->irq)
458 free_irq(client->irq, NULL); 458 free_irq(client->irq, usbsw);
459fail1: 459fail1:
460 i2c_set_clientdata(client, NULL); 460 i2c_set_clientdata(client, NULL);
461 kfree(usbsw); 461 kfree(usbsw);
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client)
466{ 466{
467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); 467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
468 if (client->irq) 468 if (client->irq)
469 free_irq(client->irq, NULL); 469 free_irq(client->irq, usbsw);
470 i2c_set_clientdata(client, NULL); 470 i2c_set_clientdata(client, NULL);
471 471
472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group); 472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 8653bd0b1a33..0b56e3f43573 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -33,6 +33,8 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/pti.h> 35#include <linux/pti.h>
36#include <linux/slab.h>
37#include <linux/uaccess.h>
36 38
37#define DRIVERNAME "pti" 39#define DRIVERNAME "pti"
38#define PCINAME "pciPTI" 40#define PCINAME "pciPTI"
@@ -163,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
163static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, 165static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
164 const char *thread_name) 166 const char *thread_name)
165{ 167{
168 /*
169 * Since we access the comm member in current's task_struct, we only
170 * need to be as large as what 'comm' in that structure is.
171 */
172 char comm[TASK_COMM_LEN];
166 struct pti_masterchannel mccontrol = {.master = CONTROL_ID, 173 struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
167 .channel = 0}; 174 .channel = 0};
168 const char *thread_name_p; 175 const char *thread_name_p;
@@ -170,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
170 u8 control_frame[CONTROL_FRAME_LEN]; 177 u8 control_frame[CONTROL_FRAME_LEN];
171 178
172 if (!thread_name) { 179 if (!thread_name) {
173 /*
174 * Since we access the comm member in current's task_struct,
175 * we only need to be as large as what 'comm' in that
176 * structure is.
177 */
178 char comm[TASK_COMM_LEN];
179
180 if (!in_interrupt()) 180 if (!in_interrupt())
181 get_task_comm(comm, current); 181 get_task_comm(comm, current);
182 else 182 else
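The pti change hoists the comm[TASK_COMM_LEN] buffer from the inner if (!thread_name) block up to function scope. The surrounding code points thread_name_p at comm and presumably keeps using it after the block closes, which made it a pointer into an automatic variable whose storage had already ended. A self-contained version of the same lifetime bug and its fix:

    #include <stdio.h>
    #include <string.h>

    void print_name_buggy(const char *name)
    {
            const char *p;

            if (!name) {
                    char fallback[16];      /* storage ends at the closing brace */
                    strcpy(fallback, "kworker/0:1");
                    p = fallback;
            } else
                    p = name;
            printf("%s\n", p);              /* dangling if name was NULL: undefined behaviour */
    }

    void print_name_fixed(const char *name)
    {
            char fallback[16];              /* function scope: valid until return */
            const char *p;

            if (!name) {
                    strcpy(fallback, "kworker/0:1");
                    p = fallback;
            } else
                    p = name;
            printf("%s\n", p);
    }

    int main(void)
    {
            print_name_fixed(NULL);         /* the buggy variant is left uncalled: it is UB */
            print_name_fixed("pti_thread");
            return 0;
    }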
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 54c91ffe4a91..ba168a7d54d4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -338,6 +338,12 @@ void st_int_recv(void *disc_data,
 338 /* Unknown packet? */ 338 /* Unknown packet? */
339 default: 339 default:
340 type = *ptr; 340 type = *ptr;
341 if (st_gdata->list[type] == NULL) {
342 pr_err("chip/interface misbehavior dropping"
343 " frame starting with 0x%02x", type);
344 goto done;
345
346 }
341 st_gdata->rx_skb = alloc_skb( 347 st_gdata->rx_skb = alloc_skb(
342 st_gdata->list[type]->max_frame_size, 348 st_gdata->list[type]->max_frame_size,
343 GFP_ATOMIC); 349 GFP_ATOMIC);
@@ -354,6 +360,7 @@ void st_int_recv(void *disc_data,
354 ptr++; 360 ptr++;
355 count--; 361 count--;
356 } 362 }
363done:
357 spin_unlock_irqrestore(&st_gdata->lock, flags); 364 spin_unlock_irqrestore(&st_gdata->lock, flags);
358 pr_debug("done %s", __func__); 365 pr_debug("done %s", __func__);
359 return; 366 return;
@@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty)
717 */ 724 */
718 spin_lock_irqsave(&st_gdata->lock, flags); 725 spin_lock_irqsave(&st_gdata->lock, flags);
719 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { 726 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
720 if (st_gdata->list[i] != NULL) 727 if (st_gdata->is_registered[i] == true)
721 pr_err("%d not un-registered", i); 728 pr_err("%d not un-registered", i);
722 st_gdata->list[i] = NULL; 729 st_gdata->list[i] = NULL;
730 st_gdata->is_registered[i] = false;
723 } 731 }
724 st_gdata->protos_registered = 0; 732 st_gdata->protos_registered = 0;
725 spin_unlock_irqrestore(&st_gdata->lock, flags); 733 spin_unlock_irqrestore(&st_gdata->lock, flags);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 38fd2f04c07e..3a3580566dfc 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata)
68 if (unlikely(skb->data[5] != 0)) { 68 if (unlikely(skb->data[5] != 0)) {
69 pr_err("no proper response during fw download"); 69 pr_err("no proper response during fw download");
70 pr_err("data6 %x", skb->data[5]); 70 pr_err("data6 %x", skb->data[5]);
71 kfree_skb(skb);
71 return; /* keep waiting for the proper response */ 72 return; /* keep waiting for the proper response */
72 } 73 }
 73 /* because of all the script being downloaded */ 74 /* because of all the script being downloaded */
@@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
210 pr_err(" waiting for ver info- timed out "); 211 pr_err(" waiting for ver info- timed out ");
211 return -ETIMEDOUT; 212 return -ETIMEDOUT;
212 } 213 }
214 INIT_COMPLETION(kim_gdata->kim_rcvd);
213 215
214 version = 216 version =
215 MAKEWORD(kim_gdata->resp_buffer[13], 217 MAKEWORD(kim_gdata->resp_buffer[13],
@@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
298 300
299 switch (((struct bts_action *)ptr)->type) { 301 switch (((struct bts_action *)ptr)->type) {
300 case ACTION_SEND_COMMAND: /* action send */ 302 case ACTION_SEND_COMMAND: /* action send */
303 pr_debug("S");
301 action_ptr = &(((struct bts_action *)ptr)->data[0]); 304 action_ptr = &(((struct bts_action *)ptr)->data[0]);
302 if (unlikely 305 if (unlikely
303 (((struct hci_command *)action_ptr)->opcode == 306 (((struct hci_command *)action_ptr)->opcode ==
@@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
335 release_firmware(kim_gdata->fw_entry); 338 release_firmware(kim_gdata->fw_entry);
336 return -ETIMEDOUT; 339 return -ETIMEDOUT;
337 } 340 }
341 /* reinit completion before sending for the
342 * relevant wait
343 */
344 INIT_COMPLETION(kim_gdata->kim_rcvd);
338 345
339 /* 346 /*
340 * Free space found in uart buffer, call st_int_write 347 * Free space found in uart buffer, call st_int_write
@@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
361 } 368 }
362 break; 369 break;
363 case ACTION_WAIT_EVENT: /* wait */ 370 case ACTION_WAIT_EVENT: /* wait */
371 pr_debug("W");
364 if (!wait_for_completion_timeout 372 if (!wait_for_completion_timeout
365 (&kim_gdata->kim_rcvd, 373 (&kim_gdata->kim_rcvd,
366 msecs_to_jiffies(CMD_RESP_TIME))) { 374 msecs_to_jiffies(CMD_RESP_TIME))) {
@@ -434,11 +442,17 @@ long st_kim_start(void *kim_data)
434{ 442{
435 long err = 0; 443 long err = 0;
436 long retry = POR_RETRY_COUNT; 444 long retry = POR_RETRY_COUNT;
445 struct ti_st_plat_data *pdata;
437 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 446 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
438 447
439 pr_info(" %s", __func__); 448 pr_info(" %s", __func__);
449 pdata = kim_gdata->kim_pdev->dev.platform_data;
440 450
441 do { 451 do {
452 /* platform specific enabling code here */
453 if (pdata->chip_enable)
454 pdata->chip_enable(kim_gdata);
455
442 /* Configure BT nShutdown to HIGH state */ 456 /* Configure BT nShutdown to HIGH state */
443 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 457 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
444 mdelay(5); /* FIXME: a proper toggle */ 458 mdelay(5); /* FIXME: a proper toggle */
@@ -460,6 +474,12 @@ long st_kim_start(void *kim_data)
460 pr_info("ldisc_install = 0"); 474 pr_info("ldisc_install = 0");
461 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 475 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
462 NULL, "install"); 476 NULL, "install");
477 /* the following wait is never going to be completed,
478 * since the ldisc was never installed, hence serving
479 * as a mdelay of LDISC_TIME msecs */
480 err = wait_for_completion_timeout
481 (&kim_gdata->ldisc_installed,
482 msecs_to_jiffies(LDISC_TIME));
463 err = -ETIMEDOUT; 483 err = -ETIMEDOUT;
464 continue; 484 continue;
465 } else { 485 } else {
@@ -472,6 +492,13 @@ long st_kim_start(void *kim_data)
472 pr_info("ldisc_install = 0"); 492 pr_info("ldisc_install = 0");
473 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 493 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
474 NULL, "install"); 494 NULL, "install");
495 /* this wait might be completed, though in the
496 * tty_close() since the ldisc is already
497 * installed */
498 err = wait_for_completion_timeout
499 (&kim_gdata->ldisc_installed,
500 msecs_to_jiffies(LDISC_TIME));
501 err = -EINVAL;
475 continue; 502 continue;
476 } else { /* on success don't retry */ 503 } else { /* on success don't retry */
477 break; 504 break;
@@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data)
489{ 516{
490 long err = 0; 517 long err = 0;
491 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 518 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
519 struct ti_st_plat_data *pdata =
520 kim_gdata->kim_pdev->dev.platform_data;
492 521
493 INIT_COMPLETION(kim_gdata->ldisc_installed); 522 INIT_COMPLETION(kim_gdata->ldisc_installed);
494 523
@@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data)
515 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 544 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
516 mdelay(1); 545 mdelay(1);
517 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 546 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
547
548 /* platform specific disable */
549 if (pdata->chip_disable)
550 pdata->chip_disable(kim_gdata);
518 return err; 551 return err;
519} 552}
520 553
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 3f2495138855..1ff460a8e9c7 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -22,6 +22,7 @@
22#define pr_fmt(fmt) "(stll) :" fmt 22#define pr_fmt(fmt) "(stll) :" fmt
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h>
25#include <linux/ti_wilink_st.h> 26#include <linux/ti_wilink_st.h>
26 27
27/**********************************************************************/ 28/**********************************************************************/
@@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data,
37 38
38static void ll_device_want_to_sleep(struct st_data_s *st_data) 39static void ll_device_want_to_sleep(struct st_data_s *st_data)
39{ 40{
41 struct kim_data_s *kim_data;
42 struct ti_st_plat_data *pdata;
43
40 pr_debug("%s", __func__); 44 pr_debug("%s", __func__);
41 /* sanity check */ 45 /* sanity check */
42 if (st_data->ll_state != ST_LL_AWAKE) 46 if (st_data->ll_state != ST_LL_AWAKE)
@@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
46 send_ll_cmd(st_data, LL_SLEEP_ACK); 50 send_ll_cmd(st_data, LL_SLEEP_ACK);
47 /* update state */ 51 /* update state */
48 st_data->ll_state = ST_LL_ASLEEP; 52 st_data->ll_state = ST_LL_ASLEEP;
53
54 /* communicate to platform about chip asleep */
55 kim_data = st_data->kim_data;
56 pdata = kim_data->kim_pdev->dev.platform_data;
57 if (pdata->chip_asleep)
58 pdata->chip_asleep(NULL);
49} 59}
50 60
51static void ll_device_want_to_wakeup(struct st_data_s *st_data) 61static void ll_device_want_to_wakeup(struct st_data_s *st_data)
52{ 62{
63 struct kim_data_s *kim_data;
64 struct ti_st_plat_data *pdata;
65
53 /* diff actions in diff states */ 66 /* diff actions in diff states */
54 switch (st_data->ll_state) { 67 switch (st_data->ll_state) {
55 case ST_LL_ASLEEP: 68 case ST_LL_ASLEEP:
@@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
70 } 83 }
71 /* update state */ 84 /* update state */
72 st_data->ll_state = ST_LL_AWAKE; 85 st_data->ll_state = ST_LL_AWAKE;
86
87 /* communicate to platform about chip wakeup */
88 kim_data = st_data->kim_data;
89 pdata = kim_data->kim_pdev->dev.platform_data;
 90 if (pdata->chip_awake)
91 pdata->chip_awake(NULL);
73} 92}
74 93
75/**********************************************************************/ 94/**********************************************************************/
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1ff5486213fb..4c1a648d00fc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
926 /* 926 /*
927 * Reliable writes are used to implement Forced Unit Access and 927 * Reliable writes are used to implement Forced Unit Access and
928 * REQ_META accesses, and are supported only on MMCs. 928 * REQ_META accesses, and are supported only on MMCs.
929 *
930 * XXX: this really needs a good explanation of why REQ_META
931 * is treated special.
929 */ 932 */
930 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 933 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
931 (req->cmd_flags & REQ_META)) && 934 (req->cmd_flags & REQ_META)) &&
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 006a5e9f8ab8..2bf229acd3b8 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
224static int mmc_test_busy(struct mmc_command *cmd) 224static int mmc_test_busy(struct mmc_command *cmd)
225{ 225{
226 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 226 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
227 (R1_CURRENT_STATE(cmd->resp[0]) == 7); 227 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
228} 228}
229 229
230/* 230/*
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = {
2900 .release = single_release, 2900 .release = single_release,
2901}; 2901};
2902 2902
2903static void mmc_test_free_file_test(struct mmc_card *card) 2903static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2904{ 2904{
2905 struct mmc_test_dbgfs_file *df, *dfs; 2905 struct mmc_test_dbgfs_file *df, *dfs;
2906 2906
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
2917 mutex_unlock(&mmc_test_lock); 2917 mutex_unlock(&mmc_test_lock);
2918} 2918}
2919 2919
2920static int mmc_test_register_file_test(struct mmc_card *card) 2920static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2921 const char *name, mode_t mode, const struct file_operations *fops)
2921{ 2922{
2922 struct dentry *file = NULL; 2923 struct dentry *file = NULL;
2923 struct mmc_test_dbgfs_file *df; 2924 struct mmc_test_dbgfs_file *df;
2924 int ret = 0;
2925
2926 mutex_lock(&mmc_test_lock);
2927
2928 if (card->debugfs_root)
2929 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2930 card->debugfs_root, card, &mmc_test_fops_test);
2931
2932 if (IS_ERR_OR_NULL(file)) {
2933 dev_err(&card->dev,
2934 "Can't create test. Perhaps debugfs is disabled.\n");
2935 ret = -ENODEV;
2936 goto err;
2937 }
2938 2925
2939 if (card->debugfs_root) 2926 if (card->debugfs_root)
2940 file = debugfs_create_file("testlist", S_IRUGO, 2927 file = debugfs_create_file(name, mode, card->debugfs_root,
2941 card->debugfs_root, card, &mmc_test_fops_testlist); 2928 card, fops);
2942 2929
2943 if (IS_ERR_OR_NULL(file)) { 2930 if (IS_ERR_OR_NULL(file)) {
2944 dev_err(&card->dev, 2931 dev_err(&card->dev,
2945 "Can't create testlist. Perhaps debugfs is disabled.\n"); 2932 "Can't create %s. Perhaps debugfs is disabled.\n",
2946 ret = -ENODEV; 2933 name);
2947 goto err; 2934 return -ENODEV;
2948 } 2935 }
2949 2936
2950 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); 2937 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
2952 debugfs_remove(file); 2939 debugfs_remove(file);
2953 dev_err(&card->dev, 2940 dev_err(&card->dev,
2954 "Can't allocate memory for internal usage.\n"); 2941 "Can't allocate memory for internal usage.\n");
2955 ret = -ENOMEM; 2942 return -ENOMEM;
2956 goto err;
2957 } 2943 }
2958 2944
2959 df->card = card; 2945 df->card = card;
2960 df->file = file; 2946 df->file = file;
2961 2947
2962 list_add(&df->link, &mmc_test_file_test); 2948 list_add(&df->link, &mmc_test_file_test);
2949 return 0;
2950}
2951
2952static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2953{
2954 int ret;
2955
2956 mutex_lock(&mmc_test_lock);
2957
2958 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2959 &mmc_test_fops_test);
2960 if (ret)
2961 goto err;
2962
2963 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2964 &mmc_test_fops_testlist);
2965 if (ret)
2966 goto err;
2963 2967
2964err: 2968err:
2965 mutex_unlock(&mmc_test_lock); 2969 mutex_unlock(&mmc_test_lock);
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card)
2974 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 2978 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2975 return -ENODEV; 2979 return -ENODEV;
2976 2980
2977 ret = mmc_test_register_file_test(card); 2981 ret = mmc_test_register_dbgfs_file(card);
2978 if (ret) 2982 if (ret)
2979 return ret; 2983 return ret;
2980 2984
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card)
2986static void mmc_test_remove(struct mmc_card *card) 2990static void mmc_test_remove(struct mmc_card *card)
2987{ 2991{
2988 mmc_test_free_result(card); 2992 mmc_test_free_result(card);
2989 mmc_test_free_file_test(card); 2993 mmc_test_free_dbgfs_file(card);
2990} 2994}
2991 2995
2992static struct mmc_driver mmc_driver = { 2996static struct mmc_driver mmc_driver = {
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void)
3006{ 3010{
3007 /* Clear stalled data if card is still plugged */ 3011 /* Clear stalled data if card is still plugged */
3008 mmc_test_free_result(NULL); 3012 mmc_test_free_result(NULL);
3009 mmc_test_free_file_test(NULL); 3013 mmc_test_free_dbgfs_file(NULL);
3010 3014
3011 mmc_unregister_driver(&mmc_driver); 3015 mmc_unregister_driver(&mmc_driver);
3012} 3016}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 89bdeaec7182..b27b94078c21 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
133 if (mrq->done) 133 if (mrq->done)
134 mrq->done(mrq); 134 mrq->done(mrq);
135 135
136 mmc_host_clk_gate(host); 136 mmc_host_clk_release(host);
137 } 137 }
138} 138}
139 139
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
192 mrq->stop->mrq = mrq; 192 mrq->stop->mrq = mrq;
193 } 193 }
194 } 194 }
195 mmc_host_clk_ungate(host); 195 mmc_host_clk_hold(host);
196 led_trigger_event(host->led, LED_FULL); 196 led_trigger_event(host->led, LED_FULL);
197 host->ops->request(host, mrq); 197 host->ops->request(host, mrq);
198} 198}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
728 */ 728 */
729void mmc_set_chip_select(struct mmc_host *host, int mode) 729void mmc_set_chip_select(struct mmc_host *host, int mode)
730{ 730{
731 mmc_host_clk_hold(host);
731 host->ios.chip_select = mode; 732 host->ios.chip_select = mode;
732 mmc_set_ios(host); 733 mmc_set_ios(host);
734 mmc_host_clk_release(host);
733} 735}
734 736
735/* 737/*
736 * Sets the host clock to the highest possible frequency that 738 * Sets the host clock to the highest possible frequency that
737 * is below "hz". 739 * is below "hz".
738 */ 740 */
739void mmc_set_clock(struct mmc_host *host, unsigned int hz) 741static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
740{ 742{
741 WARN_ON(hz < host->f_min); 743 WARN_ON(hz < host->f_min);
742 744
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
747 mmc_set_ios(host); 749 mmc_set_ios(host);
748} 750}
749 751
752void mmc_set_clock(struct mmc_host *host, unsigned int hz)
753{
754 mmc_host_clk_hold(host);
755 __mmc_set_clock(host, hz);
756 mmc_host_clk_release(host);
757}
758
750#ifdef CONFIG_MMC_CLKGATE 759#ifdef CONFIG_MMC_CLKGATE
751/* 760/*
752 * This gates the clock by setting it to 0 Hz. 761 * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
779 if (host->clk_old) { 788 if (host->clk_old) {
780 BUG_ON(host->ios.clock); 789 BUG_ON(host->ios.clock);
781 /* This call will also set host->clk_gated to false */ 790 /* This call will also set host->clk_gated to false */
782 mmc_set_clock(host, host->clk_old); 791 __mmc_set_clock(host, host->clk_old);
783 } 792 }
784} 793}
785 794
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
807 */ 816 */
808void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 817void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
809{ 818{
819 mmc_host_clk_hold(host);
810 host->ios.bus_mode = mode; 820 host->ios.bus_mode = mode;
811 mmc_set_ios(host); 821 mmc_set_ios(host);
822 mmc_host_clk_release(host);
812} 823}
813 824
814/* 825/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
816 */ 827 */
817void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 828void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
818{ 829{
830 mmc_host_clk_hold(host);
819 host->ios.bus_width = width; 831 host->ios.bus_width = width;
820 mmc_set_ios(host); 832 mmc_set_ios(host);
833 mmc_host_clk_release(host);
821} 834}
822 835
823/** 836/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1015 1028
1016 ocr &= 3 << bit; 1029 ocr &= 3 << bit;
1017 1030
1031 mmc_host_clk_hold(host);
1018 host->ios.vdd = bit; 1032 host->ios.vdd = bit;
1019 mmc_set_ios(host); 1033 mmc_set_ios(host);
1034 mmc_host_clk_release(host);
1020 } else { 1035 } else {
1021 pr_warning("%s: host doesn't support card's voltages\n", 1036 pr_warning("%s: host doesn't support card's voltages\n",
1022 mmc_hostname(host)); 1037 mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
1063 */ 1078 */
1064void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1079void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1065{ 1080{
1081 mmc_host_clk_hold(host);
1066 host->ios.timing = timing; 1082 host->ios.timing = timing;
1067 mmc_set_ios(host); 1083 mmc_set_ios(host);
1084 mmc_host_clk_release(host);
1068} 1085}
1069 1086
1070/* 1087/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1072 */ 1089 */
1073void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1090void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1074{ 1091{
1092 mmc_host_clk_hold(host);
1075 host->ios.drv_type = drv_type; 1093 host->ios.drv_type = drv_type;
1076 mmc_set_ios(host); 1094 mmc_set_ios(host);
1095 mmc_host_clk_release(host);
1077} 1096}
1078 1097
1079/* 1098/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
1091{ 1110{
1092 int bit; 1111 int bit;
1093 1112
1113 mmc_host_clk_hold(host);
1114
1094 /* If ocr is set, we use it */ 1115 /* If ocr is set, we use it */
1095 if (host->ocr) 1116 if (host->ocr)
1096 bit = ffs(host->ocr) - 1; 1117 bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
1126 * time required to reach a stable voltage. 1147 * time required to reach a stable voltage.
1127 */ 1148 */
1128 mmc_delay(10); 1149 mmc_delay(10);
1150
1151 mmc_host_clk_release(host);
1129} 1152}
1130 1153
1131static void mmc_power_off(struct mmc_host *host) 1154static void mmc_power_off(struct mmc_host *host)
1132{ 1155{
1156 mmc_host_clk_hold(host);
1157
1133 host->ios.clock = 0; 1158 host->ios.clock = 0;
1134 host->ios.vdd = 0; 1159 host->ios.vdd = 0;
1135 1160
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
1147 host->ios.bus_width = MMC_BUS_WIDTH_1; 1172 host->ios.bus_width = MMC_BUS_WIDTH_1;
1148 host->ios.timing = MMC_TIMING_LEGACY; 1173 host->ios.timing = MMC_TIMING_LEGACY;
1149 mmc_set_ios(host); 1174 mmc_set_ios(host);
1175
1176 mmc_host_clk_release(host);
1150} 1177}
1151 1178
1152/* 1179/*
@@ -1502,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1502 goto out; 1529 goto out;
1503 } 1530 }
1504 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1531 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1505 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1532 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1506out: 1533out:
1507 return err; 1534 return err;
1508} 1535}
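The core.c hunks follow one pattern: every path that rewrites host->ios and calls the host driver's ->set_ios() is now bracketed by mmc_host_clk_hold() and mmc_host_clk_release() (the renamed ungate/gate helpers in host.c below), so aggressive clock gating cannot switch the MCI clock off in the middle of reprogramming the host; mmc_set_clock() gains a __mmc_set_clock() variant so the ungate path can restore the old frequency without re-taking the hold. The magic == 7 card-state checks likewise become R1_CURRENT_STATE(resp) == R1_STATE_PRG, the card-status "programming" state. The bracketing, as it appears in the timing setter:

    void mmc_set_timing(struct mmc_host *host, unsigned int timing)
    {
            mmc_host_clk_hold(host);        /* ungate and take a clock reference */
            host->ios.timing = timing;
            mmc_set_ios(host);              /* the host driver sees a running clock */
            mmc_host_clk_release(host);     /* the last user lets the gating work run */
    }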
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2..793d0a0dad8d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
119} 119}
120 120
121/** 121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks 122 * mmc_host_clk_hold - ungate hardware MCI clocks
123 * @host: host to ungate. 123 * @host: host to ungate.
124 * 124 *
125 * Makes sure the host ios.clock is restored to a non-zero value 125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock 126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user. 127 * if we're the first user.
128 */ 128 */
129void mmc_host_clk_ungate(struct mmc_host *host) 129void mmc_host_clk_hold(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
164} 164}
165 165
166/** 166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks 167 * mmc_host_clk_release - gate off hardware MCI clocks
168 * @host: host to gate. 168 * @host: host to gate.
169 * 169 *
170 * Calls the host driver with ios.clock set to zero as often as possible 170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference 171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock. 172 * count and schedule disabling of clock.
173 */ 173 */
174void mmc_host_clk_gate(struct mmc_host *host) 174void mmc_host_clk_release(struct mmc_host *host)
175{ 175{
176 unsigned long flags; 176 unsigned long flags;
177 177
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
179 host->clk_requests--; 179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work); 182 queue_work(system_nrt_wq, &host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags); 183 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 184}
185 185
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
231 if (cancel_work_sync(&host->clk_gate_work)) 231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 233 if (host->clk_gated)
234 mmc_host_clk_ungate(host); 234 mmc_host_clk_hold(host);
235 /* There should be only one user now */ 235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1); 236 WARN_ON(host->clk_requests > 1);
237} 237}
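
The host.c hunk above renames mmc_host_clk_ungate()/mmc_host_clk_gate() to mmc_host_clk_hold()/mmc_host_clk_release(), which better describes the reference-counted gating: every caller that needs the MCI clock running takes a hold, and gating is only scheduled once the last hold is dropped. A minimal userspace sketch of that pattern (the struct and function names below are illustrative, not the kernel API):

#include <stdio.h>

struct clk_host {
	int clk_requests;   /* outstanding holds */
	int clk_gated;      /* 1 if the MCI clock is currently gated off */
};

/* Take a reference; ungate the clock if we are the first user. */
static void clk_hold(struct clk_host *h)
{
	if (h->clk_gated) {
		h->clk_gated = 0;
		printf("ungating clock\n");
	}
	h->clk_requests++;
}

/* Drop a reference; only the last user schedules gating. */
static void clk_release(struct clk_host *h)
{
	if (--h->clk_requests == 0) {
		h->clk_gated = 1;
		printf("gating clock\n");
	}
}

int main(void)
{
	struct clk_host h = { 0, 1 };

	clk_hold(&h);     /* e.g. around an ios update */
	clk_hold(&h);     /* nested user: no extra ungate */
	clk_release(&h);  /* still one user left */
	clk_release(&h);  /* last user: clock gated again */
	return 0;
}
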
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f911928..fb8a5cd2e4a1 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17 17
18#ifdef CONFIG_MMC_CLKGATE 18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host); 19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host); 20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host); 21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22 22
23#else 23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host) 24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{ 25{
26} 26}
27 27
28static inline void mmc_host_clk_gate(struct mmc_host *host) 28static inline void mmc_host_clk_release(struct mmc_host *host)
29{ 29{
30} 30}
31 31
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d79b8c5..5700b1cbdfec 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
259 } 259 }
260 260
261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 261 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
262 if (card->ext_csd.rev > 5) { 262 if (card->ext_csd.rev > 6) {
263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
264 mmc_hostname(card->host), card->ext_csd.rev); 264 mmc_hostname(card->host), card->ext_csd.rev);
265 err = -EINVAL; 265 err = -EINVAL;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b9..770c3d06f5dc 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
407 break; 407 break;
408 if (mmc_host_is_spi(card->host)) 408 if (mmc_host_is_spi(card->host))
409 break; 409 break;
410 } while (R1_CURRENT_STATE(status) == 7); 410 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411 411
412 if (mmc_host_is_spi(card->host)) { 412 if (mmc_host_is_spi(card->host)) {
413 if (status & R1_SPI_ILLEGAL_COMMAND) 413 if (status & R1_SPI_ILLEGAL_COMMAND)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb3..0370e03e3142 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
469 return 0; 469 return 0;
470} 470}
471 471
472static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472static void sd_update_bus_speed_mode(struct mmc_card *card)
473{ 473{
474 unsigned int bus_speed = 0, timing = 0;
475 int err;
476
477 /* 474 /*
478 * If the host doesn't support any of the UHS-I modes, fallback on 475 * If the host doesn't support any of the UHS-I modes, fallback on
479 * default speed. 476 * default speed.
480 */ 477 */
481 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
482 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 479 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
483 return 0; 480 card->sd_bus_speed = 0;
481 return;
482 }
484 483
485 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
486 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
487 bus_speed = UHS_SDR104_BUS_SPEED; 486 card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
488 timing = MMC_TIMING_UHS_SDR104;
489 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
490 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
491 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
492 bus_speed = UHS_DDR50_BUS_SPEED; 489 card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
493 timing = MMC_TIMING_UHS_DDR50;
494 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
495 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
496 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
497 SD_MODE_UHS_SDR50)) { 492 SD_MODE_UHS_SDR50)) {
498 bus_speed = UHS_SDR50_BUS_SPEED; 493 card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
499 timing = MMC_TIMING_UHS_SDR50;
500 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
501 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
502 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
503 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
504 bus_speed = UHS_SDR25_BUS_SPEED; 497 card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
505 timing = MMC_TIMING_UHS_SDR25;
506 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
507 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
509 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
510 SD_MODE_UHS_SDR12)) { 501 SD_MODE_UHS_SDR12)) {
511 bus_speed = UHS_SDR12_BUS_SPEED; 502 card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
512 timing = MMC_TIMING_UHS_SDR12; 503 }
513 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 504}
505
506static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
507{
508 int err;
509 unsigned int timing = 0;
510
511 switch (card->sd_bus_speed) {
512 case UHS_SDR104_BUS_SPEED:
513 timing = MMC_TIMING_UHS_SDR104;
514 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
515 break;
516 case UHS_DDR50_BUS_SPEED:
517 timing = MMC_TIMING_UHS_DDR50;
518 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
519 break;
520 case UHS_SDR50_BUS_SPEED:
521 timing = MMC_TIMING_UHS_SDR50;
522 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
523 break;
524 case UHS_SDR25_BUS_SPEED:
525 timing = MMC_TIMING_UHS_SDR25;
526 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
527 break;
528 case UHS_SDR12_BUS_SPEED:
529 timing = MMC_TIMING_UHS_SDR12;
530 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
531 break;
532 default:
533 return 0;
514 } 534 }
515 535
516 card->sd_bus_speed = bus_speed; 536 err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
517 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
518 if (err) 537 if (err)
519 return err; 538 return err;
520 539
521 if ((status[16] & 0xF) != bus_speed) 540 if ((status[16] & 0xF) != card->sd_bus_speed)
522 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
523 mmc_hostname(card->host)); 542 mmc_hostname(card->host));
524 else { 543 else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
618 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 637 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
619 } 638 }
620 639
640 /*
641 * Select the bus speed mode depending on host
642 * and card capability.
643 */
644 sd_update_bus_speed_mode(card);
645
621 /* Set the driver strength for the card */ 646 /* Set the driver strength for the card */
622 err = sd_select_driver_type(card, status); 647 err = sd_select_driver_type(card, status);
623 if (err) 648 if (err)
624 goto out; 649 goto out;
625 650
626 /* Set bus speed mode of the card */ 651 /* Set current limit for the card */
627 err = sd_set_bus_speed_mode(card, status); 652 err = sd_set_current_limit(card, status);
628 if (err) 653 if (err)
629 goto out; 654 goto out;
630 655
631 /* Set current limit for the card */ 656 /* Set bus speed mode of the card */
632 err = sd_set_current_limit(card, status); 657 err = sd_set_bus_speed_mode(card, status);
633 if (err) 658 if (err)
634 goto out; 659 goto out;
635 660
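
The sd.c change above splits UHS-I handling in two: sd_update_bus_speed_mode() records the fastest mode both host and card support in card->sd_bus_speed, and sd_set_bus_speed_mode() later maps that stored value to a timing and maximum data rate before issuing the switch. A rough sketch of the mapping step, assuming typical UHS-I clock ceilings (the enum values and rates below are placeholders, not the kernel constants):

#include <stdio.h>

enum bus_speed { SDR12, SDR25, SDR50, SDR104, DDR50, SPEED_NONE };

struct mode { const char *timing; unsigned int max_dtr_hz; };

/* Map a previously selected bus speed to a timing name and max clock. */
static int lookup_mode(enum bus_speed speed, struct mode *out)
{
	switch (speed) {
	case SDR104: *out = (struct mode){ "UHS_SDR104", 208000000 }; return 0;
	case DDR50:  *out = (struct mode){ "UHS_DDR50",   50000000 }; return 0;
	case SDR50:  *out = (struct mode){ "UHS_SDR50",  100000000 }; return 0;
	case SDR25:  *out = (struct mode){ "UHS_SDR25",   50000000 }; return 0;
	case SDR12:  *out = (struct mode){ "UHS_SDR12",   25000000 }; return 0;
	default:     return -1;   /* nothing to switch to */
	}
}

int main(void)
{
	struct mode m;

	if (lookup_mode(SDR104, &m) == 0)
		printf("timing=%s max_dtr=%u Hz\n", m.timing, m.max_dtr_hz);
	return 0;
}
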
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 77f0b6b1681d..ff0f714b012c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -62,7 +62,7 @@ struct idmac_desc {
62 62
63 u32 des1; /* Buffer sizes */ 63 u32 des1; /* Buffer sizes */
64#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64#define IDMAC_SET_BUFFER1_SIZE(d, s) \
65 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) 65 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
66 66
67 u32 des2; /* buffer 1 physical address */ 67 u32 des2; /* buffer 1 physical address */
68 68
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
699 } 699 }
700 700
701 /* DDR mode set */ 701 /* DDR mode set */
702 if (ios->ddr) { 702 if (ios->timing == MMC_TIMING_UHS_DDR50) {
703 regs = mci_readl(slot->host, UHS_REG); 703 regs = mci_readl(slot->host, UHS_REG);
704 regs |= (0x1 << slot->id) << 16; 704 regs |= (0x1 << slot->id) << 16;
705 mci_writel(slot->host, UHS_REG, regs); 705 mci_writel(slot->host, UHS_REG, regs);
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1646 mmc->caps |= MMC_CAP_4_BIT_DATA; 1646 mmc->caps |= MMC_CAP_4_BIT_DATA;
1647 1647
1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1650 1650
1651#ifdef CONFIG_MMC_DW_IDMAC 1651#ifdef CONFIG_MMC_DW_IDMAC
1652 mmc->max_segs = host->ring_size; 1652 mmc->max_segs = host->ring_size;
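
The IDMAC_SET_BUFFER1_SIZE fix in the dw_mmc diff above shrinks the buffer-1 size field from 14 to 13 bits and adjusts the preserved mask accordingly. A small sketch of the descriptor-word packing implied by the new masks (the field-layout comment is an assumption based on the mask values alone):

#include <stdio.h>
#include <stdint.h>

/* Buffer-1 size occupies the low 13 bits of des1; the bits kept by the
 * other mask (buffer-2 size) sit directly above it. Masks mirror the patch. */
#define BUF2_MASK  0x03ffe000u
#define BUF1_MASK  0x00001fffu

static uint32_t set_buffer1_size(uint32_t des1, uint32_t size)
{
	return (des1 & BUF2_MASK) | (size & BUF1_MASK);
}

int main(void)
{
	uint32_t des1 = 0x03ffe000;           /* buffer-2 size already set */

	des1 = set_buffer1_size(des1, 4096);  /* 4 KiB fits in 13 bits */
	printf("des1 = 0x%08x\n", (unsigned int)des1);
	return 0;
}
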
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9ebfb4b482f5..4dc0028086a3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
21#include <linux/mmc/mmc.h> 22#include <linux/mmc/mmc.h>
@@ -27,6 +28,7 @@
27#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
28#include "sdhci-esdhc.h" 29#include "sdhci-esdhc.h"
29 30
31#define SDHCI_CTRL_D3CD 0x08
30/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
@@ -141,13 +143,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
141 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 143 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
142 struct pltfm_imx_data *imx_data = pltfm_host->priv; 144 struct pltfm_imx_data *imx_data = pltfm_host->priv;
143 struct esdhc_platform_data *boarddata = &imx_data->boarddata; 145 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
144 146 u32 data;
145 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 147
146 && (boarddata->cd_type == ESDHC_CD_GPIO))) 148 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
147 /* 149 if (boarddata->cd_type == ESDHC_CD_GPIO)
148 * these interrupts won't work with a custom card_detect gpio 150 /*
149 */ 151 * These interrupts won't work with a custom
150 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 152 * card_detect gpio (only applied to mx25/35)
153 */
154 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
155
156 if (val & SDHCI_INT_CARD_INT) {
157 /*
158 * Clear and then set D3CD bit to avoid missing the
159 * card interrupt. This is a eSDHC controller problem
160 * so we need to apply the following workaround: clear
161 * and set D3CD bit will make eSDHC re-sample the card
162 * interrupt. In case a card interrupt was lost,
163 * re-sample it by the following steps.
164 */
165 data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
166 data &= ~SDHCI_CTRL_D3CD;
167 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
168 data |= SDHCI_CTRL_D3CD;
169 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
170 }
171 }
151 172
152 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 173 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
153 && (reg == SDHCI_INT_STATUS) 174 && (reg == SDHCI_INT_STATUS)
@@ -217,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
217 */ 238 */
218 return; 239 return;
219 case SDHCI_HOST_CONTROL: 240 case SDHCI_HOST_CONTROL:
220 /* FSL messed up here, so we can just keep those two */ 241 /* FSL messed up here, so we can just keep those three */
221 new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); 242 new_val = val & (SDHCI_CTRL_LED | \
243 SDHCI_CTRL_4BITBUS | \
244 SDHCI_CTRL_D3CD);
 222 /* ensure the endianness */ 245 /* ensure the endianness */
223 new_val |= ESDHC_HOST_CONTROL_LE; 246 new_val |= ESDHC_HOST_CONTROL_LE;
224 /* DMA mode bits are shifted */ 247 /* DMA mode bits are shifted */
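
The sdhci-esdhc-imx hunk adds a clear-then-set of the D3CD bit so the controller re-samples the DAT3 card-interrupt line and does not lose an interrupt. A hedged sketch of that sequence against a stand-in register (reg_read()/reg_write() replace the real readl()/writel() MMIO accessors):

#include <stdio.h>
#include <stdint.h>

#define CTRL_D3CD  0x08u            /* DAT3-as-card-detect bit, per the patch */

static uint32_t host_control;       /* stand-in for the MMIO register */

static uint32_t reg_read(void)        { return host_control; }
static void     reg_write(uint32_t v) { host_control = v; }

/* Clear and then set D3CD so the controller re-samples the card interrupt. */
static void resample_card_irq(void)
{
	uint32_t data = reg_read();

	data &= ~CTRL_D3CD;
	reg_write(data);
	data |= CTRL_D3CD;
	reg_write(data);
}

int main(void)
{
	host_control = 0x1f;
	resample_card_irq();
	printf("host control = 0x%02x\n", (unsigned int)host_control);
	return 0;
}
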
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 4198dbbc5c20..fc7e4a515629 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
195 clk_enable(clk); 195 clk_enable(clk);
196 196
197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; 198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
199 | SDHCI_QUIRK_32BIT_ADMA_SIZE;
199 200
200 /* enable 1/8V DDR capable */ 201 /* enable 1/8V DDR capable */
201 host->mmc->caps |= MMC_CAP_1_8V_DDR; 202 host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 460ffaf0f6d7..fe886d6c474a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/module.h>
22 23
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
301 ctrl &= ~SDHCI_CTRL_8BITBUS; 302 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break; 303 break;
303 default: 304 default:
305 ctrl &= ~SDHCI_CTRL_4BITBUS;
306 ctrl &= ~SDHCI_CTRL_8BITBUS;
304 break; 307 break;
305 } 308 }
306 309
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
502 /* This host supports the Auto CMD12 */ 505 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 506 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504 507
508 /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
509 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
510
505 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 511 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 512 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 513 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c31a3343340d..0e02cc1df12e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
628 /* timeout in us */ 628 /* timeout in us */
629 if (!data) 629 if (!data)
630 target_timeout = cmd->cmd_timeout_ms * 1000; 630 target_timeout = cmd->cmd_timeout_ms * 1000;
631 else 631 else {
632 target_timeout = data->timeout_ns / 1000 + 632 target_timeout = data->timeout_ns / 1000;
633 data->timeout_clks / host->clock; 633 if (host->clock)
634 634 target_timeout += data->timeout_clks / host->clock;
635 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 635 }
636 host->timeout_clk = host->clock / 1000;
637 636
638 /* 637 /*
639 * Figure out needed cycles. 638 * Figure out needed cycles.
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
645 * => 644 * =>
646 * (1) / (2) > 2^6 645 * (1) / (2) > 2^6
647 */ 646 */
648 BUG_ON(!host->timeout_clk);
649 count = 0; 647 count = 0;
650 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 648 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
651 while (current_timeout < target_timeout) { 649 while (current_timeout < target_timeout) {
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param)
1867 1865
1868 del_timer(&host->timer); 1866 del_timer(&host->timer);
1869 1867
1870 if (host->version >= SDHCI_SPEC_300)
1871 del_timer(&host->tuning_timer);
1872
1873 mrq = host->mrq; 1868 mrq = host->mrq;
1874 1869
1875 /* 1870 /*
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host)
2461 host->max_clk = host->ops->get_max_clock(host); 2456 host->max_clk = host->ops->get_max_clock(host);
2462 } 2457 }
2463 2458
2464 host->timeout_clk =
2465 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2466 if (host->timeout_clk == 0) {
2467 if (host->ops->get_timeout_clock) {
2468 host->timeout_clk = host->ops->get_timeout_clock(host);
2469 } else if (!(host->quirks &
2470 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2471 printk(KERN_ERR
2472 "%s: Hardware doesn't specify timeout clock "
2473 "frequency.\n", mmc_hostname(mmc));
2474 return -ENODEV;
2475 }
2476 }
2477 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2478 host->timeout_clk *= 1000;
2479
2480 /* 2459 /*
2481 * In case of Host Controller v3.00, find out whether clock 2460 * In case of Host Controller v3.00, find out whether clock
2482 * multiplier is supported. 2461 * multiplier is supported.
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host)
2509 } else 2488 } else
2510 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2511 2490
2491 host->timeout_clk =
2492 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2493 if (host->timeout_clk == 0) {
2494 if (host->ops->get_timeout_clock) {
2495 host->timeout_clk = host->ops->get_timeout_clock(host);
2496 } else if (!(host->quirks &
2497 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2498 printk(KERN_ERR
2499 "%s: Hardware doesn't specify timeout clock "
2500 "frequency.\n", mmc_hostname(mmc));
2501 return -ENODEV;
2502 }
2503 }
2504 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2505 host->timeout_clk *= 1000;
2506
2512 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 2507 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2513 mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); 2508 host->timeout_clk = mmc->f_max / 1000;
2514 else 2509
2515 mmc->max_discard_to = (1 << 27) / host->timeout_clk; 2510 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
2516 2511
2517 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2512 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2518 2513
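
The sdhci.c change moves the timeout-clock setup to after f_max is known and lets SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK derive timeout_clk from f_max. The count that eventually lands in the timeout register still follows the usual SDHCI rule of 2^(13 + count) timeout-clock cycles; a sketch of that calculation (units follow the surrounding code, microseconds and kHz, and the 0xE cap is the largest usable value of the 4-bit field, 0xF being reserved):

#include <stdio.h>

/* Pick the smallest count whose 2^(13 + count) cycle window covers the
 * requested timeout. Illustrative only; error/warn handling omitted. */
static int calc_timeout_count(unsigned int target_us, unsigned int timeout_clk_khz)
{
	unsigned int current_us = (1 << 13) * 1000 / timeout_clk_khz;
	int count = 0;

	while (current_us < target_us) {
		count++;
		current_us <<= 1;
		if (count >= 0xE)   /* cap at the largest encodable value */
			break;
	}
	return count;
}

int main(void)
{
	/* e.g. a 100 ms target with a 50 MHz timeout clock */
	printf("count = %d\n", calc_timeout_count(100000, 50000));
	return 0;
}
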
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f6439d7ce..0c4a672f5db6 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
120 mmc_data->hclk = clk_get_rate(priv->clk); 120 mmc_data->hclk = clk_get_rate(priv->clk);
121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
122 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 122 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
123 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
124 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
125 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
126 if (p) { 124 if (p) {
127 mmc_data->flags = p->tmio_flags; 125 mmc_data->flags = p->tmio_flags;
126 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
127 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
128 mmc_data->ocr_mask = p->tmio_ocr_mask; 128 mmc_data->ocr_mask = p->tmio_ocr_mask;
129 mmc_data->capabilities |= p->tmio_caps; 129 mmc_data->capabilities |= p->tmio_caps;
130 130
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de90d20..44a9668c4b7a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
28{ 28{
29 const struct mfd_cell *cell = mfd_get_cell(dev); 29 const struct mfd_cell *cell = mfd_get_cell(dev);
30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 30 int ret;
32 31
33 ret = tmio_mmc_host_suspend(&dev->dev); 32 ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
42static int tmio_mmc_resume(struct platform_device *dev) 41static int tmio_mmc_resume(struct platform_device *dev)
43{ 42{
44 const struct mfd_cell *cell = mfd_get_cell(dev); 43 const struct mfd_cell *cell = mfd_get_cell(dev);
45 struct mmc_host *mmc = platform_get_drvdata(dev);
46 int ret = 0; 44 int ret = 0;
47 45
48 /* Tell the MFD core we are ready to be enabled */ 46 /* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76cc379..64fbb0021825 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
181 181
182#define ubi_dbg_msg(fmt, ...) do { \ 182#define ubi_dbg_msg(fmt, ...) do { \
183 if (0) \ 183 if (0) \
184 pr_debug(fmt "\n", ##__VA_ARGS__); \ 184 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
185} while (0) 185} while (0)
186 186
187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) 187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9eb2cb..2adc294f512a 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49#include <linux/io.h>
49 50
50#include <linux/can/dev.h> 51#include <linux/can/dev.h>
51#include <linux/can/error.h> 52#include <linux/can/error.h>
@@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
503 spin_unlock_irqrestore(&priv->mbx_lock, flags); 504 spin_unlock_irqrestore(&priv->mbx_lock, flags);
504 505
505 /* Prepare mailbox for transmission */ 506 /* Prepare mailbox for transmission */
507 data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
506 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 508 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
507 data |= HECC_CANMCF_RTR; 509 data |= HECC_CANMCF_RTR;
508 data |= get_tx_head_prio(priv) << 8;
509 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); 510 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
510 511
511 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ 512 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
923 priv->can.do_get_state = ti_hecc_get_state; 924 priv->can.do_get_state = ti_hecc_get_state;
924 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 925 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
925 926
927 spin_lock_init(&priv->mbx_lock);
926 ndev->irq = irq->start; 928 ndev->irq = irq->start;
927 ndev->flags |= IFF_ECHO; 929 ndev->flags |= IFF_ECHO;
928 platform_set_drvdata(pdev, ndev); 930 platform_set_drvdata(pdev, ndev);
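
The ti_hecc fix above builds the mailbox control word from the DLC and transmit priority first and only then ORs in the RTR flag, so the word no longer starts from an uninitialized value. A tiny sketch of the corrected ordering (the RTR bit value is illustrative, not the real register layout):

#include <stdio.h>
#include <stdint.h>

#define CANMCF_RTR  0x10u   /* placeholder bit position */

/* Build the mailbox control word: DLC in the low bits, priority at bit 8,
 * then OR in RTR if requested (the fix: start from a defined value). */
static uint32_t build_canmcf(uint8_t dlc, uint8_t prio, int rtr)
{
	uint32_t data = dlc | ((uint32_t)prio << 8);

	if (rtr)
		data |= CANMCF_RTR;
	return data;
}

int main(void)
{
	printf("CANMCF = 0x%04x\n", (unsigned int)build_canmcf(8, 3, 1));
	return 0;
}
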
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index bc3bd34c43f1..6715bf54f04e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -427,6 +427,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
427 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 427 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
428 428
429 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); 429 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
430 greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
430 431
431 /* Wrap around descriptor ring */ 432 /* Wrap around descriptor ring */
432 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { 433 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -489,7 +490,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
489 if (nr_frags != 0) 490 if (nr_frags != 0)
490 status = GRETH_TXBD_MORE; 491 status = GRETH_TXBD_MORE;
491 492
492 status |= GRETH_TXBD_CSALL; 493 if (skb->ip_summed == CHECKSUM_PARTIAL)
494 status |= GRETH_TXBD_CSALL;
493 status |= skb_headlen(skb) & GRETH_BD_LEN; 495 status |= skb_headlen(skb) & GRETH_BD_LEN;
494 if (greth->tx_next == GRETH_TXBD_NUM_MASK) 496 if (greth->tx_next == GRETH_TXBD_NUM_MASK)
495 status |= GRETH_BD_WR; 497 status |= GRETH_BD_WR;
@@ -512,7 +514,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
512 greth->tx_skbuff[curr_tx] = NULL; 514 greth->tx_skbuff[curr_tx] = NULL;
513 bdp = greth->tx_bd_base + curr_tx; 515 bdp = greth->tx_bd_base + curr_tx;
514 516
515 status = GRETH_TXBD_CSALL | GRETH_BD_EN; 517 status = GRETH_BD_EN;
518 if (skb->ip_summed == CHECKSUM_PARTIAL)
519 status |= GRETH_TXBD_CSALL;
516 status |= frag->size & GRETH_BD_LEN; 520 status |= frag->size & GRETH_BD_LEN;
517 521
518 /* Wrap around descriptor ring */ 522 /* Wrap around descriptor ring */
@@ -637,6 +641,7 @@ static void greth_clean_tx(struct net_device *dev)
637 dev->stats.tx_fifo_errors++; 641 dev->stats.tx_fifo_errors++;
638 } 642 }
639 dev->stats.tx_packets++; 643 dev->stats.tx_packets++;
644 dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
640 greth->tx_last = NEXT_TX(greth->tx_last); 645 greth->tx_last = NEXT_TX(greth->tx_last);
641 greth->tx_free++; 646 greth->tx_free++;
642 } 647 }
@@ -691,6 +696,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
691 greth->tx_skbuff[greth->tx_last] = NULL; 696 greth->tx_skbuff[greth->tx_last] = NULL;
692 697
693 greth_update_tx_stats(dev, stat); 698 greth_update_tx_stats(dev, stat);
699 dev->stats.tx_bytes += skb->len;
694 700
695 bdp = greth->tx_bd_base + greth->tx_last; 701 bdp = greth->tx_bd_base + greth->tx_last;
696 702
@@ -792,6 +798,7 @@ static int greth_rx(struct net_device *dev, int limit)
792 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); 798 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
793 799
794 skb->protocol = eth_type_trans(skb, dev); 800 skb->protocol = eth_type_trans(skb, dev);
801 dev->stats.rx_bytes += pkt_len;
795 dev->stats.rx_packets++; 802 dev->stats.rx_packets++;
796 netif_receive_skb(skb); 803 netif_receive_skb(skb);
797 } 804 }
@@ -906,6 +913,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
906 913
907 skb->protocol = eth_type_trans(skb, dev); 914 skb->protocol = eth_type_trans(skb, dev);
908 dev->stats.rx_packets++; 915 dev->stats.rx_packets++;
916 dev->stats.rx_bytes += pkt_len;
909 netif_receive_skb(skb); 917 netif_receive_skb(skb);
910 918
911 greth->rx_skbuff[greth->rx_cur] = newskb; 919 greth->rx_skbuff[greth->rx_cur] = newskb;
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 9a0040dee4da..232a622a85b7 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -103,6 +103,7 @@ struct greth_private {
103 103
104 unsigned char *tx_bufs[GRETH_TXBD_NUM]; 104 unsigned char *tx_bufs[GRETH_TXBD_NUM];
105 unsigned char *rx_bufs[GRETH_RXBD_NUM]; 105 unsigned char *rx_bufs[GRETH_RXBD_NUM];
106 u16 tx_bufs_length[GRETH_TXBD_NUM];
106 107
107 u16 tx_next; 108 u16 tx_next;
108 u16 tx_last; 109 u16 tx_last;
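
The greth changes record each queued skb's length in a per-descriptor array so the cleanup path can credit dev->stats.tx_bytes only when the DMA actually completes. A small ring-buffer sketch of that bookkeeping (not the driver's API, just the idea):

#include <stdio.h>

#define NDESC 4

static unsigned int tx_len[NDESC];   /* length remembered per descriptor */
static unsigned long tx_bytes;       /* running byte counter */
static unsigned int tx_next, tx_last;

/* On transmit: remember the length alongside the descriptor we queue. */
static void queue_tx(unsigned int len)
{
	tx_len[tx_next] = len;
	tx_next = (tx_next + 1) % NDESC;
}

/* On completion: account the bytes for the descriptor just finished. */
static void clean_tx(void)
{
	tx_bytes += tx_len[tx_last];
	tx_last = (tx_last + 1) % NDESC;
}

int main(void)
{
	queue_tx(1514);
	queue_tx(60);
	clean_tx();
	clean_tx();
	printf("tx_bytes = %lu\n", tx_bytes);
	return 0;
}
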
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index c2b630c5e852..7d5ded80d2d7 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 struct net_device *dev = (struct net_device *)data; 308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev); 309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier; 310 unsigned int lnkstat, carrier;
311 unsigned long flags;
311 312
313 spin_lock_irqsave(&priv->chip_lock, flags);
312 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
313 carrier = netif_carrier_ok(dev); 316 carrier = netif_carrier_ok(dev);
314 317
315 if (lnkstat && !carrier) { 318 if (lnkstat && !carrier) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index f127768e4e83..2f92487724c6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -310,6 +310,14 @@ union db_prod {
310 u32 raw; 310 u32 raw;
311}; 311};
312 312
313/* dropless fc FW/HW related params */
314#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
315#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
316 ETH_MAX_AGGREGATION_QUEUES_E1 :\
317 ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
318#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
319#define FW_PREFETCH_CNT 16
320#define DROPLESS_FC_HEADROOM 100
313 321
314/* MC hsi */ 322/* MC hsi */
315#define BCM_PAGE_SHIFT 12 323#define BCM_PAGE_SHIFT 12
@@ -326,15 +334,35 @@ union db_prod {
326/* SGE ring related macros */ 334/* SGE ring related macros */
327#define NUM_RX_SGE_PAGES 2 335#define NUM_RX_SGE_PAGES 2
328#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 336#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
329#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 337#define NEXT_PAGE_SGE_DESC_CNT 2
338#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
330/* RX_SGE_CNT is promised to be a power of 2 */ 339/* RX_SGE_CNT is promised to be a power of 2 */
331#define RX_SGE_MASK (RX_SGE_CNT - 1) 340#define RX_SGE_MASK (RX_SGE_CNT - 1)
332#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 341#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
333#define MAX_RX_SGE (NUM_RX_SGE - 1) 342#define MAX_RX_SGE (NUM_RX_SGE - 1)
334#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ 343#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
335 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) 344 (MAX_RX_SGE_CNT - 1)) ? \
345 (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
346 (x) + 1)
336#define RX_SGE(x) ((x) & MAX_RX_SGE) 347#define RX_SGE(x) ((x) & MAX_RX_SGE)
337 348
349/*
350 * Number of required SGEs is the sum of two:
351 * 1. Number of possible opened aggregations (next packet for
 352 * these aggregations will probably consume SGE immediately)
353 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
354 * after placement on BD for new TPA aggregation)
355 *
356 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
357 */
358#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
359 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
360#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
361 MAX_RX_SGE_CNT)
362#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
363 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
364#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
365
338/* Manipulate a bit vector defined as an array of u64 */ 366/* Manipulate a bit vector defined as an array of u64 */
339 367
340/* Number of bits in one sge_mask array element */ 368/* Number of bits in one sge_mask array element */
@@ -546,24 +574,43 @@ struct bnx2x_fastpath {
546 574
547#define NUM_TX_RINGS 16 575#define NUM_TX_RINGS 16
548#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) 576#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
549#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 577#define NEXT_PAGE_TX_DESC_CNT 1
578#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
550#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 579#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
551#define MAX_TX_BD (NUM_TX_BD - 1) 580#define MAX_TX_BD (NUM_TX_BD - 1)
552#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 581#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
553#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 582#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
554 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 583 (MAX_TX_DESC_CNT - 1)) ? \
584 (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
585 (x) + 1)
555#define TX_BD(x) ((x) & MAX_TX_BD) 586#define TX_BD(x) ((x) & MAX_TX_BD)
556#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 587#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
557 588
558/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 589/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
559#define NUM_RX_RINGS 8 590#define NUM_RX_RINGS 8
560#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 591#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
561#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) 592#define NEXT_PAGE_RX_DESC_CNT 2
593#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
562#define RX_DESC_MASK (RX_DESC_CNT - 1) 594#define RX_DESC_MASK (RX_DESC_CNT - 1)
563#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 595#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
564#define MAX_RX_BD (NUM_RX_BD - 1) 596#define MAX_RX_BD (NUM_RX_BD - 1)
565#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 597#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
566#define MIN_RX_AVAIL 128 598
599/* dropless fc calculations for BDs
600 *
 601 * Number of BDs should match the number of buffers in the BRB:
602 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
603 * "next" elements on each page
604 */
605#define NUM_BD_REQ BRB_SIZE(bp)
606#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
607 MAX_RX_DESC_CNT)
608#define BD_TH_LO(bp) (NUM_BD_REQ + \
609 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
610 FW_DROP_LEVEL(bp))
611#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
612
613#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
567 614
568#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ 615#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
569 ETH_MIN_RX_CQES_WITH_TPA_E1 : \ 616 ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -574,7 +621,9 @@ struct bnx2x_fastpath {
574 MIN_RX_AVAIL)) 621 MIN_RX_AVAIL))
575 622
576#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 623#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
577 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 624 (MAX_RX_DESC_CNT - 1)) ? \
625 (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
626 (x) + 1)
578#define RX_BD(x) ((x) & MAX_RX_BD) 627#define RX_BD(x) ((x) & MAX_RX_BD)
579 628
580/* 629/*
@@ -584,14 +633,31 @@ struct bnx2x_fastpath {
584#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) 633#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
585#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) 634#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
586#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 635#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
587#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) 636#define NEXT_PAGE_RCQ_DESC_CNT 1
637#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
588#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) 638#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
589#define MAX_RCQ_BD (NUM_RCQ_BD - 1) 639#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
590#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) 640#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
591#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ 641#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
592 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 642 (MAX_RCQ_DESC_CNT - 1)) ? \
643 (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
644 (x) + 1)
593#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 645#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
594 646
647/* dropless fc calculations for RCQs
648 *
 649 * Number of RCQs should match the number of buffers in the BRB:
650 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
651 * "next" elements on each page
652 */
653#define NUM_RCQ_REQ BRB_SIZE(bp)
654#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
655 MAX_RCQ_DESC_CNT)
656#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
657 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
658 FW_DROP_LEVEL(bp))
659#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
660
595 661
596/* This is needed for determining of last_max */ 662/* This is needed for determining of last_max */
597#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 663#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -680,24 +746,17 @@ struct bnx2x_fastpath {
680#define FP_CSB_FUNC_OFF \ 746#define FP_CSB_FUNC_OFF \
681 offsetof(struct cstorm_status_block_c, func) 747 offsetof(struct cstorm_status_block_c, func)
682 748
683#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ 749#define HC_INDEX_ETH_RX_CQ_CONS 1
684 /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
685#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
686 /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
687#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
688 /* (HC_INDEX_U_ETH_RX_BD_CONS) */
689
690#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
691 /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
692#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
693 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
694#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
695 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
696#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
697 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
698 750
699#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 751#define HC_INDEX_OOO_TX_CQ_CONS 4
700 752
753#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
754
755#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
756
757#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
758
759#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
701 760
702#define BNX2X_RX_SB_INDEX \ 761#define BNX2X_RX_SB_INDEX \
703 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) 762 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1095,11 +1154,12 @@ struct bnx2x {
1095#define BP_PORT(bp) (bp->pfid & 1) 1154#define BP_PORT(bp) (bp->pfid & 1)
1096#define BP_FUNC(bp) (bp->pfid) 1155#define BP_FUNC(bp) (bp->pfid)
1097#define BP_ABS_FUNC(bp) (bp->pf_num) 1156#define BP_ABS_FUNC(bp) (bp->pf_num)
1098#define BP_E1HVN(bp) (bp->pfid >> 1) 1157#define BP_VN(bp) ((bp)->pfid >> 1)
1099#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ 1158#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
1100#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1159#define BP_L_ID(bp) (BP_VN(bp) << 2)
1101#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ 1160#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
1102 BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1161 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
1162#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
1103 1163
1104 struct net_device *dev; 1164 struct net_device *dev;
1105 struct pci_dev *pdev; 1165 struct pci_dev *pdev;
@@ -1762,7 +1822,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1762 1822
1763#define MAX_DMAE_C_PER_PORT 8 1823#define MAX_DMAE_C_PER_PORT 8
1764#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1824#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1765 BP_E1HVN(bp)) 1825 BP_VN(bp))
1766#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1826#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1767 E1HVN_MAX) 1827 E1HVN_MAX)
1768 1828
@@ -1788,7 +1848,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1788 1848
1789/* must be used on a CID before placing it on a HW ring */ 1849/* must be used on a CID before placing it on a HW ring */
1790#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 1850#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1791 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ 1851 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
1792 (x)) 1852 (x))
1793 1853
1794#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) 1854#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
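
The new dropless flow-control macros in bnx2x.h derive their watermarks from the BRB size: the low threshold counts the buffers the ring must absorb plus the per-page "next" descriptors, and the high threshold adds a fixed headroom. A hedged arithmetic sketch of the BD threshold, with the 512-entry BRB from the patch and placeholder values where the real constants (MAX_SPQ_PENDING, MAX_AGG_QS, descriptors per page) are not visible here:

#include <stdio.h>

#define BRB_SIZE              512          /* non-E3 value from the patch */
#define MAX_RX_DESC_CNT       (512 - 2)    /* assuming 512 BDs per page minus 2 "next" BDs */
#define NEXT_PAGE_RX_DESC_CNT 2
#define FW_DROP_LEVEL         (3 + 8 + 64) /* 3 + MAX_SPQ_PENDING + MAX_AGG_QS: placeholder values */
#define DROPLESS_FC_HEADROOM  100

int main(void)
{
	unsigned int num_bd_req = BRB_SIZE;
	unsigned int num_bd_pg_req =
		(num_bd_req + MAX_RX_DESC_CNT - 1) / MAX_RX_DESC_CNT;   /* pages needed */
	unsigned int bd_th_lo = num_bd_req +
		num_bd_pg_req * NEXT_PAGE_RX_DESC_CNT + FW_DROP_LEVEL;
	unsigned int bd_th_hi = bd_th_lo + DROPLESS_FC_HEADROOM;

	printf("BD_TH_LO = %u, BD_TH_HI = %u\n", bd_th_lo, bd_th_hi);
	return 0;
}
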
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5c3eb17c4f4a..e575e89c7d46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -993,8 +993,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
993void bnx2x_init_rx_rings(struct bnx2x *bp) 993void bnx2x_init_rx_rings(struct bnx2x *bp)
994{ 994{
995 int func = BP_FUNC(bp); 995 int func = BP_FUNC(bp);
996 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
997 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
998 u16 ring_prod; 996 u16 ring_prod;
999 int i, j; 997 int i, j;
1000 998
@@ -1007,7 +1005,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1007 1005
1008 if (!fp->disable_tpa) { 1006 if (!fp->disable_tpa) {
 1009 /* Fill the per-aggregation pool */ 1007 /* Fill the per-aggregation pool */
1010 for (i = 0; i < max_agg_queues; i++) { 1008 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1011 struct bnx2x_agg_info *tpa_info = 1009 struct bnx2x_agg_info *tpa_info =
1012 &fp->tpa_info[i]; 1010 &fp->tpa_info[i];
1013 struct sw_rx_bd *first_buf = 1011 struct sw_rx_bd *first_buf =
@@ -1047,7 +1045,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1047 bnx2x_free_rx_sge_range(bp, fp, 1045 bnx2x_free_rx_sge_range(bp, fp,
1048 ring_prod); 1046 ring_prod);
1049 bnx2x_free_tpa_pool(bp, fp, 1047 bnx2x_free_tpa_pool(bp, fp,
1050 max_agg_queues); 1048 MAX_AGG_QS(bp));
1051 fp->disable_tpa = 1; 1049 fp->disable_tpa = 1;
1052 ring_prod = 0; 1050 ring_prod = 0;
1053 break; 1051 break;
@@ -1143,9 +1141,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1143 bnx2x_free_rx_bds(fp); 1141 bnx2x_free_rx_bds(fp);
1144 1142
1145 if (!fp->disable_tpa) 1143 if (!fp->disable_tpa)
1146 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1144 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1147 ETH_MAX_AGGREGATION_QUEUES_E1 :
1148 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1149 } 1145 }
1150} 1146}
1151 1147
@@ -3100,15 +3096,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3100 struct bnx2x_fastpath *fp = &bp->fp[index]; 3096 struct bnx2x_fastpath *fp = &bp->fp[index];
3101 int ring_size = 0; 3097 int ring_size = 0;
3102 u8 cos; 3098 u8 cos;
3099 int rx_ring_size = 0;
3103 3100
3104 /* if rx_ring_size specified - use it */ 3101 /* if rx_ring_size specified - use it */
3105 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3102 if (!bp->rx_ring_size) {
3106 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3107 3103
3108 /* allocate at least number of buffers required by FW */ 3104 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3109 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3105
3110 MIN_RX_SIZE_TPA, 3106 /* allocate at least number of buffers required by FW */
3111 rx_ring_size); 3107 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3108 MIN_RX_SIZE_TPA, rx_ring_size);
3109
3110 bp->rx_ring_size = rx_ring_size;
3111 } else
3112 rx_ring_size = bp->rx_ring_size;
3112 3113
3113 /* Common */ 3114 /* Common */
3114 sb = &bnx2x_fp(bp, index, status_blk); 3115 sb = &bnx2x_fp(bp, index, status_blk);
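
The bnx2x_cmn.c hunk computes the RX ring size only when the user has not set one, clamps it to the firmware minimum, and caches the result in bp->rx_ring_size so later queries report the value actually used. A sketch of that selection logic with made-up limits:

#include <stdio.h>

#define MAX_RX_AVAIL  4000   /* placeholder */
#define MIN_RX_SIZE   128    /* placeholder FW minimum */

static int rx_ring_size_cached;

/* Pick the RX ring size once and remember it instead of recomputing
 * it per queue (mirrors the restructuring in the patch; values are fake). */
static int pick_rx_ring_size(int requested, int num_queues)
{
	if (!requested) {
		int size = MAX_RX_AVAIL / num_queues;

		if (size < MIN_RX_SIZE)
			size = MIN_RX_SIZE;
		rx_ring_size_cached = size;
		return size;
	}
	return requested;
}

int main(void)
{
	printf("ring size = %d\n", pick_rx_ring_size(0, 8));
	return 0;
}
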
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ce14f11c0de5..a49f8cfa2dc6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -366,13 +366,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
366 } 366 }
367 367
368 /* advertise the requested speed and duplex if supported */ 368 /* advertise the requested speed and duplex if supported */
369 cmd->advertising &= bp->port.supported[cfg_idx]; 369 if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
370 DP(NETIF_MSG_LINK, "Advertisement parameters "
371 "are not supported\n");
372 return -EINVAL;
373 }
370 374
371 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; 375 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
372 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; 376 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
373 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | 377 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
374 cmd->advertising); 378 cmd->advertising);
379 if (cmd->advertising) {
380
381 bp->link_params.speed_cap_mask[cfg_idx] = 0;
382 if (cmd->advertising & ADVERTISED_10baseT_Half) {
383 bp->link_params.speed_cap_mask[cfg_idx] |=
384 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
385 }
386 if (cmd->advertising & ADVERTISED_10baseT_Full)
387 bp->link_params.speed_cap_mask[cfg_idx] |=
388 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
375 389
390 if (cmd->advertising & ADVERTISED_100baseT_Full)
391 bp->link_params.speed_cap_mask[cfg_idx] |=
392 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
393
394 if (cmd->advertising & ADVERTISED_100baseT_Half) {
395 bp->link_params.speed_cap_mask[cfg_idx] |=
396 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
397 }
398 if (cmd->advertising & ADVERTISED_1000baseT_Half) {
399 bp->link_params.speed_cap_mask[cfg_idx] |=
400 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
401 }
402 if (cmd->advertising & (ADVERTISED_1000baseT_Full |
403 ADVERTISED_1000baseKX_Full))
404 bp->link_params.speed_cap_mask[cfg_idx] |=
405 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
406
407 if (cmd->advertising & (ADVERTISED_10000baseT_Full |
408 ADVERTISED_10000baseKX4_Full |
409 ADVERTISED_10000baseKR_Full))
410 bp->link_params.speed_cap_mask[cfg_idx] |=
411 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
412 }
376 } else { /* forced speed */ 413 } else { /* forced speed */
377 /* advertise the requested speed and duplex if supported */ 414 /* advertise the requested speed and duplex if supported */
378 switch (speed) { 415 switch (speed) {
@@ -1313,10 +1350,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1313 if (bp->rx_ring_size) 1350 if (bp->rx_ring_size)
1314 ering->rx_pending = bp->rx_ring_size; 1351 ering->rx_pending = bp->rx_ring_size;
1315 else 1352 else
1316 if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) 1353 ering->rx_pending = MAX_RX_AVAIL;
1317 ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
1318 else
1319 ering->rx_pending = MAX_RX_AVAIL;
1320 1354
1321 ering->rx_mini_pending = 0; 1355 ering->rx_mini_pending = 0;
1322 ering->rx_jumbo_pending = 0; 1356 ering->rx_jumbo_pending = 0;
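
The ethtool hunk above stops silently masking unsupported advertisement bits: it now rejects them with -EINVAL and translates each advertised mode into the matching speed-capability bit. A compact sketch of that validate-then-translate step (the bit names and values below are illustrative, not the real ADVERTISED_*/PORT_HW_CFG_* constants):

#include <stdio.h>

#define ADV_10_FULL    0x01u
#define ADV_100_FULL   0x02u
#define ADV_1000_FULL  0x04u
#define ADV_10000_FULL 0x08u

#define CAP_10M_FULL   0x01u
#define CAP_100M_FULL  0x02u
#define CAP_1G         0x04u
#define CAP_10G        0x08u

static int build_speed_cap(unsigned int advertising, unsigned int supported,
			   unsigned int *cap_mask)
{
	if (advertising & ~supported)
		return -1;                  /* reject unsupported modes */

	*cap_mask = 0;
	if (advertising & ADV_10_FULL)    *cap_mask |= CAP_10M_FULL;
	if (advertising & ADV_100_FULL)   *cap_mask |= CAP_100M_FULL;
	if (advertising & ADV_1000_FULL)  *cap_mask |= CAP_1G;
	if (advertising & ADV_10000_FULL) *cap_mask |= CAP_10G;
	return 0;
}

int main(void)
{
	unsigned int mask;

	if (build_speed_cap(ADV_1000_FULL | ADV_10000_FULL,
			    ADV_100_FULL | ADV_1000_FULL | ADV_10000_FULL,
			    &mask) == 0)
		printf("speed_cap_mask = 0x%x\n", mask);
	return 0;
}
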
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 8e9b87be3002..818723c9e678 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
778{ 778{
779 u32 nig_reg_adress_crd_weight = 0; 779 u32 nig_reg_adress_crd_weight = 0;
780 u32 pbf_reg_adress_crd_weight = 0; 780 u32 pbf_reg_adress_crd_weight = 0;
781 /* Calculate and set BW for this COS*/ 781 /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
782 const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; 782 const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
783 const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; 783 const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
784 784
785 switch (cos_entry) { 785 switch (cos_entry) {
786 case 0: 786 case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
852 /* Calculate total BW requested */ 852 /* Calculate total BW requested */
853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { 854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
855 855 *total_bw +=
856 if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { 856 ets_params->cos[cos_idx].params.bw_params.bw;
857 DP(NETIF_MSG_LINK,
858 "bnx2x_ets_E3B0_config BW was set to 0\n");
859 return -EINVAL;
860 } 857 }
861 *total_bw +=
862 ets_params->cos[cos_idx].params.bw_params.bw;
863 }
864 } 858 }
865 859
866 /*Check taotl BW is valid */ 860 /* Check total BW is valid */
867 if ((100 != *total_bw) || (0 == *total_bw)) { 861 if ((100 != *total_bw) || (0 == *total_bw)) {
868 if (0 == *total_bw) { 862 if (0 == *total_bw) {
869 DP(NETIF_MSG_LINK, 863 DP(NETIF_MSG_LINK,
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
1726 1720
1727 /* Check loopback mode */ 1721 /* Check loopback mode */
1728 if (lb) 1722 if (lb)
1729 val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; 1723 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1724 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1731 bnx2x_set_xumac_nig(params, 1725 bnx2x_set_xumac_nig(params,
1732 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); 1726 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3630 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3624 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3625 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
3632 3626
3627 /* Advertised and set FEC (Forward Error Correction) */
3628 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3629 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
3630 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
3632
3633 /* Enable CL37 BAM */ 3633 /* Enable CL37 BAM */
3634 if (REG_RD(bp, params->shmem_base + 3634 if (REG_RD(bp, params->shmem_base +
3635 offsetof(struct shmem_region, dev_info. 3635 offsetof(struct shmem_region, dev_info.
@@ -5925,7 +5925,7 @@ int bnx2x_set_led(struct link_params *params,
5925 (tmp | EMAC_LED_OVERRIDE)); 5925 (tmp | EMAC_LED_OVERRIDE));
5926 /* 5926 /*
5927 * return here without enabling traffic 5927 * return here without enabling traffic
5928 * LED blink andsetting rate in ON mode. 5928 * LED blink and setting rate in ON mode.
5929 * In oper mode, enabling LED blink 5929 * In oper mode, enabling LED blink
5930 * and setting rate is needed. 5930 * and setting rate is needed.
5931 */ 5931 */
@@ -5937,7 +5937,11 @@ int bnx2x_set_led(struct link_params *params,
5937 * This is a work-around for HW issue found when link 5937 * This is a work-around for HW issue found when link
5938 * is up in CL73 5938 * is up in CL73
5939 */ 5939 */
5940 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5940 if ((!CHIP_IS_E3(bp)) ||
5941 (CHIP_IS_E3(bp) &&
5942 mode == LED_MODE_ON))
5943 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5944
5941 if (CHIP_IS_E1x(bp) || 5945 if (CHIP_IS_E1x(bp) ||
5942 CHIP_IS_E2(bp) || 5946 CHIP_IS_E2(bp) ||
5943 (mode == LED_MODE_ON)) 5947 (mode == LED_MODE_ON))
@@ -10644,8 +10648,7 @@ static struct bnx2x_phy phy_warpcore = {
10644 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10648 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10645 .addr = 0xff, 10649 .addr = 0xff,
10646 .def_md_devad = 0, 10650 .def_md_devad = 0,
10647 .flags = (FLAGS_HW_LOCK_REQUIRED | 10651 .flags = FLAGS_HW_LOCK_REQUIRED,
10648 FLAGS_TX_ERROR_CHECK),
10649 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10652 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10650 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10653 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10651 .mdio_ctrl = 0, 10654 .mdio_ctrl = 0,
@@ -10771,8 +10774,7 @@ static struct bnx2x_phy phy_8706 = {
10771 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10774 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
10772 .addr = 0xff, 10775 .addr = 0xff,
10773 .def_md_devad = 0, 10776 .def_md_devad = 0,
10774 .flags = (FLAGS_INIT_XGXS_FIRST | 10777 .flags = FLAGS_INIT_XGXS_FIRST,
10775 FLAGS_TX_ERROR_CHECK),
10776 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10778 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10777 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10779 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10778 .mdio_ctrl = 0, 10780 .mdio_ctrl = 0,
@@ -10803,8 +10805,7 @@ static struct bnx2x_phy phy_8726 = {
10803 .addr = 0xff, 10805 .addr = 0xff,
10804 .def_md_devad = 0, 10806 .def_md_devad = 0,
10805 .flags = (FLAGS_HW_LOCK_REQUIRED | 10807 .flags = (FLAGS_HW_LOCK_REQUIRED |
10806 FLAGS_INIT_XGXS_FIRST | 10808 FLAGS_INIT_XGXS_FIRST),
10807 FLAGS_TX_ERROR_CHECK),
10808 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10809 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10809 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10810 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10810 .mdio_ctrl = 0, 10811 .mdio_ctrl = 0,
@@ -10835,8 +10836,7 @@ static struct bnx2x_phy phy_8727 = {
10835 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10836 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
10836 .addr = 0xff, 10837 .addr = 0xff,
10837 .def_md_devad = 0, 10838 .def_md_devad = 0,
10838 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 10839 .flags = FLAGS_FAN_FAILURE_DET_REQ,
10839 FLAGS_TX_ERROR_CHECK),
10840 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10840 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10841 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10841 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10842 .mdio_ctrl = 0, 10842 .mdio_ctrl = 0,
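
The ETS change in bnx2x_link.c stops treating a 0% class bandwidth as an error and instead substitutes 1 when computing the credit weight, so a zero-bandwidth COS still gets a minimal share. A sketch of that weight calculation:

#include <stdio.h>

/* Credit weight per COS: use 1 instead of 0 so a 0% COS is not starved
 * completely (mirrors the "bw ? bw : 1" change in the patch). */
static unsigned int cos_credit_weight(unsigned int bw, unsigned int min_w_val,
				      unsigned int total_bw)
{
	return ((bw ? bw : 1) * min_w_val) / total_bw;
}

int main(void)
{
	printf("weight(0%%)  = %u\n", cos_credit_weight(0, 1000, 100));
	printf("weight(30%%) = %u\n", cos_credit_weight(30, 1000, 100));
	printf("weight(70%%) = %u\n", cos_credit_weight(70, 1000, 100));
	return 0;
}
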
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 85dd294aeaba..621ab281ed89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -408,8 +408,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
408 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 408 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
409 409
410 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 410 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
411 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
412 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 412 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
413 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 413 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
414 414
415#ifdef __BIG_ENDIAN 415#ifdef __BIG_ENDIAN
@@ -1417,7 +1417,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1417 if (!CHIP_IS_E1(bp)) { 1417 if (!CHIP_IS_E1(bp)) {
1418 /* init leading/trailing edge */ 1418 /* init leading/trailing edge */
1419 if (IS_MF(bp)) { 1419 if (IS_MF(bp)) {
1420 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1420 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1421 if (bp->port.pmf) 1421 if (bp->port.pmf)
1422 /* enable nig and gpio3 attention */ 1422 /* enable nig and gpio3 attention */
1423 val |= 0x1100; 1423 val |= 0x1100;
@@ -1469,7 +1469,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1469 1469
1470 /* init leading/trailing edge */ 1470 /* init leading/trailing edge */
1471 if (IS_MF(bp)) { 1471 if (IS_MF(bp)) {
1472 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1472 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1473 if (bp->port.pmf) 1473 if (bp->port.pmf)
1474 /* enable nig and gpio3 attention */ 1474 /* enable nig and gpio3 attention */
1475 val |= 0x1100; 1475 val |= 0x1100;
@@ -2285,7 +2285,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2285 int vn; 2285 int vn;
2286 2286
2287 bp->vn_weight_sum = 0; 2287 bp->vn_weight_sum = 0;
2288 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2288 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2289 u32 vn_cfg = bp->mf_config[vn]; 2289 u32 vn_cfg = bp->mf_config[vn];
2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2318,12 +2318,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2318 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2318 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2319} 2319}
2320 2320
2321/* returns func by VN for current port */
2322static inline int func_by_vn(struct bnx2x *bp, int vn)
2323{
2324 return 2 * vn + BP_PORT(bp);
2325}
2326
2321static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2327static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2322{ 2328{
2323 struct rate_shaping_vars_per_vn m_rs_vn; 2329 struct rate_shaping_vars_per_vn m_rs_vn;
2324 struct fairness_vars_per_vn m_fair_vn; 2330 struct fairness_vars_per_vn m_fair_vn;
2325 u32 vn_cfg = bp->mf_config[vn]; 2331 u32 vn_cfg = bp->mf_config[vn];
2326 int func = 2*vn + BP_PORT(bp); 2332 int func = func_by_vn(bp, vn);
2327 u16 vn_min_rate, vn_max_rate; 2333 u16 vn_min_rate, vn_max_rate;
2328 int i; 2334 int i;
2329 2335
@@ -2420,7 +2426,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2420 * 2426 *
2421 * and there are 2 functions per port 2427 * and there are 2 functions per port
2422 */ 2428 */
2423 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2429 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2424 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2430 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2425 2431
2426 if (func >= E1H_FUNC_MAX) 2432 if (func >= E1H_FUNC_MAX)
@@ -2452,7 +2458,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2452 2458
2453 /* calculate and set min-max rate for each vn */ 2459 /* calculate and set min-max rate for each vn */
2454 if (bp->port.pmf) 2460 if (bp->port.pmf)
2455 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2461 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2456 bnx2x_init_vn_minmax(bp, vn); 2462 bnx2x_init_vn_minmax(bp, vn);
2457 2463
2458 /* always enable rate shaping and fairness */ 2464 /* always enable rate shaping and fairness */
@@ -2471,16 +2477,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2471 2477
2472static inline void bnx2x_link_sync_notify(struct bnx2x *bp) 2478static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2473{ 2479{
2474 int port = BP_PORT(bp);
2475 int func; 2480 int func;
2476 int vn; 2481 int vn;
2477 2482
2478 /* Set the attention towards other drivers on the same port */ 2483 /* Set the attention towards other drivers on the same port */
2479 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2484 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2480 if (vn == BP_E1HVN(bp)) 2485 if (vn == BP_VN(bp))
2481 continue; 2486 continue;
2482 2487
2483 func = ((vn << 1) | port); 2488 func = func_by_vn(bp, vn);
2484 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 2489 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2485 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); 2490 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2486 } 2491 }
@@ -2575,7 +2580,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
2575 bnx2x_dcbx_pmf_update(bp); 2580 bnx2x_dcbx_pmf_update(bp);
2576 2581
2577 /* enable nig attention */ 2582 /* enable nig attention */
2578 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2583 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2579 if (bp->common.int_block == INT_BLOCK_HC) { 2584 if (bp->common.int_block == INT_BLOCK_HC) {
2580 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2585 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2581 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2586 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2754,8 +2759,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2754 u16 tpa_agg_size = 0; 2759 u16 tpa_agg_size = 0;
2755 2760
2756 if (!fp->disable_tpa) { 2761 if (!fp->disable_tpa) {
2757 pause->sge_th_hi = 250; 2762 pause->sge_th_lo = SGE_TH_LO(bp);
2758 pause->sge_th_lo = 150; 2763 pause->sge_th_hi = SGE_TH_HI(bp);
2764
2765 /* validate SGE ring has enough to cross high threshold */
2766 WARN_ON(bp->dropless_fc &&
2767 pause->sge_th_hi + FW_PREFETCH_CNT >
2768 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2769
2759 tpa_agg_size = min_t(u32, 2770 tpa_agg_size = min_t(u32,
2760 (min_t(u32, 8, MAX_SKB_FRAGS) * 2771 (min_t(u32, 8, MAX_SKB_FRAGS) *
2761 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 2772 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2769,10 +2780,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2769 2780
2770 /* pause - not for e1 */ 2781 /* pause - not for e1 */
2771 if (!CHIP_IS_E1(bp)) { 2782 if (!CHIP_IS_E1(bp)) {
2772 pause->bd_th_hi = 350; 2783 pause->bd_th_lo = BD_TH_LO(bp);
2773 pause->bd_th_lo = 250; 2784 pause->bd_th_hi = BD_TH_HI(bp);
2774 pause->rcq_th_hi = 350; 2785
2775 pause->rcq_th_lo = 250; 2786 pause->rcq_th_lo = RCQ_TH_LO(bp);
2787 pause->rcq_th_hi = RCQ_TH_HI(bp);
2788 /*
2789 * validate that rings have enough entries to cross
2790 * high thresholds
2791 */
2792 WARN_ON(bp->dropless_fc &&
2793 pause->bd_th_hi + FW_PREFETCH_CNT >
2794 bp->rx_ring_size);
2795 WARN_ON(bp->dropless_fc &&
2796 pause->rcq_th_hi + FW_PREFETCH_CNT >
2797 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2776 2798
2777 pause->pri_map = 1; 2799 pause->pri_map = 1;
2778 } 2800 }
@@ -2800,9 +2822,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2800	 * For PF Clients it should be the maximum available number.	2822	 * For PF Clients it should be the maximum available number.
2801 * VF driver(s) may want to define it to a smaller value. 2823 * VF driver(s) may want to define it to a smaller value.
2802 */ 2824 */
2803 rxq_init->max_tpa_queues = 2825 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2804 (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
2805 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
2806 2826
2807 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2827 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2808 rxq_init->fw_sb_id = fp->fw_sb_id; 2828 rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4804,6 +4824,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4804 hc_sm->time_to_expire = 0xFFFFFFFF; 4824 hc_sm->time_to_expire = 0xFFFFFFFF;
4805} 4825}
4806 4826
4827
4828/* allocates state machine ids. */
4829static inline
4830void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4831{
4832 /* zero out state machine indices */
4833 /* rx indices */
4834 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4835
4836 /* tx indices */
4837 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4838 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
4839 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
4840 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
4841
4842 /* map indices */
4843 /* rx indices */
4844 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
4845 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4846
4847 /* tx indices */
4848 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
4849 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4850 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
4851 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4852 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
4853 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
4855 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4856}
4857
4807static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 4858static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4808 u8 vf_valid, int fw_sb_id, int igu_sb_id) 4859 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4809{ 4860{
@@ -4835,6 +4886,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4835 hc_sm_p = sb_data_e2.common.state_machine; 4886 hc_sm_p = sb_data_e2.common.state_machine;
4836 sb_data_p = (u32 *)&sb_data_e2; 4887 sb_data_p = (u32 *)&sb_data_e2;
4837 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4888 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4889 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
4838 } else { 4890 } else {
4839 memset(&sb_data_e1x, 0, 4891 memset(&sb_data_e1x, 0,
4840 sizeof(struct hc_status_block_data_e1x)); 4892 sizeof(struct hc_status_block_data_e1x));
@@ -4849,6 +4901,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4849 hc_sm_p = sb_data_e1x.common.state_machine; 4901 hc_sm_p = sb_data_e1x.common.state_machine;
4850 sb_data_p = (u32 *)&sb_data_e1x; 4902 sb_data_p = (u32 *)&sb_data_e1x;
4851 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4903 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4904 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
4852 } 4905 }
4853 4906
4854 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 4907 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5798,7 +5851,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5798 * take the UNDI lock to protect undi_unload flow from accessing 5851 * take the UNDI lock to protect undi_unload flow from accessing
5799 * registers while we're resetting the chip 5852 * registers while we're resetting the chip
5800 */ 5853 */
5801 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5802 5855
5803 bnx2x_reset_common(bp); 5856 bnx2x_reset_common(bp);
5804 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5857 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5810,7 +5863,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5810 } 5863 }
5811 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5864 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5812 5865
5813 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5866 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5814 5867
5815 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5868 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5816 5869
@@ -6667,12 +6720,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
6667 if (CHIP_MODE_IS_4_PORT(bp)) 6720 if (CHIP_MODE_IS_4_PORT(bp))
6668 dsb_idx = BP_FUNC(bp); 6721 dsb_idx = BP_FUNC(bp);
6669 else 6722 else
6670 dsb_idx = BP_E1HVN(bp); 6723 dsb_idx = BP_VN(bp);
6671 6724
6672 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 6725 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
6673 IGU_BC_BASE_DSB_PROD + dsb_idx : 6726 IGU_BC_BASE_DSB_PROD + dsb_idx :
6674 IGU_NORM_BASE_DSB_PROD + dsb_idx); 6727 IGU_NORM_BASE_DSB_PROD + dsb_idx);
6675 6728
6729 /*
6730 * igu prods come in chunks of E1HVN_MAX (4) -
6731	 * it does not matter what the current chip mode is
6732 */
6676 for (i = 0; i < (num_segs * E1HVN_MAX); 6733 for (i = 0; i < (num_segs * E1HVN_MAX);
6677 i += E1HVN_MAX) { 6734 i += E1HVN_MAX) {
6678 addr = IGU_REG_PROD_CONS_MEMORY + 6735 addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7566,7 +7623,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7566 u32 val; 7623 u32 val;
7567 /* The mac address is written to entries 1-4 to 7624 /* The mac address is written to entries 1-4 to
7568 preserve entry 0 which is used by the PMF */ 7625 preserve entry 0 which is used by the PMF */
7569 u8 entry = (BP_E1HVN(bp) + 1)*8; 7626 u8 entry = (BP_VN(bp) + 1)*8;
7570 7627
7571 val = (mac_addr[0] << 8) | mac_addr[1]; 7628 val = (mac_addr[0] << 8) | mac_addr[1];
7572 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 7629 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8542,10 +8599,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8542 /* Check if there is any driver already loaded */ 8599 /* Check if there is any driver already loaded */
8543 val = REG_RD(bp, MISC_REG_UNPREPARED); 8600 val = REG_RD(bp, MISC_REG_UNPREPARED);
8544 if (val == 0x1) { 8601 if (val == 0x1) {
8545 /* Check if it is the UNDI driver 8602
8603 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8604 /*
8605 * Check if it is the UNDI driver
8546 * UNDI driver initializes CID offset for normal bell to 0x7 8606 * UNDI driver initializes CID offset for normal bell to 0x7
8547 */ 8607 */
8548 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8549 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 8608 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8550 if (val == 0x7) { 8609 if (val == 0x7) {
8551 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8610 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8583,9 +8642,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8583 bnx2x_fw_command(bp, reset_code, 0); 8642 bnx2x_fw_command(bp, reset_code, 0);
8584 } 8643 }
8585 8644
8586 /* now it's safe to release the lock */
8587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8588
8589 bnx2x_undi_int_disable(bp); 8645 bnx2x_undi_int_disable(bp);
8590 port = BP_PORT(bp); 8646 port = BP_PORT(bp);
8591 8647
@@ -8635,8 +8691,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8635 bp->fw_seq = 8691 bp->fw_seq =
8636 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 8692 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8637 DRV_MSG_SEQ_NUMBER_MASK); 8693 DRV_MSG_SEQ_NUMBER_MASK);
8638 } else 8694 }
8639 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8695
8696 /* now it's safe to release the lock */
8697 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8640 } 8698 }
8641} 8699}
8642 8700
@@ -8773,13 +8831,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8773static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8831static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8774{ 8832{
8775 int pfid = BP_FUNC(bp); 8833 int pfid = BP_FUNC(bp);
8776 int vn = BP_E1HVN(bp);
8777 int igu_sb_id; 8834 int igu_sb_id;
8778 u32 val; 8835 u32 val;
8779 u8 fid, igu_sb_cnt = 0; 8836 u8 fid, igu_sb_cnt = 0;
8780 8837
8781 bp->igu_base_sb = 0xff; 8838 bp->igu_base_sb = 0xff;
8782 if (CHIP_INT_MODE_IS_BC(bp)) { 8839 if (CHIP_INT_MODE_IS_BC(bp)) {
8840 int vn = BP_VN(bp);
8783 igu_sb_cnt = bp->igu_sb_cnt; 8841 igu_sb_cnt = bp->igu_sb_cnt;
8784 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8842 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8785 FP_SB_MAX_E1x; 8843 FP_SB_MAX_E1x;
@@ -9410,6 +9468,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9410 bp->igu_base_sb = 0; 9468 bp->igu_base_sb = 0;
9411 } else { 9469 } else {
9412 bp->common.int_block = INT_BLOCK_IGU; 9470 bp->common.int_block = INT_BLOCK_IGU;
9471
9472	 /* do not allow device reset during IGU info processing */
9473 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9474
9413 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9475 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9414 9476
9415 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 9477 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9441,6 +9503,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9441 9503
9442 bnx2x_get_igu_cam_info(bp); 9504 bnx2x_get_igu_cam_info(bp);
9443 9505
9506 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9444 } 9507 }
9445 9508
9446 /* 9509 /*
@@ -9467,7 +9530,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9467 9530
9468 bp->mf_ov = 0; 9531 bp->mf_ov = 0;
9469 bp->mf_mode = 0; 9532 bp->mf_mode = 0;
9470 vn = BP_E1HVN(bp); 9533 vn = BP_VN(bp);
9471 9534
9472 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9535 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
9473 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 9536 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9587,13 +9650,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9587 /* port info */ 9650 /* port info */
9588 bnx2x_get_port_hwinfo(bp); 9651 bnx2x_get_port_hwinfo(bp);
9589 9652
9590 if (!BP_NOMCP(bp)) {
9591 bp->fw_seq =
9592 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9593 DRV_MSG_SEQ_NUMBER_MASK);
9594 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9595 }
9596
9597 /* Get MAC addresses */ 9653 /* Get MAC addresses */
9598 bnx2x_get_mac_hwinfo(bp); 9654 bnx2x_get_mac_hwinfo(bp);
9599 9655
@@ -9759,6 +9815,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9759 if (!BP_NOMCP(bp)) 9815 if (!BP_NOMCP(bp))
9760 bnx2x_undi_unload(bp); 9816 bnx2x_undi_unload(bp);
9761 9817
9818 /* init fw_seq after undi_unload! */
9819 if (!BP_NOMCP(bp)) {
9820 bp->fw_seq =
9821 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9822 DRV_MSG_SEQ_NUMBER_MASK);
9823 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9824 }
9825
9762 if (CHIP_REV_IS_FPGA(bp)) 9826 if (CHIP_REV_IS_FPGA(bp))
9763 dev_err(&bp->pdev->dev, "FPGA detected\n"); 9827 dev_err(&bp->pdev->dev, "FPGA detected\n");
9764 9828
@@ -10253,17 +10317,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10253 /* clean indirect addresses */ 10317 /* clean indirect addresses */
10254 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10318 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10255 PCICFG_VENDOR_ID_OFFSET); 10319 PCICFG_VENDOR_ID_OFFSET);
10256 /* Clean the following indirect addresses for all functions since it 10320 /*
10321 * Clean the following indirect addresses for all functions since it
10257 * is not used by the driver. 10322 * is not used by the driver.
10258 */ 10323 */
10259 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 10324 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10260 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 10325 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10261 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 10326 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10262 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 10327 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10263 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10328
10264 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10329 if (CHIP_IS_E1x(bp)) {
10265 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10330 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10266 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10331 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10332 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10333 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10334 }
10267 10335
10268 /* 10336 /*
10269 * Enable internal target-read (in case we are probed after PF FLR). 10337 * Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 40266c14e6dc..750e8445dac4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5320,7 +5320,7 @@
5320#define XCM_REG_XX_OVFL_EVNT_ID 0x20058 5320#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
5321#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) 5321#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
5322#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) 5322#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
5323#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) 5323#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
5324#define XMAC_CTRL_REG_RX_EN (0x1<<1) 5324#define XMAC_CTRL_REG_RX_EN (0x1<<1)
5325#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) 5325#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
5326#define XMAC_CTRL_REG_TX_EN (0x1<<0) 5326#define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5766,7 +5766,7 @@
5766#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 5766#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
5767#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 5767#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
5768#define HW_LOCK_RESOURCE_SPIO 2 5768#define HW_LOCK_RESOURCE_SPIO 2
5769#define HW_LOCK_RESOURCE_UNDI 5 5769#define HW_LOCK_RESOURCE_RESET 5
5770#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) 5770#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5771#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) 5771#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
5772#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) 5772#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6853,6 +6853,9 @@ The other bits are reserved and should be zero*/
6853#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 6853#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
6854#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 6854#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
6855#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 6855#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
6856#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
6857#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
6858#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
6856#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 6859#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
6857#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 6860#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
6858#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e 6861#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 628f7b99614f..02ac6a771bf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -713,7 +713,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
713 break; 713 break;
714 714
715 case MAC_TYPE_NONE: /* unreached */ 715 case MAC_TYPE_NONE: /* unreached */
716 BNX2X_ERR("stats updated by DMAE but no MAC active\n"); 716 DP(BNX2X_MSG_STATS,
717 "stats updated by DMAE but no MAC active\n");
717 return -1; 718 return -1;
718 719
719 default: /* unreached */ 720 default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1391 1392
1392static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1393static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1393{ 1394{
1394 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; 1395 int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
1395 u32 func_stx; 1396 u32 func_stx;
1396 1397
1397 /* sanity */ 1398 /* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1404 func_stx = bp->func_stx; 1405 func_stx = bp->func_stx;
1405 1406
1406 for (vn = VN_0; vn < vn_max; vn++) { 1407 for (vn = VN_0; vn < vn_max; vn++) {
1407 int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; 1408 int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
1408 1409
1409 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); 1410 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1410 bnx2x_func_stats_init(bp); 1411 bnx2x_func_stats_init(bp);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1485013b4b8c..26c6bd44a604 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6738,12 +6738,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6738 !mss && skb->len > VLAN_ETH_FRAME_LEN) 6738 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6739 base_flags |= TXD_FLAG_JMB_PKT; 6739 base_flags |= TXD_FLAG_JMB_PKT;
6740 6740
6741#ifdef BCM_KERNEL_SUPPORTS_8021Q
6742 if (vlan_tx_tag_present(skb)) { 6741 if (vlan_tx_tag_present(skb)) {
6743 base_flags |= TXD_FLAG_VLAN; 6742 base_flags |= TXD_FLAG_VLAN;
6744 vlan = vlan_tx_tag_get(skb); 6743 vlan = vlan_tx_tag_get(skb);
6745 } 6744 }
6746#endif
6747 6745
6748 len = skb_headlen(skb); 6746 len = skb_headlen(skb);
6749 6747
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index f30b96fee840..212736bab6bb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
1669 u32 i = 0; 1669 u32 i = 0;
1670 1670
1671 list_for_each_entry(comp, &priv->rx_list.list, list) { 1671 list_for_each_entry(comp, &priv->rx_list.list, list) {
1672 if (i <= cmd->rule_cnt) { 1672 if (i == cmd->rule_cnt)
1673 rule_locs[i] = comp->fs.location; 1673 return -EMSGSIZE;
1674 i++; 1674 rule_locs[i] = comp->fs.location;
1675 } 1675 i++;
1676 } 1676 }
1677 1677
1678 cmd->data = MAX_FILER_IDX; 1678 cmd->data = MAX_FILER_IDX;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 8cca4a62b397..72b84de48756 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
395} 395}
396 396
397/* recycle the current buffer on the rx queue */ 397/* recycle the current buffer on the rx queue */
398static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) 398static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
399{ 399{
400 u32 q_index = adapter->rx_queue.index; 400 u32 q_index = adapter->rx_queue.index;
401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; 401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
403 unsigned int index = correlator & 0xffffffffUL; 403 unsigned int index = correlator & 0xffffffffUL;
404 union ibmveth_buf_desc desc; 404 union ibmveth_buf_desc desc;
405 unsigned long lpar_rc; 405 unsigned long lpar_rc;
406 int ret = 1;
406 407
407 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 408 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
408 BUG_ON(index >= adapter->rx_buff_pool[pool].size); 409 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
410 if (!adapter->rx_buff_pool[pool].active) { 411 if (!adapter->rx_buff_pool[pool].active) {
411 ibmveth_rxq_harvest_buffer(adapter); 412 ibmveth_rxq_harvest_buffer(adapter);
412 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 413 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
413 return; 414 goto out;
414 } 415 }
415 416
416 desc.fields.flags_len = IBMVETH_BUF_VALID | 417 desc.fields.flags_len = IBMVETH_BUF_VALID |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
423 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " 424 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
424 "during recycle rc=%ld", lpar_rc); 425 "during recycle rc=%ld", lpar_rc);
425 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 426 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
427 ret = 0;
426 } 428 }
427 429
428 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 430 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
429 adapter->rx_queue.index = 0; 431 adapter->rx_queue.index = 0;
430 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 432 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
431 } 433 }
434
435out:
436 return ret;
432} 437}
433 438
434static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) 439static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
@@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
752 struct ibmveth_adapter *adapter = netdev_priv(dev); 757 struct ibmveth_adapter *adapter = netdev_priv(dev);
753 unsigned long set_attr, clr_attr, ret_attr; 758 unsigned long set_attr, clr_attr, ret_attr;
754 unsigned long set_attr6, clr_attr6; 759 unsigned long set_attr6, clr_attr6;
755 long ret, ret6; 760 long ret, ret4, ret6;
756 int rc1 = 0, rc2 = 0; 761 int rc1 = 0, rc2 = 0;
757 int restart = 0; 762 int restart = 0;
758 763
@@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
765 770
766 set_attr = 0; 771 set_attr = 0;
767 clr_attr = 0; 772 clr_attr = 0;
773 set_attr6 = 0;
774 clr_attr6 = 0;
768 775
769 if (data) { 776 if (data) {
770 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 777 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
779 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && 786 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
780 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && 787 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
781 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { 788 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
782 ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 789 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
783 set_attr, &ret_attr); 790 set_attr, &ret_attr);
784 791
785 if (ret != H_SUCCESS) { 792 if (ret4 != H_SUCCESS) {
786 netdev_err(dev, "unable to change IPv4 checksum " 793 netdev_err(dev, "unable to change IPv4 checksum "
787 "offload settings. %d rc=%ld\n", 794 "offload settings. %d rc=%ld\n",
788 data, ret); 795 data, ret4);
796
797 h_illan_attributes(adapter->vdev->unit_address,
798 set_attr, clr_attr, &ret_attr);
799
800 if (data == 1)
801 dev->features &= ~NETIF_F_IP_CSUM;
789 802
790 ret = h_illan_attributes(adapter->vdev->unit_address,
791 set_attr, clr_attr, &ret_attr);
792 } else { 803 } else {
793 adapter->fw_ipv4_csum_support = data; 804 adapter->fw_ipv4_csum_support = data;
794 } 805 }
@@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
799 if (ret6 != H_SUCCESS) { 810 if (ret6 != H_SUCCESS) {
800 netdev_err(dev, "unable to change IPv6 checksum " 811 netdev_err(dev, "unable to change IPv6 checksum "
801 "offload settings. %d rc=%ld\n", 812 "offload settings. %d rc=%ld\n",
802 data, ret); 813 data, ret6);
814
815 h_illan_attributes(adapter->vdev->unit_address,
816 set_attr6, clr_attr6, &ret_attr);
817
818 if (data == 1)
819 dev->features &= ~NETIF_F_IPV6_CSUM;
803 820
804 ret = h_illan_attributes(adapter->vdev->unit_address,
805 set_attr6, clr_attr6,
806 &ret_attr);
807 } else 821 } else
808 adapter->fw_ipv6_csum_support = data; 822 adapter->fw_ipv6_csum_support = data;
809 823
810 if (ret != H_SUCCESS || ret6 != H_SUCCESS) 824 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
811 adapter->rx_csum = data; 825 adapter->rx_csum = data;
812 else 826 else
813 rc1 = -EIO; 827 rc1 = -EIO;
@@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
925 union ibmveth_buf_desc descs[6]; 939 union ibmveth_buf_desc descs[6];
926 int last, i; 940 int last, i;
927 int force_bounce = 0; 941 int force_bounce = 0;
942 dma_addr_t dma_addr;
928 943
929 /* 944 /*
930 * veth handles a maximum of 6 segments including the header, so 945 * veth handles a maximum of 6 segments including the header, so
@@ -989,17 +1004,16 @@ retry_bounce:
989 } 1004 }
990 1005
991 /* Map the header */ 1006 /* Map the header */
992 descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, 1007 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
993 skb_headlen(skb), 1008 skb_headlen(skb), DMA_TO_DEVICE);
994 DMA_TO_DEVICE); 1009 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
995 if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
996 goto map_failed; 1010 goto map_failed;
997 1011
998 descs[0].fields.flags_len = desc_flags | skb_headlen(skb); 1012 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1013 descs[0].fields.address = dma_addr;
999 1014
1000 /* Map the frags */ 1015 /* Map the frags */
1001 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1002 unsigned long dma_addr;
1003 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1017 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1004 1018
1005 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, 1019 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
@@ -1020,7 +1034,12 @@ retry_bounce:
1020 netdev->stats.tx_bytes += skb->len; 1034 netdev->stats.tx_bytes += skb->len;
1021 } 1035 }
1022 1036
1023 for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) 1037 dma_unmap_single(&adapter->vdev->dev,
1038 descs[0].fields.address,
1039 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1040 DMA_TO_DEVICE);
1041
1042 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1024 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 1043 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1025 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1044 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1026 DMA_TO_DEVICE); 1045 DMA_TO_DEVICE);
@@ -1083,8 +1102,9 @@ restart_poll:
1083 if (rx_flush) 1102 if (rx_flush)
1084 ibmveth_flush_buffer(skb->data, 1103 ibmveth_flush_buffer(skb->data,
1085 length + offset); 1104 length + offset);
1105 if (!ibmveth_rxq_recycle_buffer(adapter))
1106 kfree_skb(skb);
1086 skb = new_skb; 1107 skb = new_skb;
1087 ibmveth_rxq_recycle_buffer(adapter);
1088 } else { 1108 } else {
1089 ibmveth_rxq_harvest_buffer(adapter); 1109 ibmveth_rxq_harvest_buffer(adapter);
1090 skb_reserve(skb, offset); 1110 skb_reserve(skb, offset);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8545c7aa93eb..a5a89ecb6f36 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
4026 checksum += eeprom_data; 4026 checksum += eeprom_data;
4027 } 4027 }
4028 4028
4029#ifdef CONFIG_PARISC
4030 /* This is a signature and not a checksum on HP c8000 */
4031 if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
4032 return E1000_SUCCESS;
4033
4034#endif
4029 if (checksum == (u16) EEPROM_SUM) 4035 if (checksum == (u16) EEPROM_SUM)
4030 return E1000_SUCCESS; 4036 return E1000_SUCCESS;
4031 else { 4037 else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 49e82de136a7..08439ca60734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1306,8 +1306,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1306 if (ring_is_rsc_enabled(rx_ring)) 1306 if (ring_is_rsc_enabled(rx_ring))
1307 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); 1307 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1308 1308
1309 /* if this is a skb from previous receive DMA will be 0 */ 1309 /* linear means we are building an skb from multiple pages */
1310 if (rx_buffer_info->dma) { 1310 if (!skb_is_nonlinear(skb)) {
1311 u16 hlen; 1311 u16 hlen;
1312 if (pkt_is_rsc && 1312 if (pkt_is_rsc &&
1313 !(staterr & IXGBE_RXD_STAT_EOP) && 1313 !(staterr & IXGBE_RXD_STAT_EOP) &&
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1a3033d8e7ed..d17d0624c5e6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -40,6 +40,7 @@
40#include <linux/clk.h> 40#include <linux/clk.h>
41#include <linux/phy.h> 41#include <linux/phy.h>
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/interrupt.h>
43#include <linux/types.h> 44#include <linux/types.h>
44#include <asm/pgtable.h> 45#include <asm/pgtable.h>
45#include <asm/system.h> 46#include <asm/system.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 7efa62427235..00bc4fc968c7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI 7 depends on PCI
8 select NET_CORE 8 select NET_CORE
9 select MII 9 select MII
@@ -15,7 +15,8 @@ config PCH_GBE
15 to Gigabit Ethernet. This driver enables Gigabit Ethernet function. 15 to Gigabit Ethernet. This driver enables Gigabit Ethernet function.
16 16
17 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 17 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
18 Output Hub), ML7223. 18 Output Hub), ML7223/ML7831.
19 ML7223 IOH is for MP(Media Phone) use. 19 ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
20 ML7223 is companion chip for Intel Atom E6xx series. 20 purpose use.
21 ML7223 is completely compatible for Intel EG20T PCH. 21 ML7223/ML7831 is companion chip for Intel Atom E6xx series.
22 ML7223/ML7831 is completely compatible for Intel EG20T PCH.
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 59fac77d0dbb..a09a07197eb5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
127 127
128/* Reset */ 128/* Reset */
129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */ 129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
130#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ 130#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
131#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ 131#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
132 132
133/* TCP/IP Accelerator Control */ 133/* TCP/IP Accelerator Control */
134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ 134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ 276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ 277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
278 278
279/* RX DMA STATUS */
280#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
281
279/* Wake On LAN Status */ 282/* Wake On LAN Status */
280#define PCH_GBE_WLS_BR		0x00000008 /* Broadcast Address */	283#define PCH_GBE_WLS_BR		0x00000008 /* Broadcast Address */
281#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ 284#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
471struct pch_gbe_buffer { 474struct pch_gbe_buffer {
472 struct sk_buff *skb; 475 struct sk_buff *skb;
473 dma_addr_t dma; 476 dma_addr_t dma;
477 unsigned char *rx_buffer;
474 unsigned long time_stamp; 478 unsigned long time_stamp;
475 u16 length; 479 u16 length;
476 bool mapped; 480 bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
511struct pch_gbe_rx_ring { 515struct pch_gbe_rx_ring {
512 struct pch_gbe_rx_desc *desc; 516 struct pch_gbe_rx_desc *desc;
513 dma_addr_t dma; 517 dma_addr_t dma;
518 unsigned char *rx_buff_pool;
519 dma_addr_t rx_buff_pool_logic;
520 unsigned int rx_buff_pool_size;
514 unsigned int size; 521 unsigned int size;
515 unsigned int count; 522 unsigned int count;
516 unsigned int next_to_use; 523 unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
622 unsigned long rx_buffer_len; 629 unsigned long rx_buffer_len;
623 unsigned long tx_queue_len; 630 unsigned long tx_queue_len;
624 bool have_msi; 631 bool have_msi;
632 bool rx_stop_flag;
625}; 633};
626 634
627extern const char pch_driver_version[]; 635extern const char pch_driver_version[];
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 72276fe78f8f..35a7c21680b3 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
20 20
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/prefetch.h>
24 23
25#define DRV_VERSION "1.00" 24#define DRV_VERSION "1.00"
26const char pch_driver_version[] = DRV_VERSION; 25const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
34#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
35#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
36#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
36#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
37 37
38/* Macros for ML7223 */ 38/* Macros for ML7223 */
39#define PCI_VENDOR_ID_ROHM 0x10db 39#define PCI_VENDOR_ID_ROHM 0x10db
40#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 40#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
41 41
42/* Macros for ML7831 */
43#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
44
42#define PCH_GBE_TX_WEIGHT 64 45#define PCH_GBE_TX_WEIGHT 64
43#define PCH_GBE_RX_WEIGHT 64 46#define PCH_GBE_RX_WEIGHT 64
44#define PCH_GBE_RX_BUFFER_WRITE 16 47#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
52 ) 55 )
53 56
54/* Ethertype field values */ 57/* Ethertype field values */
58#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
55#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 59#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
56#define PCH_GBE_FRAME_SIZE_2048 2048 60#define PCH_GBE_FRAME_SIZE_2048 2048
57#define PCH_GBE_FRAME_SIZE_4096 4096 61#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
83#define PCH_GBE_INT_ENABLE_MASK ( \ 87#define PCH_GBE_INT_ENABLE_MASK ( \
84 PCH_GBE_INT_RX_DMA_CMPLT | \ 88 PCH_GBE_INT_RX_DMA_CMPLT | \
85 PCH_GBE_INT_RX_DSC_EMP | \ 89 PCH_GBE_INT_RX_DSC_EMP | \
90 PCH_GBE_INT_RX_FIFO_ERR | \
86 PCH_GBE_INT_WOL_DET | \ 91 PCH_GBE_INT_WOL_DET | \
87 PCH_GBE_INT_TX_CMPLT \ 92 PCH_GBE_INT_TX_CMPLT \
88 ) 93 )
89 94
95#define PCH_GBE_INT_DISABLE_ALL 0
90 96
91static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 97static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
92 98
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
138 if (!tmp) 144 if (!tmp)
139 pr_err("Error: busy bit is not cleared\n"); 145 pr_err("Error: busy bit is not cleared\n");
140} 146}
147
148/**
149 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
150 * @reg: Pointer of register
151 * @busy: Busy bit
152 */
153static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
154{
155 u32 tmp;
156 int ret = -1;
157 /* wait busy */
158 tmp = 20;
159 while ((ioread32(reg) & bit) && --tmp)
160 udelay(5);
161 if (!tmp)
162 pr_err("Error: busy bit is not cleared\n");
163 else
164 ret = 0;
165 return ret;
166}
167
141/** 168/**
142 * pch_gbe_mac_mar_set - Set MAC address register 169 * pch_gbe_mac_mar_set - Set MAC address register
143 * @hw: Pointer to the HW structure 170 * @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
189 return; 216 return;
190} 217}
191 218
219static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
220{
221	 /* Read the MAC address and store it in the private data */
222 pch_gbe_mac_read_mac_addr(hw);
223 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
224 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
225 /* Setup the MAC address */
226 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
227 return;
228}
229
192/** 230/**
193 * pch_gbe_mac_init_rx_addrs - Initialize receive address's 231 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
194 * @hw: Pointer to the HW structure 232 * @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
671 709
672 tcpip = ioread32(&hw->reg->TCPIP_ACC); 710 tcpip = ioread32(&hw->reg->TCPIP_ACC);
673 711
674 if (netdev->features & NETIF_F_RXCSUM) { 712 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
675 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 713 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
676 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
677 } else {
678 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
679 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
680 }
681 iowrite32(tcpip, &hw->reg->TCPIP_ACC); 714 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
682 return; 715 return;
683} 716}
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
717 iowrite32(rdba, &hw->reg->RX_DSC_BASE); 750 iowrite32(rdba, &hw->reg->RX_DSC_BASE);
718 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); 751 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
719 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); 752 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
720
721 /* Enables Receive DMA */
722 rxdma = ioread32(&hw->reg->DMA_CTRL);
723 rxdma |= PCH_GBE_RX_DMA_EN;
724 iowrite32(rxdma, &hw->reg->DMA_CTRL);
725 /* Enables Receive */
726 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
727} 753}
728 754
729/** 755/**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1097 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1123 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1098} 1124}
1099 1125
1126static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1127{
1128 struct pch_gbe_hw *hw = &adapter->hw;
1129 u32 rxdma;
1130 u16 value;
1131 int ret;
1132
1133 /* Disable Receive DMA */
1134 rxdma = ioread32(&hw->reg->DMA_CTRL);
1135 rxdma &= ~PCH_GBE_RX_DMA_EN;
1136 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1137	 /* Wait until the Rx DMA bus is idle */
1138 ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1139 if (ret) {
1140 /* Disable Bus master */
1141 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1142 value &= ~PCI_COMMAND_MASTER;
1143 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1144 /* Stop Receive */
1145 pch_gbe_mac_reset_rx(hw);
1146 /* Enable Bus master */
1147 value |= PCI_COMMAND_MASTER;
1148 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1149 } else {
1150 /* Stop Receive */
1151 pch_gbe_mac_reset_rx(hw);
1152 }
1153}
1154
1155static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1156{
1157 u32 rxdma;
1158
1159 /* Enables Receive DMA */
1160 rxdma = ioread32(&hw->reg->DMA_CTRL);
1161 rxdma |= PCH_GBE_RX_DMA_EN;
1162 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1163 /* Enables Receive */
1164 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1165 return;
1166}
1167
1100/** 1168/**
1101 * pch_gbe_intr - Interrupt Handler 1169 * pch_gbe_intr - Interrupt Handler
1102 * @irq: Interrupt number 1170 * @irq: Interrupt number
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1123 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1191 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1124 adapter->stats.intr_rx_frame_err_count++; 1192 adapter->stats.intr_rx_frame_err_count++;
1125 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1193 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1126 adapter->stats.intr_rx_fifo_err_count++; 1194 if (!adapter->rx_stop_flag) {
1195 adapter->stats.intr_rx_fifo_err_count++;
1196 pr_debug("Rx fifo over run\n");
1197 adapter->rx_stop_flag = true;
1198 int_en = ioread32(&hw->reg->INT_EN);
1199 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1200 &hw->reg->INT_EN);
1201 pch_gbe_stop_receive(adapter);
1202 }
1127 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1203 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1128 adapter->stats.intr_rx_dma_err_count++; 1204 adapter->stats.intr_rx_dma_err_count++;
1129 if (int_st & PCH_GBE_INT_TX_FIFO_ERR) 1205 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1135 /* When Rx descriptor is empty */ 1211 /* When Rx descriptor is empty */
1136 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1212 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1137 adapter->stats.intr_rx_dsc_empty_count++; 1213 adapter->stats.intr_rx_dsc_empty_count++;
1138 pr_err("Rx descriptor is empty\n"); 1214 pr_debug("Rx descriptor is empty\n");
1139 int_en = ioread32(&hw->reg->INT_EN); 1215 int_en = ioread32(&hw->reg->INT_EN);
1140 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1216 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1141 if (hw->mac.tx_fc_enable) { 1217 if (hw->mac.tx_fc_enable) {
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1185 unsigned int i; 1261 unsigned int i;
1186 unsigned int bufsz; 1262 unsigned int bufsz;
1187 1263
1188 bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; 1264 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1189 i = rx_ring->next_to_use; 1265 i = rx_ring->next_to_use;
1190 1266
1191 while ((cleaned_count--)) { 1267 while ((cleaned_count--)) {
1192 buffer_info = &rx_ring->buffer_info[i]; 1268 buffer_info = &rx_ring->buffer_info[i];
1193 skb = buffer_info->skb; 1269 skb = netdev_alloc_skb(netdev, bufsz);
1194 if (skb) { 1270 if (unlikely(!skb)) {
1195 skb_trim(skb, 0); 1271 /* Better luck next round */
1196 } else { 1272 adapter->stats.rx_alloc_buff_failed++;
1197 skb = netdev_alloc_skb(netdev, bufsz); 1273 break;
1198 if (unlikely(!skb)) {
1199 /* Better luck next round */
1200 adapter->stats.rx_alloc_buff_failed++;
1201 break;
1202 }
1203 /* 64byte align */
1204 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1205
1206 buffer_info->skb = skb;
1207 buffer_info->length = adapter->rx_buffer_len;
1208 } 1274 }
1275 /* align */
1276 skb_reserve(skb, NET_IP_ALIGN);
1277 buffer_info->skb = skb;
1278
1209 buffer_info->dma = dma_map_single(&pdev->dev, 1279 buffer_info->dma = dma_map_single(&pdev->dev,
1210 skb->data, 1280 buffer_info->rx_buffer,
1211 buffer_info->length, 1281 buffer_info->length,
1212 DMA_FROM_DEVICE); 1282 DMA_FROM_DEVICE);
1213 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1283 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1240 return; 1310 return;
1241} 1311}
1242 1312
1313static int
1314pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1315 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1316{
1317 struct pci_dev *pdev = adapter->pdev;
1318 struct pch_gbe_buffer *buffer_info;
1319 unsigned int i;
1320 unsigned int bufsz;
1321 unsigned int size;
1322
1323 bufsz = adapter->rx_buffer_len;
1324
1325 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1326 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1327 &rx_ring->rx_buff_pool_logic,
1328 GFP_KERNEL);
1329 if (!rx_ring->rx_buff_pool) {
1330 pr_err("Unable to allocate memory for the receive poll buffer\n");
1331 return -ENOMEM;
1332 }
1333 memset(rx_ring->rx_buff_pool, 0, size);
1334 rx_ring->rx_buff_pool_size = size;
1335 for (i = 0; i < rx_ring->count; i++) {
1336 buffer_info = &rx_ring->buffer_info[i];
1337 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1338 buffer_info->length = bufsz;
1339 }
1340 return 0;
1341}
1342
1243/** 1343/**
1244 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers 1344 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1245 * @adapter: Board private structure 1345 * @adapter: Board private structure
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1380 unsigned int i; 1480 unsigned int i;
1381 unsigned int cleaned_count = 0; 1481 unsigned int cleaned_count = 0;
1382 bool cleaned = false; 1482 bool cleaned = false;
1383 struct sk_buff *skb, *new_skb; 1483 struct sk_buff *skb;
1384 u8 dma_status; 1484 u8 dma_status;
1385 u16 gbec_status; 1485 u16 gbec_status;
1386 u32 tcp_ip_status; 1486 u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1401 rx_desc->gbec_status = DSC_INIT16; 1501 rx_desc->gbec_status = DSC_INIT16;
1402 buffer_info = &rx_ring->buffer_info[i]; 1502 buffer_info = &rx_ring->buffer_info[i];
1403 skb = buffer_info->skb; 1503 skb = buffer_info->skb;
1504 buffer_info->skb = NULL;
1404 1505
1405 /* unmap dma */ 1506 /* unmap dma */
1406 dma_unmap_single(&pdev->dev, buffer_info->dma, 1507 dma_unmap_single(&pdev->dev, buffer_info->dma,
1407 buffer_info->length, DMA_FROM_DEVICE); 1508 buffer_info->length, DMA_FROM_DEVICE);
1408 buffer_info->mapped = false; 1509 buffer_info->mapped = false;
1409 /* Prefetch the packet */
1410 prefetch(skb->data);
1411 1510
1412 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1511 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1413 "TCP:0x%08x] BufInf = 0x%p\n", 1512 "TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1427 pr_err("Receive CRC Error\n"); 1526 pr_err("Receive CRC Error\n");
1428 } else { 1527 } else {
1429 /* get receive length */ 1528 /* get receive length */
1430 /* length convert[-3] */ 1529 /* length convert[-3], length includes FCS length */
1431 length = (rx_desc->rx_words_eob) - 3; 1530 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1432 1531 if (rx_desc->rx_words_eob & 0x02)
1433 /* Decide the data conversion method */ 1532 length = length - 4;
1434 if (!(netdev->features & NETIF_F_RXCSUM)) { 1533 /*
1435 /* [Header:14][payload] */ 1534 * buffer_info->rx_buffer: [Header:14][payload]
1436 if (NET_IP_ALIGN) { 1535 * skb->data: [Reserve:2][Header:14][payload]
1437 /* Because alignment differs, 1536 */
1438 * the new_skb is newly allocated, 1537 memcpy(skb->data, buffer_info->rx_buffer, length);
1439 * and data is copied to new_skb.*/ 1538
1440 new_skb = netdev_alloc_skb(netdev,
1441 length + NET_IP_ALIGN);
1442 if (!new_skb) {
1443 /* dorrop error */
1444 pr_err("New skb allocation "
1445 "Error\n");
1446 goto dorrop;
1447 }
1448 skb_reserve(new_skb, NET_IP_ALIGN);
1449 memcpy(new_skb->data, skb->data,
1450 length);
1451 skb = new_skb;
1452 } else {
1453 /* DMA buffer is used as SKB as it is.*/
1454 buffer_info->skb = NULL;
1455 }
1456 } else {
1457 /* [Header:14][padding:2][payload] */
1458 /* The length includes padding length */
1459 length = length - PCH_GBE_DMA_PADDING;
1460 if ((length < copybreak) ||
1461 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1462 /* Because alignment differs,
1463 * the new_skb is newly allocated,
1464 * and data is copied to new_skb.
1465 * Padding data is deleted
1466 * at the time of a copy.*/
1467 new_skb = netdev_alloc_skb(netdev,
1468 length + NET_IP_ALIGN);
1469 if (!new_skb) {
1470 /* dorrop error */
1471 pr_err("New skb allocation "
1472 "Error\n");
1473 goto dorrop;
1474 }
1475 skb_reserve(new_skb, NET_IP_ALIGN);
1476 memcpy(new_skb->data, skb->data,
1477 ETH_HLEN);
1478 memcpy(&new_skb->data[ETH_HLEN],
1479 &skb->data[ETH_HLEN +
1480 PCH_GBE_DMA_PADDING],
1481 length - ETH_HLEN);
1482 skb = new_skb;
1483 } else {
1484 /* Padding data is deleted
1485 * by moving header data.*/
1486 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1487 &skb->data[0], ETH_HLEN);
1488 skb_reserve(skb, NET_IP_ALIGN);
1489 buffer_info->skb = NULL;
1490 }
1491 }
1492 /* The length includes FCS length */
1493 length = length - ETH_FCS_LEN;
1494 /* update status of driver */ 1539 /* update status of driver */
1495 adapter->stats.rx_bytes += length; 1540 adapter->stats.rx_bytes += length;
1496 adapter->stats.rx_packets++; 1541 adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1509 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1554 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1510 skb->ip_summed, length); 1555 skb->ip_summed, length);
1511 } 1556 }
1512dorrop:
1513 /* return some buffers to hardware, one at a time is too slow */ 1557 /* return some buffers to hardware, one at a time is too slow */
1514 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1558 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1515 pch_gbe_alloc_rx_buffers(adapter, rx_ring, 1559 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1714 pr_err("Error: can't bring device up\n"); 1758 pr_err("Error: can't bring device up\n");
1715 return err; 1759 return err;
1716 } 1760 }
1761 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1762 if (err) {
1763 pr_err("Error: can't bring device up\n");
1764 return err;
1765 }
1717 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1766 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1718 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1767 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1719 adapter->tx_queue_len = netdev->tx_queue_len; 1768 adapter->tx_queue_len = netdev->tx_queue_len;
1769 pch_gbe_start_receive(&adapter->hw);
1720 1770
1721 mod_timer(&adapter->watchdog_timer, jiffies); 1771 mod_timer(&adapter->watchdog_timer, jiffies);
1722 1772
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1734void pch_gbe_down(struct pch_gbe_adapter *adapter) 1784void pch_gbe_down(struct pch_gbe_adapter *adapter)
1735{ 1785{
1736 struct net_device *netdev = adapter->netdev; 1786 struct net_device *netdev = adapter->netdev;
1787 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1737 1788
1738 /* signal that we're down so the interrupt handler does not 1789 /* signal that we're down so the interrupt handler does not
1739 * reschedule our watchdog timer */ 1790 * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
1752 pch_gbe_reset(adapter); 1803 pch_gbe_reset(adapter);
1753 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); 1804 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1754 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); 1805 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1806
1807 pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
1808 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1809 rx_ring->rx_buff_pool_logic = 0;
1810 rx_ring->rx_buff_pool_size = 0;
1811 rx_ring->rx_buff_pool = NULL;
1755} 1812}
1756 1813
1757/** 1814/**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2004{ 2061{
2005 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2062 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2006 int max_frame; 2063 int max_frame;
2064 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2065 int err;
2007 2066
2008 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2067 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2009 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 2068 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2018 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) 2077 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2019 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; 2078 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2020 else 2079 else
2021 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; 2080 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2022 netdev->mtu = new_mtu;
2023 adapter->hw.mac.max_frame_size = max_frame;
2024 2081
2025 if (netif_running(netdev)) 2082 if (netif_running(netdev)) {
2026 pch_gbe_reinit_locked(adapter); 2083 pch_gbe_down(adapter);
2027 else 2084 err = pch_gbe_up(adapter);
2085 if (err) {
2086 adapter->rx_buffer_len = old_rx_buffer_len;
2087 pch_gbe_up(adapter);
2088 return -ENOMEM;
2089 } else {
2090 netdev->mtu = new_mtu;
2091 adapter->hw.mac.max_frame_size = max_frame;
2092 }
2093 } else {
2028 pch_gbe_reset(adapter); 2094 pch_gbe_reset(adapter);
2095 netdev->mtu = new_mtu;
2096 adapter->hw.mac.max_frame_size = max_frame;
2097 }
2029 2098
2030 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2099 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2031 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, 2100 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2103 int work_done = 0; 2172 int work_done = 0;
2104 bool poll_end_flag = false; 2173 bool poll_end_flag = false;
2105 bool cleaned = false; 2174 bool cleaned = false;
2175 u32 int_en;
2106 2176
2107 pr_debug("budget : %d\n", budget); 2177 pr_debug("budget : %d\n", budget);
2108 2178
@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2110 if (!netif_carrier_ok(netdev)) { 2180 if (!netif_carrier_ok(netdev)) {
2111 poll_end_flag = true; 2181 poll_end_flag = true;
2112 } else { 2182 } else {
2113 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2114 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2183 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2184 if (adapter->rx_stop_flag) {
2185 adapter->rx_stop_flag = false;
2186 pch_gbe_start_receive(&adapter->hw);
2187 int_en = ioread32(&adapter->hw.reg->INT_EN);
2188 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
2189 &adapter->hw.reg->INT_EN);
2190 }
2191 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2115 2192
2116 if (cleaned) 2193 if (cleaned)
2117 work_done = budget; 2194 work_done = budget;
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2452 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2529 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2453 .class_mask = (0xFFFF00) 2530 .class_mask = (0xFFFF00)
2454 }, 2531 },
2532 {.vendor = PCI_VENDOR_ID_ROHM,
2533 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2534 .subvendor = PCI_ANY_ID,
2535 .subdevice = PCI_ANY_ID,
2536 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2537 .class_mask = (0xFFFF00)
2538 },
2455 /* required last entry */ 2539 /* required last entry */
2456 {0} 2540 {0}
2457}; 2541};
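
The pch_gbe_change_mtu() hunk above replaces the simple reinit with an explicit down/up sequence that rolls back to the previous rx_buffer_len when the receive buffers cannot be reallocated, and only commits the new MTU on success. Below is a minimal, self-contained C sketch of that rollback pattern; all names (bring_up, bring_down, change_buffer_len) and the failure condition are purely illustrative, not the driver's own code.

#include <stdio.h>

struct dev_state {
	unsigned long rx_buffer_len;
	int mtu;
};

/* Pretend reallocation that fails for oversized buffers. */
static int bring_up(struct dev_state *d)
{
	return (d->rx_buffer_len > 8192) ? -1 : 0;
}

static void bring_down(struct dev_state *d) { (void)d; }

/* Apply a new buffer length; on failure restore the old one. */
static int change_buffer_len(struct dev_state *d, unsigned long new_len, int new_mtu)
{
	unsigned long old_len = d->rx_buffer_len;

	bring_down(d);
	d->rx_buffer_len = new_len;
	if (bring_up(d)) {
		d->rx_buffer_len = old_len;	/* roll back */
		bring_up(d);			/* restore the old configuration */
		return -1;
	}
	d->mtu = new_mtu;			/* commit only on success */
	return 0;
}

int main(void)
{
	struct dev_state d = { .rx_buffer_len = 2048, .mtu = 1500 };

	change_buffer_len(&d, 4096, 4000);	/* succeeds, mtu becomes 4000 */
	change_buffer_len(&d, 16384, 9000);	/* fails, old length and mtu kept */
	printf("len=%lu mtu=%d\n", d.rx_buffer_len, d.mtu);	/* len=4096 mtu=4000 */
	return 0;
}
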
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 835bbb534c5d..6eb9f4ea3bfd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -407,6 +407,7 @@ enum rtl_register_content {
407 RxOK = 0x0001, 407 RxOK = 0x0001,
408 408
409 /* RxStatusDesc */ 409 /* RxStatusDesc */
410 RxBOVF = (1 << 24),
410 RxFOVF = (1 << 23), 411 RxFOVF = (1 << 23),
411 RxRWT = (1 << 22), 412 RxRWT = (1 << 22),
412 RxRES = (1 << 21), 413 RxRES = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
682 struct mii_if_info mii; 683 struct mii_if_info mii;
683 struct rtl8169_counters counters; 684 struct rtl8169_counters counters;
684 u32 saved_wolopts; 685 u32 saved_wolopts;
686 u32 opts1_mask;
685 687
686 struct rtl_fw { 688 struct rtl_fw {
687 const struct firmware *fw; 689 const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
710MODULE_FIRMWARE(FIRMWARE_8168D_2); 712MODULE_FIRMWARE(FIRMWARE_8168D_2);
711MODULE_FIRMWARE(FIRMWARE_8168E_1); 713MODULE_FIRMWARE(FIRMWARE_8168E_1);
712MODULE_FIRMWARE(FIRMWARE_8168E_2); 714MODULE_FIRMWARE(FIRMWARE_8168E_2);
715MODULE_FIRMWARE(FIRMWARE_8168E_3);
713MODULE_FIRMWARE(FIRMWARE_8105E_1); 716MODULE_FIRMWARE(FIRMWARE_8105E_1);
714 717
715static int rtl8169_open(struct net_device *dev); 718static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
3077 netif_err(tp, link, dev, "PHY reset failed\n"); 3080 netif_err(tp, link, dev, "PHY reset failed\n");
3078} 3081}
3079 3082
3083static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3084{
3085 void __iomem *ioaddr = tp->mmio_addr;
3086
3087 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3088 (RTL_R8(PHYstatus) & TBI_Enable);
3089}
3090
3080static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) 3091static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3081{ 3092{
3082 void __iomem *ioaddr = tp->mmio_addr; 3093 void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3109 ADVERTISED_1000baseT_Half | 3120 ADVERTISED_1000baseT_Half |
3110 ADVERTISED_1000baseT_Full : 0)); 3121 ADVERTISED_1000baseT_Full : 0));
3111 3122
3112 if (RTL_R8(PHYstatus) & TBI_Enable) 3123 if (rtl_tbi_enabled(tp))
3113 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 3124 netif_info(tp, link, dev, "TBI auto-negotiating\n");
3114} 3125}
3115 3126
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3319 3330
3320static void r810x_pll_power_down(struct rtl8169_private *tp) 3331static void r810x_pll_power_down(struct rtl8169_private *tp)
3321{ 3332{
3333 void __iomem *ioaddr = tp->mmio_addr;
3334
3322 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3335 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
3323 rtl_writephy(tp, 0x1f, 0x0000); 3336 rtl_writephy(tp, 0x1f, 0x0000);
3324 rtl_writephy(tp, MII_BMCR, 0x0000); 3337 rtl_writephy(tp, MII_BMCR, 0x0000);
3338
3339 if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
3340 tp->mac_version == RTL_GIGA_MAC_VER_30)
3341 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3342 AcceptMulticast | AcceptMyPhys);
3325 return; 3343 return;
3326 } 3344 }
3327 3345
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
3417 rtl_writephy(tp, MII_BMCR, 0x0000); 3435 rtl_writephy(tp, MII_BMCR, 0x0000);
3418 3436
3419 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 3437 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3420 tp->mac_version == RTL_GIGA_MAC_VER_33) 3438 tp->mac_version == RTL_GIGA_MAC_VER_33 ||
3439 tp->mac_version == RTL_GIGA_MAC_VER_34)
3421 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | 3440 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3422 AcceptMulticast | AcceptMyPhys); 3441 AcceptMulticast | AcceptMyPhys);
3423 return; 3442 return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3727 tp->features |= rtl_try_msi(pdev, ioaddr, cfg); 3746 tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
3728 RTL_W8(Cfg9346, Cfg9346_Lock); 3747 RTL_W8(Cfg9346, Cfg9346_Lock);
3729 3748
3730 if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && 3749 if (rtl_tbi_enabled(tp)) {
3731 (RTL_R8(PHYstatus) & TBI_Enable)) {
3732 tp->set_speed = rtl8169_set_speed_tbi; 3750 tp->set_speed = rtl8169_set_speed_tbi;
3733 tp->get_settings = rtl8169_gset_tbi; 3751 tp->get_settings = rtl8169_gset_tbi;
3734 tp->phy_reset_enable = rtl8169_tbi_reset_enable; 3752 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3777 tp->intr_event = cfg->intr_event; 3795 tp->intr_event = cfg->intr_event;
3778 tp->napi_event = cfg->napi_event; 3796 tp->napi_event = cfg->napi_event;
3779 3797
3798 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
3799 ~(RxBOVF | RxFOVF) : ~0;
3800
3780 init_timer(&tp->timer); 3801 init_timer(&tp->timer);
3781 tp->timer.data = (unsigned long) dev; 3802 tp->timer.data = (unsigned long) dev;
3782 tp->timer.function = rtl8169_phy_timer; 3803 tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3988 while (RTL_R8(TxPoll) & NPQ) 4009 while (RTL_R8(TxPoll) & NPQ)
3989 udelay(20); 4010 udelay(20);
3990 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 4011 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
4012 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
3991 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4013 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
3992 udelay(100); 4014 udelay(100);
3993 } else { 4015 } else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
5314 u32 status; 5336 u32 status;
5315 5337
5316 rmb(); 5338 rmb();
5317 status = le32_to_cpu(desc->opts1); 5339 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
5318 5340
5319 if (status & DescOwn) 5341 if (status & DescOwn)
5320 break; 5342 break;
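
The r8169 hunks above introduce opts1_mask so that, on everything except RTL_GIGA_MAC_VER_01, the unreliable RxBOVF/RxFOVF overflow bits are stripped from the receive descriptor status before it is interpreted. A small stand-alone sketch of that masking idea follows; the bit positions are copied from the enum shown in the diff, but the version check is reduced to a boolean for illustration.

#include <stdint.h>
#include <stdio.h>

#define RX_BOVF (1u << 24)	/* buffer overflow, per the enum above */
#define RX_FOVF (1u << 23)	/* FIFO overflow */

/* Chips other than the first revision report these bits unreliably,
 * so mask them out of every descriptor status word. */
static uint32_t opts1_mask(int is_ver_01)
{
	return is_ver_01 ? ~0u : ~(RX_BOVF | RX_FOVF);
}

int main(void)
{
	uint32_t raw = RX_BOVF | RX_FOVF | 0x600;	/* pretend descriptor status */

	printf("ver01:  0x%08x\n", raw & opts1_mask(1));	/* overflow bits kept */
	printf("others: 0x%08x\n", raw & opts1_mask(0));	/* overflow bits masked */
	return 0;
}
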
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bf2404ae3b87..4479a45f7329 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -31,6 +31,7 @@
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/cache.h> 32#include <linux/cache.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/interrupt.h>
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/ethtool.h> 37#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 76dcadfaaa43..de9afebe1830 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
1050{ 1050{
1051 struct pci_dev *pci_dev = efx->pci_dev; 1051 struct pci_dev *pci_dev = efx->pci_dev;
1052 dma_addr_t dma_mask = efx->type->max_dma_mask; 1052 dma_addr_t dma_mask = efx->type->max_dma_mask;
1053 bool use_wc;
1054 int rc; 1053 int rc;
1055 1054
1056 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1055 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
1101 rc = -EIO; 1100 rc = -EIO;
1102 goto fail3; 1101 goto fail3;
1103 } 1102 }
1104 1103 efx->membase = ioremap_nocache(efx->membase_phys,
1105 /* bug22643: If SR-IOV is enabled then tx push over a write combined 1104 efx->type->mem_map_size);
1106 * mapping is unsafe. We need to disable write combining in this case.
1107 * MSI is unsupported when SR-IOV is enabled, and the firmware will
1108 * have removed the MSI capability. So write combining is safe if
1109 * there is an MSI capability.
1110 */
1111 use_wc = (!EFX_WORKAROUND_22643(efx) ||
1112 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
1113 if (use_wc)
1114 efx->membase = ioremap_wc(efx->membase_phys,
1115 efx->type->mem_map_size);
1116 else
1117 efx->membase = ioremap_nocache(efx->membase_phys,
1118 efx->type->mem_map_size);
1119 if (!efx->membase) { 1105 if (!efx->membase) {
1120 netif_err(efx, probe, efx->net_dev, 1106 netif_err(efx, probe, efx->net_dev,
1121 "could not map memory BAR at %llx+%x\n", 1107 "could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index cc978803d484..751d1ec112cc 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
103 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
104 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
105#endif 105#endif
106 wmb();
107 mmiowb(); 106 mmiowb();
108 spin_unlock_irqrestore(&efx->biu_lock, flags); 107 spin_unlock_irqrestore(&efx->biu_lock, flags);
109} 108}
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
126 __raw_writel((__force u32)value->u32[0], membase + addr); 125 __raw_writel((__force u32)value->u32[0], membase + addr);
127 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 126 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
128#endif 127#endif
129 wmb();
130 mmiowb(); 128 mmiowb();
131 spin_unlock_irqrestore(&efx->biu_lock, flags); 129 spin_unlock_irqrestore(&efx->biu_lock, flags);
132} 130}
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
141 139
142 /* No lock required */ 140 /* No lock required */
143 _efx_writed(efx, value->u32[0], reg); 141 _efx_writed(efx, value->u32[0], reg);
144 wmb();
145} 142}
146 143
147/* Read a 128-bit CSR, locking as appropriate. */ 144/* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 149
153 spin_lock_irqsave(&efx->biu_lock, flags); 150 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 151 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
156 value->u32[1] = _efx_readd(efx, reg + 4); 152 value->u32[1] = _efx_readd(efx, reg + 4);
157 value->u32[2] = _efx_readd(efx, reg + 8); 153 value->u32[2] = _efx_readd(efx, reg + 8);
158 value->u32[3] = _efx_readd(efx, reg + 12); 154 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
175 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 171 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
176#else 172#else
177 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 173 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
178 rmb();
179 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 174 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
180#endif 175#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags); 176 spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
249 _efx_writed(efx, value->u32[2], reg + 8); 244 _efx_writed(efx, value->u32[2], reg + 8);
250 _efx_writed(efx, value->u32[3], reg + 12); 245 _efx_writed(efx, value->u32[3], reg + 12);
251#endif 246#endif
252 wmb();
253} 247}
254#define efx_writeo_page(efx, value, reg, page) \ 248#define efx_writeo_page(efx, value, reg, page) \
255 _efx_writeo_page(efx, value, \ 249 _efx_writeo_page(efx, value, \
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 3dd45ed61f0a..81a425397468 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
67void efx_mcdi_init(struct efx_nic *efx) 53void efx_mcdi_init(struct efx_nic *efx)
68{ 54{
69 struct efx_mcdi_iface *mcdi; 55 struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
84 const u8 *inbuf, size_t inlen) 70 const u8 *inbuf, size_t inlen)
85{ 71{
86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
87 unsigned pdu = MCDI_PDU(efx); 73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
88 unsigned doorbell = MCDI_DOORBELL(efx); 74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
89 unsigned int i; 75 unsigned int i;
90 efx_dword_t hdr; 76 efx_dword_t hdr;
91 u32 xflags, seqno; 77 u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
106 MCDI_HEADER_SEQ, seqno, 92 MCDI_HEADER_SEQ, seqno,
107 MCDI_HEADER_XFLAGS, xflags); 93 MCDI_HEADER_XFLAGS, xflags);
108 94
109 efx_mcdi_writed(efx, &hdr, pdu); 95 efx_writed(efx, &hdr, pdu);
110 96
111 for (i = 0; i < inlen; i += 4) 97 for (i = 0; i < inlen; i += 4)
112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
113 pdu + 4 + i); 99
100 /* Ensure the payload is written out before the header */
101 wmb();
114 102
115 /* ring the doorbell with a distinctive value */ 103 /* ring the doorbell with a distinctive value */
116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
117 efx_mcdi_writed(efx, &hdr, doorbell);
118} 105}
119 106
120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
121{ 108{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int pdu = MCDI_PDU(efx); 110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
124 int i; 111 int i;
125 112
126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
127 BUG_ON(outlen & 3 || outlen >= 0x100); 114 BUG_ON(outlen & 3 || outlen >= 0x100);
128 115
129 for (i = 0; i < outlen; i += 4) 116 for (i = 0; i < outlen; i += 4)
130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
131} 118}
132 119
133static int efx_mcdi_poll(struct efx_nic *efx) 120static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
136 unsigned int time, finish; 123 unsigned int time, finish;
137 unsigned int respseq, respcmd, error; 124 unsigned int respseq, respcmd, error;
138 unsigned int pdu = MCDI_PDU(efx); 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
139 unsigned int rc, spins; 126 unsigned int rc, spins;
140 efx_dword_t reg; 127 efx_dword_t reg;
141 128
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
161 148
162 time = get_seconds(); 149 time = get_seconds();
163 150
164 efx_mcdi_readd(efx, &reg, pdu); 151 rmb();
152 efx_readd(efx, &reg, pdu);
165 153
166 /* All 1's indicates that shared memory is in reset (and is 154 /* All 1's indicates that shared memory is in reset (and is
167 * not a valid header). Wait for it to come out reset before 155 * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
188 respseq, mcdi->seqno); 176 respseq, mcdi->seqno);
189 rc = EIO; 177 rc = EIO;
190 } else if (error) { 178 } else if (error) {
191 efx_mcdi_readd(efx, &reg, pdu + 4); 179 efx_readd(efx, &reg, pdu + 4);
192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
193#define TRANSLATE_ERROR(name) \ 181#define TRANSLATE_ERROR(name) \
194 case MC_CMD_ERR_ ## name: \ 182 case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
222/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function */
223int efx_mcdi_poll_reboot(struct efx_nic *efx) 211int efx_mcdi_poll_reboot(struct efx_nic *efx)
224{ 212{
225 unsigned int addr = MCDI_REBOOT_FLAG(efx); 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
226 efx_dword_t reg; 214 efx_dword_t reg;
227 uint32_t value; 215 uint32_t value;
228 216
229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 217 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
230 return false; 218 return false;
231 219
232 efx_mcdi_readd(efx, &reg, addr); 220 efx_readd(efx, &reg, addr);
233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 221 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
234 222
235 if (value == 0) 223 if (value == 0)
236 return 0; 224 return 0;
237 225
238 EFX_ZERO_DWORD(reg); 226 EFX_ZERO_DWORD(reg);
239 efx_mcdi_writed(efx, &reg, addr); 227 efx_writed(efx, &reg, addr);
240 228
241 if (value == MC_STATUS_DWORD_ASSERT) 229 if (value == MC_STATUS_DWORD_ASSERT)
242 return -EINTR; 230 return -EINTR;
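
The MCDI copyin hunk above writes the command payload, issues wmb() so those stores are ordered first, and only then rings the doorbell. The same "publish the data, then signal" ordering can be expressed in user space with a C11 release/acquire pair; this is only an analogy to wmb() over ordinary memory, not the driver's MMIO code. Compile with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload[4];
static atomic_int doorbell;		/* 0 = empty, 1 = command posted */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 4; i++)	/* write the payload first */
		payload[i] = i + 1;
	/* Release store: payload writes become visible before the doorbell. */
	atomic_store_explicit(&doorbell, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* Acquire load: once the doorbell is seen, the payload is valid. */
	while (!atomic_load_explicit(&doorbell, memory_order_acquire))
		;
	for (int i = 0; i < 4; i++)
		printf("%d ", payload[i]);
	printf("\n");
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
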
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index bafa23a6874c..3edfbaf5f022 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1936 1936
1937 size = min_t(size_t, table->step, 16); 1937 size = min_t(size_t, table->step, 16);
1938 1938
1939 if (table->offset >= efx->type->mem_map_size) {
1940 /* No longer mapped; return dummy data */
1941 memcpy(buf, "\xde\xc0\xad\xde", 4);
1942 buf += table->rows * size;
1943 continue;
1944 }
1945
1946 for (i = 0; i < table->rows; i++) { 1939 for (i = 0; i < table->rows; i++) {
1947 switch (table->step) { 1940 switch (table->step) {
1948 case 4: /* 32-bit register or SRAM */ 1941 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index b5b288628c6b..5fb24d3aa3ca 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
147 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
148 */ 147 */
149struct siena_nic_data { 148struct siena_nic_data {
150 struct efx_mcdi_iface mcdi; 149 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
152 int wol_filter_id; 150 int wol_filter_id;
153}; 151};
154 152
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4fdd148747b2..cc2549cb7076 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -252,26 +252,12 @@ static int siena_probe_nic(struct efx_nic *efx)
252 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 252 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
253 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 253 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
254 254
255 /* Initialise MCDI */
256 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
257 FR_CZ_MC_TREG_SMEM,
258 FR_CZ_MC_TREG_SMEM_STEP *
259 FR_CZ_MC_TREG_SMEM_ROWS);
260 if (!nic_data->mcdi_smem) {
261 netif_err(efx, probe, efx->net_dev,
262 "could not map MCDI at %llx+%x\n",
263 (unsigned long long)efx->membase_phys +
264 FR_CZ_MC_TREG_SMEM,
265 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
266 rc = -ENOMEM;
267 goto fail1;
268 }
269 efx_mcdi_init(efx); 255 efx_mcdi_init(efx);
270 256
271 /* Recover from a failed assertion before probing */ 257 /* Recover from a failed assertion before probing */
272 rc = efx_mcdi_handle_assertion(efx); 258 rc = efx_mcdi_handle_assertion(efx);
273 if (rc) 259 if (rc)
274 goto fail2; 260 goto fail1;
275 261
276 /* Let the BMC know that the driver is now in charge of link and 262 /* Let the BMC know that the driver is now in charge of link and
277 * filter settings. We must do this before we reset the NIC */ 263 * filter settings. We must do this before we reset the NIC */
@@ -326,7 +312,6 @@ fail4:
326fail3: 312fail3:
327 efx_mcdi_drv_attach(efx, false, NULL); 313 efx_mcdi_drv_attach(efx, false, NULL);
328fail2: 314fail2:
329 iounmap(nic_data->mcdi_smem);
330fail1: 315fail1:
331 kfree(efx->nic_data); 316 kfree(efx->nic_data);
332 return rc; 317 return rc;
@@ -406,8 +391,6 @@ static int siena_init_nic(struct efx_nic *efx)
406 391
407static void siena_remove_nic(struct efx_nic *efx) 392static void siena_remove_nic(struct efx_nic *efx)
408{ 393{
409 struct siena_nic_data *nic_data = efx->nic_data;
410
411 efx_nic_free_buffer(efx, &efx->irq_status); 394 efx_nic_free_buffer(efx, &efx->irq_status);
412 395
413 siena_reset_hw(efx, RESET_TYPE_ALL); 396 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -417,8 +400,7 @@ static void siena_remove_nic(struct efx_nic *efx)
417 efx_mcdi_drv_attach(efx, false, NULL); 400 efx_mcdi_drv_attach(efx, false, NULL);
418 401
419 /* Tear down the private nic state */ 402 /* Tear down the private nic state */
420 iounmap(nic_data->mcdi_smem); 403 kfree(efx->nic_data);
421 kfree(nic_data);
422 efx->nic_data = NULL; 404 efx->nic_data = NULL;
423} 405}
424 406
@@ -658,7 +640,8 @@ const struct efx_nic_type siena_a0_nic_type = {
658 .default_mac_ops = &efx_mcdi_mac_operations, 640 .default_mac_ops = &efx_mcdi_mac_operations,
659 641
660 .revision = EFX_REV_SIENA_A0, 642 .revision = EFX_REV_SIENA_A0,
661 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 643 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
644 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
662 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 645 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
663 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 646 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
664 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 647 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 99ff11400cef..e4dd3a7f304b 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -38,8 +38,6 @@
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
43 41
44/* Spurious parity errors in TSORT buffers */ 42/* Spurious parity errors in TSORT buffers */
45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f07a72150c63..12068219059a 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2452 struct net_device *dev = dev_id; 2452 struct net_device *dev = dev_id;
2453 struct cas *cp = netdev_priv(dev); 2453 struct cas *cp = netdev_priv(dev);
2454 unsigned long flags; 2454 unsigned long flags;
2455 int ring; 2455 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2457 2457
2458 /* check for shared irq */ 2458 /* check for shared irq */
2459 if (status == 0) 2459 if (status == 0)
2460 return IRQ_NONE; 2460 return IRQ_NONE;
2461 2461
2462 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2463 spin_lock_irqsave(&cp->lock, flags); 2462 spin_lock_irqsave(&cp->lock, flags);
2464 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2463 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2465#ifdef USE_NAPI 2464#ifdef USE_NAPI
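
The cassini hunk above moves the ring calculation ahead of the register read: previously REG_PLUS_INTRN_STATUS(ring) was evaluated while ring was still uninitialized. A tiny illustration of why deriving the index in the initializer closes that window; the register read is faked with an array lookup and all values are made up.

#include <stdio.h>

static unsigned int fake_status[4] = { 0x0, 0x0, 0x10, 0x20 };

/* The index the status depends on must be known before the read,
 * so decide it up front rather than after the first use. */
static unsigned int read_status(int irq, int irq_intc)
{
	int ring = (irq == irq_intc) ? 2 : 3;
	return fake_status[ring];
}

int main(void)
{
	printf("INTC irq -> status 0x%x\n", read_status(10, 10));
	printf("INTD irq -> status 0x%x\n", read_status(11, 10));
	return 0;
}
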
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfc82720065a..ed2a3977c6e7 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
799 } 799 }
800} 800}
801 801
802module_init(init_netconsole); 802/*
803 * Use late_initcall to ensure netconsole is
804 * initialized after network device driver if built-in.
805 *
806 * late_initcall() and module_init() are identical if built as module.
807 */
808late_initcall(init_netconsole);
803module_exit(cleanup_netconsole); 809module_exit(cleanup_netconsole);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 0620ba963508..04bb8fcc0cb5 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -25,8 +25,9 @@
25/* DP83865 phy identifier values */ 25/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 26#define DP83865_PHY_ID 0x20005c7a
27 27
28#define DP83865_INT_MASK_REG 0x15 28#define DP83865_INT_STATUS 0x14
29#define DP83865_INT_MASK_STATUS 0x14 29#define DP83865_INT_MASK 0x15
30#define DP83865_INT_CLEAR 0x17
30 31
31#define DP83865_INT_REMOTE_FAULT 0x0008 32#define DP83865_INT_REMOTE_FAULT 0x0008
32#define DP83865_INT_ANE_COMPLETED 0x0010 33#define DP83865_INT_ANE_COMPLETED 0x0010
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
68 int err; 69 int err;
69 70
70 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 71 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
71 err = phy_write(phydev, DP83865_INT_MASK_REG, 72 err = phy_write(phydev, DP83865_INT_MASK,
72 DP83865_INT_MASK_DEFAULT); 73 DP83865_INT_MASK_DEFAULT);
73 else 74 else
74 err = phy_write(phydev, DP83865_INT_MASK_REG, 0); 75 err = phy_write(phydev, DP83865_INT_MASK, 0);
75 76
76 return err; 77 return err;
77} 78}
78 79
79static int ns_ack_interrupt(struct phy_device *phydev) 80static int ns_ack_interrupt(struct phy_device *phydev)
80{ 81{
81 int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); 82 int ret = phy_read(phydev, DP83865_INT_STATUS);
82 if (ret < 0) 83 if (ret < 0)
83 return ret; 84 return ret;
84 85
85 return 0; 86 /* Clear the interrupt status bit by writing a "1"
87 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
88 ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
89
90 return ret;
86} 91}
87 92
88static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) 93static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
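
The DP83865 hunk above acks an interrupt by reading the status register and writing the same bits back to the clear register, masking off bits 2:0 which the comment marks as reserved. The following stand-alone sketch shows that write-1-to-clear pattern against a fake register file; the register numbers follow the defines in the diff, but the "hardware" behaviour is simulated.

#include <stdint.h>
#include <stdio.h>

#define INT_STATUS 0x14
#define INT_CLEAR  0x17

static uint16_t regs[0x20];

static int phy_read(int reg) { return regs[reg]; }

static int phy_write(int reg, uint16_t v)
{
	if (reg == INT_CLEAR)
		regs[INT_STATUS] &= ~v;		/* write-1-to-clear semantics */
	else
		regs[reg] = v;
	return 0;
}

static int ack_interrupt(void)
{
	int status = phy_read(INT_STATUS);

	if (status < 0)
		return status;
	/* Clear only the bits that were set, keeping reserved bits 2:0 at 0. */
	return phy_write(INT_CLEAR, status & ~0x7);
}

int main(void)
{
	regs[INT_STATUS] = 0x0018;		/* pretend two events are pending */
	ack_interrupt();
	printf("status after ack: 0x%04x\n", phy_read(INT_STATUS));	/* 0x0000 */
	return 0;
}
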
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 10e5d985afa3..edfa15d2e795 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1465 continue; 1465 continue;
1466 } 1466 }
1467 1467
1468 mtu = pch->chan->mtu - hdrlen; 1468 /*
1469 * hdrlen includes the 2-byte PPP protocol field, but the
1470 * MTU counts only the payload excluding the protocol field.
1471 * (RFC1661 Section 2)
1472 */
1473 mtu = pch->chan->mtu - (hdrlen - 2);
1469 if (mtu < 4) 1474 if (mtu < 4)
1470 mtu = 4; 1475 mtu = 4;
1471 if (flen > mtu) 1476 if (flen > mtu)
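
The ppp_mp_explode() hunk above corrects the per-channel fragment size: hdrlen counts the 2-byte PPP protocol field, but a channel's MTU already excludes that field, so only hdrlen - 2 should be subtracted. A worked example of the arithmetic, with values chosen purely for illustration:

#include <stdio.h>

/* Payload space left on a channel once the multilink header is added.
 * hdrlen includes the 2-byte protocol field; the MTU does not. */
static int frag_mtu(int chan_mtu, int hdrlen)
{
	int mtu = chan_mtu - (hdrlen - 2);

	return mtu < 4 ? 4 : mtu;	/* same lower clamp as the driver */
}

int main(void)
{
	/* e.g. 1500-byte channel MTU, 6-byte header including the protocol
	 * field: the old formula gave 1494, the corrected one gives 1496. */
	printf("corrected: %d\n", frag_mtu(1500, 6));
	printf("old value: %d\n", 1500 - 6);
	return 0;
}
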
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 86ac38c96bcf..3bb131137033 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -80,13 +80,13 @@ static int rionet_capable = 1;
80 */ 80 */
81static struct rio_dev **rionet_active; 81static struct rio_dev **rionet_active;
82 82
83#define is_rionet_capable(pef, src_ops, dst_ops) \ 83#define is_rionet_capable(src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \ 84 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \ 85 (dst_ops & RIO_DST_OPS_DATA_MSG) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \ 86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL)) 87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \ 88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) 89 is_rionet_capable(dev->src_ops, dev->dst_ops)
90 90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) 92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
282{ 282{
283 int i, rc = 0; 283 int i, rc = 0;
284 struct rionet_peer *peer, *tmp; 284 struct rionet_peer *peer, *tmp;
285 u32 pwdcsr;
286 struct rionet_private *rnet = netdev_priv(ndev); 285 struct rionet_private *rnet = netdev_priv(ndev);
287 286
288 if (netif_msg_ifup(rnet)) 287 if (netif_msg_ifup(rnet))
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
332 continue; 331 continue;
333 } 332 }
334 333
335 /* 334 /* Send a join message */
336 * If device has initialized inbound doorbells, 335 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
337 * send a join message
338 */
339 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
340 if (pwdcsr & RIO_DOORBELL_AVAIL)
341 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
342 } 336 }
343 337
344 out: 338 out:
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
492static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 486static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
493{ 487{
494 int rc = -ENODEV; 488 int rc = -ENODEV;
495 u32 lpef, lsrc_ops, ldst_ops; 489 u32 lsrc_ops, ldst_ops;
496 struct rionet_peer *peer; 490 struct rionet_peer *peer;
497 struct net_device *ndev = NULL; 491 struct net_device *ndev = NULL;
498 492
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
515 * on later probes 509 * on later probes
516 */ 510 */
517 if (!rionet_check) { 511 if (!rionet_check) {
518 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
519 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 512 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
520 &lsrc_ops); 513 &lsrc_ops);
521 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 514 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
522 &ldst_ops); 515 &ldst_ops);
523 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { 516 if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
524 printk(KERN_ERR 517 printk(KERN_ERR
525 "%s: local device is not network capable\n", 518 "%s: local device is not network capable\n",
526 DRV_NAME); 519 DRV_NAME);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a91..13c1f044b40d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
59#define USB_PRODUCT_IPHONE_3G 0x1292 59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPHONE_4_VZW 0x129c
62 63
63#define IPHETH_USBINTF_CLASS 255 64#define IPHETH_USBINTF_CLASS 255
64#define IPHETH_USBINTF_SUBCLASS 253 65#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
99 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
100 IPHETH_USBINTF_PROTO) }, 101 IPHETH_USBINTF_PROTO) },
102 { USB_DEVICE_AND_INTERFACE_INFO(
103 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
104 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
105 IPHETH_USBINTF_PROTO) },
101 { } 106 { }
102}; 107};
103MODULE_DEVICE_TABLE(usb, ipheth_table); 108MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295b..2d394af82171 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
41 case ADC_DC_CAL: 41 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) && 43 if (!IS_CHAN_B(chan) &&
44 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
45 IS_CHAN_HT20(chan)))
45 supported = true; 46 supported = true;
46 break; 47 break;
47 } 48 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a73e50d80cbb..51398f0063e2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -50,7 +50,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
50static const struct ar9300_eeprom ar9300_default = { 50static const struct ar9300_eeprom ar9300_default = {
51 .eepromVersion = 2, 51 .eepromVersion = 2,
52 .templateVersion = 2, 52 .templateVersion = 2,
53 .macAddr = {1, 2, 3, 4, 5, 6}, 53 .macAddr = {0, 2, 3, 4, 5, 6},
54 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
56 .baseEepHeader = { 56 .baseEepHeader = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 95147948794d..4956d09cb589 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -678,7 +678,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
678 REG_WRITE_ARRAY(&ah->iniModesAdditional, 678 REG_WRITE_ARRAY(&ah->iniModesAdditional,
679 modesIndex, regWrites); 679 modesIndex, regWrites);
680 680
681 if (AR_SREV_9300(ah)) 681 if (AR_SREV_9330(ah))
682 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 682 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
683 683
684 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) 684 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 7910165cf0e6..a16f53994a7e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2272,7 +2272,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2272 2272
2273 mutex_lock(&sc->mutex); 2273 mutex_lock(&sc->mutex);
2274 ah->coverage_class = coverage_class; 2274 ah->coverage_class = coverage_class;
2275
2276 ath9k_ps_wakeup(sc);
2275 ath9k_hw_init_global_settings(ah); 2277 ath9k_hw_init_global_settings(ah);
2278 ath9k_ps_restore(sc);
2279
2276 mutex_unlock(&sc->mutex); 2280 mutex_unlock(&sc->mutex);
2277} 2281}
2278 2282
@@ -2288,6 +2292,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2288 mutex_lock(&sc->mutex); 2292 mutex_lock(&sc->mutex);
2289 cancel_delayed_work_sync(&sc->tx_complete_work); 2293 cancel_delayed_work_sync(&sc->tx_complete_work);
2290 2294
2295 if (ah->ah_flags & AH_UNPLUGGED) {
2296 ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
2297 mutex_unlock(&sc->mutex);
2298 return;
2299 }
2300
2291 if (sc->sc_flags & SC_OP_INVALID) { 2301 if (sc->sc_flags & SC_OP_INVALID) {
2292 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2302 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
2293 mutex_unlock(&sc->mutex); 2303 mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 782b8f3ae58f..af351ecd87c4 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1115,8 +1115,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1115 * the high througput speed in 802.11n networks. 1115 * the high througput speed in 802.11n networks.
1116 */ 1116 */
1117 1117
1118 if (!is_main_vif(ar, vif)) 1118 if (!is_main_vif(ar, vif)) {
1119 mutex_lock(&ar->mutex);
1119 goto err_softw; 1120 goto err_softw;
1121 }
1120 1122
1121 /* 1123 /*
1122 * While the hardware supports *catch-all* key, for offloading 1124 * While the hardware supports *catch-all* key, for offloading
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 24077023d484..56fa3a3648c4 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1637,7 +1637,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1637 u32 cmd, beacon0_valid, beacon1_valid; 1637 u32 cmd, beacon0_valid, beacon1_valid;
1638 1638
1639 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && 1639 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
1640 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) 1640 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
1641 !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
1641 return; 1642 return;
1642 1643
1643 /* This is the bottom half of the asynchronous beacon update. */ 1644 /* This is the bottom half of the asynchronous beacon update. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3774dd034746..ef9ad79d1bfd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903static int ipw2100_net_init(struct net_device *dev) 1903static int ipw2100_net_init(struct net_device *dev)
1904{ 1904{
1905 struct ipw2100_priv *priv = libipw_priv(dev); 1905 struct ipw2100_priv *priv = libipw_priv(dev);
1906
1907 return ipw2100_up(priv, 1);
1908}
1909
1910static int ipw2100_wdev_init(struct net_device *dev)
1911{
1912 struct ipw2100_priv *priv = libipw_priv(dev);
1906 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1907 struct wireless_dev *wdev = &priv->ieee->wdev; 1914 struct wireless_dev *wdev = &priv->ieee->wdev;
1908 int ret;
1909 int i; 1915 int i;
1910 1916
1911 ret = ipw2100_up(priv, 1);
1912 if (ret)
1913 return ret;
1914
1915 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 1917 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
1916 1918
1917 /* fill-out priv->ieee->bg_band */ 1919 /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6350 "Error calling register_netdev.\n"); 6352 "Error calling register_netdev.\n");
6351 goto fail; 6353 goto fail;
6352 } 6354 }
6355 registered = 1;
6356
6357 err = ipw2100_wdev_init(dev);
6358 if (err)
6359 goto fail;
6353 6360
6354 mutex_lock(&priv->action_mutex); 6361 mutex_lock(&priv->action_mutex);
6355 registered = 1;
6356 6362
6357 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); 6363 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
6358 6364
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6389 6395
6390 fail_unlock: 6396 fail_unlock:
6391 mutex_unlock(&priv->action_mutex); 6397 mutex_unlock(&priv->action_mutex);
6392 6398 wiphy_unregister(priv->ieee->wdev.wiphy);
6399 kfree(priv->ieee->bg_band.channels);
6393 fail: 6400 fail:
6394 if (dev) { 6401 if (dev) {
6395 if (registered) 6402 if (registered)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index f303df43ed3f..99a710dfe771 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11426,16 +11426,23 @@ static void ipw_bg_down(struct work_struct *work)
11426/* Called by register_netdev() */ 11426/* Called by register_netdev() */
11427static int ipw_net_init(struct net_device *dev) 11427static int ipw_net_init(struct net_device *dev)
11428{ 11428{
11429 int rc = 0;
11430 struct ipw_priv *priv = libipw_priv(dev);
11431
11432 mutex_lock(&priv->mutex);
11433 if (ipw_up(priv))
11434 rc = -EIO;
11435 mutex_unlock(&priv->mutex);
11436
11437 return rc;
11438}
11439
11440static int ipw_wdev_init(struct net_device *dev)
11441{
11429 int i, rc = 0; 11442 int i, rc = 0;
11430 struct ipw_priv *priv = libipw_priv(dev); 11443 struct ipw_priv *priv = libipw_priv(dev);
11431 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11444 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11432 struct wireless_dev *wdev = &priv->ieee->wdev; 11445 struct wireless_dev *wdev = &priv->ieee->wdev;
11433 mutex_lock(&priv->mutex);
11434
11435 if (ipw_up(priv)) {
11436 rc = -EIO;
11437 goto out;
11438 }
11439 11446
11440 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11447 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11441 11448
@@ -11520,13 +11527,9 @@ static int ipw_net_init(struct net_device *dev)
11520 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11527 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11521 11528
11522 /* With that information in place, we can now register the wiphy... */ 11529 /* With that information in place, we can now register the wiphy... */
11523 if (wiphy_register(wdev->wiphy)) { 11530 if (wiphy_register(wdev->wiphy))
11524 rc = -EIO; 11531 rc = -EIO;
11525 goto out;
11526 }
11527
11528out: 11532out:
11529 mutex_unlock(&priv->mutex);
11530 return rc; 11533 return rc;
11531} 11534}
11532 11535
@@ -11833,14 +11836,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11833 goto out_remove_sysfs; 11836 goto out_remove_sysfs;
11834 } 11837 }
11835 11838
11839 err = ipw_wdev_init(net_dev);
11840 if (err) {
11841 IPW_ERROR("failed to register wireless device\n");
11842 goto out_unregister_netdev;
11843 }
11844
11836#ifdef CONFIG_IPW2200_PROMISCUOUS 11845#ifdef CONFIG_IPW2200_PROMISCUOUS
11837 if (rtap_iface) { 11846 if (rtap_iface) {
11838 err = ipw_prom_alloc(priv); 11847 err = ipw_prom_alloc(priv);
11839 if (err) { 11848 if (err) {
11840 IPW_ERROR("Failed to register promiscuous network " 11849 IPW_ERROR("Failed to register promiscuous network "
11841 "device (error %d).\n", err); 11850 "device (error %d).\n", err);
11842 unregister_netdev(priv->net_dev); 11851 wiphy_unregister(priv->ieee->wdev.wiphy);
11843 goto out_remove_sysfs; 11852 kfree(priv->ieee->a_band.channels);
11853 kfree(priv->ieee->bg_band.channels);
11854 goto out_unregister_netdev;
11844 } 11855 }
11845 } 11856 }
11846#endif 11857#endif
@@ -11852,6 +11863,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11852 11863
11853 return 0; 11864 return 0;
11854 11865
11866 out_unregister_netdev:
11867 unregister_netdev(priv->net_dev);
11855 out_remove_sysfs: 11868 out_remove_sysfs:
11856 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11869 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11857 out_release_irq: 11870 out_release_irq:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 0cc5177d738d..8faeaf2dddec 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -821,12 +821,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
821 821
822 out: 822 out:
823 823
824 rs_sta->last_txrate_idx = index; 824 if (sband->band == IEEE80211_BAND_5GHZ) {
825 if (sband->band == IEEE80211_BAND_5GHZ) 825 if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
826 info->control.rates[0].idx = rs_sta->last_txrate_idx - 826 index = IWL_FIRST_OFDM_RATE;
827 IWL_FIRST_OFDM_RATE; 827 rs_sta->last_txrate_idx = index;
828 else 828 info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
829 } else {
830 rs_sta->last_txrate_idx = index;
829 info->control.rates[0].idx = rs_sta->last_txrate_idx; 831 info->control.rates[0].idx = rs_sta->last_txrate_idx;
832 }
830 833
831 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 834 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
832} 835}
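
The 3945 rate-scaling hunk above guards the 5 GHz path: the rate index is clamped to IWL_FIRST_OFDM_RATE before the band offset is subtracted, so info->control.rates[0].idx can never go negative. A minimal sketch of that clamp-then-offset pattern; the constant here is an illustrative value, not the driver's rate table.

#include <stdio.h>

#define FIRST_OFDM_RATE 4	/* illustrative value only */

/* Convert an absolute rate index into a band-relative one,
 * clamping first so the result is never negative. */
static int band_rate_idx(int index, int is_5ghz)
{
	if (is_5ghz) {
		if (index < FIRST_OFDM_RATE)	/* e.g. a leftover CCK index */
			index = FIRST_OFDM_RATE;
		return index - FIRST_OFDM_RATE;
	}
	return index;
}

int main(void)
{
	printf("5GHz, index 2   -> %d\n", band_rate_idx(2, 1));	/* clamped to 0 */
	printf("5GHz, index 7   -> %d\n", band_rate_idx(7, 1));	/* 3 */
	printf("2.4GHz, index 2 -> %d\n", band_rate_idx(2, 0));	/* unchanged */
	return 0;
}
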
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index ea31d7674df3..a7b891453869 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -168,7 +168,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
168 168
169 memset(&cmd, 0, sizeof(cmd)); 169 memset(&cmd, 0, sizeof(cmd));
170 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 170 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
171 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 171 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
172 if (!(cmd.radio_sensor_offset)) 172 if (!(cmd.radio_sensor_offset))
173 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 173 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
174 174
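
The one-line calibration fix above is the classic sizeof pitfall: offset_calib is a pointer, so sizeof(offset_calib) is the pointer size (4 or 8 bytes), not the size of the value it points to. A stand-alone demonstration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t src = 0x1234;		/* stands in for the calibration value */
	uint16_t dst = 0;
	uint16_t *offset_calib = &src;

	/* Buggy form would copy sizeof(uint16_t *) bytes - 8 on 64-bit -
	 * overrunning both objects; here the sizes are just printed. */
	printf("sizeof(offset_calib)  = %zu\n", sizeof(offset_calib));
	printf("sizeof(*offset_calib) = %zu\n", sizeof(*offset_calib));

	/* Correct form: copy exactly the pointed-to object. */
	memcpy(&dst, offset_calib, sizeof(*offset_calib));
	printf("dst = 0x%04x\n", dst);
	return 0;
}
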
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 7f6c58ebbc44..6057e18f688c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1780,7 +1780,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
1780 IEEE80211_HW_SPECTRUM_MGMT | 1780 IEEE80211_HW_SPECTRUM_MGMT |
1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
1782 1782
1783 /*
1784 * Including the following line will crash some AP's. This
1785 * workaround removes the stimulus which causes the crash until
1786 * the AP software can be fixed.
1783 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 1787 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1788 */
1784 1789
1785 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 1790 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
1786 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 1791 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index ca686dbf5893..f6d823f012db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -925,6 +925,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb)
925 cmd = txq->cmd[cmd_index]; 925 cmd = txq->cmd[cmd_index];
926 meta = &txq->meta[cmd_index]; 926 meta = &txq->meta[cmd_index];
927 927
928 txq->time_stamp = jiffies;
929
928 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 930 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
929 DMA_BIDIRECTIONAL); 931 DMA_BIDIRECTIONAL);
930 932
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a5ddb39ca4a0..31c98509f7e6 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3769,14 +3769,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3769 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 3769 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
3770 3770
3771 /* Apparently the data is read from end to start */ 3771 /* Apparently the data is read from end to start */
3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, 3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3773 (u32 *)&rt2x00dev->eeprom[i]); 3773 /* The returned value is in CPU order, but eeprom is le */
3774 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, 3774 rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3775 (u32 *)&rt2x00dev->eeprom[i + 2]); 3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3776 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, 3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3777 (u32 *)&rt2x00dev->eeprom[i + 4]); 3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
3778 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, 3778 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
3779 (u32 *)&rt2x00dev->eeprom[i + 6]); 3779 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
3780 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
3780 3781
3781 mutex_unlock(&rt2x00dev->csr_mutex); 3782 mutex_unlock(&rt2x00dev->csr_mutex);
3782} 3783}
@@ -3942,19 +3943,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3942 return -ENODEV; 3943 return -ENODEV;
3943 } 3944 }
3944 3945
3945 if (!rt2x00_rf(rt2x00dev, RF2820) && 3946 switch (rt2x00dev->chip.rf) {
3946 !rt2x00_rf(rt2x00dev, RF2850) && 3947 case RF2820:
3947 !rt2x00_rf(rt2x00dev, RF2720) && 3948 case RF2850:
3948 !rt2x00_rf(rt2x00dev, RF2750) && 3949 case RF2720:
3949 !rt2x00_rf(rt2x00dev, RF3020) && 3950 case RF2750:
3950 !rt2x00_rf(rt2x00dev, RF2020) && 3951 case RF3020:
3951 !rt2x00_rf(rt2x00dev, RF3021) && 3952 case RF2020:
3952 !rt2x00_rf(rt2x00dev, RF3022) && 3953 case RF3021:
3953 !rt2x00_rf(rt2x00dev, RF3052) && 3954 case RF3022:
3954 !rt2x00_rf(rt2x00dev, RF3320) && 3955 case RF3052:
3955 !rt2x00_rf(rt2x00dev, RF5370) && 3956 case RF3320:
3956 !rt2x00_rf(rt2x00dev, RF5390)) { 3957 case RF5370:
3957 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3958 case RF5390:
3959 break;
3960 default:
3961 ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
3962 rt2x00dev->chip.rf);
3958 return -ENODEV; 3963 return -ENODEV;
3959 } 3964 }
3960 3965
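
The efuse hunk earlier in this file stops casting register values straight into the little-endian eeprom[] array and instead converts them with cpu_to_le32(), since register reads come back in CPU byte order. Below is a small user-space analogue of that explicit conversion; the cpu_to_le32 helper is hand-rolled here so the example does not depend on kernel or libc byte-order macros.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a CPU-order value as little-endian bytes, whatever the host order. */
static uint32_t cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };
	uint32_t out;

	memcpy(&out, b, 4);
	return out;
}

int main(void)
{
	uint32_t reg = 0x11223344;	/* value as read from a register */
	uint32_t eeprom[2];

	eeprom[0] = cpu_to_le32(reg);	/* stored little-endian */

	uint8_t *p = (uint8_t *)&eeprom[0];
	printf("bytes in memory: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	/* Prints 44 33 22 11 on any host, which is what an le32 field expects. */
	return 0;
}
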
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 677b5ababbdd..f1565792f270 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
464 int wcid, ack, pid; 464 int wcid, ack, pid;
465 int tx_wcid, tx_ack, tx_pid; 465 int tx_wcid, tx_ack, tx_pid;
466 466
467 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
468 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
469 WARNING(entry->queue->rt2x00dev,
470 "Data pending for entry %u in queue %u\n",
471 entry->entry_idx, entry->queue->qid);
472 cond_resched();
473 return false;
474 }
475
467 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); 476 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
468 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); 477 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
469 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); 478 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -529,13 +538,12 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
 		if (rt2800usb_txdone_entry_check(entry, reg))
 			break;
+		entry = NULL;
 	}
 
-	if (!entry || rt2x00queue_empty(queue))
-		break;
-
-	rt2800_txdone_entry(entry, reg,
-			    rt2800usb_get_txwi(entry));
+	if (entry)
+		rt2800_txdone_entry(entry, reg,
+				    rt2800usb_get_txwi(entry));
 	}
 }
 
@@ -559,8 +567,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
 	while (!rt2x00queue_empty(queue)) {
 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
 
-		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
 			break;
+
 		if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
 			rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
 		else if (rt2x00queue_status_timeout(entry))
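
Both rt2800usb hunks gate TX-status handling on the same pair of entry flags: the device must have released the entry (ENTRY_OWNER_DEVICE_DATA clear) while a status read is still outstanding (ENTRY_DATA_STATUS_PENDING set); any other combination means the status FIFO and the queue are out of step. The predicate on its own, with plain bit flags standing in for the driver's atomic bitops (the bit positions are arbitrary here):

    #include <stdbool.h>
    #include <stdio.h>

    #define ENTRY_OWNER_DEVICE_DATA   (1u << 0)  /* URB still owned by the device */
    #define ENTRY_DATA_STATUS_PENDING (1u << 1)  /* TX status not yet consumed    */

    /* An entry may be matched against a TX_STA_FIFO word only when the
     * device is done with it and its status has not been handled yet. */
    static bool entry_ready_for_txdone(unsigned int flags)
    {
        return !(flags & ENTRY_OWNER_DEVICE_DATA) &&
               (flags & ENTRY_DATA_STATUS_PENDING);
    }

    int main(void)
    {
        printf("%d\n", entry_ready_for_txdone(ENTRY_DATA_STATUS_PENDING)); /* 1 */
        printf("%d\n", entry_ready_for_txdone(ENTRY_OWNER_DEVICE_DATA |
                                              ENTRY_DATA_STATUS_PENDING)); /* 0 */
        return 0;
    }
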
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b6b4542c2460..1e31050dafc9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 	struct queue_entry *entry = (struct queue_entry *)urb->context;
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 
-	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
-
-	if (rt2x00dev->ops->lib->tx_dma_done)
-		rt2x00dev->ops->lib->tx_dma_done(entry);
-
-	/*
-	 * Report the frame as DMA done
-	 */
-	rt2x00lib_dmadone(entry);
-
 	/*
 	 * Check if the frame was correctly uploaded
 	 */
 	if (urb->status)
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+	/*
+	 * Report the frame as DMA done
+	 */
+	rt2x00lib_dmadone(entry);
 
+	if (rt2x00dev->ops->lib->tx_dma_done)
+		rt2x00dev->ops->lib->tx_dma_done(entry);
 	/*
 	 * Schedule the delayed work for reading the TX status
 	 * from the device.
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
 {
 	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
 	struct rt2x00_dev *rt2x00dev = hw->priv;
-	int retval;
-
-	retval = rt2x00lib_suspend(rt2x00dev, state);
-	if (retval)
-		return retval;
 
-	/*
-	 * Decrease usbdev refcount.
-	 */
-	usb_put_dev(interface_to_usbdev(usb_intf));
-
-	return 0;
+	return rt2x00lib_suspend(rt2x00dev, state);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
 
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
 	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
-	usb_get_dev(interface_to_usbdev(usb_intf));
-
 	return rt2x00lib_resume(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_resume);
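
The reordered completion handler above records ENTRY_DATA_IO_FAILED before rt2x00lib_dmadone() marks the entry as DMA-complete, and only then runs the chipset's tx_dma_done() hook, so every later consumer sees the final error state. A tiny sketch of that record-then-publish ordering (struct and helper are illustrative, not the rt2x00 API):

    #include <stdbool.h>
    #include <stdio.h>

    struct tx_entry {
        bool io_failed;   /* must be valid before 'done' is observed */
        bool done;        /* completion mark that consumers act on   */
    };

    /* Record the outcome first, then publish completion: a consumer that
     * reacts to 'done' is guaranteed to see the final 'io_failed' value.
     * (The real driver uses atomic bitops; plain bools keep the sketch short.) */
    static void complete_entry(struct tx_entry *e, int urb_status)
    {
        if (urb_status)
            e->io_failed = true;
        e->done = true;
    }

    int main(void)
    {
        struct tx_entry e = { 0 };

        complete_entry(&e, -71 /* e.g. a protocol error reported by the URB */);
        printf("done=%d io_failed=%d\n", e.done, e.io_failed);
        return 0;
    }
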
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c0..04c4e9eb6ee6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
610 610
611 mac->link_state = MAC80211_NOLINK; 611 mac->link_state = MAC80211_NOLINK;
612 memset(mac->bssid, 0, 6); 612 memset(mac->bssid, 0, 6);
613
614 /* reset sec info */
615 rtl_cam_reset_sec_info(hw);
616
617 rtl_cam_reset_all_entry(hw);
613 mac->vendor = PEER_UNKNOWN; 618 mac->vendor = PEER_UNKNOWN;
614 619
615 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1063 *or clear all entry here. 1068 *or clear all entry here.
1064 */ 1069 */
1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1070 rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
1071
1072 rtl_cam_reset_sec_info(hw);
1073
1066 break; 1074 break;
1067 default: 1075 default:
1068 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1076 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index c4161148e0d8..bc33b147f44f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -548,15 +548,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
 			(tcb_desc->rts_use_shortpreamble ? 1 : 0)
 			: (tcb_desc->rts_use_shortgi ? 1 : 0)));
 	if (mac->bw_40) {
-		if (tcb_desc->packet_bw) {
+		if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
 			SET_TX_DESC_DATA_BW(txdesc, 1);
 			SET_TX_DESC_DATA_SC(txdesc, 3);
+		} else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
+			SET_TX_DESC_DATA_BW(txdesc, 1);
+			SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
 		} else {
 			SET_TX_DESC_DATA_BW(txdesc, 0);
-			if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
-				SET_TX_DESC_DATA_SC(txdesc,
-						    mac->cur_40_prime_sc);
-		}
+			SET_TX_DESC_DATA_SC(txdesc, 0);
+		}
 	} else {
 		SET_TX_DESC_DATA_BW(txdesc, 0);
 		SET_TX_DESC_DATA_SC(txdesc, 0);
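
The reworked 40 MHz branch in rtl92cu_tx_fill_desc() now derives the descriptor's bandwidth and subcarrier fields from the rate-control flags instead of tcb_desc->packet_bw: duplicate data transmits across the whole 40 MHz channel (SC 3), an explicit 40 MHz rate uses the configured primary subcarrier, and anything else drops back to 20 MHz. A table-style sketch of that mapping (flag values and the struct are stand-ins):

    #include <stdio.h>

    #define TX_RC_DUP_DATA     (1u << 0)   /* stand-ins for the mac80211 rate flags */
    #define TX_RC_40_MHZ_WIDTH (1u << 1)

    struct bw_setting {
        int data_bw;   /* 1 = 40 MHz, 0 = 20 MHz */
        int data_sc;   /* 3 = duplicate on both halves, else primary subcarrier */
    };

    static struct bw_setting pick_bw(int bw_40, unsigned int rate_flag,
                                     int cur_40_prime_sc)
    {
        struct bw_setting s = { 0, 0 };        /* default: 20 MHz, SC 0 */

        if (!bw_40)
            return s;
        if (rate_flag & TX_RC_DUP_DATA) {
            s.data_bw = 1;
            s.data_sc = 3;                     /* duplicate across the channel */
        } else if (rate_flag & TX_RC_40_MHZ_WIDTH) {
            s.data_bw = 1;
            s.data_sc = cur_40_prime_sc;       /* honour the primary channel */
        }
        return s;
    }

    int main(void)
    {
        struct bw_setting s = pick_bw(1, TX_RC_40_MHZ_WIDTH, 2);
        printf("bw=%d sc=%d\n", s.data_bw, s.data_sc);
        return 0;
    }
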
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index e047594794aa..f2838ae07da5 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -78,8 +78,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
78 auth->sleep_auth = sleep_auth; 78 auth->sleep_auth = sleep_auth;
79 79
80 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 80 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
81 if (ret < 0)
82 return ret;
83 81
84out: 82out:
85 kfree(auth); 83 kfree(auth);
@@ -576,10 +574,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
576 574
577 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 575 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
578 detection, sizeof(*detection)); 576 detection, sizeof(*detection));
579 if (ret < 0) { 577 if (ret < 0)
580 wl1271_warning("failed to set cca threshold: %d", ret); 578 wl1271_warning("failed to set cca threshold: %d", ret);
581 return ret;
582 }
583 579
584out: 580out:
585 kfree(detection); 581 kfree(detection);
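
The wl1271_acx hunks above remove early returns that jumped past the kfree() at the shared out: label, so a failed wl1271_cmd_configure() no longer leaks the command buffer; the warning still fires, but cleanup always runs. The same single-exit shape in a reduced form (the helper below is a placeholder, not the wl12xx API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Placeholder for a firmware command that may fail. */
    static int send_config(const void *buf, size_t len)
    {
        (void)buf; (void)len;
        return -5;    /* pretend the command returned -EIO */
    }

    static int configure_threshold(int threshold)
    {
        int ret;
        int *cmd = calloc(1, sizeof(*cmd));

        if (!cmd)
            return -12;    /* -ENOMEM */

        *cmd = threshold;
        ret = send_config(cmd, sizeof(*cmd));
        if (ret < 0)
            fprintf(stderr, "failed to set threshold: %d\n", ret);
        /* No early return here: fall through so the buffer is always freed. */

        free(cmd);
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", configure_threshold(42));
        return 0;
    }

The kernel version reaches the same cleanup through a goto out label; the point of the fix is that the error path no longer bypasses it.
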
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index ac2e5661397c..516a8980723c 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
164 /* If enabled, tell runtime PM not to power off the card */ 164 /* If enabled, tell runtime PM not to power off the card */
165 if (pm_runtime_enabled(&func->dev)) { 165 if (pm_runtime_enabled(&func->dev)) {
166 ret = pm_runtime_get_sync(&func->dev); 166 ret = pm_runtime_get_sync(&func->dev);
167 if (ret) 167 if (ret < 0)
168 goto out; 168 goto out;
169 } else { 169 } else {
170 /* Runtime PM is disabled: power up the card manually */ 170 /* Runtime PM is disabled: power up the card manually */
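
The one-character sdio.c change matters because pm_runtime_get_sync() may return a positive value when the device was already active, which is still success; treating any non-zero return as a failure made power-on bail out for no reason, so only negative returns should be errors. The convention in a generic form (helper names invented for the sketch):

    #include <stdio.h>

    /* Stand-in for an API that returns <0 on error and >=0 (including >0) on success. */
    static int resume_device(int already_active)
    {
        if (already_active)
            return 1;    /* success, device was already powered */
        return 0;        /* success, device was resumed now */
    }

    static int power_on(int already_active)
    {
        int ret = resume_device(already_active);

        if (ret < 0)     /* only negative values are errors */
            return ret;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", power_on(0), power_on(1));    /* both succeed */
        return 0;
    }
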
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 5d5e1ef87206..4ae8effaee22 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
36 WL1271_TM_CMD_TEST, 36 WL1271_TM_CMD_TEST,
37 WL1271_TM_CMD_INTERROGATE, 37 WL1271_TM_CMD_INTERROGATE,
38 WL1271_TM_CMD_CONFIGURE, 38 WL1271_TM_CMD_CONFIGURE,
39 WL1271_TM_CMD_NVS_PUSH,
40 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
41 WL1271_TM_CMD_RECOVER, 40 WL1271_TM_CMD_RECOVER,
42 41
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 
 	if (ret < 0) {
 		wl1271_warning("testmode cmd interrogate failed: %d", ret);
+		kfree(cmd);
 		return ret;
 	}
 
 	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
-	if (!skb)
+	if (!skb) {
+		kfree(cmd);
 		return -ENOMEM;
+	}
 
 	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
 
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
187 return 0; 189 return 0;
188} 190}
189 191
190static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
191{
192 int ret = 0;
193 size_t len;
194 void *buf;
195
196 wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
197
198 if (!tb[WL1271_TM_ATTR_DATA])
199 return -EINVAL;
200
201 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
202 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
203
204 mutex_lock(&wl->mutex);
205
206 kfree(wl->nvs);
207
208 if ((wl->chip.id == CHIP_ID_1283_PG20) &&
209 (len != sizeof(struct wl128x_nvs_file)))
210 return -EINVAL;
211 else if (len != sizeof(struct wl1271_nvs_file))
212 return -EINVAL;
213
214 wl->nvs = kzalloc(len, GFP_KERNEL);
215 if (!wl->nvs) {
216 wl1271_error("could not allocate memory for the nvs file");
217 ret = -ENOMEM;
218 goto out;
219 }
220
221 memcpy(wl->nvs, buf, len);
222 wl->nvs_len = len;
223
224 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
225
226out:
227 mutex_unlock(&wl->mutex);
228
229 return ret;
230}
231
232static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) 192static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
233{ 193{
234 u32 val; 194 u32 val;
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
285 return wl1271_tm_cmd_interrogate(wl, tb); 245 return wl1271_tm_cmd_interrogate(wl, tb);
286 case WL1271_TM_CMD_CONFIGURE: 246 case WL1271_TM_CMD_CONFIGURE:
287 return wl1271_tm_cmd_configure(wl, tb); 247 return wl1271_tm_cmd_configure(wl, tb);
288 case WL1271_TM_CMD_NVS_PUSH:
289 return wl1271_tm_cmd_nvs_push(wl, tb);
290 case WL1271_TM_CMD_SET_PLT_MODE: 248 case WL1271_TM_CMD_SET_PLT_MODE:
291 return wl1271_tm_cmd_set_plt_mode(wl, tb); 249 return wl1271_tm_cmd_set_plt_mode(wl, tb);
292 case WL1271_TM_CMD_RECOVER: 250 case WL1271_TM_CMD_RECOVER:
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 749fdf070319..3ffd9c1acc0a 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
158 */ 158 */
159} 159}
160 160
161/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
162static int pci_set_payload(struct pci_dev *dev)
163{
164 int pos, ppos;
165 u16 pctl, psz;
166 u16 dctl, dsz, dcap, dmax;
167 struct pci_dev *parent;
168
169 parent = dev->bus->self;
170 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
171 if (!pos)
172 return 0;
173
174 /* Read Device MaxPayload capability and setting */
175 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
176 pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
177 dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
178 dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
179
180 /* Read Parent MaxPayload setting */
181 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
182 if (!ppos)
183 return 0;
184 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
185 psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
186
187 /* If parent payload > device max payload -> error
188 * If parent payload > device payload -> set speed
189 * If parent payload <= device payload -> do nothing
190 */
191 if (psz > dmax)
192 return -1;
193 else if (psz > dsz) {
194 dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
195 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
196 (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
197 (psz << 5));
198 }
199 return 0;
200}
201
202void pci_configure_slot(struct pci_dev *dev) 161void pci_configure_slot(struct pci_dev *dev)
203{ 162{
204 struct pci_dev *cdev; 163 struct pci_dev *cdev;
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
 		(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
 		return;
 
-	ret = pci_set_payload(dev);
-	if (ret)
-		dev_warn(&dev->dev, "could not set device max payload\n");
+	if (dev->bus && dev->bus->self)
+		pcie_bus_configure_settings(dev->bus,
+					    dev->bus->self->pcie_mpss);
 
 	memset(&hpp, 0, sizeof(hpp));
 	ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index c94d37ec55c8..f0929934bb7a 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
55 */ 55 */
56 if (bus->bridge->of_node) 56 if (bus->bridge->of_node)
57 return of_node_get(bus->bridge->of_node); 57 return of_node_get(bus->bridge->of_node);
58 if (bus->bridge->parent->of_node) 58 if (bus->bridge->parent && bus->bridge->parent->of_node)
59 return of_node_get(bus->bridge->parent->of_node); 59 return of_node_get(bus->bridge->parent->of_node);
60 return NULL; 60 return NULL;
61} 61}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 08a95b369d85..4e84fd4a4312 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79 79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
81
80/* 82/*
81 * The default CLS is used if arch didn't set CLS explicitly and not 83 * The default CLS is used if arch didn't set CLS explicitly and not
82 * all pci devices agree on the same value. Arch can override either 84 * all pci devices agree on the same value. Arch can override either
@@ -3223,6 +3225,67 @@ out:
3223EXPORT_SYMBOL(pcie_set_readrq); 3225EXPORT_SYMBOL(pcie_set_readrq);
3224 3226
3225/** 3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 * or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236 int ret, cap;
3237 u16 ctl;
3238
3239 cap = pci_pcie_cap(dev);
3240 if (!cap)
3241 return -EINVAL;
3242
3243 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244 if (!ret)
3245 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247 return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 * valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible sets maximum payload size
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260 int cap, err = -EINVAL;
3261 u16 ctl, v;
3262
3263 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264 goto out;
3265
3266 v = ffs(mps) - 8;
3267 if (v > dev->pcie_mpss)
3268 goto out;
3269 v <<= 5;
3270
3271 cap = pci_pcie_cap(dev);
3272 if (!cap)
3273 goto out;
3274
3275 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276 if (err)
3277 goto out;
3278
3279 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281 ctl |= v;
3282 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283 }
3284out:
3285 return err;
3286}
3287
3288/**
3226 * pci_select_bars - Make BAR mask from the type of resource 3289 * pci_select_bars - Make BAR mask from the type of resource
3227 * @dev: the PCI device for which BAR mask is made 3290 * @dev: the PCI device for which BAR mask is made
3228 * @flags: resource type mask to be selected 3291 * @flags: resource type mask to be selected
@@ -3505,6 +3568,10 @@ static int __init pci_setup(char *str)
3505 pci_hotplug_io_size = memparse(str + 9, &str); 3568 pci_hotplug_io_size = memparse(str + 9, &str);
3506 } else if (!strncmp(str, "hpmemsize=", 10)) { 3569 } else if (!strncmp(str, "hpmemsize=", 10)) {
3507 pci_hotplug_mem_size = memparse(str + 10, &str); 3570 pci_hotplug_mem_size = memparse(str + 10, &str);
3571 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3572 pcie_bus_config = PCIE_BUS_SAFE;
3573 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3574 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3508 } else { 3575 } else {
3509 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3576 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3510 str); 3577 str);
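
The new pcie_get_mps()/pcie_set_mps() helpers translate between a payload size in bytes and the 3-bit encoding of PCI_EXP_DEVCTL_PAYLOAD: the field encodes 128 << field bytes, so a size maps back with ffs(size) - 8 (ffs(128) is 8) and is shifted into bits 7:5 of Device Control. A short worked example of that arithmetic in user-space C, using ffs() from <strings.h>:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0    /* Device Control bits 7:5 */

    int main(void)
    {
        int mps;

        for (mps = 128; mps <= 4096; mps *= 2) {
            int field = ffs(mps) - 8;                  /* 128 -> 0, 256 -> 1, ... 4096 -> 5 */
            unsigned int devctl_bits = (unsigned int)field << 5;

            printf("MPS %4d bytes -> field %d -> DEVCTL payload bits 0x%02x"
                   " -> decoded %d bytes\n",
                   mps, field, devctl_bits & PCI_EXP_DEVCTL_PAYLOAD,
                   128 << field);
        }
        return 0;
    }

The same check as pcie_set_mps() applies: values outside 128..4096 or not a power of two have no valid encoding and are rejected.
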
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c8cee764b0de..b74084e9ca12 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
283 283
284#endif /* CONFIG_PCI_IOV */ 284#endif /* CONFIG_PCI_IOV */
285 285
286extern unsigned long pci_cardbus_resource_alignment(struct resource *);
287
286static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 288static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
287 struct resource *res) 289 struct resource *res)
288{ 290{
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
292 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) 294 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
293 return pci_sriov_resource_alignment(dev, resno); 295 return pci_sriov_resource_alignment(dev, resno);
294#endif 296#endif
297 if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
298 return pci_cardbus_resource_alignment(res);
295 return resource_alignment(res); 299 return resource_alignment(res);
296} 300}
297 301
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 795c9026d55f..f3f94a5c068f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
856 pdev->pcie_cap = pos; 856 pdev->pcie_cap = pos;
857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
859 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
860 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
859} 861}
860 862
861void set_pcie_hotplug_bridge(struct pci_dev *pdev) 863void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1326,6 +1328,151 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1326 return nr; 1328 return nr;
1327} 1329}
1328 1330
1331static int pcie_find_smpss(struct pci_dev *dev, void *data)
1332{
1333 u8 *smpss = data;
1334
1335 if (!pci_is_pcie(dev))
1336 return 0;
1337
1338 /* For PCIE hotplug enabled slots not connected directly to a
1339 * PCI-E root port, there can be problems when hotplugging
1340 * devices. This is due to the possibility of hotplugging a
1341 * device into the fabric with a smaller MPS that the devices
1342 * currently running have configured. Modifying the MPS on the
1343 * running devices could cause a fatal bus error due to an
1344 * incoming frame being larger than the newly configured MPS.
1345 * To work around this, the MPS for the entire fabric must be
1346 * set to the minimum size. Any devices hotplugged into this
1347 * fabric will have the minimum MPS set. If the PCI hotplug
1348 * slot is directly connected to the root port and there are not
1349 * other devices on the fabric (which seems to be the most
1350 * common case), then this is not an issue and MPS discovery
1351 * will occur as normal.
1352 */
1353 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1354 (dev->bus->self &&
1355 dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
1356 *smpss = 0;
1357
1358 if (*smpss > dev->pcie_mpss)
1359 *smpss = dev->pcie_mpss;
1360
1361 return 0;
1362}
1363
1364static void pcie_write_mps(struct pci_dev *dev, int mps)
1365{
1366 int rc, dev_mpss;
1367
1368 dev_mpss = 128 << dev->pcie_mpss;
1369
1370 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1371 if (dev->bus->self) {
1372 dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
1373 128 << dev->bus->self->pcie_mpss);
1374
1375 /* For "MPS Force Max", the assumption is made that
1376 * downstream communication will never be larger than
1377 * the MRRS. So, the MPS only needs to be configured
1378 * for the upstream communication. This being the case,
1379 * walk from the top down and set the MPS of the child
1380 * to that of the parent bus.
1381 */
1382 mps = 128 << dev->bus->self->pcie_mpss;
1383 if (mps > dev_mpss)
1384 dev_warn(&dev->dev, "MPS configured higher than"
1385 " maximum supported by the device. If"
1386 " a bus issue occurs, try running with"
1387 " pci=pcie_bus_safe.\n");
1388 }
1389
1390 dev->pcie_mpss = ffs(mps) - 8;
1391 }
1392
1393 rc = pcie_set_mps(dev, mps);
1394 if (rc)
1395 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1396}
1397
1398static void pcie_write_mrrs(struct pci_dev *dev, int mps)
1399{
1400 int rc, mrrs, dev_mpss;
1401
1402 /* In the "safe" case, do not configure the MRRS. There appear to be
1403 * issues with setting MRRS to 0 on a number of devices.
1404 */
1405
1406 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1407 return;
1408
1409 dev_mpss = 128 << dev->pcie_mpss;
1410
1411 /* For Max performance, the MRRS must be set to the largest supported
1412 * value. However, it cannot be configured larger than the MPS the
1413 * device or the bus can support. This assumes that the largest MRRS
1414 * available on the device cannot be smaller than the device MPSS.
1415 */
1416 mrrs = min(mps, dev_mpss);
1417
1418 /* MRRS is a R/W register. Invalid values can be written, but a
1419 * subsequent read will verify if the value is acceptable or not.
1420 * If the MRRS value provided is not acceptable (e.g., too large),
1421 * shrink the value until it is acceptable to the HW.
1422 */
1423 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1424 dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
1425 " to %d. If any issues are encountered, please try "
1426 "running with pci=pcie_bus_safe\n", mrrs);
1427 rc = pcie_set_readrq(dev, mrrs);
1428 if (rc)
1429 dev_err(&dev->dev,
1430 "Failed attempting to set the MRRS\n");
1431
1432 mrrs /= 2;
1433 }
1434}
1435
1436static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1437{
1438 int mps = 128 << *(u8 *)data;
1439
1440 if (!pci_is_pcie(dev))
1441 return 0;
1442
1443 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1444 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1445
1446 pcie_write_mps(dev, mps);
1447 pcie_write_mrrs(dev, mps);
1448
1449 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1450 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1451
1452 return 0;
1453}
1454
1455/* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down,
1456 * parents then children fashion. If this changes, then this code will not
1457 * work as designed.
1458 */
1459void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1460{
1461 u8 smpss = mpss;
1462
1463 if (!pci_is_pcie(bus->self))
1464 return;
1465
1466 if (pcie_bus_config == PCIE_BUS_SAFE) {
1467 pcie_find_smpss(bus->self, &smpss);
1468 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1469 }
1470
1471 pcie_bus_configure_set(bus->self, &smpss);
1472 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1473}
1474EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1475
1329unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) 1476unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1330{ 1477{
1331 unsigned int devfn, pass, max = bus->secondary; 1478 unsigned int devfn, pass, max = bus->secondary;
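
With the new pcie_bus_config setting, PCIE_BUS_SAFE first runs pcie_find_smpss() over the fabric to find the smallest supported MPSS (and forces the 128-byte minimum when a hotplug bridge is not directly below the root port or shares the bus with other devices), then pcie_bus_configure_set() programs every device with that single value. The core of it is a minimum over the encoded capability fields; a compact sketch with a made-up device list:

    #include <stdio.h>

    /* Encoded MPSS per device: payload capability = 128 << mpss bytes. */
    static const unsigned char fabric_mpss[] = { 2, 1, 3 };    /* 512, 256, 1024 bytes */

    int main(void)
    {
        unsigned char smpss = 5;    /* start at the spec maximum, 4096 bytes */
        unsigned int i;

        for (i = 0; i < sizeof(fabric_mpss); i++)
            if (fabric_mpss[i] < smpss)
                smpss = fabric_mpss[i];    /* keep the smallest capability */

        /* Every device on the fabric is then programmed with this one MPS. */
        printf("fabric MPS = %d bytes (encoded %u)\n", 128 << smpss, smpss);
        return 0;
    }
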
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8a1d3c7863a8..784da9d36029 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,6 +34,7 @@ struct resource_list_x {
34 resource_size_t start; 34 resource_size_t start;
35 resource_size_t end; 35 resource_size_t end;
36 resource_size_t add_size; 36 resource_size_t add_size;
37 resource_size_t min_align;
37 unsigned long flags; 38 unsigned long flags;
38}; 39};
39 40
@@ -65,7 +66,7 @@ void pci_realloc(void)
65 */ 66 */
66static void add_to_list(struct resource_list_x *head, 67static void add_to_list(struct resource_list_x *head,
67 struct pci_dev *dev, struct resource *res, 68 struct pci_dev *dev, struct resource *res,
68 resource_size_t add_size) 69 resource_size_t add_size, resource_size_t min_align)
69{ 70{
70 struct resource_list_x *list = head; 71 struct resource_list_x *list = head;
71 struct resource_list_x *ln = list->next; 72 struct resource_list_x *ln = list->next;
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head,
84 tmp->end = res->end; 85 tmp->end = res->end;
85 tmp->flags = res->flags; 86 tmp->flags = res->flags;
86 tmp->add_size = add_size; 87 tmp->add_size = add_size;
88 tmp->min_align = min_align;
87 list->next = tmp; 89 list->next = tmp;
88} 90}
89 91
90static void add_to_failed_list(struct resource_list_x *head, 92static void add_to_failed_list(struct resource_list_x *head,
91 struct pci_dev *dev, struct resource *res) 93 struct pci_dev *dev, struct resource *res)
92{ 94{
93 add_to_list(head, dev, res, 0); 95 add_to_list(head, dev, res,
96 0 /* dont care */,
97 0 /* dont care */);
94} 98}
95 99
96static void __dev_sort_resources(struct pci_dev *dev, 100static void __dev_sort_resources(struct pci_dev *dev,
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res)
121} 125}
122 126
123/** 127/**
124 * adjust_resources_sorted() - satisfy any additional resource requests 128 * reassign_resources_sorted() - satisfy any additional resource requests
125 * 129 *
126 * @add_head : head of the list tracking requests requiring additional 130 * @realloc_head : head of the list tracking requests requiring additional
127 * resources 131 * resources
128 * @head : head of the list tracking requests with allocated 132 * @head : head of the list tracking requests with allocated
129 * resources 133 * resources
130 * 134 *
131 * Walk through each element of the add_head and try to procure 135 * Walk through each element of the realloc_head and try to procure
132 * additional resources for the element, provided the element 136 * additional resources for the element, provided the element
133 * is in the head list. 137 * is in the head list.
134 */ 138 */
135static void adjust_resources_sorted(struct resource_list_x *add_head, 139static void reassign_resources_sorted(struct resource_list_x *realloc_head,
136 struct resource_list *head) 140 struct resource_list *head)
137{ 141{
138 struct resource *res; 142 struct resource *res;
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
141 resource_size_t add_size; 145 resource_size_t add_size;
142 int idx; 146 int idx;
143 147
144 prev = add_head; 148 prev = realloc_head;
145 for (list = add_head->next; list;) { 149 for (list = realloc_head->next; list;) {
146 res = list->res; 150 res = list->res;
147 /* skip resource that has been reset */ 151 /* skip resource that has been reset */
148 if (!res->flags) 152 if (!res->flags)
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
159 163
160 idx = res - &list->dev->resource[0]; 164 idx = res - &list->dev->resource[0];
161 add_size=list->add_size; 165 add_size=list->add_size;
162 if (!resource_size(res) && add_size) { 166 if (!resource_size(res)) {
163 res->end = res->start + add_size - 1; 167 res->start = list->start;
164 if(pci_assign_resource(list->dev, idx)) 168 res->end = res->start + add_size - 1;
169 if(pci_assign_resource(list->dev, idx))
165 reset_resource(res); 170 reset_resource(res);
166 } else if (add_size) { 171 } else {
167 adjust_resource(res, res->start, 172 resource_size_t align = list->min_align;
168 resource_size(res) + add_size); 173 res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
174 if (pci_reassign_resource(list->dev, idx, add_size, align))
175 dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
176 res);
169 } 177 }
170out: 178out:
171 tmp = list; 179 tmp = list;
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head,
210} 218}
211 219
212static void __assign_resources_sorted(struct resource_list *head, 220static void __assign_resources_sorted(struct resource_list *head,
213 struct resource_list_x *add_head, 221 struct resource_list_x *realloc_head,
214 struct resource_list_x *fail_head) 222 struct resource_list_x *fail_head)
215{ 223{
216 /* Satisfy the must-have resource requests */ 224 /* Satisfy the must-have resource requests */
217 assign_requested_resources_sorted(head, fail_head); 225 assign_requested_resources_sorted(head, fail_head);
218 226
219 /* Try to satisfy any additional nice-to-have resource 227 /* Try to satisfy any additional optional resource
220 requests */ 228 requests */
221 if (add_head) 229 if (realloc_head)
222 adjust_resources_sorted(add_head, head); 230 reassign_resources_sorted(realloc_head, head);
223 free_list(resource_list, head); 231 free_list(resource_list, head);
224} 232}
225 233
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
235} 243}
236 244
237static void pbus_assign_resources_sorted(const struct pci_bus *bus, 245static void pbus_assign_resources_sorted(const struct pci_bus *bus,
238 struct resource_list_x *add_head, 246 struct resource_list_x *realloc_head,
239 struct resource_list_x *fail_head) 247 struct resource_list_x *fail_head)
240{ 248{
241 struct pci_dev *dev; 249 struct pci_dev *dev;
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
245 list_for_each_entry(dev, &bus->devices, bus_list) 253 list_for_each_entry(dev, &bus->devices, bus_list)
246 __dev_sort_resources(dev, &head); 254 __dev_sort_resources(dev, &head);
247 255
248 __assign_resources_sorted(&head, add_head, fail_head); 256 __assign_resources_sorted(&head, realloc_head, fail_head);
249} 257}
250 258
251void pci_setup_cardbus(struct pci_bus *bus) 259void pci_setup_cardbus(struct pci_bus *bus)
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size,
540 return size; 548 return size;
541} 549}
542 550
551static resource_size_t get_res_add_size(struct resource_list_x *realloc_head,
552 struct resource *res)
553{
554 struct resource_list_x *list;
555
556 /* check if it is in realloc_head list */
557 for (list = realloc_head->next; list && list->res != res;
558 list = list->next);
559 if (list)
560 return list->add_size;
561
562 return 0;
563}
564
543/** 565/**
544 * pbus_size_io() - size the io window of a given bus 566 * pbus_size_io() - size the io window of a given bus
545 * 567 *
546 * @bus : the bus 568 * @bus : the bus
547 * @min_size : the minimum io window that must to be allocated 569 * @min_size : the minimum io window that must to be allocated
548 * @add_size : additional optional io window 570 * @add_size : additional optional io window
549 * @add_head : track the additional io window on this list 571 * @realloc_head : track the additional io window on this list
550 * 572 *
551 * Sizing the IO windows of the PCI-PCI bridge is trivial, 573 * Sizing the IO windows of the PCI-PCI bridge is trivial,
552 * since these windows have 4K granularity and the IO ranges 574 * since these windows have 4K granularity and the IO ranges
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size,
554 * We must be careful with the ISA aliasing though. 576 * We must be careful with the ISA aliasing though.
555 */ 577 */
556static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, 578static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
557 resource_size_t add_size, struct resource_list_x *add_head) 579 resource_size_t add_size, struct resource_list_x *realloc_head)
558{ 580{
559 struct pci_dev *dev; 581 struct pci_dev *dev;
560 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 582 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
561 unsigned long size = 0, size0 = 0, size1 = 0; 583 unsigned long size = 0, size0 = 0, size1 = 0;
584 resource_size_t children_add_size = 0;
562 585
563 if (!b_res) 586 if (!b_res)
564 return; 587 return;
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
579 size += r_size; 602 size += r_size;
580 else 603 else
581 size1 += r_size; 604 size1 += r_size;
605
606 if (realloc_head)
607 children_add_size += get_res_add_size(realloc_head, r);
582 } 608 }
583 } 609 }
584 size0 = calculate_iosize(size, min_size, size1, 610 size0 = calculate_iosize(size, min_size, size1,
585 resource_size(b_res), 4096); 611 resource_size(b_res), 4096);
586 size1 = (!add_head || (add_head && !add_size)) ? size0 : 612 if (children_add_size > add_size)
613 add_size = children_add_size;
614 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
587 calculate_iosize(size, min_size+add_size, size1, 615 calculate_iosize(size, min_size+add_size, size1,
588 resource_size(b_res), 4096); 616 resource_size(b_res), 4096);
589 if (!size0 && !size1) { 617 if (!size0 && !size1) {
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
598 b_res->start = 4096; 626 b_res->start = 4096;
599 b_res->end = b_res->start + size0 - 1; 627 b_res->end = b_res->start + size0 - 1;
600 b_res->flags |= IORESOURCE_STARTALIGN; 628 b_res->flags |= IORESOURCE_STARTALIGN;
601 if (size1 > size0 && add_head) 629 if (size1 > size0 && realloc_head)
602 add_to_list(add_head, bus->self, b_res, size1-size0); 630 add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
603} 631}
604 632
605/** 633/**
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
608 * @bus : the bus 636 * @bus : the bus
609 * @min_size : the minimum memory window that must to be allocated 637 * @min_size : the minimum memory window that must to be allocated
610 * @add_size : additional optional memory window 638 * @add_size : additional optional memory window
611 * @add_head : track the additional memory window on this list 639 * @realloc_head : track the additional memory window on this list
612 * 640 *
613 * Calculate the size of the bus and minimal alignment which 641 * Calculate the size of the bus and minimal alignment which
614 * guarantees that all child resources fit in this size. 642 * guarantees that all child resources fit in this size.
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
616static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, 644static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
617 unsigned long type, resource_size_t min_size, 645 unsigned long type, resource_size_t min_size,
618 resource_size_t add_size, 646 resource_size_t add_size,
619 struct resource_list_x *add_head) 647 struct resource_list_x *realloc_head)
620{ 648{
621 struct pci_dev *dev; 649 struct pci_dev *dev;
622 resource_size_t min_align, align, size, size0, size1; 650 resource_size_t min_align, align, size, size0, size1;
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
624 int order, max_order; 652 int order, max_order;
625 struct resource *b_res = find_free_bus_resource(bus, type); 653 struct resource *b_res = find_free_bus_resource(bus, type);
626 unsigned int mem64_mask = 0; 654 unsigned int mem64_mask = 0;
655 resource_size_t children_add_size = 0;
627 656
628 if (!b_res) 657 if (!b_res)
629 return 0; 658 return 0;
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
645 if (r->parent || (r->flags & mask) != type) 674 if (r->parent || (r->flags & mask) != type)
646 continue; 675 continue;
647 r_size = resource_size(r); 676 r_size = resource_size(r);
677#ifdef CONFIG_PCI_IOV
678 /* put SRIOV requested res to the optional list */
679 if (realloc_head && i >= PCI_IOV_RESOURCES &&
680 i <= PCI_IOV_RESOURCE_END) {
681 r->end = r->start - 1;
682 add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
683 children_add_size += r_size;
684 continue;
685 }
686#endif
648 /* For bridges size != alignment */ 687 /* For bridges size != alignment */
649 align = pci_resource_alignment(dev, r); 688 align = pci_resource_alignment(dev, r);
650 order = __ffs(align) - 20; 689 order = __ffs(align) - 20;
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
665 if (order > max_order) 704 if (order > max_order)
666 max_order = order; 705 max_order = order;
667 mem64_mask &= r->flags & IORESOURCE_MEM_64; 706 mem64_mask &= r->flags & IORESOURCE_MEM_64;
707
708 if (realloc_head)
709 children_add_size += get_res_add_size(realloc_head, r);
668 } 710 }
669 } 711 }
670 align = 0; 712 align = 0;
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
681 align += aligns[order]; 723 align += aligns[order];
682 } 724 }
683 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 725 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
684 size1 = (!add_head || (add_head && !add_size)) ? size0 : 726 if (children_add_size > add_size)
727 add_size = children_add_size;
728 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
685 calculate_memsize(size, min_size+add_size, 0, 729 calculate_memsize(size, min_size+add_size, 0,
686 resource_size(b_res), min_align); 730 resource_size(b_res), min_align);
687 if (!size0 && !size1) { 731 if (!size0 && !size1) {
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
695 b_res->start = min_align; 739 b_res->start = min_align;
696 b_res->end = size0 + min_align - 1; 740 b_res->end = size0 + min_align - 1;
697 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; 741 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
698 if (size1 > size0 && add_head) 742 if (size1 > size0 && realloc_head)
699 add_to_list(add_head, bus->self, b_res, size1-size0); 743 add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
700 return 1; 744 return 1;
701} 745}
702 746
703static void pci_bus_size_cardbus(struct pci_bus *bus) 747unsigned long pci_cardbus_resource_alignment(struct resource *res)
748{
749 if (res->flags & IORESOURCE_IO)
750 return pci_cardbus_io_size;
751 if (res->flags & IORESOURCE_MEM)
752 return pci_cardbus_mem_size;
753 return 0;
754}
755
756static void pci_bus_size_cardbus(struct pci_bus *bus,
757 struct resource_list_x *realloc_head)
704{ 758{
705 struct pci_dev *bridge = bus->self; 759 struct pci_dev *bridge = bus->self;
706 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; 760 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
711 * a fixed amount of bus space for CardBus bridges. 765 * a fixed amount of bus space for CardBus bridges.
712 */ 766 */
713 b_res[0].start = 0; 767 b_res[0].start = 0;
714 b_res[0].end = pci_cardbus_io_size - 1;
715 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 768 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
769 if (realloc_head)
770 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);
716 771
717 b_res[1].start = 0; 772 b_res[1].start = 0;
718 b_res[1].end = pci_cardbus_io_size - 1;
719 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 773 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
774 if (realloc_head)
775 add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);
720 776
721 /* 777 /*
722 * Check whether prefetchable memory is supported 778 * Check whether prefetchable memory is supported
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
736 */ 792 */
737 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { 793 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
738 b_res[2].start = 0; 794 b_res[2].start = 0;
739 b_res[2].end = pci_cardbus_mem_size - 1;
740 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; 795 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
796 if (realloc_head)
797 add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);
741 798
742 b_res[3].start = 0; 799 b_res[3].start = 0;
743 b_res[3].end = pci_cardbus_mem_size - 1;
744 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 800 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
801 if (realloc_head)
802 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
745 } else { 803 } else {
746 b_res[3].start = 0; 804 b_res[3].start = 0;
747 b_res[3].end = pci_cardbus_mem_size * 2 - 1;
748 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 805 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
806 if (realloc_head)
807 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
749 } 808 }
809
810 /* set the size of the resource to zero, so that the resource does not
811 * get assigned during required-resource allocation cycle but gets assigned
812 * during the optional-resource allocation cycle.
813 */
814 b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
815 b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
750} 816}
751 817
752void __ref __pci_bus_size_bridges(struct pci_bus *bus, 818void __ref __pci_bus_size_bridges(struct pci_bus *bus,
753 struct resource_list_x *add_head) 819 struct resource_list_x *realloc_head)
754{ 820{
755 struct pci_dev *dev; 821 struct pci_dev *dev;
756 unsigned long mask, prefmask; 822 unsigned long mask, prefmask;
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
763 829
764 switch (dev->class >> 8) { 830 switch (dev->class >> 8) {
765 case PCI_CLASS_BRIDGE_CARDBUS: 831 case PCI_CLASS_BRIDGE_CARDBUS:
766 pci_bus_size_cardbus(b); 832 pci_bus_size_cardbus(b, realloc_head);
767 break; 833 break;
768 834
769 case PCI_CLASS_BRIDGE_PCI: 835 case PCI_CLASS_BRIDGE_PCI:
770 default: 836 default:
771 __pci_bus_size_bridges(b, add_head); 837 __pci_bus_size_bridges(b, realloc_head);
772 break; 838 break;
773 } 839 }
774 } 840 }
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
792 * Follow thru 858 * Follow thru
793 */ 859 */
794 default: 860 default:
795 pbus_size_io(bus, 0, additional_io_size, add_head); 861 pbus_size_io(bus, 0, additional_io_size, realloc_head);
796 /* If the bridge supports prefetchable range, size it 862 /* If the bridge supports prefetchable range, size it
797 separately. If it doesn't, or its prefetchable window 863 separately. If it doesn't, or its prefetchable window
798 has already been allocated by arch code, try 864 has already been allocated by arch code, try
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
800 resources. */ 866 resources. */
801 mask = IORESOURCE_MEM; 867 mask = IORESOURCE_MEM;
802 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; 868 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
803 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) 869 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
804 mask = prefmask; /* Success, size non-prefetch only. */ 870 mask = prefmask; /* Success, size non-prefetch only. */
805 else 871 else
806 additional_mem_size += additional_mem_size; 872 additional_mem_size += additional_mem_size;
807 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); 873 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
808 break; 874 break;
809 } 875 }
810} 876}
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
816EXPORT_SYMBOL(pci_bus_size_bridges); 882EXPORT_SYMBOL(pci_bus_size_bridges);
817 883
818static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 884static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
819 struct resource_list_x *add_head, 885 struct resource_list_x *realloc_head,
820 struct resource_list_x *fail_head) 886 struct resource_list_x *fail_head)
821{ 887{
822 struct pci_bus *b; 888 struct pci_bus *b;
823 struct pci_dev *dev; 889 struct pci_dev *dev;
824 890
825 pbus_assign_resources_sorted(bus, add_head, fail_head); 891 pbus_assign_resources_sorted(bus, realloc_head, fail_head);
826 892
827 list_for_each_entry(dev, &bus->devices, bus_list) { 893 list_for_each_entry(dev, &bus->devices, bus_list) {
828 b = dev->subordinate; 894 b = dev->subordinate;
829 if (!b) 895 if (!b)
830 continue; 896 continue;
831 897
832 __pci_bus_assign_resources(b, add_head, fail_head); 898 __pci_bus_assign_resources(b, realloc_head, fail_head);
833 899
834 switch (dev->class >> 8) { 900 switch (dev->class >> 8) {
835 case PCI_CLASS_BRIDGE_PCI: 901 case PCI_CLASS_BRIDGE_PCI:
@@ -1039,7 +1105,7 @@ void __init
1039pci_assign_unassigned_resources(void) 1105pci_assign_unassigned_resources(void)
1040{ 1106{
1041 struct pci_bus *bus; 1107 struct pci_bus *bus;
1042 struct resource_list_x add_list; /* list of resources that 1108 struct resource_list_x realloc_list; /* list of resources that
1043 want additional resources */ 1109 want additional resources */
1044 int tried_times = 0; 1110 int tried_times = 0;
1045 enum release_type rel_type = leaf_only; 1111 enum release_type rel_type = leaf_only;
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void)
1052 1118
1053 1119
1054 head.next = NULL; 1120 head.next = NULL;
1055 add_list.next = NULL; 1121 realloc_list.next = NULL;
1056 1122
1057 pci_try_num = max_depth + 1; 1123 pci_try_num = max_depth + 1;
1058 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", 1124 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
@@ -1062,12 +1128,12 @@ again:
1062 /* Depth first, calculate sizes and alignments of all 1128 /* Depth first, calculate sizes and alignments of all
1063 subordinate buses. */ 1129 subordinate buses. */
1064 list_for_each_entry(bus, &pci_root_buses, node) 1130 list_for_each_entry(bus, &pci_root_buses, node)
1065 __pci_bus_size_bridges(bus, &add_list); 1131 __pci_bus_size_bridges(bus, &realloc_list);
1066 1132
1067 /* Depth last, allocate resources and update the hardware. */ 1133 /* Depth last, allocate resources and update the hardware. */
1068 list_for_each_entry(bus, &pci_root_buses, node) 1134 list_for_each_entry(bus, &pci_root_buses, node)
1069 __pci_bus_assign_resources(bus, &add_list, &head); 1135 __pci_bus_assign_resources(bus, &realloc_list, &head);
1070 BUG_ON(add_list.next); 1136 BUG_ON(realloc_list.next);
1071 tried_times++; 1137 tried_times++;
1072 1138
1073 /* any device complain? */ 1139 /* any device complain? */
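
The setup-bus.c rework threads a realloc_head list through bridge sizing: each entry records the optional add_size and the min_align to use when the window is grown later, and get_res_add_size() looks a resource up in that list so a bridge can fold its children's optional requests into its own size. A minimal sketch of the list and the lookup, trimmed to the fields the lookup touches:

    #include <stddef.h>
    #include <stdio.h>

    struct resource { int id; };    /* stand-in for struct resource */

    struct realloc_entry {
        struct realloc_entry *next;
        struct resource *res;
        unsigned long add_size;     /* optional extra size wanted    */
        unsigned long min_align;    /* alignment to use when growing */
    };

    /* Mirror of get_res_add_size(): 0 when the resource has no optional request. */
    static unsigned long res_add_size(struct realloc_entry *head, struct resource *res)
    {
        struct realloc_entry *e;

        for (e = head; e && e->res != res; e = e->next)
            ;
        return e ? e->add_size : 0;
    }

    int main(void)
    {
        struct resource r0 = { 0 }, r1 = { 1 };
        struct realloc_entry e1 = { NULL, &r1, 0x1000, 0x1000 };
        struct realloc_entry e0 = { &e1, &r0, 0x200, 0x100 };

        printf("r1 add_size=%#lx, r0 add_size=%#lx\n",
               res_add_size(&e0, &r1), res_add_size(&e0, &r0));
        return 0;
    }
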
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 319f359906e8..51a9095c7da4 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
128} 128}
129#endif /* CONFIG_PCI_QUIRKS */ 129#endif /* CONFIG_PCI_QUIRKS */
130 130
131
132
131static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, 133static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
132 int resno) 134 int resno, resource_size_t size, resource_size_t align)
133{ 135{
134 struct resource *res = dev->resource + resno; 136 struct resource *res = dev->resource + resno;
135 resource_size_t size, min, align; 137 resource_size_t min;
136 int ret; 138 int ret;
137 139
138 size = resource_size(res);
139 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 140 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
140 align = pci_resource_alignment(dev, res);
141 141
142 /* First, try exact prefetching match.. */ 142 /* First, try exact prefetching match.. */
143 ret = pci_bus_alloc_resource(bus, res, size, align, min, 143 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, 154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
155 pcibios_align_resource, dev); 155 pcibios_align_resource, dev);
156 } 156 }
157 return ret;
158}
157 159
158 if (ret < 0 && dev->fw_addr[resno]) { 160static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
159 struct resource *root, *conflict; 161 int resno, resource_size_t size)
160 resource_size_t start, end; 162{
163 struct resource *root, *conflict;
164 resource_size_t start, end;
165 int ret = 0;
161 166
162 /* 167 if (res->flags & IORESOURCE_IO)
163 * If we failed to assign anything, let's try the address 168 root = &ioport_resource;
164 * where firmware left it. That at least has a chance of 169 else
165 * working, which is better than just leaving it disabled. 170 root = &iomem_resource;
166 */ 171
172 start = res->start;
173 end = res->end;
174 res->start = dev->fw_addr[resno];
175 res->end = res->start + size - 1;
176 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
177 resno, res);
178 conflict = request_resource_conflict(root, res);
179 if (conflict) {
180 dev_info(&dev->dev,
181 "BAR %d: %pR conflicts with %s %pR\n", resno,
182 res, conflict->name, conflict);
183 res->start = start;
184 res->end = end;
185 ret = 1;
186 }
187 return ret;
188}
189
190static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
191{
192 struct resource *res = dev->resource + resno;
193 struct pci_bus *bus;
194 int ret;
195 char *type;
167 196
168 if (res->flags & IORESOURCE_IO) 197 bus = dev->bus;
169 root = &ioport_resource; 198 while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
199 if (!bus->parent || !bus->self->transparent)
200 break;
201 bus = bus->parent;
202 }
203
204 if (ret) {
205 if (res->flags & IORESOURCE_MEM)
206 if (res->flags & IORESOURCE_PREFETCH)
207 type = "mem pref";
208 else
209 type = "mem";
210 else if (res->flags & IORESOURCE_IO)
211 type = "io";
170 else 212 else
171 root = &iomem_resource; 213 type = "unknown";
172 214 dev_info(&dev->dev,
173 start = res->start; 215 "BAR %d: can't assign %s (size %#llx)\n",
174 end = res->end; 216 resno, type, (unsigned long long) resource_size(res));
175 res->start = dev->fw_addr[resno];
176 res->end = res->start + size - 1;
177 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
178 resno, res);
179 conflict = request_resource_conflict(root, res);
180 if (conflict) {
181 dev_info(&dev->dev,
182 "BAR %d: %pR conflicts with %s %pR\n", resno,
183 res, conflict->name, conflict);
184 res->start = start;
185 res->end = end;
186 } else
187 ret = 0;
188 } 217 }
189 218
219 return ret;
220}
221
222int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
223 resource_size_t min_align)
224{
225 struct resource *res = dev->resource + resno;
226 resource_size_t new_size;
227 int ret;
228
229 if (!res->parent) {
230 dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR "
231 "\n", resno, res);
232 return -EINVAL;
233 }
234
235 new_size = resource_size(res) + addsize + min_align;
236 ret = _pci_assign_resource(dev, resno, new_size, min_align);
190 if (!ret) { 237 if (!ret) {
191 res->flags &= ~IORESOURCE_STARTALIGN; 238 res->flags &= ~IORESOURCE_STARTALIGN;
192 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); 239 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
193 if (resno < PCI_BRIDGE_RESOURCES) 240 if (resno < PCI_BRIDGE_RESOURCES)
194 pci_update_resource(dev, resno); 241 pci_update_resource(dev, resno);
195 } 242 }
196
197 return ret; 243 return ret;
198} 244}
199 245
200int pci_assign_resource(struct pci_dev *dev, int resno) 246int pci_assign_resource(struct pci_dev *dev, int resno)
201{ 247{
202 struct resource *res = dev->resource + resno; 248 struct resource *res = dev->resource + resno;
203 resource_size_t align; 249 resource_size_t align, size;
204 struct pci_bus *bus; 250 struct pci_bus *bus;
205 int ret; 251 int ret;
206 char *type;
207 252
208 align = pci_resource_alignment(dev, res); 253 align = pci_resource_alignment(dev, res);
209 if (!align) { 254 if (!align) {
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	}
 
 	bus = dev->bus;
-	while ((ret = __pci_assign_resource(bus, dev, resno))) {
-		if (bus->parent && bus->self->transparent)
-			bus = bus->parent;
-		else
-			bus = NULL;
-		if (bus)
-			continue;
-		break;
-	}
+	size = resource_size(res);
+	ret = _pci_assign_resource(dev, resno, size, align);
 
-	if (ret) {
-		if (res->flags & IORESOURCE_MEM)
-			if (res->flags & IORESOURCE_PREFETCH)
-				type = "mem pref";
-			else
-				type = "mem";
-		else if (res->flags & IORESOURCE_IO)
-			type = "io";
-		else
-			type = "unknown";
-		dev_info(&dev->dev,
-			 "BAR %d: can't assign %s (size %#llx)\n",
-			 resno, type, (unsigned long long) resource_size(res));
-	}
+	/*
+	 * If we failed to assign anything, let's try the address
+	 * where firmware left it. That at least has a chance of
+	 * working, which is better than just leaving it disabled.
+	 */
+	if (ret < 0 && dev->fw_addr[resno])
+		ret = pci_revert_fw_address(res, dev, resno, size);
 
+	if (!ret) {
+		res->flags &= ~IORESOURCE_STARTALIGN;
+		dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+		if (resno < PCI_BRIDGE_RESOURCES)
+			pci_update_resource(dev, resno);
+	}
 	return ret;
 }
 
+
 /* Sort resources by alignment */
 void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
 {
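
The restructured setup-res.c splits the work: _pci_assign_resource() retries the allocation on each parent bus for as long as the intervening bridge is transparent, and pci_reassign_resource() grows an already-assigned BAR by the requested add size plus alignment slack before calling it. The retry walk, reduced to its control flow (types and the allocator below are placeholders):

    #include <stddef.h>
    #include <stdio.h>

    struct bus {
        struct bus *parent;
        int transparent;    /* the bridge above this bus forwards unclaimed ranges */
        int free_space;     /* pretend capacity for the sketch                      */
    };

    /* Placeholder allocator: succeeds only if the bus has room. */
    static int alloc_on_bus(struct bus *b, int size)
    {
        return (b->free_space >= size) ? 0 : -1;
    }

    /* Walk towards the root while the path stays transparent, as in
     * _pci_assign_resource(). */
    static int assign(struct bus *b, int size)
    {
        int ret;

        while ((ret = alloc_on_bus(b, size))) {
            if (!b->parent || !b->transparent)
                break;
            b = b->parent;
        }
        return ret;
    }

    int main(void)
    {
        struct bus root = { NULL, 0, 4096 };
        struct bus child = { &root, 1, 0 };    /* transparent bridge, no room locally */

        printf("assign 1024: %d\n", assign(&child, 1024));    /* succeeds on the root */
        return 0;
    }
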
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index 7106b49b26e4..ffc5033ea9c9 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/module.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/power_supply.h> 26#include <linux/power_supply.h>
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index cc21fa2120be..ef8efadb58cb 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/module.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/power_supply.h> 26#include <linux/power_supply.h>
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index a675e31b4f13..d32d0d70f9ba 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -20,6 +20,7 @@
20#include <linux/s3c_adc_battery.h> 20#include <linux/s3c_adc_battery.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/module.h>
23 24
24#include <plat/adc.h> 25#include <plat/adc.h>
25 26
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index ee893581d4b7..ebe77dd87daf 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
505 rdev->dev.dma_mask = &rdev->dma_mask; 505 rdev->dev.dma_mask = &rdev->dma_mask;
506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
507 507
508 if ((rdev->pef & RIO_PEF_INB_DOORBELL) && 508 if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
509 (rdev->dst_ops & RIO_DST_OPS_DOORBELL))
510 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 509 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
511 0, 0xffff); 510 0, 0xffff);
512 511
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 3195dbd3ec34..44e91e598f8d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(rtc_irq_unregister);
639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) 639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
640{ 640{
641 /* 641 /*
642 * We unconditionally cancel the timer here, because otherwise 642 * We always cancel the timer here first, because otherwise
643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); 643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
644 * when we manage to start the timer before the callback 644 * when we manage to start the timer before the callback
645 * returns HRTIMER_RESTART. 645 * returns HRTIMER_RESTART.
@@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
708 int err = 0; 708 int err = 0;
709 unsigned long flags; 709 unsigned long flags;
710 710
711 if (freq <= 0 || freq > 5000) 711 if (freq <= 0 || freq > RTC_MAX_FREQ)
712 return -EINVAL; 712 return -EINVAL;
713retry: 713retry:
714 spin_lock_irqsave(&rtc->irq_task_lock, flags); 714 spin_lock_irqsave(&rtc->irq_task_lock, flags);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d333b2..14a42a1edc66 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
36 */ 36 */
37struct ep93xx_rtc { 37struct ep93xx_rtc {
38 void __iomem *mmio_base; 38 void __iomem *mmio_base;
39 struct rtc_device *rtc;
39}; 40};
40 41
41static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, 42static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
130{ 131{
131 struct ep93xx_rtc *ep93xx_rtc; 132 struct ep93xx_rtc *ep93xx_rtc;
132 struct resource *res; 133 struct resource *res;
133 struct rtc_device *rtc;
134 int err; 134 int err;
135 135
136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); 136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc); 154 platform_set_drvdata(pdev, ep93xx_rtc);
155 155
156 rtc = rtc_device_register(pdev->name, 156 ep93xx_rtc->rtc = rtc_device_register(pdev->name,
157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
158 if (IS_ERR(rtc)) { 158 if (IS_ERR(ep93xx_rtc->rtc)) {
159 err = PTR_ERR(rtc); 159 err = PTR_ERR(ep93xx_rtc->rtc);
160 goto exit; 160 goto exit;
161 } 161 }
162 162
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
167 return 0; 167 return 0;
168 168
169fail: 169fail:
170 rtc_device_unregister(rtc); 170 rtc_device_unregister(ep93xx_rtc->rtc);
171exit: 171exit:
172 platform_set_drvdata(pdev, NULL); 172 platform_set_drvdata(pdev, NULL);
173 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
176 176
177static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 177static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
178{ 178{
179 struct rtc_device *rtc = platform_get_drvdata(pdev); 179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
180 180
181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
182 platform_set_drvdata(pdev, NULL); 182 platform_set_drvdata(pdev, NULL);
183 rtc_device_unregister(rtc); 183 rtc_device_unregister(ep93xx_rtc->rtc);
184 pdev->dev.platform_data = NULL; 184 pdev->dev.platform_data = NULL;
185 185
186 return 0; 186 return 0;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 2dd3c0163272..d93a9608b1f0 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -35,6 +35,7 @@
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/rtc.h> 37#include <linux/rtc.h>
38#include <linux/sched.h>
38#include <linux/workqueue.h> 39#include <linux/workqueue.h>
39 40
40/* DryIce Register Definitions */ 41/* DryIce Register Definitions */
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f1708deae..c4cf05731118 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
85 time -= tm->tm_hour * 3600; 85 time -= tm->tm_hour * 3600;
86 tm->tm_min = time / 60; 86 tm->tm_min = time / 60;
87 tm->tm_sec = time - tm->tm_min * 60; 87 tm->tm_sec = time - tm->tm_min * 60;
88
89 tm->tm_isdst = 0;
88} 90}
89EXPORT_SYMBOL(rtc_time_to_tm); 91EXPORT_SYMBOL(rtc_time_to_tm);
90 92
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9329dbb9ebab..7639ab906f02 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
51 51
52static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 52static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
53 53
54static void s3c_rtc_alarm_clk_enable(bool enable)
55{
56 static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
57 static bool alarm_clk_enabled;
58 unsigned long irq_flags;
59
60 spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
61 if (enable) {
62 if (!alarm_clk_enabled) {
63 clk_enable(rtc_clk);
64 alarm_clk_enabled = true;
65 }
66 } else {
67 if (alarm_clk_enabled) {
68 clk_disable(rtc_clk);
69 alarm_clk_enabled = false;
70 }
71 }
72 spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
73}
74
54/* IRQ Handlers */ 75/* IRQ Handlers */
55 76
56static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) 77static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
64 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); 85 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
65 86
66 clk_disable(rtc_clk); 87 clk_disable(rtc_clk);
88
89 s3c_rtc_alarm_clk_enable(false);
90
67 return IRQ_HANDLED; 91 return IRQ_HANDLED;
68} 92}
69 93
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
97 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 121 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
98 clk_disable(rtc_clk); 122 clk_disable(rtc_clk);
99 123
124 s3c_rtc_alarm_clk_enable(enabled);
125
100 return 0; 126 return 0;
101} 127}
102 128
@@ -152,10 +178,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
152 goto retry_get_time; 178 goto retry_get_time;
153 } 179 }
154 180
155 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
156 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
157 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
158
159 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); 181 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
160 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); 182 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
161 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); 183 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
@@ -164,6 +186,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
164 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); 186 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
165 187
166 rtc_tm->tm_year += 100; 188 rtc_tm->tm_year += 100;
189
190 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
191 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
192 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
193
167 rtc_tm->tm_mon -= 1; 194 rtc_tm->tm_mon -= 1;
168 195
169 clk_disable(rtc_clk); 196 clk_disable(rtc_clk);
@@ -269,10 +296,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
269 clk_enable(rtc_clk); 296 clk_enable(rtc_clk);
270 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", 297 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
271 alrm->enabled, 298 alrm->enabled,
272 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 299 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
273 tm->tm_hour, tm->tm_min, tm->tm_sec); 300 tm->tm_hour, tm->tm_min, tm->tm_sec);
274 301
275
276 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 302 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
277 writeb(0x00, base + S3C2410_RTCALM); 303 writeb(0x00, base + S3C2410_RTCALM);
278 304
@@ -319,49 +345,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
319 return 0; 345 return 0;
320} 346}
321 347
322static int s3c_rtc_open(struct device *dev)
323{
324 struct platform_device *pdev = to_platform_device(dev);
325 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
326 int ret;
327
328 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
329 IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev);
330
331 if (ret) {
332 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
333 return ret;
334 }
335
336 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
337 IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev);
338
339 if (ret) {
340 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
341 goto tick_err;
342 }
343
344 return ret;
345
346 tick_err:
347 free_irq(s3c_rtc_alarmno, rtc_dev);
348 return ret;
349}
350
351static void s3c_rtc_release(struct device *dev)
352{
353 struct platform_device *pdev = to_platform_device(dev);
354 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
355
356 /* do not clear AIE here, it may be needed for wake */
357
358 free_irq(s3c_rtc_alarmno, rtc_dev);
359 free_irq(s3c_rtc_tickno, rtc_dev);
360}
361
362static const struct rtc_class_ops s3c_rtcops = { 348static const struct rtc_class_ops s3c_rtcops = {
363 .open = s3c_rtc_open,
364 .release = s3c_rtc_release,
365 .read_time = s3c_rtc_gettime, 349 .read_time = s3c_rtc_gettime,
366 .set_time = s3c_rtc_settime, 350 .set_time = s3c_rtc_settime,
367 .read_alarm = s3c_rtc_getalarm, 351 .read_alarm = s3c_rtc_getalarm,
@@ -425,6 +409,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
425{ 409{
426 struct rtc_device *rtc = platform_get_drvdata(dev); 410 struct rtc_device *rtc = platform_get_drvdata(dev);
427 411
412 free_irq(s3c_rtc_alarmno, rtc);
413 free_irq(s3c_rtc_tickno, rtc);
414
428 platform_set_drvdata(dev, NULL); 415 platform_set_drvdata(dev, NULL);
429 rtc_device_unregister(rtc); 416 rtc_device_unregister(rtc);
430 417
@@ -548,10 +535,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
548 535
549 s3c_rtc_setfreq(&pdev->dev, 1); 536 s3c_rtc_setfreq(&pdev->dev, 1);
550 537
538 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
539 IRQF_DISABLED, "s3c2410-rtc alarm", rtc);
540 if (ret) {
541 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
542 goto err_alarm_irq;
543 }
544
545 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
546 IRQF_DISABLED, "s3c2410-rtc tick", rtc);
547 if (ret) {
548 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
549 free_irq(s3c_rtc_alarmno, rtc);
550 goto err_tick_irq;
551 }
552
551 clk_disable(rtc_clk); 553 clk_disable(rtc_clk);
552 554
553 return 0; 555 return 0;
554 556
557 err_tick_irq:
558 free_irq(s3c_rtc_alarmno, rtc);
559
560 err_alarm_irq:
561 platform_set_drvdata(pdev, NULL);
562 rtc_device_unregister(rtc);
563
555 err_nortc: 564 err_nortc:
556 s3c_rtc_enable(pdev, 0); 565 s3c_rtc_enable(pdev, 0);
557 clk_disable(rtc_clk); 566 clk_disable(rtc_clk);
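
The new s3c_rtc_alarm_clk_enable() above keeps the RTC clock claimed for as long as the alarm is armed, using a lock-protected flag so clk_enable()/clk_disable() stay balanced no matter how often the alarm is toggled. A small single-threaded sketch of that balanced-gating idea; the counter and function names are illustrative, not the kernel clk API:

    #include <stdbool.h>
    #include <stdio.h>

    static int clk_refcount;          /* models the clk_enable()/clk_disable() pairing */
    static bool alarm_clk_enabled;    /* mirrors the static flag in the hunk */

    static void alarm_clk_set(bool enable)
    {
        /* The driver guards this with spin_lock_irqsave(); single-threaded here. */
        if (enable && !alarm_clk_enabled) {
            clk_refcount++;           /* clk_enable(rtc_clk) */
            alarm_clk_enabled = true;
        } else if (!enable && alarm_clk_enabled) {
            clk_refcount--;           /* clk_disable(rtc_clk) */
            alarm_clk_enabled = false;
        }
    }

    int main(void)
    {
        alarm_clk_set(true);    /* setaie(1) */
        alarm_clk_set(true);    /* a repeated enable must not claim the clock twice */
        alarm_clk_set(false);   /* alarm IRQ fired, release */
        printf("refcount = %d (balanced back to 0)\n", clk_refcount);
        return 0;
    }
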
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f778d6b2..20687d55e7a7 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
362 int res; 362 int res;
363 u8 rd_reg; 363 u8 rd_reg;
364 364
365#ifdef CONFIG_LOCKDEP
366 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
367 * we don't want and can't tolerate. Although it might be
368 * friendlier not to borrow this thread context...
369 */
370 local_irq_enable();
371#endif
372
373 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 365 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
374 if (res) 366 if (res)
375 goto out; 367 goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
428static int __devinit twl_rtc_probe(struct platform_device *pdev) 420static int __devinit twl_rtc_probe(struct platform_device *pdev)
429{ 421{
430 struct rtc_device *rtc; 422 struct rtc_device *rtc;
431 int ret = 0; 423 int ret = -EINVAL;
432 int irq = platform_get_irq(pdev, 0); 424 int irq = platform_get_irq(pdev, 0);
433 u8 rd_reg; 425 u8 rd_reg;
434 426
435 if (irq <= 0) 427 if (irq <= 0)
436 return -EINVAL; 428 goto out1;
437
438 rtc = rtc_device_register(pdev->name,
439 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
440 if (IS_ERR(rtc)) {
441 ret = PTR_ERR(rtc);
442 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
443 PTR_ERR(rtc));
444 goto out0;
445
446 }
447
448 platform_set_drvdata(pdev, rtc);
449 429
450 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 430 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
451 if (ret < 0) 431 if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
462 if (ret < 0) 442 if (ret < 0)
463 goto out1; 443 goto out1;
464 444
465 ret = request_irq(irq, twl_rtc_interrupt,
466 IRQF_TRIGGER_RISING,
467 dev_name(&rtc->dev), rtc);
468 if (ret < 0) {
469 dev_err(&pdev->dev, "IRQ is not free.\n");
470 goto out1;
471 }
472
473 if (twl_class_is_6030()) { 445 if (twl_class_is_6030()) {
474 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, 446 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
475 REG_INT_MSK_LINE_A); 447 REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
480 /* Check RTC module status, Enable if it is off */ 452 /* Check RTC module status, Enable if it is off */
481 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); 453 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
482 if (ret < 0) 454 if (ret < 0)
483 goto out2; 455 goto out1;
484 456
485 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { 457 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
486 dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); 458 dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
487 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; 459 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
488 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); 460 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
489 if (ret < 0) 461 if (ret < 0)
490 goto out2; 462 goto out1;
491 } 463 }
492 464
493 /* init cached IRQ enable bits */ 465 /* init cached IRQ enable bits */
494 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 466 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
495 if (ret < 0) 467 if (ret < 0)
468 goto out1;
469
470 rtc = rtc_device_register(pdev->name,
471 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
472 if (IS_ERR(rtc)) {
473 ret = PTR_ERR(rtc);
474 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
475 PTR_ERR(rtc));
476 goto out1;
477 }
478
479 ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
480 IRQF_TRIGGER_RISING,
481 dev_name(&rtc->dev), rtc);
482 if (ret < 0) {
483 dev_err(&pdev->dev, "IRQ is not free.\n");
496 goto out2; 484 goto out2;
485 }
497 486
498 return ret; 487 platform_set_drvdata(pdev, rtc);
488 return 0;
499 489
500out2: 490out2:
501 free_irq(irq, rtc);
502out1:
503 rtc_device_unregister(rtc); 491 rtc_device_unregister(rtc);
504out0: 492out1:
505 return ret; 493 return ret;
506} 494}
507 495
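
The twl probe rework above registers the interrupt with request_threaded_irq() and a NULL primary handler, so the handler runs in a kernel thread where the sleeping register reads are legal and the old CONFIG_LOCKDEP local_irq_enable() workaround can go. A minimal kernel-style fragment of that registration pattern; my_rtc_isr() and my_probe_irq() are placeholders, only request_threaded_irq() and its flags are the real API:

    #include <linux/interrupt.h>

    /* Runs in a kernel thread, not hard-IRQ context, so it may sleep. */
    static irqreturn_t my_rtc_isr(int irq, void *dev_id)
    {
            /* ... sleeping bus reads of the RTC status register ... */
            return IRQ_HANDLED;
    }

    static int my_probe_irq(int irq, void *rtc)
    {
            /* NULL primary handler: the core wakes the thread that
             * calls my_rtc_isr() when the line fires. */
            return request_threaded_irq(irq, NULL, my_rtc_isr,
                                        IRQF_TRIGGER_RISING, "my-rtc", rtc);
    }
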
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index eb4e034378cd..f1a2016829fc 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)
249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
250{ 250{
251 struct dasd_profile_info_t *data; 251 struct dasd_profile_info_t *data;
252 int rc = 0;
252 253
253 data = kmalloc(sizeof(*data), GFP_KERNEL); 254 data = kmalloc(sizeof(*data), GFP_KERNEL);
254 if (!data) 255 if (!data)
@@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
279 spin_unlock_bh(&block->profile.lock); 280 spin_unlock_bh(&block->profile.lock);
280 } else { 281 } else {
281 spin_unlock_bh(&block->profile.lock); 282 spin_unlock_bh(&block->profile.lock);
282 return -EIO; 283 rc = -EIO;
284 goto out;
283 } 285 }
284 if (copy_to_user(argp, data, sizeof(*data))) 286 if (copy_to_user(argp, data, sizeof(*data)))
285 return -EFAULT; 287 rc = -EFAULT;
286 return 0; 288out:
289 kfree(data);
290 return rc;
287} 291}
288#else 292#else
289static int dasd_ioctl_reset_profile(struct dasd_block *block) 293static int dasd_ioctl_reset_profile(struct dasd_block *block)
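
The dasd_ioctl_read_profile() change above routes every exit after the kmalloc() through one label so the buffer is always freed, instead of returning -EIO or -EFAULT directly and leaking it. A toy userspace version of the same rc-plus-goto shape; the error numbers are only stand-ins for -ENOMEM and -EIO:

    #include <stdlib.h>
    #include <string.h>

    static int read_profile(void *user_buf, int profile_on)
    {
        int rc = 0;
        char *data = calloc(1, 64);

        if (!data)
            return -12;               /* -ENOMEM: nothing to free yet */

        if (!profile_on) {
            rc = -5;                  /* -EIO, but still fall through to free() */
            goto out;
        }
        memcpy(user_buf, data, 64);   /* stands in for copy_to_user() */
    out:
        free(data);                   /* runs on every path once data exists */
        return rc;
    }

    int main(void)
    {
        char buf[64];
        return read_profile(buf, 1) ? 1 : 0;
    }
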
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index be55fb2b1b1c..837e010299a8 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id)
383 switch (sccb->header.response_code) { 383 switch (sccb->header.response_code) {
384 case 0x0020: 384 case 0x0020:
385 set_bit(id, sclp_storage_ids); 385 set_bit(id, sclp_storage_ids);
386 for (i = 0; i < sccb->assigned; i++) 386 for (i = 0; i < sccb->assigned; i++) {
387 sclp_unassign_storage(sccb->entries[i] >> 16); 387 if (sccb->entries[i])
388 sclp_unassign_storage(sccb->entries[i] >> 16);
389 }
388 break; 390 break;
389 default: 391 default:
390 rc = -EIO; 392 rc = -EIO;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 9ae80cd5953b..dba72a4e6a1c 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
563 nopout_wqe->itt = ((u16)task->itt | 563 nopout_wqe->itt = ((u16)task->itt |
564 (ISCSI_TASK_TYPE_MPATH << 564 (ISCSI_TASK_TYPE_MPATH <<
565 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 565 ISCSI_TMF_REQUEST_TYPE_SHIFT));
566 nopout_wqe->ttt = nopout_hdr->ttt; 566 nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
567 nopout_wqe->flags = 0; 567 nopout_wqe->flags = 0;
568 if (!unsol) 568 if (!unsol)
569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
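
The bnx2i fix above converts the target transfer tag from the big-endian wire order it has in the iSCSI header into CPU order before programming it into the nop-out WQE. A standalone illustration of what be32_to_cpu() does on a little-endian host, using glibc's <endian.h> helpers in place of the kernel ones; on a big-endian machine both values print the same:

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
        uint32_t wire_ttt = htobe32(0x12345678u);   /* as it sits in the header */

        printf("raw header dword    : 0x%08x\n", (unsigned int)wire_ttt);
        printf("after be32_to_cpu() : 0x%08x\n", (unsigned int)be32toh(wire_ttt));
        return 0;
    }
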
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 83aa3ac52c40..9d3d81778af1 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
432 u8 flogi_maddr[ETH_ALEN]; 432 u8 flogi_maddr[ETH_ALEN];
433 const struct net_device_ops *ops; 433 const struct net_device_ops *ops;
434 434
435 rtnl_lock();
436
435 /* 437 /*
436 * Don't listen for Ethernet packets anymore. 438 * Don't listen for Ethernet packets anymore.
437 * synchronize_net() ensures that the packet handlers are not running 439 * synchronize_net() ensures that the packet handlers are not running
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
461 " specific feature for LLD.\n"); 463 " specific feature for LLD.\n");
462 } 464 }
463 465
466 rtnl_unlock();
467
464 /* Release the self-reference taken during fcoe_interface_create() */ 468 /* Release the self-reference taken during fcoe_interface_create() */
465 fcoe_interface_put(fcoe); 469 fcoe_interface_put(fcoe);
466} 470}
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)
1951 fcoe_if_destroy(port->lport); 1955 fcoe_if_destroy(port->lport);
1952 1956
1953 /* Do not tear down the fcoe interface for NPIV port */ 1957 /* Do not tear down the fcoe interface for NPIV port */
1954 if (!npiv) { 1958 if (!npiv)
1955 rtnl_lock();
1956 fcoe_interface_cleanup(fcoe); 1959 fcoe_interface_cleanup(fcoe);
1957 rtnl_unlock();
1958 }
1959 1960
1960 mutex_unlock(&fcoe_config_mutex); 1961 mutex_unlock(&fcoe_config_mutex);
1961} 1962}
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2009 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2010 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2010 netdev->name); 2011 netdev->name);
2011 rc = -EIO; 2012 rc = -EIO;
2013 rtnl_unlock();
2012 fcoe_interface_cleanup(fcoe); 2014 fcoe_interface_cleanup(fcoe);
2013 goto out_nodev; 2015 goto out_nortnl;
2014 } 2016 }
2015 2017
2016 /* Make this the "master" N_Port */ 2018 /* Make this the "master" N_Port */
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2027 2029
2028out_nodev: 2030out_nodev:
2029 rtnl_unlock(); 2031 rtnl_unlock();
2032out_nortnl:
2030 mutex_unlock(&fcoe_config_mutex); 2033 mutex_unlock(&fcoe_config_mutex);
2031 return rc; 2034 return rc;
2032} 2035}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ec61bdb833ac..b200b736b000 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
677 removed[*nremoved] = h->dev[entry]; 677 removed[*nremoved] = h->dev[entry];
678 (*nremoved)++; 678 (*nremoved)++;
679
680 /*
681 * New physical devices won't have target/lun assigned yet
682 * so we need to preserve the values in the slot we are replacing.
683 */
684 if (new_entry->target == -1) {
685 new_entry->target = h->dev[entry]->target;
686 new_entry->lun = h->dev[entry]->lun;
687 }
688
679 h->dev[entry] = new_entry; 689 h->dev[entry] = new_entry;
680 added[*nadded] = new_entry; 690 added[*nadded] = new_entry;
681 (*nadded)++; 691 (*nadded)++;
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1548} 1558}
1549 1559
1550static int hpsa_update_device_info(struct ctlr_info *h, 1560static int hpsa_update_device_info(struct ctlr_info *h,
1551 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1561 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
1562 unsigned char *is_OBDR_device)
1552{ 1563{
1553#define OBDR_TAPE_INQ_SIZE 49 1564
1565#define OBDR_SIG_OFFSET 43
1566#define OBDR_TAPE_SIG "$DR-10"
1567#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
1568#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
1569
1554 unsigned char *inq_buff; 1570 unsigned char *inq_buff;
1571 unsigned char *obdr_sig;
1555 1572
1556 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1573 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1557 if (!inq_buff) 1574 if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1583 else 1600 else
1584 this_device->raid_level = RAID_UNKNOWN; 1601 this_device->raid_level = RAID_UNKNOWN;
1585 1602
1603 if (is_OBDR_device) {
1604 /* See if this is a One-Button-Disaster-Recovery device
1605 * by looking for "$DR-10" at offset 43 in inquiry data.
1606 */
1607 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
1608 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
1609 strncmp(obdr_sig, OBDR_TAPE_SIG,
1610 OBDR_SIG_LEN) == 0);
1611 }
1612
1586 kfree(inq_buff); 1613 kfree(inq_buff);
1587 return 0; 1614 return 0;
1588 1615
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1716 return 0; 1743 return 0;
1717 } 1744 }
1718 1745
1719 if (hpsa_update_device_info(h, scsi3addr, this_device)) 1746 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1720 return 0; 1747 return 0;
1721 (*nmsa2xxx_enclosures)++; 1748 (*nmsa2xxx_enclosures)++;
1722 hpsa_set_bus_target_lun(this_device, bus, target, 0); 1749 hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1808 */ 1835 */
1809 struct ReportLUNdata *physdev_list = NULL; 1836 struct ReportLUNdata *physdev_list = NULL;
1810 struct ReportLUNdata *logdev_list = NULL; 1837 struct ReportLUNdata *logdev_list = NULL;
1811 unsigned char *inq_buff = NULL;
1812 u32 nphysicals = 0; 1838 u32 nphysicals = 0;
1813 u32 nlogicals = 0; 1839 u32 nlogicals = 0;
1814 u32 ndev_allocated = 0; 1840 u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1824 GFP_KERNEL); 1850 GFP_KERNEL);
1825 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1851 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1826 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1852 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1827 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1828 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1853 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1829 1854
1830 if (!currentsd || !physdev_list || !logdev_list || 1855 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1831 !inq_buff || !tmpdevice) {
1832 dev_err(&h->pdev->dev, "out of memory\n"); 1856 dev_err(&h->pdev->dev, "out of memory\n");
1833 goto out; 1857 goto out;
1834 } 1858 }
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1863 /* adjust our table of devices */ 1887 /* adjust our table of devices */
1864 nmsa2xxx_enclosures = 0; 1888 nmsa2xxx_enclosures = 0;
1865 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1889 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1866 u8 *lunaddrbytes; 1890 u8 *lunaddrbytes, is_OBDR = 0;
1867 1891
1868 /* Figure out where the LUN ID info is coming from */ 1892 /* Figure out where the LUN ID info is coming from */
1869 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1893 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1874 continue; 1898 continue;
1875 1899
1876 /* Get device type, vendor, model, device id */ 1900 /* Get device type, vendor, model, device id */
1877 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1901 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1902 &is_OBDR))
1878 continue; /* skip it if we can't talk to it. */ 1903 continue; /* skip it if we can't talk to it. */
1879 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1904 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1880 tmpdevice); 1905 tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1898 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1923 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1899 1924
1900 switch (this_device->devtype) { 1925 switch (this_device->devtype) {
1901 case TYPE_ROM: { 1926 case TYPE_ROM:
1902 /* We don't *really* support actual CD-ROM devices, 1927 /* We don't *really* support actual CD-ROM devices,
1903 * just "One Button Disaster Recovery" tape drive 1928 * just "One Button Disaster Recovery" tape drive
1904 * which temporarily pretends to be a CD-ROM drive. 1929 * which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1906 * device by checking for "$DR-10" in bytes 43-48 of 1931 * device by checking for "$DR-10" in bytes 43-48 of
1907 * the inquiry data. 1932 * the inquiry data.
1908 */ 1933 */
1909 char obdr_sig[7]; 1934 if (is_OBDR)
1910#define OBDR_TAPE_SIG "$DR-10" 1935 ncurrent++;
1911 strncpy(obdr_sig, &inq_buff[43], 6);
1912 obdr_sig[6] = '\0';
1913 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1914 /* Not OBDR device, ignore it. */
1915 break;
1916 }
1917 ncurrent++;
1918 break; 1936 break;
1919 case TYPE_DISK: 1937 case TYPE_DISK:
1920 if (i < nphysicals) 1938 if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
1947 for (i = 0; i < ndev_allocated; i++) 1965 for (i = 0; i < ndev_allocated; i++)
1948 kfree(currentsd[i]); 1966 kfree(currentsd[i]);
1949 kfree(currentsd); 1967 kfree(currentsd);
1950 kfree(inq_buff);
1951 kfree(physdev_list); 1968 kfree(physdev_list);
1952 kfree(logdev_list); 1969 kfree(logdev_list);
1953} 1970}
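
The hpsa rework above folds the One-Button-Disaster-Recovery probe into hpsa_update_device_info(): an OBDR tape drive reports itself as a CD-ROM but carries the "$DR-10" signature at byte 43 of its standard INQUIRY data. A standalone version of exactly that signature test, with a fabricated INQUIRY buffer:

    #include <stdio.h>
    #include <string.h>

    #define OBDR_SIG_OFFSET 43
    #define OBDR_TAPE_SIG   "$DR-10"
    #define OBDR_SIG_LEN    (sizeof(OBDR_TAPE_SIG) - 1)

    int main(void)
    {
        unsigned char inq[OBDR_SIG_OFFSET + OBDR_SIG_LEN] = { 0 };

        /* Fake INQUIRY response with the signature planted at offset 43. */
        memcpy(&inq[OBDR_SIG_OFFSET], OBDR_TAPE_SIG, OBDR_SIG_LEN);

        int is_obdr = memcmp(&inq[OBDR_SIG_OFFSET], OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
        printf("OBDR device: %s\n", is_obdr ? "yes" : "no");
        return 0;
    }
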
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 26072f1e9852..6981b773a88d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
531 break; 531 break;
532 532
533 case SCU_COMPLETION_TYPE_EVENT: 533 case SCU_COMPLETION_TYPE_EVENT:
534 sci_controller_event_completion(ihost, ent);
535 break;
536
534 case SCU_COMPLETION_TYPE_NOTIFY: { 537 case SCU_COMPLETION_TYPE_NOTIFY: {
535 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << 538 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
536 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); 539 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
1091 struct isci_request *request; 1094 struct isci_request *request;
1092 struct isci_request *next_request; 1095 struct isci_request *next_request;
1093 struct sas_task *task; 1096 struct sas_task *task;
1097 u16 active;
1094 1098
1095 INIT_LIST_HEAD(&completed_request_list); 1099 INIT_LIST_HEAD(&completed_request_list);
1096 INIT_LIST_HEAD(&errored_request_list); 1100 INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
1181 } 1185 }
1182 } 1186 }
1183 1187
1188 /* the coalescence timeout doubles at each encoding step, so
1189 * update it based on the ilog2 value of the outstanding requests
1190 */
1191 active = isci_tci_active(ihost);
1192 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1193 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1194 &ihost->smu_registers->interrupt_coalesce_control);
1184} 1195}
1185 1196
1186/** 1197/**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1482 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1472 1483
1473 /* set the default interrupt coalescence number and timeout value. */ 1484 /* set the default interrupt coalescence number and timeout value. */
1474 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1485 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1475} 1486}
1476 1487
1477static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) 1488static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 062101a39f79..9f33831a2f04 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) 369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) 370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
371 371
372/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
373#define ISCI_COALESCE_BASE 9
374
372/* expander attached sata devices require 3 rnc slots */ 375/* expander attached sata devices require 3 rnc slots */
373static inline int sci_remote_device_node_count(struct isci_remote_device *idev) 376static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
374{ 377{
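
Per the two isci hunks above, the coalescence TIMER field is an exponent (the delay doubles at each encoding step) and ISCI_COALESCE_BASE of 9 corresponds to roughly 3 to 5 us, so the completion path programs base + ilog2(outstanding requests). A standalone sketch of that arithmetic; the microsecond figure is only the rough scale quoted in the header comment:

    #include <stdio.h>

    #define ISCI_COALESCE_BASE 9    /* ~3-5 us per command, per the header comment */

    /* Integer log2 for non-zero values, like the kernel's ilog2(). */
    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        for (unsigned int active = 1; active <= 64; active *= 4) {
            unsigned int timer = ISCI_COALESCE_BASE + ilog2_u32(active);
            printf("active=%2u -> TIMER encoding %u (delay doubles per step)\n",
                   active, timer);
        }
        return 0;
    }
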
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 61e0d09e2b57..29aa34efb0f5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -59,10 +59,19 @@
59#include <linux/firmware.h> 59#include <linux/firmware.h>
60#include <linux/efi.h> 60#include <linux/efi.h>
61#include <asm/string.h> 61#include <asm/string.h>
62#include <scsi/scsi_host.h>
62#include "isci.h" 63#include "isci.h"
63#include "task.h" 64#include "task.h"
64#include "probe_roms.h" 65#include "probe_roms.h"
65 66
67#define MAJ 1
68#define MIN 0
69#define BUILD 0
70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
71 __stringify(BUILD)
72
73MODULE_VERSION(DRV_VERSION);
74
66static struct scsi_transport_template *isci_transport_template; 75static struct scsi_transport_template *isci_transport_template;
67 76
68static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { 77static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
113module_param(max_concurr_spinup, byte, 0); 122module_param(max_concurr_spinup, byte, 0);
114MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
115 124
125static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
126{
127 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
128 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
129 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
130
131 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
132}
133
134static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
135
136struct device_attribute *isci_host_attrs[] = {
137 &dev_attr_isci_id,
138 NULL
139};
140
116static struct scsi_host_template isci_sht = { 141static struct scsi_host_template isci_sht = {
117 142
118 .module = THIS_MODULE, 143 .module = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
138 .slave_alloc = sas_slave_alloc, 163 .slave_alloc = sas_slave_alloc,
139 .target_destroy = sas_target_destroy, 164 .target_destroy = sas_target_destroy,
140 .ioctl = sas_ioctl, 165 .ioctl = sas_ioctl,
166 .shost_attrs = isci_host_attrs,
141}; 167};
142 168
143static struct sas_domain_function_template isci_transport_ops = { 169static struct sas_domain_function_template isci_transport_ops = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
232 return 0; 258 return 0;
233} 259}
234 260
235static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
236{
237 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
238 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
239 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
240
241 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
242}
243
244static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
245
246static void isci_unregister(struct isci_host *isci_host) 261static void isci_unregister(struct isci_host *isci_host)
247{ 262{
248 struct Scsi_Host *shost; 263 struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
251 return; 266 return;
252 267
253 shost = isci_host->shost; 268 shost = isci_host->shost;
254 device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
255 269
256 sas_unregister_ha(&isci_host->sas_ha); 270 sas_unregister_ha(&isci_host->sas_ha);
257 271
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
415 if (err) 429 if (err)
416 goto err_shost_remove; 430 goto err_shost_remove;
417 431
418 err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
419 if (err)
420 goto err_unregister_ha;
421
422 return isci_host; 432 return isci_host;
423 433
424 err_unregister_ha:
425 sas_unregister_ha(&(isci_host->sas_ha));
426 err_shost_remove: 434 err_shost_remove:
427 scsi_remove_host(shost); 435 scsi_remove_host(shost);
428 err_shost: 436 err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
540{ 548{
541 int err; 549 int err;
542 550
543 pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); 551 pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
552 DRV_NAME, DRV_VERSION);
544 553
545 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); 554 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
546 if (!isci_transport_template) 555 if (!isci_transport_template)
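
With isci_id placed in the template's shost_attrs array above, the SCSI midlayer creates the attribute together with the host, so it appears under /sys/class/scsi_host/. A small reader, assuming the controller is host0 (substitute the real host number):

    #include <stdio.h>

    int main(void)
    {
        char id[16] = "";
        FILE *f = fopen("/sys/class/scsi_host/host0/isci_id", "r");

        if (!f) {
            perror("open isci_id");
            return 1;
        }
        if (fgets(id, sizeof(id), f))
            printf("controller id: %s", id);   /* value already ends with a newline */
        fclose(f);
        return 0;
    }
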
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 79313a7a2356..430fc8ff014a 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
104 u32 parity_count = 0; 104 u32 parity_count = 0;
105 u32 llctl, link_rate; 105 u32 llctl, link_rate;
106 u32 clksm_value = 0; 106 u32 clksm_value = 0;
107 u32 sp_timeouts = 0;
107 108
108 iphy->link_layer_registers = reg; 109 iphy->link_layer_registers = reg;
109 110
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 212 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
212 writel(llctl, &iphy->link_layer_registers->link_layer_control); 213 writel(llctl, &iphy->link_layer_registers->link_layer_control);
213 214
215 sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
216
217 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
218 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
219
220 /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
221 * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
222 */
223 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
224
225 writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
226
214 if (is_a2(ihost->pdev)) { 227 if (is_a2(ihost->pdev)) {
215 /* Program the max ARB time for the PHY to 700us so we inter-operate with 228 /* Program the max ARB time for the PHY to 700us so we inter-operate with
216 * the PMC expander which shuts down PHYs if the expander PHY generates too 229 * the PMC expander which shuts down PHYs if the expander PHY generates too
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 9b266c7428e8..00afc738bbed 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC 1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC
1300#define SCU_AFE_LUTCR_OFFSET 0x00E0 1300#define SCU_AFE_LUTCR_OFFSET 0x00E0
1301 1301
1302#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
1303#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
1304#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
1305#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
1306#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
1307#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
1308#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
1309#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
1310
1311#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
1312 SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
1313
1302#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) 1314#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
1303#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) 1315#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
1304#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) 1316#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
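
The new timeout-value masks above put RATE_CHANGE in bits 31:24, so the phy.c hunk's clear-then-set sequence replaces the reset default of 0x36 (54 us) with 0x3B (59 us). A standalone check of that field packing; only the shift/mask arithmetic is reproduced here, not the SCU_GEN_VALUE macro itself:

    #include <stdio.h>
    #include <stdint.h>

    #define RATE_CHANGE_SHIFT 24
    #define RATE_CHANGE_MASK  0xFF000000u

    static uint32_t rate_change_val(uint32_t value)
    {
        return (value << RATE_CHANGE_SHIFT) & RATE_CHANGE_MASK;
    }

    int main(void)
    {
        uint32_t sp_timeouts = rate_change_val(0x36);   /* reset default, 54 us */

        sp_timeouts &= ~RATE_CHANGE_MASK;               /* clear the old field */
        sp_timeouts |= rate_change_val(0x3B);           /* 59 us, as in phy.c */

        printf("sas_phy_timeouts = 0x%08x\n", (unsigned int)sp_timeouts);  /* 0x3b000000 */
        return 0;
    }
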
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index a46e07ac789f..b5d3a8c4d329 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
733 return SCI_SUCCESS; 733 return SCI_SUCCESS;
734 case SCI_REQ_TASK_WAIT_TC_RESP: 734 case SCI_REQ_TASK_WAIT_TC_RESP:
735 /* The task frame was already confirmed to have been
736 * sent by the SCU HW. Since the state machine is
737 * now only waiting for the task response itself,
738 * abort the request and complete it immediately
739 * and don't wait for the task response.
740 */
735 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 741 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
736 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 742 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
737 return SCI_SUCCESS; 743 return SCI_SUCCESS;
738 case SCI_REQ_ABORTING: 744 case SCI_REQ_ABORTING:
739 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 745 /* If a request has a termination requested twice, return
740 return SCI_SUCCESS; 746 * a failure indication, since HW confirmation of the first
747 * abort is still outstanding.
748 */
741 case SCI_REQ_COMPLETED: 749 case SCI_REQ_COMPLETED:
742 default: 750 default:
743 dev_warn(&ireq->owning_controller->pdev->dev, 751 dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
2399 } 2407 }
2400} 2408}
2401 2409
2402static void isci_request_process_stp_response(struct sas_task *task, 2410static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2403 void *response_buffer)
2404{ 2411{
2405 struct dev_to_host_fis *d2h_reg_fis = response_buffer;
2406 struct task_status_struct *ts = &task->task_status; 2412 struct task_status_struct *ts = &task->task_status;
2407 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2413 struct ata_task_resp *resp = (void *)&ts->buf[0];
2408 2414
2409 resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); 2415 resp->frame_len = sizeof(*fis);
2410 memcpy(&resp->ending_fis[0], response_buffer + 16, 24); 2416 memcpy(resp->ending_fis, fis, sizeof(*fis));
2411 ts->buf_valid_size = sizeof(*resp); 2417 ts->buf_valid_size = sizeof(*resp);
2412 2418
2413 /** 2419 /* If the device fault bit is set in the status register, then
2414 * If the device fault bit is set in the status register, then
2415 * set the sense data and return. 2420 * set the sense data and return.
2416 */ 2421 */
2417 if (d2h_reg_fis->status & ATA_DF) 2422 if (fis->status & ATA_DF)
2418 ts->stat = SAS_PROTO_RESPONSE; 2423 ts->stat = SAS_PROTO_RESPONSE;
2419 else 2424 else
2420 ts->stat = SAM_STAT_GOOD; 2425 ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2428{ 2433{
2429 struct sas_task *task = isci_request_access_task(request); 2434 struct sas_task *task = isci_request_access_task(request);
2430 struct ssp_response_iu *resp_iu; 2435 struct ssp_response_iu *resp_iu;
2431 void *resp_buf;
2432 unsigned long task_flags; 2436 unsigned long task_flags;
2433 struct isci_remote_device *idev = isci_lookup_device(task->dev); 2437 struct isci_remote_device *idev = isci_lookup_device(task->dev);
2434 enum service_response response = SAS_TASK_UNDELIVERED; 2438 enum service_response response = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2565 task); 2569 task);
2566 2570
2567 if (sas_protocol_ata(task->task_proto)) { 2571 if (sas_protocol_ata(task->task_proto)) {
2568 resp_buf = &request->stp.rsp; 2572 isci_process_stp_response(task, &request->stp.rsp);
2569 isci_request_process_stp_response(task,
2570 resp_buf);
2571 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2573 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2572 2574
2573 /* crack the iu response buffer. */ 2575 /* crack the iu response buffer. */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index e9e1e2abacb9..16f88ab939c8 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
72 */ 72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); 74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); 75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
76 76
77 /* 77 /*
78 * The Unsolicited Frame buffers are set at the start of the UF 78 * The Unsolicited Frame buffers are set at the start of the UF
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 31cb9506f52d..75d896686f5a 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
214 * starting address of the UF address table. 214 * starting address of the UF address table.
215 * 64-bit pointers are required by the hardware. 215 * 64-bit pointers are required by the hardware.
216 */ 216 */
217 dma_addr_t *array; 217 u64 *array;
218 218
219 /** 219 /**
220 * This field specifies the physical address location for the UF 220 * This field specifies the physical address location for the UF
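
The switch from dma_addr_t to u64 above matters because the controller's UF address table always needs 8-byte entries, while sizeof(dma_addr_t) is only 4 on 32-bit kernels built without 64-bit DMA addressing, which would undersize the allocation computed in the first hunk. A standalone illustration of the size difference; the frame count is made up, and uint32_t stands in for a 32-bit dma_addr_t:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_UNSOLICITED_FRAMES 128   /* illustrative count only */

    int main(void)
    {
        size_t undersized = MAX_UNSOLICITED_FRAMES * sizeof(uint32_t);
        size_t required   = MAX_UNSOLICITED_FRAMES * sizeof(uint64_t);

        printf("table sized from 32-bit entries: %zu bytes\n", undersized);
        printf("table the hardware expects     : %zu bytes\n", required);
        return 0;
    }
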
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 01ff082dc34c..d261e982a2fa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
494 */ 494 */
495 error = lport->tt.frame_send(lport, fp); 495 error = lport->tt.frame_send(lport, fp);
496 496
497 if (fh->fh_type == FC_TYPE_BLS)
498 return error;
499
497 /* 500 /*
498 * Update the exchange and sequence flags, 501 * Update the exchange and sequence flags,
499 * assuming all frames for the sequence have been sent. 502 * assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
575} 578}
576 579
577/** 580/**
578 * fc_seq_exch_abort() - Abort an exchange and sequence 581 * fc_exch_abort_locked() - Abort an exchange
579 * @req_sp: The sequence to be aborted 582 * @ep: The exchange to be aborted
580 * @timer_msec: The period of time to wait before aborting 583 * @timer_msec: The period of time to wait before aborting
581 * 584 *
582 * Generally called because of a timeout or an abort from the upper layer. 585 * Locking notes: Called with exch lock held
586 *
587 * Return value: 0 on success else error code
583 */ 588 */
584static int fc_seq_exch_abort(const struct fc_seq *req_sp, 589static int fc_exch_abort_locked(struct fc_exch *ep,
585 unsigned int timer_msec) 590 unsigned int timer_msec)
586{ 591{
587 struct fc_seq *sp; 592 struct fc_seq *sp;
588 struct fc_exch *ep;
589 struct fc_frame *fp; 593 struct fc_frame *fp;
590 int error; 594 int error;
591 595
592 ep = fc_seq_exch(req_sp);
593
594 spin_lock_bh(&ep->ex_lock);
595 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || 596 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
596 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { 597 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
597 spin_unlock_bh(&ep->ex_lock);
598 return -ENXIO; 598 return -ENXIO;
599 }
600 599
601 /* 600 /*
602 * Send the abort on a new sequence if possible. 601 * Send the abort on a new sequence if possible.
603 */ 602 */
604 sp = fc_seq_start_next_locked(&ep->seq); 603 sp = fc_seq_start_next_locked(&ep->seq);
605 if (!sp) { 604 if (!sp)
606 spin_unlock_bh(&ep->ex_lock);
607 return -ENOMEM; 605 return -ENOMEM;
608 }
609 606
610 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; 607 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
611 if (timer_msec) 608 if (timer_msec)
612 fc_exch_timer_set_locked(ep, timer_msec); 609 fc_exch_timer_set_locked(ep, timer_msec);
613 spin_unlock_bh(&ep->ex_lock);
614 610
615 /* 611 /*
616 * If not logged into the fabric, don't send ABTS but leave 612 * If not logged into the fabric, don't send ABTS but leave
@@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
633} 629}
634 630
635/** 631/**
632 * fc_seq_exch_abort() - Abort an exchange and sequence
633 * @req_sp: The sequence to be aborted
634 * @timer_msec: The period of time to wait before aborting
635 *
636 * Generally called because of a timeout or an abort from the upper layer.
637 *
638 * Return value: 0 on success else error code
639 */
640static int fc_seq_exch_abort(const struct fc_seq *req_sp,
641 unsigned int timer_msec)
642{
643 struct fc_exch *ep;
644 int error;
645
646 ep = fc_seq_exch(req_sp);
647 spin_lock_bh(&ep->ex_lock);
648 error = fc_exch_abort_locked(ep, timer_msec);
649 spin_unlock_bh(&ep->ex_lock);
650 return error;
651}
652
653/**
636 * fc_exch_timeout() - Handle exchange timer expiration 654 * fc_exch_timeout() - Handle exchange timer expiration
637 * @work: The work_struct identifying the exchange that timed out 655 * @work: The work_struct identifying the exchange that timed out
638 */ 656 */
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
1715 int rc = 1; 1733 int rc = 1;
1716 1734
1717 spin_lock_bh(&ep->ex_lock); 1735 spin_lock_bh(&ep->ex_lock);
1736 fc_exch_abort_locked(ep, 0);
1718 ep->state |= FC_EX_RST_CLEANUP; 1737 ep->state |= FC_EX_RST_CLEANUP;
1719 if (cancel_delayed_work(&ep->timeout_work)) 1738 if (cancel_delayed_work(&ep->timeout_work))
1720 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ 1739 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1962 struct fc_exch *ep; 1981 struct fc_exch *ep;
1963 struct fc_seq *sp = NULL; 1982 struct fc_seq *sp = NULL;
1964 struct fc_frame_header *fh; 1983 struct fc_frame_header *fh;
1984 struct fc_fcp_pkt *fsp = NULL;
1965 int rc = 1; 1985 int rc = 1;
1966 1986
1967 ep = fc_exch_alloc(lport, fp); 1987 ep = fc_exch_alloc(lport, fp);
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1984 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 2004 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1985 sp->cnt++; 2005 sp->cnt++;
1986 2006
1987 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) 2007 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2008 fsp = fr_fsp(fp);
1988 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 2009 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2010 }
1989 2011
1990 if (unlikely(lport->tt.frame_send(lport, fp))) 2012 if (unlikely(lport->tt.frame_send(lport, fp)))
1991 goto err; 2013 goto err;
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1999 spin_unlock_bh(&ep->ex_lock); 2021 spin_unlock_bh(&ep->ex_lock);
2000 return sp; 2022 return sp;
2001err: 2023err:
2002 fc_fcp_ddp_done(fr_fsp(fp)); 2024 if (fsp)
2025 fc_fcp_ddp_done(fsp);
2003 rc = fc_exch_done_locked(ep); 2026 rc = fc_exch_done_locked(ep);
2004 spin_unlock_bh(&ep->ex_lock); 2027 spin_unlock_bh(&ep->ex_lock);
2005 if (!rc) 2028 if (!rc)
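
The fc_exch refactor above splits the abort into fc_exch_abort_locked(), which assumes the exchange lock is already held, so fc_exch_reset() can issue the abort while it still owns ep->ex_lock, and fc_seq_exch_abort() shrinks to a take-lock/call/drop-lock wrapper. A sketch of that _locked/wrapper split using a pthread mutex in place of the kernel spinlock; all function names here are placeholders:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must already hold ex_lock, like fc_exch_abort_locked(). */
    static int do_abort_locked(void)
    {
        /* ... state checks and the ABTS send happen here ... */
        return 0;
    }

    /* Thin public wrapper that owns the locking, like fc_seq_exch_abort(). */
    static int do_abort(void)
    {
        int error;

        pthread_mutex_lock(&ex_lock);
        error = do_abort_locked();
        pthread_mutex_unlock(&ex_lock);
        return error;
    }

    /* A reset path that already holds the lock calls the _locked variant
     * directly instead of deadlocking on a second acquisition. */
    static void do_reset(void)
    {
        pthread_mutex_lock(&ex_lock);
        do_abort_locked();
        /* ... remaining reset work under the same lock ... */
        pthread_mutex_unlock(&ex_lock);
    }

    int main(void)
    {
        printf("abort -> %d\n", do_abort());
        do_reset();
        return 0;
    }

Build with -pthread.
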
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index afb63c843144..4c41ee816f0b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
2019 struct fc_fcp_internal *si; 2019 struct fc_fcp_internal *si;
2020 int rc = FAILED; 2020 int rc = FAILED;
2021 unsigned long flags; 2021 unsigned long flags;
2022 int rval;
2023
2024 rval = fc_block_scsi_eh(sc_cmd);
2025 if (rval)
2026 return rval;
2022 2027
2023 lport = shost_priv(sc_cmd->device->host); 2028 lport = shost_priv(sc_cmd->device->host);
2024 if (lport->state != LPORT_ST_READY) 2029 if (lport->state != LPORT_ST_READY)
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
2068 int rc = FAILED; 2073 int rc = FAILED;
2069 int rval; 2074 int rval;
2070 2075
2071 rval = fc_remote_port_chkready(rport); 2076 rval = fc_block_scsi_eh(sc_cmd);
2072 if (rval) 2077 if (rval)
2073 goto out; 2078 return rval;
2074 2079
2075 lport = shost_priv(sc_cmd->device->host); 2080 lport = shost_priv(sc_cmd->device->host);
2076 2081
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2116 2121
2117 FC_SCSI_DBG(lport, "Resetting host\n"); 2122 FC_SCSI_DBG(lport, "Resetting host\n");
2118 2123
2124 fc_block_scsi_eh(sc_cmd);
2125
2119 lport->tt.lport_reset(lport); 2126 lport->tt.lport_reset(lport);
2120 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2127 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
2121 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, 2128 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e55ed9cf23fb..628f347404f9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -88,6 +88,7 @@
88 */ 88 */
89 89
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/delay.h>
91#include <linux/slab.h> 92#include <linux/slab.h>
92#include <asm/unaligned.h> 93#include <asm/unaligned.h>
93 94
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
1029 FCH_EVT_LIPRESET, 0); 1030 FCH_EVT_LIPRESET, 0);
1030 fc_vports_linkchange(lport); 1031 fc_vports_linkchange(lport);
1031 fc_lport_reset_locked(lport); 1032 fc_lport_reset_locked(lport);
1032 if (lport->link_up) 1033 if (lport->link_up) {
1034 /*
1035 * Wait up to the resource allocation timeout before
1036 * doing re-login, since incomplete FIP exchanges
1037 * from the last session may collide with exchanges
1038 * in the new session.
1039 */
1040 msleep(lport->r_a_tov);
1033 fc_lport_enter_flogi(lport); 1041 fc_lport_enter_flogi(lport);
1042 }
1034} 1043}
1035 1044
1036/** 1045/**
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7836eb01c7fc..a31e05f3bfd4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1787 } 1787 }
1788 1788
1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1790 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1791 int prot = 0;
1791 vha->flags.difdix_supported = 1; 1792 vha->flags.difdix_supported = 1;
1792 ql_dbg(ql_dbg_user, vha, 0x7082, 1793 ql_dbg(ql_dbg_user, vha, 0x7082,
1793 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1794 "Registered for DIF/DIX type 1 and 3 protection.\n");
1795 if (ql2xenabledif == 1)
1796 prot = SHOST_DIX_TYPE0_PROTECTION;
1794 scsi_host_set_prot(vha->host, 1797 scsi_host_set_prot(vha->host,
1795 SHOST_DIF_TYPE1_PROTECTION 1798 prot | SHOST_DIF_TYPE1_PROTECTION
1796 | SHOST_DIF_TYPE2_PROTECTION 1799 | SHOST_DIF_TYPE2_PROTECTION
1797 | SHOST_DIF_TYPE3_PROTECTION 1800 | SHOST_DIF_TYPE3_PROTECTION
1798 | SHOST_DIX_TYPE1_PROTECTION 1801 | SHOST_DIX_TYPE1_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2155071f3100..d79cd8a5f831 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -8,24 +8,24 @@
8/* 8/*
9 * Table for showing the current message id in use for particular level 9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages. 10 * Change this table for addition of log/debug messages.
11 * ----------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | 12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | 14 * | Module Init and Probe | 0x0116 | |
15 * | Mailbox commands | 0x111e | 15 * | Mailbox commands | 0x1126 | |
16 * | Device Discovery | 0x2083 | 16 * | Device Discovery | 0x2083 | |
17 * | Queue Command and IO tracing | 0x302e | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 |
18 * | DPC Thread | 0x401c | 18 * | DPC Thread | 0x401c | |
19 * | Async Events | 0x5059 | 19 * | Async Events | 0x5059 | |
20 * | Timer Routines | 0x600d | 20 * | Timer Routines | 0x600d | |
21 * | User Space Interactions | 0x709c | 21 * | User Space Interactions | 0x709d | |
22 * | Task Management | 0x8043 | 22 * | Task Management | 0x8041 | |
23 * | AER/EEH | 0x900f | 23 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | 24 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb027 | 25 * | ISP82XX Specific | 0xb04f | |
26 * | MultiQ | 0xc00b | 26 * | MultiQ | 0xc00b | |
27 * | Misc | 0xd00b | 27 * | Misc | 0xd00b | |
28 * ----------------------------------------------------- 28 * ----------------------------------------------------------------------
29 */ 29 */
30 30
31#include "qla_def.h" 31#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cc5a79259d33..a03eaf40f377 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
2529#define DT_ISP8021 BIT_14 2529#define DT_ISP8021 BIT_14
2530#define DT_ISP_LAST (DT_ISP8021 << 1) 2530#define DT_ISP_LAST (DT_ISP8021 << 1)
2531 2531
2532#define DT_T10_PI BIT_25
2532#define DT_IIDMA BIT_26 2533#define DT_IIDMA BIT_26
2533#define DT_FWI2 BIT_27 2534#define DT_FWI2 BIT_27
2534#define DT_ZIO_SUPPORTED BIT_28 2535#define DT_ZIO_SUPPORTED BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
2572#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) 2573#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
2573#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2574#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
2574 2575
2576#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
2575#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2577#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2576#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2578#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2577#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) 2579#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
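DT_T10_PI follows the driver's existing capability-bit pattern: a flag OR'd into ha->device_type once at probe time, then tested everywhere through a one-line IS_*_CAPABLE() macro instead of repeating chip-model checks such as (IS_QLA25XX(ha) || IS_QLA81XX(ha)). A minimal stand-alone C sketch of just that pattern, with invented struct and variable names and the bit positions taken from this hunk:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)            (1u << (n))
#define DT_T10_PI         BIT(25)    /* bit positions as in the hunk */
#define DT_IIDMA          BIT(26)

struct hw_data { uint32_t device_type; };    /* stand-in for qla_hw_data */

#define IS_T10_PI_CAPABLE(ha)    ((ha)->device_type & DT_T10_PI)
#define IS_IIDMA_CAPABLE(ha)     ((ha)->device_type & DT_IIDMA)

int main(void)
{
    /* Hypothetical adapters: only the second one had DT_T10_PI set at probe. */
    struct hw_data legacy_hba = { .device_type = DT_IIDMA };
    struct hw_data pi_hba     = { .device_type = DT_IIDMA | DT_T10_PI };

    /* Call sites now ask one question instead of listing chip models. */
    printf("legacy_hba T10 PI capable: %s\n",
           IS_T10_PI_CAPABLE(&legacy_hba) ? "yes" : "no");
    printf("pi_hba     T10 PI capable: %s\n",
           IS_T10_PI_CAPABLE(&pi_hba) ? "yes" : "no");
    return 0;
}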
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 691783abfb69..aa69486dc064 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
537 /* 537 /*
538 * If DIF Error is set in comp_status, these additional fields are 538 * If DIF Error is set in comp_status, these additional fields are
539 * defined: 539 * defined:
540 *
 541 * !!! NOTE: Firmware sends expected/actual DIF data in big-endian
 542 * format, but the entire "data" field gets swab32()'d at the beginning
 543 * of qla2x00_status_entry().
544 *
540 * &data[10] : uint8_t report_runt_bg[2]; - computed guard 545 * &data[10] : uint8_t report_runt_bg[2]; - computed guard
541 * &data[12] : uint8_t actual_dif[8]; - DIF Data received 546 * &data[12] : uint8_t actual_dif[8]; - DIF Data received
542 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed 547 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index def694271bf7..37da04d3db26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3838 req = vha->req; 3838 req = vha->req;
3839 rsp = req->rsp; 3839 rsp = req->rsp;
3840 3840
3841 atomic_set(&vha->loop_state, LOOP_UPDATE);
3842 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3841 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3843 if (vha->flags.online) { 3842 if (vha->flags.online) {
3844 if (!(rval = qla2x00_fw_ready(vha))) { 3843 if (!(rval = qla2x00_fw_ready(vha))) {
3845 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3844 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3846 wait_time = 256; 3845 wait_time = 256;
3847 do { 3846 do {
3848 atomic_set(&vha->loop_state, LOOP_UPDATE);
3849
3850 /* Issue a marker after FW becomes ready. */ 3847 /* Issue a marker after FW becomes ready. */
3851 qla2x00_marker(vha, req, rsp, 0, 0, 3848 qla2x00_marker(vha, req, rsp, 0, 0,
3852 MK_SYNC_ALL); 3849 MK_SYNC_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d2e904bc21c0..9902834e0b74 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
102 fcport->d_id.b.al_pa); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
105
106static inline int
107qla2x00_hba_err_chk_enabled(srb_t *sp)
108{
109 /*
110 * Uncomment when corresponding SCSI changes are done.
111 *
112 if (!sp->cmd->prot_chk)
113 return 0;
114 *
115 */
116
117 switch (scsi_get_prot_op(sp->cmd)) {
118 case SCSI_PROT_READ_STRIP:
119 case SCSI_PROT_WRITE_INSERT:
120 if (ql2xenablehba_err_chk >= 1)
121 return 1;
122 break;
123 case SCSI_PROT_READ_PASS:
124 case SCSI_PROT_WRITE_PASS:
125 if (ql2xenablehba_err_chk >= 2)
126 return 1;
127 break;
128 case SCSI_PROT_READ_INSERT:
129 case SCSI_PROT_WRITE_STRIP:
130 return 1;
131 }
132 return 0;
133}
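The new helper reduces to a decision table over the protection operation and the ql2xenablehba_err_chk module parameter. A minimal userspace rendering of that table (a local enum stands in for the kernel's SCSI_PROT_* constants, so the numeric values are illustrative only):

#include <stdio.h>

/* Stand-ins for the SCSI_PROT_* operations; values are not the kernel's. */
enum prot_op {
    OP_NORMAL,
    OP_READ_INSERT, OP_WRITE_STRIP,
    OP_READ_STRIP,  OP_WRITE_INSERT,
    OP_READ_PASS,   OP_WRITE_PASS,
};

/* Mirrors the switch in qla2x00_hba_err_chk_enabled() above. */
static int hba_err_chk_enabled(enum prot_op op, int err_chk_param)
{
    switch (op) {
    case OP_READ_STRIP:
    case OP_WRITE_INSERT:
        return err_chk_param >= 1;
    case OP_READ_PASS:
    case OP_WRITE_PASS:
        return err_chk_param >= 2;
    case OP_READ_INSERT:
    case OP_WRITE_STRIP:
        return 1;        /* always checked in the patch, regardless of the parameter */
    default:
        return 0;
    }
}

int main(void)
{
    static const char *name[] = {
        "NORMAL", "READ_INSERT", "WRITE_STRIP",
        "READ_STRIP", "WRITE_INSERT", "READ_PASS", "WRITE_PASS",
    };
    int param, op;

    for (param = 0; param <= 2; param++)
        for (op = OP_NORMAL; op <= OP_WRITE_PASS; op++)
            printf("ql2xenablehba_err_chk=%d  %-12s -> %d\n",
                   param, name[op], hba_err_chk_enabled(op, param));
    return 0;
}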
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 49d6906af886..dbec89622a0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -709,20 +709,28 @@ struct fw_dif_context {
709 * 709 *
710 */ 710 */
711static inline void 711static inline void
712qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, 712qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
713 unsigned int protcnt) 713 unsigned int protcnt)
714{ 714{
715 struct sd_dif_tuple *spt; 715 struct scsi_cmnd *cmd = sp->cmd;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
717 unsigned char op = scsi_get_prot_op(cmd);
718 717
719 switch (scsi_get_prot_type(cmd)) { 718 switch (scsi_get_prot_type(cmd)) {
720 /* For TYPE 0 protection: no checking */
721 case SCSI_PROT_DIF_TYPE0: 719 case SCSI_PROT_DIF_TYPE0:
722 pkt->ref_tag_mask[0] = 0x00; 720 /*
723 pkt->ref_tag_mask[1] = 0x00; 721 * No check for ql2xenablehba_err_chk, as it would be an
724 pkt->ref_tag_mask[2] = 0x00; 722 * I/O error if hba tag generation is not done.
725 pkt->ref_tag_mask[3] = 0x00; 723 */
724 pkt->ref_tag = cpu_to_le32((uint32_t)
725 (0xffffffff & scsi_get_lba(cmd)));
726
727 if (!qla2x00_hba_err_chk_enabled(sp))
728 break;
729
730 pkt->ref_tag_mask[0] = 0xff;
731 pkt->ref_tag_mask[1] = 0xff;
732 pkt->ref_tag_mask[2] = 0xff;
733 pkt->ref_tag_mask[3] = 0xff;
726 break; 734 break;
727 735
728 /* 736 /*
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
730 * match LBA in CDB + N 738 * match LBA in CDB + N
731 */ 739 */
732 case SCSI_PROT_DIF_TYPE2: 740 case SCSI_PROT_DIF_TYPE2:
733 if (!ql2xenablehba_err_chk) 741 pkt->app_tag = __constant_cpu_to_le16(0);
734 break; 742 pkt->app_tag_mask[0] = 0x0;
735 743 pkt->app_tag_mask[1] = 0x0;
736 if (scsi_prot_sg_count(cmd)) {
737 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
738 scsi_prot_sglist(cmd)[0].offset;
739 pkt->app_tag = swab32(spt->app_tag);
740 pkt->app_tag_mask[0] = 0xff;
741 pkt->app_tag_mask[1] = 0xff;
742 }
743 744
744 pkt->ref_tag = cpu_to_le32((uint32_t) 745 pkt->ref_tag = cpu_to_le32((uint32_t)
745 (0xffffffff & scsi_get_lba(cmd))); 746 (0xffffffff & scsi_get_lba(cmd)));
746 747
748 if (!qla2x00_hba_err_chk_enabled(sp))
749 break;
750
747 /* enable ALL bytes of the ref tag */ 751 /* enable ALL bytes of the ref tag */
748 pkt->ref_tag_mask[0] = 0xff; 752 pkt->ref_tag_mask[0] = 0xff;
749 pkt->ref_tag_mask[1] = 0xff; 753 pkt->ref_tag_mask[1] = 0xff;
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
763 * 16 bit app tag. 767 * 16 bit app tag.
764 */ 768 */
765 case SCSI_PROT_DIF_TYPE1: 769 case SCSI_PROT_DIF_TYPE1:
766 if (!ql2xenablehba_err_chk) 770 pkt->ref_tag = cpu_to_le32((uint32_t)
771 (0xffffffff & scsi_get_lba(cmd)));
772 pkt->app_tag = __constant_cpu_to_le16(0);
773 pkt->app_tag_mask[0] = 0x0;
774 pkt->app_tag_mask[1] = 0x0;
775
776 if (!qla2x00_hba_err_chk_enabled(sp))
767 break; 777 break;
768 778
769 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
770 op == SCSI_PROT_WRITE_PASS)) {
771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
772 scsi_prot_sglist(cmd)[0].offset;
773 ql_dbg(ql_dbg_io, vha, 0x3008,
774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
775 spt, (int)spt->ref_tag, cmd);
776 pkt->ref_tag = swab32(spt->ref_tag);
777 pkt->app_tag_mask[0] = 0x0;
778 pkt->app_tag_mask[1] = 0x0;
779 } else {
780 pkt->ref_tag = cpu_to_le32((uint32_t)
781 (0xffffffff & scsi_get_lba(cmd)));
782 pkt->app_tag = __constant_cpu_to_le16(0);
783 pkt->app_tag_mask[0] = 0x0;
784 pkt->app_tag_mask[1] = 0x0;
785 }
786 /* enable ALL bytes of the ref tag */ 779 /* enable ALL bytes of the ref tag */
787 pkt->ref_tag_mask[0] = 0xff; 780 pkt->ref_tag_mask[0] = 0xff;
788 pkt->ref_tag_mask[1] = 0xff; 781 pkt->ref_tag_mask[1] = 0xff;
@@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
798 scsi_get_prot_type(cmd), cmd); 791 scsi_get_prot_type(cmd), cmd);
799} 792}
800 793
794struct qla2_sgx {
795 dma_addr_t dma_addr; /* OUT */
796 uint32_t dma_len; /* OUT */
797
798 uint32_t tot_bytes; /* IN */
799 struct scatterlist *cur_sg; /* IN */
800
 801 /* for bookkeeping, bzero on initial invocation */
802 uint32_t bytes_consumed;
803 uint32_t num_bytes;
804 uint32_t tot_partial;
805
806 /* for debugging */
807 uint32_t num_sg;
808 srb_t *sp;
809};
801 810
802static int 811static int
812qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
813 uint32_t *partial)
814{
815 struct scatterlist *sg;
816 uint32_t cumulative_partial, sg_len;
817 dma_addr_t sg_dma_addr;
818
819 if (sgx->num_bytes == sgx->tot_bytes)
820 return 0;
821
822 sg = sgx->cur_sg;
823 cumulative_partial = sgx->tot_partial;
824
825 sg_dma_addr = sg_dma_address(sg);
826 sg_len = sg_dma_len(sg);
827
828 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
829
830 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
831 sgx->dma_len = (blk_sz - cumulative_partial);
832 sgx->tot_partial = 0;
833 sgx->num_bytes += blk_sz;
834 *partial = 0;
835 } else {
836 sgx->dma_len = sg_len - sgx->bytes_consumed;
837 sgx->tot_partial += sgx->dma_len;
838 *partial = 1;
839 }
840
841 sgx->bytes_consumed += sgx->dma_len;
842
843 if (sg_len == sgx->bytes_consumed) {
844 sg = sg_next(sg);
845 sgx->num_sg++;
846 sgx->cur_sg = sg;
847 sgx->bytes_consumed = 0;
848 }
849
850 return 1;
851}
852
853static int
854qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
855 uint32_t *dsd, uint16_t tot_dsds)
856{
857 void *next_dsd;
858 uint8_t avail_dsds = 0;
859 uint32_t dsd_list_len;
860 struct dsd_dma *dsd_ptr;
861 struct scatterlist *sg_prot;
862 uint32_t *cur_dsd = dsd;
863 uint16_t used_dsds = tot_dsds;
864
865 uint32_t prot_int;
866 uint32_t partial;
867 struct qla2_sgx sgx;
868 dma_addr_t sle_dma;
869 uint32_t sle_dma_len, tot_prot_dma_len = 0;
870 struct scsi_cmnd *cmd = sp->cmd;
871
872 prot_int = cmd->device->sector_size;
873
874 memset(&sgx, 0, sizeof(struct qla2_sgx));
875 sgx.tot_bytes = scsi_bufflen(sp->cmd);
876 sgx.cur_sg = scsi_sglist(sp->cmd);
877 sgx.sp = sp;
878
879 sg_prot = scsi_prot_sglist(sp->cmd);
880
881 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
882
883 sle_dma = sgx.dma_addr;
884 sle_dma_len = sgx.dma_len;
885alloc_and_fill:
886 /* Allocate additional continuation packets? */
887 if (avail_dsds == 0) {
888 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
889 QLA_DSDS_PER_IOCB : used_dsds;
890 dsd_list_len = (avail_dsds + 1) * 12;
891 used_dsds -= avail_dsds;
892
893 /* allocate tracking DS */
894 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
895 if (!dsd_ptr)
896 return 1;
897
898 /* allocate new list */
899 dsd_ptr->dsd_addr = next_dsd =
900 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
901 &dsd_ptr->dsd_list_dma);
902
903 if (!next_dsd) {
904 /*
 905 * Need to clean up only this dsd_ptr; the rest
 906 * will be done by sp_free_dma()
907 */
908 kfree(dsd_ptr);
909 return 1;
910 }
911
912 list_add_tail(&dsd_ptr->list,
913 &((struct crc_context *)sp->ctx)->dsd_list);
914
915 sp->flags |= SRB_CRC_CTX_DSD_VALID;
916
917 /* add new list to cmd iocb or last list */
918 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
919 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
920 *cur_dsd++ = dsd_list_len;
921 cur_dsd = (uint32_t *)next_dsd;
922 }
923 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
924 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
925 *cur_dsd++ = cpu_to_le32(sle_dma_len);
926 avail_dsds--;
927
928 if (partial == 0) {
929 /* Got a full protection interval */
930 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
931 sle_dma_len = 8;
932
933 tot_prot_dma_len += sle_dma_len;
934 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
935 tot_prot_dma_len = 0;
936 sg_prot = sg_next(sg_prot);
937 }
938
939 partial = 1; /* So as to not re-enter this block */
940 goto alloc_and_fill;
941 }
942 }
943 /* Null termination */
944 *cur_dsd++ = 0;
945 *cur_dsd++ = 0;
946 *cur_dsd++ = 0;
947 return 0;
948}
949static int
803qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 950qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
804 uint16_t tot_dsds) 951 uint16_t tot_dsds)
805{ 952{
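qla24xx_get_one_block_sg() above chops an arbitrary DMA scatterlist into protection-interval-sized pieces, carrying a partial byte count across segment boundaries and flagging the points where a full interval completes (which is where the no-bundling walker appends the 8-byte DIF tuple from the protection scatterlist). A minimal userspace model of the same splitting, with plain (addr, len) pairs instead of struct scatterlist and an assumed 512-byte interval:

#include <stdio.h>
#include <stdint.h>

struct seg { uint64_t addr; uint32_t len; };

/* Emit one (addr, len) piece per step so that pieces accumulate into whole
 * blk_sz-sized protection intervals, modelling qla24xx_get_one_block_sg(). */
static void walk_blocks(const struct seg *sg, int nseg, uint32_t blk_sz)
{
    uint32_t consumed = 0, partial = 0;
    int i = 0;

    while (i < nseg) {
        uint32_t avail = sg[i].len - consumed;
        uint64_t addr  = sg[i].addr + consumed;
        uint32_t len;
        int full;

        if (partial + avail >= blk_sz) {    /* completes an interval */
            len = blk_sz - partial;
            partial = 0;
            full = 1;
        } else {                            /* carry remainder into next segment */
            len = avail;
            partial += len;
            full = 0;
        }
        consumed += len;
        if (consumed == sg[i].len) {        /* segment exhausted, advance */
            i++;
            consumed = 0;
        }
        printf("dma 0x%llx len %u%s\n", (unsigned long long)addr, len,
               full ? "  <- interval boundary, attach 8-byte DIF tuple" : "");
    }
}

int main(void)
{
    /* Hypothetical scatterlist: 1536 bytes split awkwardly across segments. */
    struct seg sg[] = { { 0x1000, 300 }, { 0x8000, 900 }, { 0x20000, 336 } };

    walk_blocks(sg, 3, 512);    /* 512-byte sectors assumed */
    return 0;
}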
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
981 struct scsi_cmnd *cmd; 1128 struct scsi_cmnd *cmd;
982 struct scatterlist *cur_seg; 1129 struct scatterlist *cur_seg;
983 int sgc; 1130 int sgc;
984 uint32_t total_bytes; 1131 uint32_t total_bytes = 0;
985 uint32_t data_bytes; 1132 uint32_t data_bytes;
986 uint32_t dif_bytes; 1133 uint32_t dif_bytes;
987 uint8_t bundling = 1; 1134 uint8_t bundling = 1;
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1023 __constant_cpu_to_le16(CF_READ_DATA); 1170 __constant_cpu_to_le16(CF_READ_DATA);
1024 } 1171 }
1025 1172
1026 tot_prot_dsds = scsi_prot_sg_count(cmd); 1173 if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
1027 if (!tot_prot_dsds) 1174 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
1175 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
1176 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
1028 bundling = 0; 1177 bundling = 0;
1029 1178
1030 /* Allocate CRC context from global pool */ 1179 /* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1047 1196
1048 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1197 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1049 1198
1050 qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) 1199 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1051 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1200 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1052 1201
1053 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 1202 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1076 fcp_cmnd->additional_cdb_len |= 2; 1225 fcp_cmnd->additional_cdb_len |= 2;
1077 1226
1078 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1227 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1079 host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
1080 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1228 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1081 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1229 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1082 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( 1230 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1255 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1108 1256
1109 /* Compute dif len and adjust data len to include protection */ 1257 /* Compute dif len and adjust data len to include protection */
1110 total_bytes = data_bytes;
1111 dif_bytes = 0; 1258 dif_bytes = 0;
1112 blk_size = cmd->device->sector_size; 1259 blk_size = cmd->device->sector_size;
1113 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 1260 dif_bytes = (data_bytes / blk_size) * 8;
1114 dif_bytes = (data_bytes / blk_size) * 8; 1261
1115 total_bytes += dif_bytes; 1262 switch (scsi_get_prot_op(sp->cmd)) {
1263 case SCSI_PROT_READ_INSERT:
1264 case SCSI_PROT_WRITE_STRIP:
1265 total_bytes = data_bytes;
1266 data_bytes += dif_bytes;
1267 break;
1268
1269 case SCSI_PROT_READ_STRIP:
1270 case SCSI_PROT_WRITE_INSERT:
1271 case SCSI_PROT_READ_PASS:
1272 case SCSI_PROT_WRITE_PASS:
1273 total_bytes = data_bytes + dif_bytes;
1274 break;
1275 default:
1276 BUG();
1116 } 1277 }
1117 1278
1118 if (!ql2xenablehba_err_chk) 1279 if (!qla2x00_hba_err_chk_enabled(sp))
1119 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1280 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1120 1281
1121 if (!bundling) { 1282 if (!bundling) {
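For illustration, with an assumed 4 KiB transfer and 512-byte sectors the arithmetic above gives dif_bytes = (4096 / 512) * 8 = 64: for READ_STRIP/WRITE_INSERT and READ_PASS/WRITE_PASS the command carries total_bytes = 4096 + 64 = 4160 bytes, while for READ_INSERT/WRITE_STRIP total_bytes stays at 4096 and data_bytes is bumped to 4160.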
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1151 1312
1152 cmd_pkt->control_flags |= 1313 cmd_pkt->control_flags |=
1153 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1314 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1154 if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1315
1316 if (!bundling && tot_prot_dsds) {
1317 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1318 cur_dsd, tot_dsds))
1319 goto crc_queuing_error;
1320 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1155 (tot_dsds - tot_prot_dsds))) 1321 (tot_dsds - tot_prot_dsds)))
1156 goto crc_queuing_error; 1322 goto crc_queuing_error;
1157 1323
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
1414 goto queuing_error; 1580 goto queuing_error;
1415 else 1581 else
1416 sp->flags |= SRB_DMA_VALID; 1582 sp->flags |= SRB_DMA_VALID;
1583
1584 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1585 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1586 struct qla2_sgx sgx;
1587 uint32_t partial;
1588
1589 memset(&sgx, 0, sizeof(struct qla2_sgx));
1590 sgx.tot_bytes = scsi_bufflen(cmd);
1591 sgx.cur_sg = scsi_sglist(cmd);
1592 sgx.sp = sp;
1593
1594 nseg = 0;
1595 while (qla24xx_get_one_block_sg(
1596 cmd->device->sector_size, &sgx, &partial))
1597 nseg++;
1598 }
1417 } else 1599 } else
1418 nseg = 0; 1600 nseg = 0;
1419 1601
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
1428 goto queuing_error; 1610 goto queuing_error;
1429 else 1611 else
1430 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1612 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1613
1614 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1615 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1616 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1617 }
1431 } else { 1618 } else {
1432 nseg = 0; 1619 nseg = 0;
1433 } 1620 }
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1454 /* Build header part of command packet (excluding the OPCODE). */ 1641 /* Build header part of command packet (excluding the OPCODE). */
1455 req->current_outstanding_cmd = handle; 1642 req->current_outstanding_cmd = handle;
1456 req->outstanding_cmds[handle] = sp; 1643 req->outstanding_cmds[handle] = sp;
1644 sp->handle = handle;
1457 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1645 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1458 req->cnt -= req_cnt; 1646 req->cnt -= req_cnt;
1459 1647
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b16b7725dee0..646fc5263d50 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -719,7 +719,6 @@ skip_rio:
719 vha->flags.rscn_queue_overflow = 1; 719 vha->flags.rscn_queue_overflow = 1;
720 } 720 }
721 721
722 atomic_set(&vha->loop_state, LOOP_UPDATE);
723 atomic_set(&vha->loop_down_timer, 0); 722 atomic_set(&vha->loop_down_timer, 0);
724 vha->flags.management_server_logged_in = 0; 723 vha->flags.management_server_logged_in = 0;
725 724
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {
1435 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1434 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1436 * to indicate to the kernel that the HBA detected error. 1435 * to indicate to the kernel that the HBA detected error.
1437 */ 1436 */
1438static inline void 1437static inline int
1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1438qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1440{ 1439{
1441 struct scsi_qla_host *vha = sp->fcport->vha; 1440 struct scsi_qla_host *vha = sp->fcport->vha;
1442 struct scsi_cmnd *cmd = sp->cmd; 1441 struct scsi_cmnd *cmd = sp->cmd;
1443 struct scsi_dif_tuple *ep = 1442 uint8_t *ap = &sts24->data[12];
1444 (struct scsi_dif_tuple *)&sts24->data[20]; 1443 uint8_t *ep = &sts24->data[20];
1445 struct scsi_dif_tuple *ap =
1446 (struct scsi_dif_tuple *)&sts24->data[12];
1447 uint32_t e_ref_tag, a_ref_tag; 1444 uint32_t e_ref_tag, a_ref_tag;
1448 uint16_t e_app_tag, a_app_tag; 1445 uint16_t e_app_tag, a_app_tag;
1449 uint16_t e_guard, a_guard; 1446 uint16_t e_guard, a_guard;
1450 1447
1451 e_ref_tag = be32_to_cpu(ep->ref_tag); 1448 /*
1452 a_ref_tag = be32_to_cpu(ap->ref_tag); 1449 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1453 e_app_tag = be16_to_cpu(ep->app_tag); 1450 * would make guard field appear at offset 2
1454 a_app_tag = be16_to_cpu(ap->app_tag); 1451 */
1455 e_guard = be16_to_cpu(ep->guard); 1452 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1456 a_guard = be16_to_cpu(ap->guard); 1453 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1454 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1455 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1456 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1457 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1457 1458
1458 ql_dbg(ql_dbg_io, vha, 0x3023, 1459 ql_dbg(ql_dbg_io, vha, 0x3023,
1459 "iocb(s) %p Returned STATUS.\n", sts24); 1460 "iocb(s) %p Returned STATUS.\n", sts24);
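These offsets only make sense together with the swab32 note added to qla_fw.h: the firmware returns each 8-byte DIF tuple (guard, app tag, ref tag) in big-endian order, but the status IOCB's "data" area has already been byte-swapped 32 bits at a time by the time this function runs, which lands the app tag at offset 0 and the guard at offset 2 as little-endian values. A self-contained sketch (userspace C, made-up tuple values) of why the le16/le32 reads at those offsets recover the original fields:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00) |
           ((x << 8) & 0x00ff0000) | (x << 24);
}

/* Little-endian loads: what le16_to_cpu()/le32_to_cpu() yield for these bytes. */
static uint16_t le16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }
static uint32_t le32(const uint8_t *p)
{
    return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    /* DIF tuple as the firmware sends it: big-endian guard, app tag, ref tag. */
    uint16_t guard = 0x1234, app = 0xabcd;
    uint32_t ref = 0xdeadbeef;
    uint8_t buf[8] = {
        guard >> 8, guard & 0xff,                     /* offsets 0-1 */
        app   >> 8, app   & 0xff,                     /* offsets 2-3 */
        ref >> 24, ref >> 16, ref >> 8, ref & 0xff,   /* offsets 4-7 */
    };
    uint32_t w[2];

    /* What the swab32 of the whole "data" area does to each 32-bit word. */
    memcpy(w, buf, 8);
    w[0] = swab32(w[0]);
    w[1] = swab32(w[1]);
    memcpy(buf, w, 8);

    /* After the swap: app tag at 0, guard at 2, ref tag at 4, little-endian. */
    printf("app   = 0x%04x (expect 0xabcd)\n", le16(buf + 0));
    printf("guard = 0x%04x (expect 0x1234)\n", le16(buf + 2));
    printf("ref   = 0x%08x (expect 0xdeadbeef)\n", le32(buf + 4));
    return 0;
}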
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1466 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1466 a_app_tag, e_app_tag, a_guard, e_guard); 1467 a_app_tag, e_app_tag, a_guard, e_guard);
1467 1468
1469 /*
1470 * Ignore sector if:
1471 * For type 3: ref & app tag is all 'f's
1472 * For type 0,1,2: app tag is all 'f's
1473 */
1474 if ((a_app_tag == 0xffff) &&
1475 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1476 (a_ref_tag == 0xffffffff))) {
1477 uint32_t blocks_done, resid;
1478 sector_t lba_s = scsi_get_lba(cmd);
1479
1480 /* 2TB boundary case covered automatically with this */
1481 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1482
1483 resid = scsi_bufflen(cmd) - (blocks_done *
1484 cmd->device->sector_size);
1485
1486 scsi_set_resid(cmd, resid);
1487 cmd->result = DID_OK << 16;
1488
1489 /* Update protection tag */
1490 if (scsi_prot_sg_count(cmd)) {
1491 uint32_t i, j = 0, k = 0, num_ent;
1492 struct scatterlist *sg;
1493 struct sd_dif_tuple *spt;
1494
1495 /* Patch the corresponding protection tags */
1496 scsi_for_each_prot_sg(cmd, sg,
1497 scsi_prot_sg_count(cmd), i) {
1498 num_ent = sg_dma_len(sg) / 8;
1499 if (k + num_ent < blocks_done) {
1500 k += num_ent;
1501 continue;
1502 }
1503 j = blocks_done - k - 1;
1504 k = blocks_done;
1505 break;
1506 }
1507
1508 if (k != blocks_done) {
1509 qla_printk(KERN_WARNING, sp->fcport->vha->hw,
1510 "unexpected tag values tag:lba=%x:%lx)\n",
1511 e_ref_tag, lba_s);
1512 return 1;
1513 }
1514
1515 spt = page_address(sg_page(sg)) + sg->offset;
1516 spt += j;
1517
1518 spt->app_tag = 0xffff;
1519 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1520 spt->ref_tag = 0xffffffff;
1521 }
1522
1523 return 0;
1524 }
1525
1468 /* check guard */ 1526 /* check guard */
1469 if (e_guard != a_guard) { 1527 if (e_guard != a_guard) {
1470 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1528 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1472 set_driver_byte(cmd, DRIVER_SENSE); 1530 set_driver_byte(cmd, DRIVER_SENSE);
1473 set_host_byte(cmd, DID_ABORT); 1531 set_host_byte(cmd, DID_ABORT);
1474 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1532 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1475 return; 1533 return 1;
1476 } 1534 }
1477 1535
1478 /* check appl tag */ 1536 /* check ref tag */
1479 if (e_app_tag != a_app_tag) { 1537 if (e_ref_tag != a_ref_tag) {
1480 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1538 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1481 0x10, 0x2); 1539 0x10, 0x3);
1482 set_driver_byte(cmd, DRIVER_SENSE); 1540 set_driver_byte(cmd, DRIVER_SENSE);
1483 set_host_byte(cmd, DID_ABORT); 1541 set_host_byte(cmd, DID_ABORT);
1484 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1542 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1485 return; 1543 return 1;
1486 } 1544 }
1487 1545
1488 /* check ref tag */ 1546 /* check appl tag */
1489 if (e_ref_tag != a_ref_tag) { 1547 if (e_app_tag != a_app_tag) {
1490 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1548 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1491 0x10, 0x3); 1549 0x10, 0x2);
1492 set_driver_byte(cmd, DRIVER_SENSE); 1550 set_driver_byte(cmd, DRIVER_SENSE);
1493 set_host_byte(cmd, DID_ABORT); 1551 set_host_byte(cmd, DID_ABORT);
1494 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1552 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1495 return; 1553 return 1;
1496 } 1554 }
1555
1556 return 1;
1497} 1557}
1498 1558
1499/** 1559/**
@@ -1767,7 +1827,7 @@ check_scsi_status:
1767 break; 1827 break;
1768 1828
1769 case CS_DIF_ERROR: 1829 case CS_DIF_ERROR:
1770 qla2x00_handle_dif_error(sp, sts24); 1830 logit = qla2x00_handle_dif_error(sp, sts24);
1771 break; 1831 break;
1772 default: 1832 default:
1773 cp->result = DID_ERROR << 16; 1833 cp->result = DID_ERROR << 16;
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2468 goto skip_msi; 2528 goto skip_msi;
2469 } 2529 }
2470 2530
2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2531 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2473 ql_log(ql_log_warn, vha, 0x0035, 2532 ql_log(ql_log_warn, vha, 0x0035,
2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2533 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2475 ha->pdev->revision, ha->fw_attributes); 2534 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2476 goto skip_msix; 2535 goto skip_msix;
2477 } 2536 }
2478 2537
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c706ed370000..f488cc69fc79 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
472 host->can_queue = base_vha->req->length + 128; 472 host->can_queue = base_vha->req->length + 128;
473 host->this_id = 255; 473 host->this_id = 255;
474 host->cmd_per_lun = 3; 474 host->cmd_per_lun = 3;
475 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 475 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
476 host->max_cmd_len = 32; 476 host->max_cmd_len = 32;
477 else 477 else
478 host->max_cmd_len = MAX_CMDSZ; 478 host->max_cmd_len = MAX_CMDSZ;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 5cbf33a50b14..049807cda419 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2208 struct qla_hw_data *ha; 2208 struct qla_hw_data *ha;
2209 struct rsp_que *rsp; 2209 struct rsp_que *rsp;
2210 struct device_reg_82xx __iomem *reg; 2210 struct device_reg_82xx __iomem *reg;
2211 unsigned long flags;
2211 2212
2212 rsp = (struct rsp_que *) dev_id; 2213 rsp = (struct rsp_que *) dev_id;
2213 if (!rsp) { 2214 if (!rsp) {
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2218 2219
2219 ha = rsp->hw; 2220 ha = rsp->hw;
2220 reg = &ha->iobase->isp82; 2221 reg = &ha->iobase->isp82;
2221 spin_lock_irq(&ha->hardware_lock); 2222 spin_lock_irqsave(&ha->hardware_lock, flags);
2222 vha = pci_get_drvdata(ha->pdev); 2223 vha = pci_get_drvdata(ha->pdev);
2223 qla24xx_process_response_queue(vha, rsp); 2224 qla24xx_process_response_queue(vha, rsp);
2224 WRT_REG_DWORD(&reg->host_int, 0); 2225 WRT_REG_DWORD(&reg->host_int, 0);
2225 spin_unlock_irq(&ha->hardware_lock); 2226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2226 return IRQ_HANDLED; 2227 return IRQ_HANDLED;
2227} 2228}
2228 2229
@@ -2838,6 +2839,16 @@ sufficient_dsds:
2838 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2839 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2839 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2840 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2840 2841
2842 /* build FCP_CMND IU */
2843 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2844 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2845 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2846
2847 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2848 ctx->fcp_cmnd->additional_cdb_len |= 1;
2849 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2850 ctx->fcp_cmnd->additional_cdb_len |= 2;
2851
2841 /* 2852 /*
2842 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2853 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2843 */ 2854 */
@@ -2854,16 +2865,6 @@ sufficient_dsds:
2854 } 2865 }
2855 } 2866 }
2856 2867
2857 /* build FCP_CMND IU */
2858 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2859 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2860 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2861
2862 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2863 ctx->fcp_cmnd->additional_cdb_len |= 1;
2864 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2865 ctx->fcp_cmnd->additional_cdb_len |= 2;
2866
2867 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2868 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2868 2869
2869 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2870 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e02df276804e..4cace3f20c04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
106 "Maximum queue depth to report for target devices."); 106 "Maximum queue depth to report for target devices.");
107 107
108/* Do not change the value of this after module load */ 108/* Do not change the value of this after module load */
109int ql2xenabledif = 1; 109int ql2xenabledif = 0;
110module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); 110module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(ql2xenabledif, 111MODULE_PARM_DESC(ql2xenabledif,
112 " Enable T10-CRC-DIF " 112 " Enable T10-CRC-DIF "
113 " Default is 0 - No DIF Support. 1 - Enable it"); 113 " Default is 0 - No DIF Support. 1 - Enable it"
114 ", 2 - Enable DIF for all types, except Type 0.");
114 115
115int ql2xenablehba_err_chk; 116int ql2xenablehba_err_chk = 2;
116module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 117module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
117MODULE_PARM_DESC(ql2xenablehba_err_chk, 118MODULE_PARM_DESC(ql2xenablehba_err_chk,
118 " Enable T10-CRC-DIF Error isolation by HBA" 119 " Enable T10-CRC-DIF Error isolation by HBA:\n"
119 " Default is 0 - Error isolation disabled, 1 - Enable it"); 120 " Default is 2.\n"
121 " 0 -- Error isolation disabled\n"
122 " 1 -- Error isolation enabled only for DIX Type 0\n"
123 " 2 -- Error isolation enabled for all Types\n");
120 124
121int ql2xiidmaenable=1; 125int ql2xiidmaenable=1;
122module_param(ql2xiidmaenable, int, S_IRUGO); 126module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
909 "Abort command mbx success.\n"); 913 "Abort command mbx success.\n");
910 wait = 1; 914 wait = 1;
911 } 915 }
916
917 spin_lock_irqsave(&ha->hardware_lock, flags);
912 qla2x00_sp_compl(ha, sp); 918 qla2x00_sp_compl(ha, sp);
919 spin_unlock_irqrestore(&ha->hardware_lock, flags);
920
921 /* Did the command return during mailbox execution? */
922 if (ret == FAILED && !CMD_SP(cmd))
923 ret = SUCCESS;
913 924
914 /* Wait for the command to be returned. */ 925 /* Wait for the command to be returned. */
915 if (wait) { 926 if (wait) {
@@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2251 host->this_id = 255; 2262 host->this_id = 255;
2252 host->cmd_per_lun = 3; 2263 host->cmd_per_lun = 3;
2253 host->unique_id = host->host_no; 2264 host->unique_id = host->host_no;
2254 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 2265 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
2255 host->max_cmd_len = 32; 2266 host->max_cmd_len = 32;
2256 else 2267 else
2257 host->max_cmd_len = MAX_CMDSZ; 2268 host->max_cmd_len = MAX_CMDSZ;
@@ -2378,13 +2389,16 @@ skip_dpc:
2378 "Detected hba at address=%p.\n", 2389 "Detected hba at address=%p.\n",
2379 ha); 2390 ha);
2380 2391
2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2392 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2382 if (ha->fw_attributes & BIT_4) { 2393 if (ha->fw_attributes & BIT_4) {
2394 int prot = 0;
2383 base_vha->flags.difdix_supported = 1; 2395 base_vha->flags.difdix_supported = 1;
2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2396 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2385 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2397 "Registering for DIF/DIX type 1 and 3 protection.\n");
2398 if (ql2xenabledif == 1)
2399 prot = SHOST_DIX_TYPE0_PROTECTION;
2386 scsi_host_set_prot(host, 2400 scsi_host_set_prot(host,
2387 SHOST_DIF_TYPE1_PROTECTION 2401 prot | SHOST_DIF_TYPE1_PROTECTION
2388 | SHOST_DIF_TYPE2_PROTECTION 2402 | SHOST_DIF_TYPE2_PROTECTION
2389 | SHOST_DIF_TYPE3_PROTECTION 2403 | SHOST_DIF_TYPE3_PROTECTION
2390 | SHOST_DIX_TYPE1_PROTECTION 2404 | SHOST_DIX_TYPE1_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 062c97bf62f5..13b6357c1fa2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.03-k" 10#define QLA2XXX_VERSION "8.03.07.07-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 2c33ce6eac1e..0f5599e0abf6 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,6 +1,6 @@
1config SCSI_QLA_ISCSI 1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support" 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
3 depends on PCI && SCSI 3 depends on PCI && SCSI && NET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 ---help--- 5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) 6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index f33e2dd97934..33b2ed451e09 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
186 !defined(CONFIG_CPU_SUBTYPE_SH7709) 186 !defined(CONFIG_CPU_SUBTYPE_SH7709)
187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3), 187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
188#endif 188#endif
189#if defined(CONFIG_ARCH_SH7372)
190 [IRQ_TYPE_EDGE_BOTH] = VALID(4),
191#endif
189}; 192};
190 193
191static int intc_set_type(struct irq_data *data, unsigned int type) 194static int intc_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
index 34253cf37812..4a70180eba5d 100644
--- a/drivers/staging/brcm80211/brcmsmac/otp.c
+++ b/drivers/staging/brcm80211/brcmsmac/otp.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/string.h>
19 20
20#include <brcm_hw_ids.h> 21#include <brcm_hw_ids.h>
21#include <chipcommon.h> 22#include <chipcommon.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
index bbf21897ae0e..823b5e4672e2 100644
--- a/drivers/staging/brcm80211/brcmsmac/types.h
+++ b/drivers/staging/brcm80211/brcmsmac/types.h
@@ -18,6 +18,7 @@
18#define _BRCM_TYPES_H_ 18#define _BRCM_TYPES_H_
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/io.h>
21 22
22/* Bus types */ 23/* Bus types */
23#define SI_BUS 0 /* SOC Interconnect */ 24#define SI_BUS 0 /* SOC Interconnect */
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 6859af0778cf..7611def97d06 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
241 struct comedi_insn *insn, 241 struct comedi_insn *insn,
242 unsigned int *data); 242 unsigned int *data);
243static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); 243static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
244#ifdef CONFIG_COMEDI_PCI 244#ifdef CONFIG_ISA_DMA_API
245static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); 245static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
246#endif
247#ifdef CONFIG_COMEDI_PCI
246static int labpc_find_device(struct comedi_device *dev, int bus, int slot); 248static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
247#endif 249#endif
248static int labpc_dio_mem_callback(int dir, int port, int data, 250static int labpc_dio_mem_callback(int dir, int port, int data,
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
index 02e17c9c8637..fd211f3467c4 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.c
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
711 /* Create drm encoder object */ 711 /* Create drm encoder object */
712 connector = &dsi_connector->base.base; 712 connector = &dsi_connector->base.base;
713 encoder = &dbi_output->base.base; 713 encoder = &dbi_output->base.base;
714 /* Review this if we ever get MIPI-HDMI bridges or similar */
714 drm_encoder_init(dev, 715 drm_encoder_init(dev,
715 encoder, 716 encoder,
716 p_funcs->encoder_funcs, 717 p_funcs->encoder_funcs,
717 DRM_MODE_ENCODER_MIPI); 718 DRM_MODE_ENCODER_LVDS);
718 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); 719 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
719 720
720 /* Attach to given connector */ 721 /* Attach to given connector */
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
index dc6242c51d0b..f0fa986fd934 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.h
@@ -42,9 +42,6 @@
42#include "mdfld_dsi_output.h" 42#include "mdfld_dsi_output.h"
43#include "mdfld_output.h" 43#include "mdfld_output.h"
44 44
45#define DRM_MODE_ENCODER_MIPI 5
46
47
48/* 45/*
49 * DBI encoder which inherits from mdfld_dsi_encoder 46 * DBI encoder which inherits from mdfld_dsi_encoder
50 */ 47 */
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
index 6e03a91e947e..e685f1217baa 100644
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.c
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
777 /* Create drm encoder object */ 777 /* Create drm encoder object */
778 connector = &dsi_connector->base.base; 778 connector = &dsi_connector->base.base;
779 encoder = &dpi_output->base.base; 779 encoder = &dpi_output->base.base;
780 /*
 781 * On existing hardware this will be a panel of some form;
 782 * if future devices also have HDMI bridges this will need
 783 * revisiting.
784 */
780 drm_encoder_init(dev, 785 drm_encoder_init(dev,
781 encoder, 786 encoder,
782 p_funcs->encoder_funcs, 787 p_funcs->encoder_funcs,
783 DRM_MODE_ENCODER_MIPI); 788 DRM_MODE_ENCODER_LVDS);
784 drm_encoder_helper_add(encoder, 789 drm_encoder_helper_add(encoder,
785 p_funcs->encoder_helper_funcs); 790 p_funcs->encoder_helper_funcs);
786 791
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
index 7536095c30a0..9050c0f78b15 100644
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; 955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
956 956
957 connector = &psb_output->base; 957 connector = &psb_output->base;
958 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); 958 /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
959 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
960 DRM_MODE_CONNECTOR_LVDS);
959 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); 961 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
960 962
961 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 963 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
index 38165e8367e5..09e9687431f1 100644
--- a/drivers/staging/gma500/medfield.h
+++ b/drivers/staging/gma500/medfield.h
@@ -21,8 +21,6 @@
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#define DRM_MODE_ENCODER_MIPI 5
25
26/* Medfield DSI controller registers */ 24/* Medfield DSI controller registers */
27 25
28#define MIPIA_DEVICE_READY_REG 0xb000 26#define MIPIA_DEVICE_READY_REG 0xb000
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index 72f487a2a1b7..fd4732dd783a 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -35,7 +35,6 @@
35 35
36/* Append new drm mode definition here, align with libdrm definition */ 36/* Append new drm mode definition here, align with libdrm definition */
37#define DRM_MODE_SCALE_NO_SCALE 2 37#define DRM_MODE_SCALE_NO_SCALE 2
38#define DRM_MODE_CONNECTOR_MIPI 15
39 38
40enum { 39enum {
41 CHIP_PSB_8108 = 0, /* Poulsbo */ 40 CHIP_PSB_8108 = 0, /* Poulsbo */
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 9c0d2936e486..c3d73f8431ae 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <linux/phy.h> 30#include <linux/phy.h>
30#include <linux/ratelimit.h> 31#include <linux/ratelimit.h>
31#include <net/dst.h> 32#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 970825421884..d0e2d514968a 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
31#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 589a0554332e..3d1279c424a8 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
209 break; 209 break;
210#ifdef CONFIG_OMAP_MCBSP 210#ifdef CONFIG_OMAP_MCBSP
211 case MCBSP_CLK: 211 case MCBSP_CLK:
212 omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
213 omap_mcbsp_request(MCBSP_ID(clk_id)); 212 omap_mcbsp_request(MCBSP_ID(clk_id));
214 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); 213 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
215 break; 214 break;
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 975e34bcd722..1ca66ea9b281 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
604 struct tmem_obj *obj; 604 struct tmem_obj *obj;
605 void *pampd; 605 void *pampd;
606 bool ephemeral = is_ephemeral(pool); 606 bool ephemeral = is_ephemeral(pool);
607 uint32_t ret = -1; 607 int ret = -1;
608 struct tmem_hashbucket *hb; 608 struct tmem_hashbucket *hb;
609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); 609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
610 bool lock_held = false; 610 bool lock_held = false;
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 855a5bb56a47..462fbc20561f 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1158,7 +1158,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1158 size_t clen; 1158 size_t clen;
1159 int ret; 1159 int ret;
1160 unsigned long count; 1160 unsigned long count;
1161 struct page *page = virt_to_page(data); 1161 struct page *page = (struct page *)(data);
1162 struct zcache_client *cli = pool->client; 1162 struct zcache_client *cli = pool->client;
1163 uint16_t client_id = get_client_id_from_client(cli); 1163 uint16_t client_id = get_client_id_from_client(cli);
1164 unsigned long zv_mean_zsize; 1164 unsigned long zv_mean_zsize;
@@ -1227,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1227 int ret = 0; 1227 int ret = 0;
1228 1228
1229 BUG_ON(is_ephemeral(pool)); 1229 BUG_ON(is_ephemeral(pool));
1230 zv_decompress(virt_to_page(data), pampd); 1230 zv_decompress((struct page *)(data), pampd);
1231 return ret; 1231 return ret;
1232} 1232}
1233 1233
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1242 int ret = 0; 1242 int ret = 0;
1243 1243
1244 BUG_ON(!is_ephemeral(pool)); 1244 BUG_ON(!is_ephemeral(pool));
1245 zbud_decompress(virt_to_page(data), pampd); 1245 zbud_decompress((struct page *)(data), pampd);
1246 zbud_free_and_delist((struct zbud_hdr *)pampd); 1246 zbud_free_and_delist((struct zbud_hdr *)pampd);
1247 atomic_dec(&zcache_curr_eph_pampd_count); 1247 atomic_dec(&zcache_curr_eph_pampd_count);
1248 return ret; 1248 return ret;
@@ -1539,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1539 goto out; 1539 goto out;
1540 if (!zcache_freeze && zcache_do_preload(pool) == 0) { 1540 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
1541 /* preload does preempt_disable on success */ 1541 /* preload does preempt_disable on success */
1542 ret = tmem_put(pool, oidp, index, page_address(page), 1542 ret = tmem_put(pool, oidp, index, (char *)(page),
1543 PAGE_SIZE, 0, is_ephemeral(pool)); 1543 PAGE_SIZE, 0, is_ephemeral(pool));
1544 if (ret < 0) { 1544 if (ret < 0) {
1545 if (is_ephemeral(pool)) 1545 if (is_ephemeral(pool))
@@ -1572,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1572 pool = zcache_get_pool_by_id(cli_id, pool_id); 1572 pool = zcache_get_pool_by_id(cli_id, pool_id);
1573 if (likely(pool != NULL)) { 1573 if (likely(pool != NULL)) {
1574 if (atomic_read(&pool->obj_count) > 0) 1574 if (atomic_read(&pool->obj_count) > 0)
1575 ret = tmem_get(pool, oidp, index, page_address(page), 1575 ret = tmem_get(pool, oidp, index, (char *)(page),
1576 &size, 0, is_ephemeral(pool)); 1576 &size, 0, is_ephemeral(pool));
1577 zcache_put_pool(pool); 1577 zcache_put_pool(pool);
1578 } 1578 }
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c24fb10de60b..6a4ea29c2f36 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
2243 case 0: 2243 case 0:
2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
2246 return 0;
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2246 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2247 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
2249 hdr->begrun, hdr->runlength); 2248 hdr->begrun, hdr->runlength);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f095e65b1ccf..f1643dbf6a92 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
268 ISCSI_TCP); 268 ISCSI_TCP);
269 if (IS_ERR(tpg_np)) { 269 if (IS_ERR(tpg_np)) {
270 iscsit_put_tpg(tpg); 270 iscsit_put_tpg(tpg);
271 return ERR_PTR(PTR_ERR(tpg_np)); 271 return ERR_CAST(tpg_np);
272 } 272 }
273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); 273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
274 274
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
1285 1285
1286 tiqn = iscsit_add_tiqn((unsigned char *)name); 1286 tiqn = iscsit_add_tiqn((unsigned char *)name);
1287 if (IS_ERR(tiqn)) 1287 if (IS_ERR(tiqn))
1288 return ERR_PTR(PTR_ERR(tiqn)); 1288 return ERR_CAST(tiqn);
1289 /* 1289 /*
1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. 1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1291 */ 1291 */
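ERR_CAST() in the two hunks above is the idiomatic form for propagating an encoded error pointer across pointer types: it is equivalent to ERR_PTR(PTR_ERR(ptr)) but skips the pointless round trip through a long. A minimal userspace model of the helpers from include/linux/err.h (simplified, without the sparse/__must_check annotations, and with hypothetical tiqn/wwn types standing in for the iSCSI target structures):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified userspace models of the kernel helpers. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct tiqn { int id; };
struct wwn  { int id; };

/* Callee that can fail with an encoded error pointer. */
static struct tiqn *add_tiqn(int fail)
{
    static struct tiqn t = { 42 };
    return fail ? ERR_PTR(-ENOMEM) : &t;
}

/* Caller propagating the error as a different pointer type, as the
 * lio_target_call_coreaddtiqn() hunk now does. */
static struct wwn *add_wwn(int fail)
{
    struct tiqn *tiqn = add_tiqn(fail);

    if (IS_ERR(tiqn))
        return ERR_CAST(tiqn);   /* same bits as ERR_PTR(PTR_ERR(tiqn)) */
    return NULL;                 /* real success path elided in this sketch */
}

int main(void)
{
    struct wwn *w = add_wwn(1);

    printf("IS_ERR=%d err=%ld\n", IS_ERR(w), IS_ERR(w) ? PTR_ERR(w) : 0L);
    return 0;
}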
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 980650792cf6..c4c68da3e500 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
834 */ 834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) { 836 ooo_list) {
837 while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 837 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838 continue; 838 continue;
839 839
840 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
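The one-word change above fixes a continue-binding bug: inside list_for_each_entry() (a for loop underneath), `while (cond) continue;` makes the `continue` re-test the inner while, whose condition never changes, so the walk spins forever instead of advancing to the next entry, whereas `if (cond) continue;` advances the outer loop as intended. A tiny stand-alone reproduction, with a sorted array standing in for the ooo_cmdsn list:

#include <stdio.h>

/* Find the insertion point for 'key' in an ascending array, mirroring the
 * "scan until ooo_tmp->cmdsn >= ooo_cmdsn->cmdsn" walk in the patch. */
static int insert_pos_fixed(const int *v, int n, int key)
{
    int i;

    for (i = 0; i < n; i++) {
        if (v[i] < key)     /* patched form: advances the outer loop */
            continue;
        break;
    }
    return i;
}

static int insert_pos_buggy(const int *v, int n, int key)
{
    int i, guard = 0;

    for (i = 0; i < n; i++) {
        /* Original form: 'continue' binds to this while, whose condition
         * never changes, so the walk never advances. Capped here so the
         * sketch terminates. */
        while (v[i] < key) {
            if (++guard > 5)
                return -1;  /* would spin forever in the real code */
            continue;
        }
        break;
    }
    return i;
}

int main(void)
{
    int cmdsn[] = { 3, 5, 9, 12 };

    printf("fixed: insert 7 at index %d\n", insert_pos_fixed(cmdsn, 4, 7));
    printf("buggy: insert 7 at index %d (-1 = stuck)\n",
           insert_pos_buggy(cmdsn, 4, 7));
    return 0;
}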
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bcaf82f47037..daad362a93ce 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1013 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1013 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1014 goto new_sess_out; 1014 goto new_sess_out;
1015 } 1015 }
1016#if 0 1016 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1017 if (!iscsi_ntop6((const unsigned char *) 1017 &sock_in6.sin6_addr.in6_u);
1018 &sock_in6.sin6_addr.in6_u, 1018 conn->login_port = ntohs(sock_in6.sin6_port);
1019 (char *)&conn->ipv6_login_ip[0],
1020 IPV6_ADDRESS_SPACE)) {
1021 pr_err("iscsi_ntop6() failed\n");
1022 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1023 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1024 goto new_sess_out;
1025 }
1026#else
1027 pr_debug("Skipping iscsi_ntop6()\n");
1028#endif
1029 } else { 1019 } else {
1030 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1020 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1031 1021
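The removed #if 0 block open-coded an IPv6 formatter; the replacement leans on the kernel's %pI6c printk extension, which prints an in6_addr in compressed textual form. The userspace analogue of that formatting is inet_ntop(), shown here with a hypothetical peer address standing in for sock_in6.sin6_addr:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
    struct in6_addr a6;
    char buf[INET6_ADDRSTRLEN];

    /* Hypothetical peer address. */
    if (inet_pton(AF_INET6, "fe80:0:0:0:0:0:0:1", &a6) != 1)
        return 1;

    /* Compressed textual form, comparable to what %pI6c produces. */
    inet_ntop(AF_INET6, &a6, buf, sizeof(buf));
    printf("login_ip = %s\n", buf);    /* -> fe80::1 */
    return 0;
}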
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 252e246cf51e..5b773160200f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
545 struct iscsi_param_list *src_param_list, 545 struct iscsi_param_list *src_param_list,
546 int leading) 546 int leading)
547{ 547{
548 struct iscsi_param *new_param = NULL, *param = NULL; 548 struct iscsi_param *param = NULL;
549 struct iscsi_param *new_param = NULL;
549 struct iscsi_param_list *param_list = NULL; 550 struct iscsi_param_list *param_list = NULL;
550 551
551 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 552 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
552 if (!param_list) { 553 if (!param_list) {
553 pr_err("Unable to allocate memory for" 554 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
554 " struct iscsi_param_list.\n");
555 goto err_out; 555 goto err_out;
556 } 556 }
557 INIT_LIST_HEAD(&param_list->param_list); 557 INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(
567 567
568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); 568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
569 if (!new_param) { 569 if (!new_param) {
570 pr_err("Unable to allocate memory for" 570 pr_err("Unable to allocate memory for struct iscsi_param.\n");
571 " struct iscsi_param.\n"); 571 goto err_out;
572 }
573
574 new_param->name = kstrdup(param->name, GFP_KERNEL);
575 new_param->value = kstrdup(param->value, GFP_KERNEL);
576 if (!new_param->value || !new_param->name) {
577 kfree(new_param->value);
578 kfree(new_param->name);
579 kfree(new_param);
580 pr_err("Unable to allocate memory for parameter name/value.\n");
572 goto err_out; 581 goto err_out;
573 } 582 }
574 583
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
580 new_param->use = param->use; 589 new_param->use = param->use;
581 new_param->type_range = param->type_range; 590 new_param->type_range = param->type_range;
582 591
583 new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
584 if (!new_param->name) {
585 pr_err("Unable to allocate memory for"
586 " parameter name.\n");
587 goto err_out;
588 }
589
590 new_param->value = kzalloc(strlen(param->value) + 1,
591 GFP_KERNEL);
592 if (!new_param->value) {
593 pr_err("Unable to allocate memory for"
594 " parameter value.\n");
595 goto err_out;
596 }
597
598 memcpy(new_param->name, param->name, strlen(param->name));
599 new_param->name[strlen(param->name)] = '\0';
600 memcpy(new_param->value, param->value, strlen(param->value));
601 new_param->value[strlen(param->value)] = '\0';
602
603 list_add_tail(&new_param->p_list, &param_list->param_list); 592 list_add_tail(&new_param->p_list, &param_list->param_list);
604 } 593 }
605 594
606 if (!list_empty(&param_list->param_list)) 595 if (!list_empty(&param_list->param_list)) {
607 *dst_param_list = param_list; 596 *dst_param_list = param_list;
608 else { 597 } else {
609 pr_err("No parameters allocated.\n"); 598 pr_err("No parameters allocated.\n");
610 goto err_out; 599 goto err_out;
611 } 600 }
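The conversion above replaces the open-coded allocate-plus-memcpy-plus-terminate sequence with kstrdup() and handles a partial failure right at the allocation site. The same shape in portable C, with strdup() as the userspace stand-in for kstrdup() and hypothetical param fields echoing the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct param {
    char *name;
    char *value;
};

/* Copy one parameter; mirrors the kstrdup()-based error handling in the
 * patched iscsi_copy_param_list(). */
static struct param *copy_param(const struct param *src)
{
    struct param *new_param = calloc(1, sizeof(*new_param));

    if (!new_param)
        return NULL;

    new_param->name  = strdup(src->name);
    new_param->value = strdup(src->value);
    if (!new_param->name || !new_param->value) {
        /* free(NULL) is a no-op, so both frees are always safe. */
        free(new_param->name);
        free(new_param->value);
        free(new_param);
        return NULL;
    }
    return new_param;
}

int main(void)
{
    struct param src = { "MaxRecvDataSegmentLength", "8192" };
    struct param *p = copy_param(&src);

    if (p) {
        printf("%s=%s\n", p->name, p->value);
        free(p->name);
        free(p->value);
        free(p);
    }
    return 0;
}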
@@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
1441 u8 DataSequenceInOrder = 0; 1430 u8 DataSequenceInOrder = 0;
1442 u8 ErrorRecoveryLevel = 0, SessionType = 0; 1431 u8 ErrorRecoveryLevel = 0, SessionType = 0;
1443 u8 IFMarker = 0, OFMarker = 0; 1432 u8 IFMarker = 0, OFMarker = 0;
1444 u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; 1433 u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
1445 u32 FirstBurstLength = 0, MaxBurstLength = 0; 1434 u32 FirstBurstLength = 0, MaxBurstLength = 0;
1446 struct iscsi_param *param = NULL; 1435 struct iscsi_param *param = NULL;
1447 1436
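The iscsi_copy_param_list() hunks above replace the open-coded kzalloc(strlen() + 1) + memcpy + manual NUL termination with kstrdup(), and collapse the separate name/value failure paths into a single cleanup before the goto. A minimal sketch of that duplication pattern, not taken from the patch and using hypothetical names (struct kv, copy_kv):

    #include <linux/slab.h>
    #include <linux/string.h>

    struct kv {
            char *name;
            char *value;
    };

    /* Duplicate both strings with kstrdup(); on partial failure free
     * whatever was allocated so the caller never sees a half-built kv. */
    static struct kv *copy_kv(const char *name, const char *value)
    {
            struct kv *kv = kzalloc(sizeof(*kv), GFP_KERNEL);

            if (!kv)
                    return NULL;

            kv->name = kstrdup(name, GFP_KERNEL);
            kv->value = kstrdup(value, GFP_KERNEL);
            if (!kv->name || !kv->value) {
                    kfree(kv->name);        /* kfree(NULL) is a no-op */
                    kfree(kv->value);
                    kfree(kv);
                    return NULL;
            }
            return kv;
    }

Keeping the pr_err() strings on a single line, as the patch also does, leaves the messages greppable.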
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a1acb0167902..f00137f377b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
243 if (!cmd->tmr_req) { 243 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for" 244 pr_err("Unable to allocate memory for"
245 " Task Management command!\n"); 245 " Task Management command!\n");
246 return NULL; 246 goto out;
247 } 247 }
248 /* 248 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of 249 * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
298 return cmd; 298 return cmd;
299out: 299out:
300 iscsit_release_cmd(cmd); 300 iscsit_release_cmd(cmd);
301 if (se_cmd)
302 transport_free_se_cmd(se_cmd);
303 return NULL; 301 return NULL;
304} 302}
305 303
@@ -877,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
877} 875}
878 876
879/* 877/*
880 * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
881 * array counts needed for sync and steering.
882 */
883static int iscsit_determine_sync_and_steering_counts(
884 struct iscsi_conn *conn,
885 struct iscsi_data_count *count)
886{
887 u32 length = count->data_length;
888 u32 marker, markint;
889
890 count->sync_and_steering = 1;
891
892 marker = (count->type == ISCSI_RX_DATA) ?
893 conn->of_marker : conn->if_marker;
894 markint = (count->type == ISCSI_RX_DATA) ?
895 (conn->conn_ops->OFMarkInt * 4) :
896 (conn->conn_ops->IFMarkInt * 4);
897 count->ss_iov_count = count->iov_count;
898
899 while (length > 0) {
900 if (length >= marker) {
901 count->ss_iov_count += 3;
902 count->ss_marker_count += 2;
903
904 length -= marker;
905 marker = markint;
906 } else
907 length = 0;
908 }
909
910 return 0;
911}
912
913/*
914 * Setup conn->if_marker and conn->of_marker values based upon 878 * Setup conn->if_marker and conn->of_marker values based upon
915 * the initial marker-less interval. (see iSCSI v19 A.2) 879 * the initial marker-less interval. (see iSCSI v19 A.2)
916 */ 880 */
@@ -1292,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
1292 struct kvec iov; 1256 struct kvec iov;
1293 u32 tx_hdr_size, data_len; 1257 u32 tx_hdr_size, data_len;
1294 u32 offset = cmd->first_data_sg_off; 1258 u32 offset = cmd->first_data_sg_off;
1295 int tx_sent; 1259 int tx_sent, iov_off;
1296 1260
1297send_hdr: 1261send_hdr:
1298 tx_hdr_size = ISCSI_HDR_LEN; 1262 tx_hdr_size = ISCSI_HDR_LEN;
@@ -1312,9 +1276,19 @@ send_hdr:
1312 } 1276 }
1313 1277
1314 data_len = cmd->tx_size - tx_hdr_size - cmd->padding; 1278 data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1315 if (conn->conn_ops->DataDigest) 1279 /*
1280 * Set iov_off used by padding and data digest tx_data() calls below
1281 * in order to determine proper offset into cmd->iov_data[]
1282 */
1283 if (conn->conn_ops->DataDigest) {
1316 data_len -= ISCSI_CRC_LEN; 1284 data_len -= ISCSI_CRC_LEN;
1317 1285 if (cmd->padding)
1286 iov_off = (cmd->iov_data_count - 2);
1287 else
1288 iov_off = (cmd->iov_data_count - 1);
1289 } else {
1290 iov_off = (cmd->iov_data_count - 1);
1291 }
1318 /* 1292 /*
1319 * Perform sendpage() for each page in the scatterlist 1293 * Perform sendpage() for each page in the scatterlist
1320 */ 1294 */
@@ -1343,8 +1317,7 @@ send_pg:
1343 1317
1344send_padding: 1318send_padding:
1345 if (cmd->padding) { 1319 if (cmd->padding) {
1346 struct kvec *iov_p = 1320 struct kvec *iov_p = &cmd->iov_data[iov_off++];
1347 &cmd->iov_data[cmd->iov_data_count-1];
1348 1321
1349 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1322 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1350 if (cmd->padding != tx_sent) { 1323 if (cmd->padding != tx_sent) {
@@ -1358,8 +1331,7 @@ send_padding:
1358 1331
1359send_datacrc: 1332send_datacrc:
1360 if (conn->conn_ops->DataDigest) { 1333 if (conn->conn_ops->DataDigest) {
1361 struct kvec *iov_d = 1334 struct kvec *iov_d = &cmd->iov_data[iov_off];
1362 &cmd->iov_data[cmd->iov_data_count];
1363 1335
1364 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1336 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1365 if (ISCSI_CRC_LEN != tx_sent) { 1337 if (ISCSI_CRC_LEN != tx_sent) {
@@ -1433,8 +1405,7 @@ static int iscsit_do_rx_data(
1433 struct iscsi_data_count *count) 1405 struct iscsi_data_count *count)
1434{ 1406{
1435 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; 1407 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1436 u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; 1408 struct kvec *iov_p;
1437 struct kvec iov[count->ss_iov_count], *iov_p;
1438 struct msghdr msg; 1409 struct msghdr msg;
1439 1410
1440 if (!conn || !conn->sock || !conn->conn_ops) 1411 if (!conn || !conn->sock || !conn->conn_ops)
@@ -1442,93 +1413,8 @@ static int iscsit_do_rx_data(
1442 1413
1443 memset(&msg, 0, sizeof(struct msghdr)); 1414 memset(&msg, 0, sizeof(struct msghdr));
1444 1415
1445 if (count->sync_and_steering) { 1416 iov_p = count->iov;
1446 int size = 0; 1417 iov_len = count->iov_count;
1447 u32 i, orig_iov_count = 0;
1448 u32 orig_iov_len = 0, orig_iov_loc = 0;
1449 u32 iov_count = 0, per_iov_bytes = 0;
1450 u32 *rx_marker, old_rx_marker = 0;
1451 struct kvec *iov_record;
1452
1453 memset(&rx_marker_val, 0,
1454 count->ss_marker_count * sizeof(u32));
1455 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1456
1457 iov_record = count->iov;
1458 orig_iov_count = count->iov_count;
1459 rx_marker = &conn->of_marker;
1460
1461 i = 0;
1462 size = data;
1463 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1464 while (size > 0) {
1465 pr_debug("rx_data: #1 orig_iov_len %u,"
1466 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1467 pr_debug("rx_data: #2 rx_marker %u, size"
1468 " %u\n", *rx_marker, size);
1469
1470 if (orig_iov_len >= *rx_marker) {
1471 iov[iov_count].iov_len = *rx_marker;
1472 iov[iov_count++].iov_base =
1473 (iov_record[orig_iov_loc].iov_base +
1474 per_iov_bytes);
1475
1476 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1477 iov[iov_count++].iov_base =
1478 &rx_marker_val[rx_marker_iov++];
1479 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1480 iov[iov_count++].iov_base =
1481 &rx_marker_val[rx_marker_iov++];
1482 old_rx_marker = *rx_marker;
1483
1484 /*
1485 * OFMarkInt is in 32-bit words.
1486 */
1487 *rx_marker = (conn->conn_ops->OFMarkInt * 4);
1488 size -= old_rx_marker;
1489 orig_iov_len -= old_rx_marker;
1490 per_iov_bytes += old_rx_marker;
1491
1492 pr_debug("rx_data: #3 new_rx_marker"
1493 " %u, size %u\n", *rx_marker, size);
1494 } else {
1495 iov[iov_count].iov_len = orig_iov_len;
1496 iov[iov_count++].iov_base =
1497 (iov_record[orig_iov_loc].iov_base +
1498 per_iov_bytes);
1499
1500 per_iov_bytes = 0;
1501 *rx_marker -= orig_iov_len;
1502 size -= orig_iov_len;
1503
1504 if (size)
1505 orig_iov_len =
1506 iov_record[++orig_iov_loc].iov_len;
1507
1508 pr_debug("rx_data: #4 new_rx_marker"
1509 " %u, size %u\n", *rx_marker, size);
1510 }
1511 }
1512 data += (rx_marker_iov * (MARKER_SIZE / 2));
1513
1514 iov_p = &iov[0];
1515 iov_len = iov_count;
1516
1517 if (iov_count > count->ss_iov_count) {
1518 pr_err("iov_count: %d, count->ss_iov_count:"
1519 " %d\n", iov_count, count->ss_iov_count);
1520 return -1;
1521 }
1522 if (rx_marker_iov > count->ss_marker_count) {
1523 pr_err("rx_marker_iov: %d, count->ss_marker"
1524 "_count: %d\n", rx_marker_iov,
1525 count->ss_marker_count);
1526 return -1;
1527 }
1528 } else {
1529 iov_p = count->iov;
1530 iov_len = count->iov_count;
1531 }
1532 1418
1533 while (total_rx < data) { 1419 while (total_rx < data) {
1534 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, 1420 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1543,16 +1429,6 @@ static int iscsit_do_rx_data(
1543 rx_loop, total_rx, data); 1429 rx_loop, total_rx, data);
1544 } 1430 }
1545 1431
1546 if (count->sync_and_steering) {
1547 int j;
1548 for (j = 0; j < rx_marker_iov; j++) {
1549 pr_debug("rx_data: #5 j: %d, offset: %d\n",
1550 j, rx_marker_val[j]);
1551 conn->of_marker_offset = rx_marker_val[j];
1552 }
1553 total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
1554 }
1555
1556 return total_rx; 1432 return total_rx;
1557} 1433}
1558 1434
@@ -1561,8 +1437,7 @@ static int iscsit_do_tx_data(
1561 struct iscsi_data_count *count) 1437 struct iscsi_data_count *count)
1562{ 1438{
1563 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1439 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1564 u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; 1440 struct kvec *iov_p;
1565 struct kvec iov[count->ss_iov_count], *iov_p;
1566 struct msghdr msg; 1441 struct msghdr msg;
1567 1442
1568 if (!conn || !conn->sock || !conn->conn_ops) 1443 if (!conn || !conn->sock || !conn->conn_ops)
@@ -1575,98 +1450,8 @@ static int iscsit_do_tx_data(
1575 1450
1576 memset(&msg, 0, sizeof(struct msghdr)); 1451 memset(&msg, 0, sizeof(struct msghdr));
1577 1452
1578 if (count->sync_and_steering) { 1453 iov_p = count->iov;
1579 int size = 0; 1454 iov_len = count->iov_count;
1580 u32 i, orig_iov_count = 0;
1581 u32 orig_iov_len = 0, orig_iov_loc = 0;
1582 u32 iov_count = 0, per_iov_bytes = 0;
1583 u32 *tx_marker, old_tx_marker = 0;
1584 struct kvec *iov_record;
1585
1586 memset(&tx_marker_val, 0,
1587 count->ss_marker_count * sizeof(u32));
1588 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1589
1590 iov_record = count->iov;
1591 orig_iov_count = count->iov_count;
1592 tx_marker = &conn->if_marker;
1593
1594 i = 0;
1595 size = data;
1596 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1597 while (size > 0) {
1598 pr_debug("tx_data: #1 orig_iov_len %u,"
1599 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1600 pr_debug("tx_data: #2 tx_marker %u, size"
1601 " %u\n", *tx_marker, size);
1602
1603 if (orig_iov_len >= *tx_marker) {
1604 iov[iov_count].iov_len = *tx_marker;
1605 iov[iov_count++].iov_base =
1606 (iov_record[orig_iov_loc].iov_base +
1607 per_iov_bytes);
1608
1609 tx_marker_val[tx_marker_iov] =
1610 (size - *tx_marker);
1611 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1612 iov[iov_count++].iov_base =
1613 &tx_marker_val[tx_marker_iov++];
1614 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1615 iov[iov_count++].iov_base =
1616 &tx_marker_val[tx_marker_iov++];
1617 old_tx_marker = *tx_marker;
1618
1619 /*
1620 * IFMarkInt is in 32-bit words.
1621 */
1622 *tx_marker = (conn->conn_ops->IFMarkInt * 4);
1623 size -= old_tx_marker;
1624 orig_iov_len -= old_tx_marker;
1625 per_iov_bytes += old_tx_marker;
1626
1627 pr_debug("tx_data: #3 new_tx_marker"
1628 " %u, size %u\n", *tx_marker, size);
1629 pr_debug("tx_data: #4 offset %u\n",
1630 tx_marker_val[tx_marker_iov-1]);
1631 } else {
1632 iov[iov_count].iov_len = orig_iov_len;
1633 iov[iov_count++].iov_base
1634 = (iov_record[orig_iov_loc].iov_base +
1635 per_iov_bytes);
1636
1637 per_iov_bytes = 0;
1638 *tx_marker -= orig_iov_len;
1639 size -= orig_iov_len;
1640
1641 if (size)
1642 orig_iov_len =
1643 iov_record[++orig_iov_loc].iov_len;
1644
1645 pr_debug("tx_data: #5 new_tx_marker"
1646 " %u, size %u\n", *tx_marker, size);
1647 }
1648 }
1649
1650 data += (tx_marker_iov * (MARKER_SIZE / 2));
1651
1652 iov_p = &iov[0];
1653 iov_len = iov_count;
1654
1655 if (iov_count > count->ss_iov_count) {
1656 pr_err("iov_count: %d, count->ss_iov_count:"
1657 " %d\n", iov_count, count->ss_iov_count);
1658 return -1;
1659 }
1660 if (tx_marker_iov > count->ss_marker_count) {
1661 pr_err("tx_marker_iov: %d, count->ss_marker"
1662 "_count: %d\n", tx_marker_iov,
1663 count->ss_marker_count);
1664 return -1;
1665 }
1666 } else {
1667 iov_p = count->iov;
1668 iov_len = count->iov_count;
1669 }
1670 1455
1671 while (total_tx < data) { 1456 while (total_tx < data) {
1672 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, 1457 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1681,9 +1466,6 @@ static int iscsit_do_tx_data(
1681 tx_loop, total_tx, data); 1466 tx_loop, total_tx, data);
1682 } 1467 }
1683 1468
1684 if (count->sync_and_steering)
1685 total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
1686
1687 return total_tx; 1469 return total_tx;
1688} 1470}
1689 1471
@@ -1704,12 +1486,6 @@ int rx_data(
1704 c.data_length = data; 1486 c.data_length = data;
1705 c.type = ISCSI_RX_DATA; 1487 c.type = ISCSI_RX_DATA;
1706 1488
1707 if (conn->conn_ops->OFMarker &&
1708 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1709 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1710 return -1;
1711 }
1712
1713 return iscsit_do_rx_data(conn, &c); 1489 return iscsit_do_rx_data(conn, &c);
1714} 1490}
1715 1491
@@ -1730,12 +1506,6 @@ int tx_data(
1730 c.data_length = data; 1506 c.data_length = data;
1731 c.type = ISCSI_TX_DATA; 1507 c.type = ISCSI_TX_DATA;
1732 1508
1733 if (conn->conn_ops->IFMarker &&
1734 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1735 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1736 return -1;
1737 }
1738
1739 return iscsit_do_tx_data(conn, &c); 1509 return iscsit_do_tx_data(conn, &c);
1740} 1510}
1741 1511
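With the fixed-interval sync-and-steering marker code deleted above, iscsit_do_rx_data() and iscsit_do_tx_data() reduce to a plain retry loop over the caller-supplied kvec array using kernel_recvmsg()/kernel_sendmsg(). A rough sketch of that loop shape for the transmit side; send_all() is a hypothetical helper, and like the driver's loop it does not advance the kvec after a short send:

    #include <linux/errno.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/string.h>
    #include <linux/uio.h>

    /* Push 'len' bytes described by vec[0..nvec) down a kernel socket. */
    static int send_all(struct socket *sock, struct kvec *vec, int nvec, int len)
    {
            struct msghdr msg;
            int sent, total = 0;

            memset(&msg, 0, sizeof(msg));

            while (total < len) {
                    sent = kernel_sendmsg(sock, &msg, vec, nvec, len);
                    if (sent <= 0)
                            return sent ? sent : -ECONNRESET;
                    total += sent;
            }
            return total;
    }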
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8ae09a1bdf74..f04d4ef99dca 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/ctype.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28#include <scsi/scsi.h> 29#include <scsi/scsi.h>
29 30
@@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
67{ 68{
68 struct se_lun *lun = cmd->se_lun; 69 struct se_lun *lun = cmd->se_lun;
69 struct se_device *dev = cmd->se_dev; 70 struct se_device *dev = cmd->se_dev;
71 struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
70 unsigned char *buf; 72 unsigned char *buf;
71 73
72 /* 74 /*
@@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
81 83
82 buf = transport_kmap_first_data_page(cmd); 84 buf = transport_kmap_first_data_page(cmd);
83 85
84 buf[0] = dev->transport->get_device_type(dev); 86 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
85 if (buf[0] == TYPE_TAPE) 87 buf[0] = 0x3f; /* Not connected */
86 buf[1] = 0x80; 88 } else {
89 buf[0] = dev->transport->get_device_type(dev);
90 if (buf[0] == TYPE_TAPE)
91 buf[1] = 0x80;
92 }
87 buf[2] = dev->transport->get_device_rev(dev); 93 buf[2] = dev->transport->get_device_rev(dev);
88 94
89 /* 95 /*
@@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
149 return 0; 155 return 0;
150} 156}
151 157
158static void
159target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
160{
161 unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
162 unsigned char *buf = buf_off;
163 int cnt = 0, next = 1;
164 /*
165 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
166 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
167 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
168 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
169 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
170 * per device uniqueness.
171 */
172 while (*p != '\0') {
173 if (cnt >= 13)
174 break;
175 if (!isxdigit(*p)) {
176 p++;
177 continue;
178 }
179 if (next != 0) {
180 buf[cnt++] |= hex_to_bin(*p++);
181 next = 0;
182 } else {
183 buf[cnt] = hex_to_bin(*p++) << 4;
184 next = 1;
185 }
186 }
187}
188
152/* 189/*
153 * Device identification VPD, for a complete list of 190 * Device identification VPD, for a complete list of
154 * DESIGNATOR TYPEs see spc4r17 Table 459. 191 * DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
214 * VENDOR_SPECIFIC_IDENTIFIER and 251 * VENDOR_SPECIFIC_IDENTIFIER and
215 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION 252 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
216 */ 253 */
217 buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); 254 target_parse_naa_6h_vendor_specific(dev, &buf[off]);
218 hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
219 255
220 len = 20; 256 len = 20;
221 off = (len + 4); 257 off = (len + 4);
@@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
915 length += target_modesense_control(dev, &buf[offset+length]); 951 length += target_modesense_control(dev, &buf[offset+length]);
916 break; 952 break;
917 default: 953 default:
918 pr_err("Got Unknown Mode Page: 0x%02x\n", 954 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
919 cdb[2] & 0x3f); 955 cdb[2] & 0x3f, cdb[3]);
920 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 956 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
921 } 957 }
922 offset += length; 958 offset += length;
@@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task)
1072 size -= 16; 1108 size -= 16;
1073 } 1109 }
1074 1110
1075 task->task_scsi_status = GOOD;
1076 transport_complete_task(task, 1);
1077err: 1111err:
1078 transport_kunmap_first_data_page(cmd); 1112 transport_kunmap_first_data_page(cmd);
1079 1113
@@ -1085,24 +1119,17 @@ err:
1085 * Note this is not used for TCM/pSCSI passthrough 1119 * Note this is not used for TCM/pSCSI passthrough
1086 */ 1120 */
1087static int 1121static int
1088target_emulate_write_same(struct se_task *task, int write_same32) 1122target_emulate_write_same(struct se_task *task, u32 num_blocks)
1089{ 1123{
1090 struct se_cmd *cmd = task->task_se_cmd; 1124 struct se_cmd *cmd = task->task_se_cmd;
1091 struct se_device *dev = cmd->se_dev; 1125 struct se_device *dev = cmd->se_dev;
1092 sector_t range; 1126 sector_t range;
1093 sector_t lba = cmd->t_task_lba; 1127 sector_t lba = cmd->t_task_lba;
1094 unsigned int num_blocks;
1095 int ret; 1128 int ret;
1096 /* 1129 /*
1097 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict 1130 * Use the explicit range when non zero is supplied, otherwise calculate
1098 * range when non zero is supplied, otherwise calculate the remaining 1131 * the remaining range based on ->get_blocks() - starting LBA.
1099 * range based on ->get_blocks() - starting LBA.
1100 */ 1132 */
1101 if (write_same32)
1102 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1103 else
1104 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1105
1106 if (num_blocks != 0) 1133 if (num_blocks != 0)
1107 range = num_blocks; 1134 range = num_blocks;
1108 else 1135 else
@@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
1117 return ret; 1144 return ret;
1118 } 1145 }
1119 1146
1120 task->task_scsi_status = GOOD;
1121 transport_complete_task(task, 1);
1122 return 0; 1147 return 0;
1123} 1148}
1124 1149
@@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task)
1165 } 1190 }
1166 ret = target_emulate_unmap(task); 1191 ret = target_emulate_unmap(task);
1167 break; 1192 break;
1193 case WRITE_SAME:
1194 if (!dev->transport->do_discard) {
1195 pr_err("WRITE_SAME emulation not supported"
1196 " for: %s\n", dev->transport->name);
1197 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1198 }
1199 ret = target_emulate_write_same(task,
1200 get_unaligned_be16(&cmd->t_task_cdb[7]));
1201 break;
1168 case WRITE_SAME_16: 1202 case WRITE_SAME_16:
1169 if (!dev->transport->do_discard) { 1203 if (!dev->transport->do_discard) {
1170 pr_err("WRITE_SAME_16 emulation not supported" 1204 pr_err("WRITE_SAME_16 emulation not supported"
1171 " for: %s\n", dev->transport->name); 1205 " for: %s\n", dev->transport->name);
1172 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1206 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1173 } 1207 }
1174 ret = target_emulate_write_same(task, 0); 1208 ret = target_emulate_write_same(task,
1209 get_unaligned_be32(&cmd->t_task_cdb[10]));
1175 break; 1210 break;
1176 case VARIABLE_LENGTH_CMD: 1211 case VARIABLE_LENGTH_CMD:
1177 service_action = 1212 service_action =
@@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task)
1184 dev->transport->name); 1219 dev->transport->name);
1185 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1220 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1186 } 1221 }
1187 ret = target_emulate_write_same(task, 1); 1222 ret = target_emulate_write_same(task,
1223 get_unaligned_be32(&cmd->t_task_cdb[28]));
1188 break; 1224 break;
1189 default: 1225 default:
1190 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" 1226 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1255,14 @@ transport_emulate_control_cdb(struct se_task *task)
1219 1255
1220 if (ret < 0) 1256 if (ret < 0)
1221 return ret; 1257 return ret;
1222 task->task_scsi_status = GOOD; 1258 /*
1223 transport_complete_task(task, 1); 1259 * Handle the successful completion here unless a caller
1260 * has explicitly requested an asynchronous completion.
1261 */
1262 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
1263 task->task_scsi_status = GOOD;
1264 transport_complete_task(task, 1);
1265 }
1224 1266
1225 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1267 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
1226} 1268}
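target_parse_naa_6h_vendor_specific(), added above, walks the unit serial string, skips non-hex characters with isxdigit(), and packs each remaining digit into half a byte with hex_to_bin(); it starts on the low nibble because the first vendor-specific nibble shares byte 3 with the NAA designator header. The same nibble-packing idea in a stand-alone, plain-C form (pack_hex() is hypothetical and, unlike the driver, fills the high nibble first):

    #include <ctype.h>
    #include <stdio.h>

    /* Pack the hex digits of 's' into 'out', two digits per byte,
     * ignoring non-hex characters; returns the number of bytes used. */
    static int pack_hex(const char *s, unsigned char *out, int max)
    {
            int n = 0, hi = 1;

            for (; *s && n < max; s++) {
                    int v;

                    if (!isxdigit((unsigned char)*s))
                            continue;
                    v = isdigit((unsigned char)*s) ? *s - '0' :
                        tolower((unsigned char)*s) - 'a' + 10;
                    if (hi) {
                            out[n] = v << 4;
                            hi = 0;
                    } else {
                            out[n++] |= v;
                            hi = 1;
                    }
            }
            return hi ? n : n + 1;
    }

    int main(void)
    {
            unsigned char buf[8] = { 0 };
            int len = pack_hex("00-11-22-33", buf, sizeof(buf));

            printf("%d bytes, buf[1]=0x%02x\n", len, buf[1]); /* 4 bytes, 0x11 */
            return 0;
    }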
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c993e65..ca6e4a4df134 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
472 struct se_dev_entry *deve; 472 struct se_dev_entry *deve;
473 u32 i; 473 u32 i;
474 474
475 spin_lock_bh(&tpg->acl_node_lock); 475 spin_lock_irq(&tpg->acl_node_lock);
476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
477 spin_unlock_bh(&tpg->acl_node_lock); 477 spin_unlock_irq(&tpg->acl_node_lock);
478 478
479 spin_lock_irq(&nacl->device_list_lock); 479 spin_lock_irq(&nacl->device_list_lock);
480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
491 } 491 }
492 spin_unlock_irq(&nacl->device_list_lock); 492 spin_unlock_irq(&nacl->device_list_lock);
493 493
494 spin_lock_bh(&tpg->acl_node_lock); 494 spin_lock_irq(&tpg->acl_node_lock);
495 } 495 }
496 spin_unlock_bh(&tpg->acl_node_lock); 496 spin_unlock_irq(&tpg->acl_node_lock);
497} 497}
498 498
499static struct se_port *core_alloc_port(struct se_device *dev) 499static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
839 return ret; 839 return ret;
840} 840}
841 841
842u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
843{
844 u32 tmp, aligned_max_sectors;
845 /*
846 * Limit max_sectors to a PAGE_SIZE aligned value for modern
847 * transport_allocate_data_tasks() operation.
848 */
849 tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
850 aligned_max_sectors = (tmp / block_size);
851 if (max_sectors != aligned_max_sectors) {
852 printk(KERN_INFO "Rounding down aligned max_sectors from %u"
853 " to %u\n", max_sectors, aligned_max_sectors);
854 return aligned_max_sectors;
855 }
856
857 return max_sectors;
858}
859
842void se_dev_set_default_attribs( 860void se_dev_set_default_attribs(
843 struct se_device *dev, 861 struct se_device *dev,
844 struct se_dev_limits *dev_limits) 862 struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
878 * max_sectors is based on subsystem plugin dependent requirements. 896 * max_sectors is based on subsystem plugin dependent requirements.
879 */ 897 */
880 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 898 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
899 /*
900 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
901 */
902 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
903 limits->logical_block_size);
881 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 904 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
882 /* 905 /*
883 * Set optimal_sectors from max_sectors, which can be lowered via 906 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1242 return -EINVAL; 1265 return -EINVAL;
1243 } 1266 }
1244 } 1267 }
1268 /*
1269 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1270 */
1271 max_sectors = se_dev_align_max_sectors(max_sectors,
1272 dev->se_sub_dev->se_dev_attrib.block_size);
1245 1273
1246 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1274 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1247 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1275 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
1344 */ 1372 */
1345 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1373 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1346 struct se_node_acl *acl; 1374 struct se_node_acl *acl;
1347 spin_lock_bh(&tpg->acl_node_lock); 1375 spin_lock_irq(&tpg->acl_node_lock);
1348 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1376 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1349 if (acl->dynamic_node_acl) { 1377 if (acl->dynamic_node_acl &&
1350 spin_unlock_bh(&tpg->acl_node_lock); 1378 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1379 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1380 spin_unlock_irq(&tpg->acl_node_lock);
1351 core_tpg_add_node_to_devs(acl, tpg); 1381 core_tpg_add_node_to_devs(acl, tpg);
1352 spin_lock_bh(&tpg->acl_node_lock); 1382 spin_lock_irq(&tpg->acl_node_lock);
1353 } 1383 }
1354 } 1384 }
1355 spin_unlock_bh(&tpg->acl_node_lock); 1385 spin_unlock_irq(&tpg->acl_node_lock);
1356 } 1386 }
1357 1387
1358 return lun_p; 1388 return lun_p;
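se_dev_align_max_sectors(), introduced above, caps max_sectors so that max_sectors * block_size is a whole multiple of PAGE_SIZE, which is what the reworked per-task SGL sizing in transport_allocate_data_tasks() assumes. The arithmetic is just a round-down; a quick self-contained check (plain C, 4 KiB pages and a 512-byte block size assumed):

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_size = 4096, block_size = 512, max_sectors = 1023;
            unsigned int bytes = max_sectors * block_size;           /* 523776 */
            unsigned int aligned = (bytes / page_size) * page_size;  /* rounddown() */
            unsigned int aligned_sectors = aligned / block_size;

            /* prints "1023 -> 1016 sectors" */
            printf("%u -> %u sectors\n", max_sectors, aligned_sectors);
            return 0;
    }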
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f1654694f4ea..55bbe0847a6d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(
481 481
482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); 482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
483 if (IS_ERR(se_nacl)) 483 if (IS_ERR(se_nacl))
484 return ERR_PTR(PTR_ERR(se_nacl)); 484 return ERR_CAST(se_nacl);
485 485
486 nacl_cg = &se_nacl->acl_group; 486 nacl_cg = &se_nacl->acl_group;
487 nacl_cg->default_groups = se_nacl->acl_default_groups; 487 nacl_cg->default_groups = se_nacl->acl_default_groups;
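The one-line fabric_configfs change above swaps ERR_PTR(PTR_ERR(se_nacl)) for ERR_CAST(se_nacl): both forward an encoded error from one pointer type to another, but ERR_CAST states the intent directly and avoids the round trip through a long. Illustrative use in a hypothetical wrapper (nacl_to_group() is not from the patch):

    #include <linux/err.h>

    struct config_group;
    struct se_node_acl;

    static struct config_group *nacl_to_group(struct se_node_acl *nacl)
    {
            if (IS_ERR(nacl))
                    return ERR_CAST(nacl);  /* same value as ERR_PTR(PTR_ERR(nacl)) */

            /* ... build and return the real config_group here ... */
            return NULL;
    }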
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1c1b849cd4fb..7fd3a161f7cc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
1598 * from the decoded fabric module specific TransportID 1598 * from the decoded fabric module specific TransportID
1599 * at *i_str. 1599 * at *i_str.
1600 */ 1600 */
1601 spin_lock_bh(&tmp_tpg->acl_node_lock); 1601 spin_lock_irq(&tmp_tpg->acl_node_lock);
1602 dest_node_acl = __core_tpg_get_initiator_node_acl( 1602 dest_node_acl = __core_tpg_get_initiator_node_acl(
1603 tmp_tpg, i_str); 1603 tmp_tpg, i_str);
1604 if (dest_node_acl) { 1604 if (dest_node_acl) {
1605 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1605 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1606 smp_mb__after_atomic_inc(); 1606 smp_mb__after_atomic_inc();
1607 } 1607 }
1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1609 1609
1610 if (!dest_node_acl) { 1610 if (!dest_node_acl) {
1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1611 core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ after_iport_check:
3496 /* 3496 /*
3497 * Locate the destination struct se_node_acl from the received Transport ID 3497 * Locate the destination struct se_node_acl from the received Transport ID
3498 */ 3498 */
3499 spin_lock_bh(&dest_se_tpg->acl_node_lock); 3499 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3501 initiator_str); 3501 initiator_str);
3502 if (dest_node_acl) { 3502 if (dest_node_acl) {
3503 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3503 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3504 smp_mb__after_atomic_inc(); 3504 smp_mb__after_atomic_inc();
3505 } 3505 }
3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3507 3507
3508 if (!dest_node_acl) { 3508 if (!dest_node_acl) {
3509 pr_err("Unable to locate %s dest_node_acl for" 3509 pr_err("Unable to locate %s dest_node_acl for"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 3dd81d24d9a9..e567e129c697 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
390 length = req->rd_size; 390 length = req->rd_size;
391 391
392 dst = sg_virt(&sg_d[i++]) + dst_offset; 392 dst = sg_virt(&sg_d[i++]) + dst_offset;
393 if (!dst) 393 BUG_ON(!dst);
394 BUG();
395 394
396 src = sg_virt(&sg_s[j]) + src_offset; 395 src = sg_virt(&sg_s[j]) + src_offset;
397 if (!src) 396 BUG_ON(!src);
398 BUG();
399 397
400 dst_offset = 0; 398 dst_offset = 0;
401 src_offset = length; 399 src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
415 length = req->rd_size; 413 length = req->rd_size;
416 414
417 dst = sg_virt(&sg_d[i]) + dst_offset; 415 dst = sg_virt(&sg_d[i]) + dst_offset;
418 if (!dst) 416 BUG_ON(!dst);
419 BUG();
420 417
421 if (sg_d[i].length == length) { 418 if (sg_d[i].length == length) {
422 i++; 419 i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
425 dst_offset = length; 422 dst_offset = length;
426 423
427 src = sg_virt(&sg_s[j++]) + src_offset; 424 src = sg_virt(&sg_s[j++]) + src_offset;
428 if (!src) 425 BUG_ON(!src);
429 BUG();
430 426
431 src_offset = 0; 427 src_offset = 0;
432 page_end = 1; 428 page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
510 length = req->rd_size; 506 length = req->rd_size;
511 507
512 src = sg_virt(&sg_s[i++]) + src_offset; 508 src = sg_virt(&sg_s[i++]) + src_offset;
513 if (!src) 509 BUG_ON(!src);
514 BUG();
515 510
516 dst = sg_virt(&sg_d[j]) + dst_offset; 511 dst = sg_virt(&sg_d[j]) + dst_offset;
517 if (!dst) 512 BUG_ON(!dst);
518 BUG();
519 513
520 src_offset = 0; 514 src_offset = 0;
521 dst_offset = length; 515 dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
535 length = req->rd_size; 529 length = req->rd_size;
536 530
537 src = sg_virt(&sg_s[i]) + src_offset; 531 src = sg_virt(&sg_s[i]) + src_offset;
538 if (!src) 532 BUG_ON(!src);
539 BUG();
540 533
541 if (sg_s[i].length == length) { 534 if (sg_s[i].length == length) {
542 i++; 535 i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
545 src_offset = length; 538 src_offset = length;
546 539
547 dst = sg_virt(&sg_d[j++]) + dst_offset; 540 dst = sg_virt(&sg_d[j++]) + dst_offset;
548 if (!dst) 541 BUG_ON(!dst);
549 BUG();
550 542
551 dst_offset = 0; 543 dst_offset = 0;
552 page_end = 1; 544 page_end = 1;
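The ramdisk backend change above is purely mechanical: every open-coded "if (!ptr) BUG();" becomes BUG_ON(!ptr), which is shorter and keeps the asserted condition greppable. A hedged sketch of one copy step (copy_one() is hypothetical):

    #include <linux/bug.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void copy_one(struct scatterlist *d, struct scatterlist *s, size_t len)
    {
            void *dst = sg_virt(d);
            void *src = sg_virt(s);

            BUG_ON(!dst);   /* was: if (!dst) BUG(); */
            BUG_ON(!src);
            memcpy(dst, src, len);
    }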
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c5ef11..162b736c7342 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
137{ 137{
138 struct se_node_acl *acl; 138 struct se_node_acl *acl;
139 139
140 spin_lock_bh(&tpg->acl_node_lock); 140 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) && 142 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) { 143 !acl->dynamic_node_acl) {
144 spin_unlock_bh(&tpg->acl_node_lock); 144 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl; 145 return acl;
146 } 146 }
147 } 147 }
148 spin_unlock_bh(&tpg->acl_node_lock); 148 spin_unlock_irq(&tpg->acl_node_lock);
149 149
150 return NULL; 150 return NULL;
151} 151}
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299 return NULL; 299 return NULL;
300 } 300 }
301 /*
302 * Here we only create demo-mode MappedLUNs from the active
303 * TPG LUNs if the fabric is not explicitly asking for
304 * tpg_check_demo_mode_login_only() == 1.
305 */
306 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308 do { ; } while (0);
309 else
310 core_tpg_add_node_to_devs(acl, tpg);
301 311
302 core_tpg_add_node_to_devs(acl, tpg); 312 spin_lock_irq(&tpg->acl_node_lock);
303
304 spin_lock_bh(&tpg->acl_node_lock);
305 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 313 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
306 tpg->num_node_acls++; 314 tpg->num_node_acls++;
307 spin_unlock_bh(&tpg->acl_node_lock); 315 spin_unlock_irq(&tpg->acl_node_lock);
308 316
309 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
310 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
354{ 362{
355 struct se_node_acl *acl = NULL; 363 struct se_node_acl *acl = NULL;
356 364
357 spin_lock_bh(&tpg->acl_node_lock); 365 spin_lock_irq(&tpg->acl_node_lock);
358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
359 if (acl) { 367 if (acl) {
360 if (acl->dynamic_node_acl) { 368 if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
365 spin_unlock_bh(&tpg->acl_node_lock); 373 spin_unlock_irq(&tpg->acl_node_lock);
366 /* 374 /*
367 * Release the locally allocated struct se_node_acl 375 * Release the locally allocated struct se_node_acl
368 * because * core_tpg_add_initiator_node_acl() returned 376 * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
378 " Node %s already exists for TPG %u, ignoring" 386 " Node %s already exists for TPG %u, ignoring"
379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
381 spin_unlock_bh(&tpg->acl_node_lock); 389 spin_unlock_irq(&tpg->acl_node_lock);
382 return ERR_PTR(-EEXIST); 390 return ERR_PTR(-EEXIST);
383 } 391 }
384 spin_unlock_bh(&tpg->acl_node_lock); 392 spin_unlock_irq(&tpg->acl_node_lock);
385 393
386 if (!se_nacl) { 394 if (!se_nacl) {
387 pr_err("struct se_node_acl pointer is NULL\n"); 395 pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
418 return ERR_PTR(-EINVAL); 426 return ERR_PTR(-EINVAL);
419 } 427 }
420 428
421 spin_lock_bh(&tpg->acl_node_lock); 429 spin_lock_irq(&tpg->acl_node_lock);
422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 430 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
423 tpg->num_node_acls++; 431 tpg->num_node_acls++;
424 spin_unlock_bh(&tpg->acl_node_lock); 432 spin_unlock_irq(&tpg->acl_node_lock);
425 433
426done: 434done:
427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
445 struct se_session *sess, *sess_tmp; 453 struct se_session *sess, *sess_tmp;
446 int dynamic_acl = 0; 454 int dynamic_acl = 0;
447 455
448 spin_lock_bh(&tpg->acl_node_lock); 456 spin_lock_irq(&tpg->acl_node_lock);
449 if (acl->dynamic_node_acl) { 457 if (acl->dynamic_node_acl) {
450 acl->dynamic_node_acl = 0; 458 acl->dynamic_node_acl = 0;
451 dynamic_acl = 1; 459 dynamic_acl = 1;
452 } 460 }
453 list_del(&acl->acl_list); 461 list_del(&acl->acl_list);
454 tpg->num_node_acls--; 462 tpg->num_node_acls--;
455 spin_unlock_bh(&tpg->acl_node_lock); 463 spin_unlock_irq(&tpg->acl_node_lock);
456 464
457 spin_lock_bh(&tpg->session_lock); 465 spin_lock_bh(&tpg->session_lock);
458 list_for_each_entry_safe(sess, sess_tmp, 466 list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
503 struct se_node_acl *acl; 511 struct se_node_acl *acl;
504 int dynamic_acl = 0; 512 int dynamic_acl = 0;
505 513
506 spin_lock_bh(&tpg->acl_node_lock); 514 spin_lock_irq(&tpg->acl_node_lock);
507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
508 if (!acl) { 516 if (!acl) {
509 pr_err("Access Control List entry for %s Initiator" 517 pr_err("Access Control List entry for %s Initiator"
510 " Node %s does not exists for TPG %hu, ignoring" 518 " Node %s does not exists for TPG %hu, ignoring"
511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
513 spin_unlock_bh(&tpg->acl_node_lock); 521 spin_unlock_irq(&tpg->acl_node_lock);
514 return -ENODEV; 522 return -ENODEV;
515 } 523 }
516 if (acl->dynamic_node_acl) { 524 if (acl->dynamic_node_acl) {
517 acl->dynamic_node_acl = 0; 525 acl->dynamic_node_acl = 0;
518 dynamic_acl = 1; 526 dynamic_acl = 1;
519 } 527 }
520 spin_unlock_bh(&tpg->acl_node_lock); 528 spin_unlock_irq(&tpg->acl_node_lock);
521 529
522 spin_lock_bh(&tpg->session_lock); 530 spin_lock_bh(&tpg->session_lock);
523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
534 spin_unlock_bh(&tpg->session_lock); 542 spin_unlock_bh(&tpg->session_lock);
535 543
536 spin_lock_bh(&tpg->acl_node_lock); 544 spin_lock_irq(&tpg->acl_node_lock);
537 if (dynamic_acl) 545 if (dynamic_acl)
538 acl->dynamic_node_acl = 1; 546 acl->dynamic_node_acl = 1;
539 spin_unlock_bh(&tpg->acl_node_lock); 547 spin_unlock_irq(&tpg->acl_node_lock);
540 return -EEXIST; 548 return -EEXIST;
541 } 549 }
542 /* 550 /*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
571 if (init_sess) 579 if (init_sess)
572 tpg->se_tpg_tfo->close_session(init_sess); 580 tpg->se_tpg_tfo->close_session(init_sess);
573 581
574 spin_lock_bh(&tpg->acl_node_lock); 582 spin_lock_irq(&tpg->acl_node_lock);
575 if (dynamic_acl) 583 if (dynamic_acl)
576 acl->dynamic_node_acl = 1; 584 acl->dynamic_node_acl = 1;
577 spin_unlock_bh(&tpg->acl_node_lock); 585 spin_unlock_irq(&tpg->acl_node_lock);
578 return -EINVAL; 586 return -EINVAL;
579 } 587 }
580 spin_unlock_bh(&tpg->session_lock); 588 spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 599 tpg->se_tpg_tfo->tpg_get_tag(tpg));
592 600
593 spin_lock_bh(&tpg->acl_node_lock); 601 spin_lock_irq(&tpg->acl_node_lock);
594 if (dynamic_acl) 602 if (dynamic_acl)
595 acl->dynamic_node_acl = 1; 603 acl->dynamic_node_acl = 1;
596 spin_unlock_bh(&tpg->acl_node_lock); 604 spin_unlock_irq(&tpg->acl_node_lock);
597 605
598 return 0; 606 return 0;
599} 607}
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
718 * in transport_deregister_session(). 726 * in transport_deregister_session().
719 */ 727 */
720 spin_lock_bh(&se_tpg->acl_node_lock); 728 spin_lock_irq(&se_tpg->acl_node_lock);
721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
722 acl_list) { 730 acl_list) {
723 list_del(&nacl->acl_list); 731 list_del(&nacl->acl_list);
724 se_tpg->num_node_acls--; 732 se_tpg->num_node_acls--;
725 spin_unlock_bh(&se_tpg->acl_node_lock); 733 spin_unlock_irq(&se_tpg->acl_node_lock);
726 734
727 core_tpg_wait_for_nacl_pr_ref(nacl); 735 core_tpg_wait_for_nacl_pr_ref(nacl);
728 core_free_device_list_for_node(nacl, se_tpg); 736 core_free_device_list_for_node(nacl, se_tpg);
729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
730 738
731 spin_lock_bh(&se_tpg->acl_node_lock); 739 spin_lock_irq(&se_tpg->acl_node_lock);
732 } 740 }
733 spin_unlock_bh(&se_tpg->acl_node_lock); 741 spin_unlock_irq(&se_tpg->acl_node_lock);
734 742
735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
736 core_tpg_release_virtual_lun0(se_tpg); 744 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 89760329d5d0..a4b0a8d27f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess)
389{ 389{
390 struct se_portal_group *se_tpg = se_sess->se_tpg; 390 struct se_portal_group *se_tpg = se_sess->se_tpg;
391 struct se_node_acl *se_nacl; 391 struct se_node_acl *se_nacl;
392 unsigned long flags;
392 393
393 if (!se_tpg) { 394 if (!se_tpg) {
394 transport_free_session(se_sess); 395 transport_free_session(se_sess);
395 return; 396 return;
396 } 397 }
397 398
398 spin_lock_bh(&se_tpg->session_lock); 399 spin_lock_irqsave(&se_tpg->session_lock, flags);
399 list_del(&se_sess->sess_list); 400 list_del(&se_sess->sess_list);
400 se_sess->se_tpg = NULL; 401 se_sess->se_tpg = NULL;
401 se_sess->fabric_sess_ptr = NULL; 402 se_sess->fabric_sess_ptr = NULL;
402 spin_unlock_bh(&se_tpg->session_lock); 403 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
403 404
404 /* 405 /*
405 * Determine if we need to do extra work for this initiator node's 406 * Determine if we need to do extra work for this initiator node's
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess)
407 */ 408 */
408 se_nacl = se_sess->se_node_acl; 409 se_nacl = se_sess->se_node_acl;
409 if (se_nacl) { 410 if (se_nacl) {
410 spin_lock_bh(&se_tpg->acl_node_lock); 411 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
411 if (se_nacl->dynamic_node_acl) { 412 if (se_nacl->dynamic_node_acl) {
412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 413 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
413 se_tpg)) { 414 se_tpg)) {
414 list_del(&se_nacl->acl_list); 415 list_del(&se_nacl->acl_list);
415 se_tpg->num_node_acls--; 416 se_tpg->num_node_acls--;
416 spin_unlock_bh(&se_tpg->acl_node_lock); 417 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
417 418
418 core_tpg_wait_for_nacl_pr_ref(se_nacl); 419 core_tpg_wait_for_nacl_pr_ref(se_nacl);
419 core_free_device_list_for_node(se_nacl, se_tpg); 420 core_free_device_list_for_node(se_nacl, se_tpg);
420 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 421 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
421 se_nacl); 422 se_nacl);
422 spin_lock_bh(&se_tpg->acl_node_lock); 423 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
423 } 424 }
424 } 425 }
425 spin_unlock_bh(&se_tpg->acl_node_lock); 426 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
426 } 427 }
427 428
428 transport_free_session(se_sess); 429 transport_free_session(se_sess);
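Several files in this series (target_core_device.c, target_core_pr.c, target_core_tpg.c and the hunk above) move acl_node_lock and session_lock from the _bh spinlock variants to the _irq/_irqsave ones, since these paths can now be reached where disabling softirqs alone is not enough; transport_deregister_session() uses the _irqsave form because it cannot assume interrupts are enabled on entry. The general pattern, sketched with a hypothetical lock and list rather than the target core's own:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct node { struct list_head list; };

    static DEFINE_SPINLOCK(node_lock);
    static LIST_HEAD(node_list);

    /* Callable from any context: save and restore the IRQ state. */
    static void node_del(struct node *n)
    {
            unsigned long flags;

            spin_lock_irqsave(&node_lock, flags);
            list_del(&n->list);
            spin_unlock_irqrestore(&node_lock, flags);
    }

    /* Known process context with interrupts on: the cheaper _irq form. */
    static void node_add(struct node *n)
    {
            spin_lock_irq(&node_lock);
            list_add_tail(&n->list, &node_list);
            spin_unlock_irq(&node_lock);
    }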
@@ -976,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
976{ 977{
977 struct se_device *dev = container_of(work, struct se_device, 978 struct se_device *dev = container_of(work, struct se_device,
978 qf_work_queue); 979 qf_work_queue);
980 LIST_HEAD(qf_cmd_list);
979 struct se_cmd *cmd, *cmd_tmp; 981 struct se_cmd *cmd, *cmd_tmp;
980 982
981 spin_lock_irq(&dev->qf_cmd_lock); 983 spin_lock_irq(&dev->qf_cmd_lock);
982 list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { 984 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
985 spin_unlock_irq(&dev->qf_cmd_lock);
983 986
987 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
984 list_del(&cmd->se_qf_node); 988 list_del(&cmd->se_qf_node);
985 atomic_dec(&dev->dev_qf_count); 989 atomic_dec(&dev->dev_qf_count);
986 smp_mb__after_atomic_dec(); 990 smp_mb__after_atomic_dec();
987 spin_unlock_irq(&dev->qf_cmd_lock);
988 991
989 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 992 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
990 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 993 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -996,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
996 * has been added to head of queue 999 * has been added to head of queue
997 */ 1000 */
998 transport_add_cmd_to_queue(cmd, cmd->t_state); 1001 transport_add_cmd_to_queue(cmd, cmd->t_state);
999
1000 spin_lock_irq(&dev->qf_cmd_lock);
1001 } 1002 }
1002 spin_unlock_irq(&dev->qf_cmd_lock);
1003} 1003}
1004 1004
1005unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 1005unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
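target_qf_do_work() above stops retaking qf_cmd_lock on every iteration; instead it splices the whole queue-full list onto a local list head once under the lock, then walks the private copy with the lock dropped. A minimal sketch of that drain pattern with hypothetical names (struct work_item, drain()):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct work_item { struct list_head node; };

    static void drain(spinlock_t *lock, struct list_head *pending)
    {
            LIST_HEAD(local);
            struct work_item *it, *tmp;

            spin_lock_irq(lock);
            list_splice_init(pending, &local);      /* 'pending' is now empty */
            spin_unlock_irq(lock);

            list_for_each_entry_safe(it, tmp, &local, node) {
                    list_del(&it->node);
                    /* process 'it' without holding the lock */
            }
    }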
@@ -2053,8 +2053,14 @@ static void transport_generic_request_failure(
2053 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2053 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2054 break; 2054 break;
2055 } 2055 }
2056 2056 /*
2057 if (!sc) 2057 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
2058 * make the call to transport_send_check_condition_and_sense()
2059 * directly. Otherwise expect the fabric to make the call to
2060 * transport_send_check_condition_and_sense() after handling
2061 * possible unsolicited write data payloads.
2062 */
2063 if (!sc && !cmd->se_tfo->new_cmd_map)
2058 transport_new_cmd_failure(cmd); 2064 transport_new_cmd_failure(cmd);
2059 else { 2065 else {
2060 ret = transport_send_check_condition_and_sense(cmd, 2066 ret = transport_send_check_condition_and_sense(cmd,
@@ -2847,12 +2853,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2847 " transport_dev_end_lba(): %llu\n", 2853 " transport_dev_end_lba(): %llu\n",
2848 cmd->t_task_lba, sectors, 2854 cmd->t_task_lba, sectors,
2849 transport_dev_end_lba(dev)); 2855 transport_dev_end_lba(dev));
2850 pr_err(" We should return CHECK_CONDITION" 2856 return -EINVAL;
2851 " but we don't yet\n");
2852 return 0;
2853 } 2857 }
2854 2858
2855 return sectors; 2859 return 0;
2860}
2861
2862static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2863{
2864 /*
2865 * Determine if the received WRITE_SAME is used for direct
2866 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2867 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2868 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2869 */
2870 int passthrough = (dev->transport->transport_type ==
2871 TRANSPORT_PLUGIN_PHBA_PDEV);
2872
2873 if (!passthrough) {
2874 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2875 pr_err("WRITE_SAME PBDATA and LBDATA"
2876 " bits not supported for Block Discard"
2877 " Emulation\n");
2878 return -ENOSYS;
2879 }
2880 /*
2881 * Currently for the emulated case we only accept
2882 * tpws with the UNMAP=1 bit set.
2883 */
2884 if (!(flags[0] & 0x08)) {
2885 pr_err("WRITE_SAME w/o UNMAP bit not"
2886 " supported for Block Discard Emulation\n");
2887 return -ENOSYS;
2888 }
2889 }
2890
2891 return 0;
2856} 2892}
2857 2893
2858/* transport_generic_cmd_sequencer(): 2894/* transport_generic_cmd_sequencer():
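target_check_write_same_discard(), added in the hunk above, centralises the flag checks that the WRITE_SAME (10/16/32) cases previously open-coded: on the emulated (non-pSCSI) path the PBDATA (0x04) and LBDATA (0x02) bits are rejected and UNMAP (0x08) must be set. A rough equivalent of just the bit test, as a hypothetical helper with made-up macro names:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define WS_LBDATA       0x02
    #define WS_PBDATA       0x04
    #define WS_UNMAP        0x08

    static int check_ws_flags(unsigned char flags, bool passthrough)
    {
            if (passthrough)
                    return 0;       /* pSCSI forwards the CDB untouched */

            if (flags & (WS_PBDATA | WS_LBDATA)) {
                    pr_err("WRITE_SAME PBDATA/LBDATA not supported for emulation\n");
                    return -ENOSYS;
            }
            if (!(flags & WS_UNMAP)) {
                    pr_err("WRITE_SAME without UNMAP=1 not supported for emulation\n");
                    return -ENOSYS;
            }
            return 0;
    }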
@@ -3065,7 +3101,7 @@ static int transport_generic_cmd_sequencer(
3065 goto out_unsupported_cdb; 3101 goto out_unsupported_cdb;
3066 3102
3067 if (sectors) 3103 if (sectors)
3068 size = transport_get_size(sectors, cdb, cmd); 3104 size = transport_get_size(1, cdb, cmd);
3069 else { 3105 else {
3070 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3106 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3071 " supported\n"); 3107 " supported\n");
@@ -3075,27 +3111,9 @@ static int transport_generic_cmd_sequencer(
3075 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3111 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3076 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3112 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3077 3113
3078 /* 3114 if (target_check_write_same_discard(&cdb[10], dev) < 0)
3079 * Skip the remaining assignments for TCM/PSCSI passthrough
3080 */
3081 if (passthrough)
3082 break;
3083
3084 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3085 pr_err("WRITE_SAME PBDATA and LBDATA"
3086 " bits not supported for Block Discard"
3087 " Emulation\n");
3088 goto out_invalid_cdb_field; 3115 goto out_invalid_cdb_field;
3089 } 3116
3090 /*
3091 * Currently for the emulated case we only accept
3092 * tpws with the UNMAP=1 bit set.
3093 */
3094 if (!(cdb[10] & 0x08)) {
3095 pr_err("WRITE_SAME w/o UNMAP bit not"
3096 " supported for Block Discard Emulation\n");
3097 goto out_invalid_cdb_field;
3098 }
3099 break; 3117 break;
3100 default: 3118 default:
3101 pr_err("VARIABLE_LENGTH_CMD service action" 3119 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3330,10 +3348,12 @@ static int transport_generic_cmd_sequencer(
3330 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; 3348 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3331 /* 3349 /*
3332 * Check to ensure that LBA + Range does not exceed past end of 3350 * Check to ensure that LBA + Range does not exceed past end of
3333 * device. 3351 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3334 */ 3352 */
3335 if (!transport_cmd_get_valid_sectors(cmd)) 3353 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3336 goto out_invalid_cdb_field; 3354 if (transport_cmd_get_valid_sectors(cmd) < 0)
3355 goto out_invalid_cdb_field;
3356 }
3337 break; 3357 break;
3338 case UNMAP: 3358 case UNMAP:
3339 size = get_unaligned_be16(&cdb[7]); 3359 size = get_unaligned_be16(&cdb[7]);
@@ -3345,40 +3365,38 @@ static int transport_generic_cmd_sequencer(
3345 goto out_unsupported_cdb; 3365 goto out_unsupported_cdb;
3346 3366
3347 if (sectors) 3367 if (sectors)
3348 size = transport_get_size(sectors, cdb, cmd); 3368 size = transport_get_size(1, cdb, cmd);
3349 else { 3369 else {
3350 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3370 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3351 goto out_invalid_cdb_field; 3371 goto out_invalid_cdb_field;
3352 } 3372 }
3353 3373
3354 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 3374 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3355 passthrough = (dev->transport->transport_type == 3375 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3356 TRANSPORT_PLUGIN_PHBA_PDEV); 3376
3357 /* 3377 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3358 * Determine if the received WRITE_SAME_16 is used to for direct 3378 goto out_invalid_cdb_field;
3359 * passthrough into Linux/SCSI with struct request via TCM/pSCSI 3379 break;
3360 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 3380 case WRITE_SAME:
3361 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3381 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3362 * TCM/FILEIO subsystem plugin backstores. 3382 if (sector_ret)
3363 */ 3383 goto out_unsupported_cdb;
3364 if (!passthrough) { 3384
3365 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3385 if (sectors)
3366 pr_err("WRITE_SAME PBDATA and LBDATA" 3386 size = transport_get_size(1, cdb, cmd);
3367 " bits not supported for Block Discard" 3387 else {
3368 " Emulation\n"); 3388 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3369 goto out_invalid_cdb_field; 3389 goto out_invalid_cdb_field;
3370 }
3371 /*
3372 * Currently for the emulated case we only accept
3373 * tpws with the UNMAP=1 bit set.
3374 */
3375 if (!(cdb[1] & 0x08)) {
3376 pr_err("WRITE_SAME w/o UNMAP bit not "
3377 " supported for Block Discard Emulation\n");
3378 goto out_invalid_cdb_field;
3379 }
3380 } 3390 }
3391
3392 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3381 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3393 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3394 /*
3395 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3396 * of byte 1 bit 3 UNMAP instead of original reserved field
3397 */
3398 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3399 goto out_invalid_cdb_field;
3382 break; 3400 break;
3383 case ALLOW_MEDIUM_REMOVAL: 3401 case ALLOW_MEDIUM_REMOVAL:
3384 case GPCMD_CLOSE_TRACK: 3402 case GPCMD_CLOSE_TRACK:
@@ -3873,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3873static int transport_new_cmd_obj(struct se_cmd *cmd) 3891static int transport_new_cmd_obj(struct se_cmd *cmd)
3874{ 3892{
3875 struct se_device *dev = cmd->se_dev; 3893 struct se_device *dev = cmd->se_dev;
3876 u32 task_cdbs; 3894 int set_counts = 1, rc, task_cdbs;
3877 u32 rc;
3878 int set_counts = 1;
3879 3895
3880 /* 3896 /*
3881 * Setup any BIDI READ tasks and memory from 3897 * Setup any BIDI READ tasks and memory from
@@ -3893,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3893 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3909 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3894 cmd->scsi_sense_reason = 3910 cmd->scsi_sense_reason =
3895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3911 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3896 return PYX_TRANSPORT_LU_COMM_FAILURE; 3912 return -EINVAL;
3897 } 3913 }
3898 atomic_inc(&cmd->t_fe_count); 3914 atomic_inc(&cmd->t_fe_count);
3899 atomic_inc(&cmd->t_se_count); 3915 atomic_inc(&cmd->t_se_count);
@@ -3912,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3912 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3928 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3913 cmd->scsi_sense_reason = 3929 cmd->scsi_sense_reason =
3914 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3930 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3915 return PYX_TRANSPORT_LU_COMM_FAILURE; 3931 return -EINVAL;
3916 } 3932 }
3917 3933
3918 if (set_counts) { 3934 if (set_counts) {
@@ -4028,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4028 if (!task->task_sg) 4044 if (!task->task_sg)
4029 continue; 4045 continue;
4030 4046
4031 BUG_ON(!task->task_padded_sg);
4032
4033 if (!sg_first) { 4047 if (!sg_first) {
4034 sg_first = task->task_sg; 4048 sg_first = task->task_sg;
4035 chained_nents = task->task_sg_nents; 4049 chained_nents = task->task_sg_nents;
@@ -4037,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4037 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4051 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4038 chained_nents += task->task_sg_nents; 4052 chained_nents += task->task_sg_nents;
4039 } 4053 }
4054 /*
4055 * For the padded tasks, use the extra SGL vector allocated
4056 * in transport_allocate_data_tasks() for the sg_prev_nents
4057 * offset into sg_chain() above. The last task of a
4058 * multi-task list, or a single task, will not have
4059 * task->task_padded_sg set.
4060 */
4061 if (task->task_padded_sg)
4062 sg_prev_nents = (task->task_sg_nents + 1);
4063 else
4064 sg_prev_nents = task->task_sg_nents;
4040 4065
4041 sg_prev = task->task_sg; 4066 sg_prev = task->task_sg;
4042 sg_prev_nents = task->task_sg_nents;
4043 } 4067 }
4044 /* 4068 /*
4045 * Setup the starting pointer and total t_tasks_sg_linked_no including 4069 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4091,7 +4115,7 @@ static int transport_allocate_data_tasks(
4091 4115
4092 cmd_sg = sgl; 4116 cmd_sg = sgl;
4093 for (i = 0; i < task_count; i++) { 4117 for (i = 0; i < task_count; i++) {
4094 unsigned int task_size; 4118 unsigned int task_size, task_sg_nents_padded;
4095 int count; 4119 int count;
4096 4120
4097 task = transport_generic_get_task(cmd, data_direction); 4121 task = transport_generic_get_task(cmd, data_direction);
@@ -4110,30 +4134,33 @@ static int transport_allocate_data_tasks(
4110 4134
4111 /* Update new cdb with updated lba/sectors */ 4135 /* Update new cdb with updated lba/sectors */
4112 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4136 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4113 4137 /*
4138 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
4139 * in order to calculate the number per task SGL entries
4140 */
4141 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
4114 /* 4142 /*
4115 * Check if the fabric module driver is requesting that all 4143 * Check if the fabric module driver is requesting that all
4116 * struct se_task->task_sg[] be chained together.. If so, 4144 * struct se_task->task_sg[] be chained together.. If so,
4117 * then allocate an extra padding SG entry for linking and 4145 * then allocate an extra padding SG entry for linking and
4118 * marking the end of the chained SGL. 4146 * marking the end of the chained SGL for every task except
4119 * Possibly over-allocate task sgl size by using cmd sgl size. 4147 * the last one for (task_count > 1) operation, or skipping
4120 * It's so much easier and only a waste when task_count > 1. 4148 * the extra padding for the (task_count == 1) case.
4121 * That is extremely rare.
4122 */ 4149 */
4123 task->task_sg_nents = sgl_nents; 4150 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
4124 if (cmd->se_tfo->task_sg_chaining) { 4151 task_sg_nents_padded = (task->task_sg_nents + 1);
4125 task->task_sg_nents++;
4126 task->task_padded_sg = 1; 4152 task->task_padded_sg = 1;
4127 } 4153 } else
4154 task_sg_nents_padded = task->task_sg_nents;
4128 4155
4129 task->task_sg = kmalloc(sizeof(struct scatterlist) * 4156 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4130 task->task_sg_nents, GFP_KERNEL); 4157 task_sg_nents_padded, GFP_KERNEL);
4131 if (!task->task_sg) { 4158 if (!task->task_sg) {
4132 cmd->se_dev->transport->free_task(task); 4159 cmd->se_dev->transport->free_task(task);
4133 return -ENOMEM; 4160 return -ENOMEM;
4134 } 4161 }
4135 4162
4136 sg_init_table(task->task_sg, task->task_sg_nents); 4163 sg_init_table(task->task_sg, task_sg_nents_padded);
4137 4164
4138 task_size = task->task_size; 4165 task_size = task->task_size;
4139 4166
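[Editor's note] The sizing logic added above reduces to two rules: one scatterlist entry per PAGE_SIZE chunk of the task, plus one padding entry when the task will later be chained to the next one. The arithmetic in isolation (illustrative function; assumes PAGE_SIZE-sized SGL chunks as the comment states):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static unsigned int sketch_task_sg_nents(u32 task_size, bool will_chain)
    {
    	/* One entry per PAGE_SIZE chunk of payload... */
    	unsigned int nents = DIV_ROUND_UP(task_size, PAGE_SIZE);

    	/* ...plus one spare entry to hold the sg_chain() link. */
    	return will_chain ? nents + 1 : nents;
    }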
@@ -4230,10 +4257,13 @@ static u32 transport_allocate_tasks(
4230 struct scatterlist *sgl, 4257 struct scatterlist *sgl,
4231 unsigned int sgl_nents) 4258 unsigned int sgl_nents)
4232{ 4259{
4233 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4260 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4261 if (transport_cmd_get_valid_sectors(cmd) < 0)
4262 return -EINVAL;
4263
4234 return transport_allocate_data_tasks(cmd, lba, data_direction, 4264 return transport_allocate_data_tasks(cmd, lba, data_direction,
4235 sgl, sgl_nents); 4265 sgl, sgl_nents);
4236 else 4266 } else
4237 return transport_allocate_control_task(cmd); 4267 return transport_allocate_control_task(cmd);
4238 4268
4239} 4269}
@@ -4726,6 +4756,13 @@ int transport_send_check_condition_and_sense(
4726 */ 4756 */
4727 switch (reason) { 4757 switch (reason) {
4728 case TCM_NON_EXISTENT_LUN: 4758 case TCM_NON_EXISTENT_LUN:
4759 /* CURRENT ERROR */
4760 buffer[offset] = 0x70;
4761 /* ILLEGAL REQUEST */
4762 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4763 /* LOGICAL UNIT NOT SUPPORTED */
4764 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4765 break;
4729 case TCM_UNSUPPORTED_SCSI_OPCODE: 4766 case TCM_UNSUPPORTED_SCSI_OPCODE:
4730 case TCM_SECTOR_COUNT_TOO_MANY: 4767 case TCM_SECTOR_COUNT_TOO_MANY:
4731 /* CURRENT ERROR */ 4768 /* CURRENT ERROR */
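[Editor's note] The TCM_NON_EXISTENT_LUN arm added above fills a fixed-format sense buffer by hand. For reference, a sketch using the standard SPC offsets (illustrative function name; the driver writes at an additional base offset):

    /* Fixed-format sense data for LOGICAL UNIT NOT SUPPORTED:
     * byte 0 response code, byte 2 sense key, bytes 12/13 ASC/ASCQ. */
    static void sketch_lun_not_supported_sense(unsigned char *buf)
    {
    	buf[0]  = 0x70;		/* current error, fixed format */
    	buf[2]  = 0x05;		/* ILLEGAL REQUEST */
    	buf[12] = 0x25;		/* ASC: LOGICAL UNIT NOT SUPPORTED */
    	buf[13] = 0x00;		/* ASCQ */
    }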
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index bd4fe21a23b8..3749d8b4b423 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -98,8 +98,7 @@ struct ft_tpg {
98 struct list_head list; /* linkage in ft_lport_acl tpg_list */ 98 struct list_head list; /* linkage in ft_lport_acl tpg_list */
99 struct list_head lun_list; /* head of LUNs */ 99 struct list_head lun_list; /* head of LUNs */
100 struct se_portal_group se_tpg; 100 struct se_portal_group se_tpg;
101 struct task_struct *thread; /* processing thread */ 101 struct workqueue_struct *workqueue;
102 struct se_queue_obj qobj; /* queue for processing thread */
103}; 102};
104 103
105struct ft_lport_acl { 104struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
110 struct se_wwn fc_lport_wwn; 109 struct se_wwn fc_lport_wwn;
111}; 110};
112 111
113enum ft_cmd_state {
114 FC_CMD_ST_NEW = 0,
115 FC_CMD_ST_REJ
116};
117
118/* 112/*
119 * Commands 113 * Commands
120 */ 114 */
121struct ft_cmd { 115struct ft_cmd {
122 enum ft_cmd_state state;
123 u32 lun; /* LUN from request */ 116 u32 lun; /* LUN from request */
124 struct ft_sess *sess; /* session held for cmd */ 117 struct ft_sess *sess; /* session held for cmd */
125 struct fc_seq *seq; /* sequence in exchange mgr */ 118 struct fc_seq *seq; /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
127 struct fc_frame *req_frame; 120 struct fc_frame *req_frame;
128 unsigned char *cdb; /* pointer to CDB inside frame */ 121 unsigned char *cdb; /* pointer to CDB inside frame */
129 u32 write_data_len; /* data received on writes */ 122 u32 write_data_len; /* data received on writes */
130 struct se_queue_req se_req; 123 struct work_struct work;
131 /* Local sense buffer */ 124 /* Local sense buffer */
132 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; 125 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
133 u32 was_ddp_setup:1; /* Set only if ddp is setup */ 126 u32 was_ddp_setup:1; /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
177/* 170/*
178 * other internal functions. 171 * other internal functions.
179 */ 172 */
180int ft_thread(void *);
181void ft_recv_req(struct ft_sess *, struct fc_frame *); 173void ft_recv_req(struct ft_sess *, struct fc_frame *);
182struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 174struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
183struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); 175struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 5654dc22f7ae..80fbcde00cb6 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
62 int count; 62 int count;
63 63
64 se_cmd = &cmd->se_cmd; 64 se_cmd = &cmd->se_cmd;
65 pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 65 pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
66 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 66 caller, cmd, cmd->sess, cmd->seq, se_cmd);
67 pr_debug("%s: cmd %p cdb %p\n", 67 pr_debug("%s: cmd %p cdb %p\n",
68 caller, cmd, cmd->cdb); 68 caller, cmd, cmd->cdb);
69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
91} 91}
92 92
93static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
94{
95 struct ft_tpg *tpg = sess->tport->tpg;
96 struct se_queue_obj *qobj = &tpg->qobj;
97 unsigned long flags;
98
99 qobj = &sess->tport->tpg->qobj;
100 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
101 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
102 atomic_inc(&qobj->queue_cnt);
103 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
104
105 wake_up_process(tpg->thread);
106}
107
108static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
109{
110 unsigned long flags;
111 struct se_queue_req *qr;
112
113 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
114 if (list_empty(&qobj->qobj_list)) {
115 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
116 return NULL;
117 }
118 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
119 list_del(&qr->qr_list);
120 atomic_dec(&qobj->queue_cnt);
121 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
122 return container_of(qr, struct ft_cmd, se_req);
123}
124
125static void ft_free_cmd(struct ft_cmd *cmd) 93static void ft_free_cmd(struct ft_cmd *cmd)
126{ 94{
127 struct fc_frame *fp; 95 struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
282 250
283int ft_get_cmd_state(struct se_cmd *se_cmd) 251int ft_get_cmd_state(struct se_cmd *se_cmd)
284{ 252{
285 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 253 return 0;
286
287 return cmd->state;
288} 254}
289 255
290int ft_is_state_remove(struct se_cmd *se_cmd) 256int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
505 return 0; 471 return 0;
506} 472}
507 473
474static void ft_send_work(struct work_struct *work);
475
508/* 476/*
509 * Handle incoming FCP command. 477 * Handle incoming FCP command.
510 */ 478 */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
523 goto busy; 491 goto busy;
524 } 492 }
525 cmd->req_frame = fp; /* hold frame during cmd */ 493 cmd->req_frame = fp; /* hold frame during cmd */
526 ft_queue_cmd(sess, cmd); 494
495 INIT_WORK(&cmd->work, ft_send_work);
496 queue_work(sess->tport->tpg->workqueue, &cmd->work);
527 return; 497 return;
528 498
529busy: 499busy:
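[Editor's note] The hunk above replaces the private command queue with a per-command work item queued on the tpg workqueue. The general shape of that conversion, as a sketch with illustrative names (not the tcm_fc structures):

    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    struct sketch_cmd {
    	struct work_struct work;
    	/* ... per-command state ... */
    };

    static void sketch_handle_cmd(struct work_struct *work)
    {
    	struct sketch_cmd *cmd = container_of(work, struct sketch_cmd, work);

    	pr_debug("handling cmd %p in process context\n", cmd);
    }

    static void sketch_queue_cmd(struct workqueue_struct *wq,
    			     struct sketch_cmd *cmd)
    {
    	INIT_WORK(&cmd->work, sketch_handle_cmd);
    	queue_work(wq, &cmd->work);
    }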
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
563/* 533/*
564 * Send new command to target. 534 * Send new command to target.
565 */ 535 */
566static void ft_send_cmd(struct ft_cmd *cmd) 536static void ft_send_work(struct work_struct *work)
567{ 537{
538 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
568 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 539 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
569 struct se_cmd *se_cmd; 540 struct se_cmd *se_cmd;
570 struct fcp_cmnd *fcp; 541 struct fcp_cmnd *fcp;
571 int data_dir; 542 int data_dir = 0;
572 u32 data_len; 543 u32 data_len;
573 int task_attr; 544 int task_attr;
574 int ret; 545 int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
675err: 646err:
676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 647 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
677} 648}
678
679/*
680 * Handle request in the command thread.
681 */
682static void ft_exec_req(struct ft_cmd *cmd)
683{
684 pr_debug("cmd state %x\n", cmd->state);
685 switch (cmd->state) {
686 case FC_CMD_ST_NEW:
687 ft_send_cmd(cmd);
688 break;
689 default:
690 break;
691 }
692}
693
694/*
695 * Processing thread.
696 * Currently one thread per tpg.
697 */
698int ft_thread(void *arg)
699{
700 struct ft_tpg *tpg = arg;
701 struct se_queue_obj *qobj = &tpg->qobj;
702 struct ft_cmd *cmd;
703
704 while (!kthread_should_stop()) {
705 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
706 if (kthread_should_stop())
707 goto out;
708
709 cmd = ft_dequeue_cmd(qobj);
710 if (cmd)
711 ft_exec_req(cmd);
712 }
713
714out:
715 return 0;
716}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8781d1e423df..8fa39b74f22c 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
256 struct se_portal_group *se_tpg = &tpg->se_tpg; 256 struct se_portal_group *se_tpg = &tpg->se_tpg;
257 struct se_node_acl *se_acl; 257 struct se_node_acl *se_acl;
258 258
259 spin_lock_bh(&se_tpg->acl_node_lock); 259 spin_lock_irq(&se_tpg->acl_node_lock);
260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
262 pr_debug("acl %p port_name %llx\n", 262 pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
270 break; 270 break;
271 } 271 }
272 } 272 }
273 spin_unlock_bh(&se_tpg->acl_node_lock); 273 spin_unlock_irq(&se_tpg->acl_node_lock);
274 return found; 274 return found;
275} 275}
276 276
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
327 tpg->index = index; 327 tpg->index = index;
328 tpg->lport_acl = lacl; 328 tpg->lport_acl = lacl;
329 INIT_LIST_HEAD(&tpg->lun_list); 329 INIT_LIST_HEAD(&tpg->lun_list);
330 transport_init_queue_obj(&tpg->qobj);
331 330
332 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 331 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
333 tpg, TRANSPORT_TPG_TYPE_NORMAL); 332 tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
336 return NULL; 335 return NULL;
337 } 336 }
338 337
339 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); 338 tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
340 if (IS_ERR(tpg->thread)) { 339 if (!tpg->workqueue) {
341 kfree(tpg); 340 kfree(tpg);
342 return NULL; 341 return NULL;
343 } 342 }
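[Editor's note] The workqueue lifecycle introduced here is one dedicated queue per tpg: created when the tpg is added, destroyed (which drains pending work) when it is deleted. A compact sketch with assumed names:

    #include <linux/workqueue.h>

    static struct workqueue_struct *sketch_create_wq(void)
    {
    	/* flags == 0, max_active == 1; returns NULL on failure */
    	return alloc_workqueue("sketch_wq", 0, 1);
    }

    static void sketch_destroy_wq(struct workqueue_struct *wq)
    {
    	/* flushes outstanding work items, then frees the queue */
    	destroy_workqueue(wq);
    }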
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
356 pr_debug("del tpg %s\n", 355 pr_debug("del tpg %s\n",
357 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 356 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
358 357
359 kthread_stop(tpg->thread); 358 destroy_workqueue(tpg->workqueue);
360 359
361 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 360 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
362 synchronize_rcu(); 361 synchronize_rcu();
@@ -655,9 +654,7 @@ static void __exit ft_exit(void)
655 synchronize_rcu(); 654 synchronize_rcu();
656} 655}
657 656
658#ifdef MODULE
659MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 657MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
660MODULE_LICENSE("GPL"); 658MODULE_LICENSE("GPL");
661module_init(ft_init); 659module_init(ft_init);
662module_exit(ft_exit); 660module_exit(ft_exit);
663#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index c37f4cd96452..d35ea5a3d56c 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219 if (cmd->was_ddp_setup) { 219 if (cmd->was_ddp_setup) {
220 BUG_ON(!ep); 220 BUG_ON(!ep);
221 BUG_ON(!lport); 221 BUG_ON(!lport);
222 } 222 /*
223 223 * Since DDP (Large Rx offload) was setup for this request,
224 /* 224 * payload is expected to be copied directly to user buffers.
225 * Doesn't expect payload if DDP is setup. Payload 225 */
226 * is expected to be copied directly to user buffers 226 buf = fc_frame_payload_get(fp, 1);
227 * due to DDP (Large Rx offload), 227 if (buf)
228 */ 228 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
229 buf = fc_frame_payload_get(fp, 1);
230 if (buf)
231 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
232 "cmd->sg_cnt 0x%x. DDP was setup" 229 "cmd->sg_cnt 0x%x. DDP was setup"
233 " hence not expected to receive frame with " 230 " hence not expected to receive frame with "
 234 "payload, Frame will be dropped if " 231 "payload, Frame will be dropped if "
 235 "'Sequence Initiative' bit in f_ctl is " 232 "'Sequence Initiative' bit in f_ctl is "
236 "not set\n", __func__, ep->xid, f_ctl, 233 "not set\n", __func__, ep->xid, f_ctl,
237 cmd->sg, cmd->sg_cnt); 234 cmd->sg, cmd->sg_cnt);
238 /* 235 /*
239 * Invalidate HW DDP context if it was setup for respective 236 * Invalidate HW DDP context if it was setup for respective
 240 * command. Invalidation of HW DDP context is required in both 237 * command. Invalidation of HW DDP context is required in both
 241 * situations (success and error). 238 * situations (success and error).
242 */ 239 */
243 ft_invl_hw_context(cmd); 240 ft_invl_hw_context(cmd);
244 241
245 /* 242 /*
246 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 243 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
247 * write data frame is received successfully where payload is 244 * write data frame is received successfully where payload is
248 * posted directly to user buffer and only the last frame's 245 * posted directly to user buffer and only the last frame's
249 * header is posted in receive queue. 246 * header is posted in receive queue.
250 * 247 *
251 * If "Sequence Initiative (TSI)" bit is not set, means error 248 * If "Sequence Initiative (TSI)" bit is not set, means error
252 * condition w.r.t. DDP, hence drop the packet and let explict 249 * condition w.r.t. DDP, hence drop the packet and let explict
253 * ABORTS from other end of exchange timer trigger the recovery. 250 * ABORTS from other end of exchange timer trigger the recovery.
254 */ 251 */
255 if (f_ctl & FC_FC_SEQ_INIT) 252 if (f_ctl & FC_FC_SEQ_INIT)
256 goto last_frame; 253 goto last_frame;
257 else 254 else
258 goto drop; 255 goto drop;
256 }
259 257
260 rel_off = ntohl(fh->fh_parm_offset); 258 rel_off = ntohl(fh->fh_parm_offset);
261 frame_len = fr_len(fp); 259 frame_len = fr_len(fp);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 98b6e3bdb000..e809e9d4683c 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }
446int pty_limit = NR_UNIX98_PTY_DEFAULT; 446int pty_limit = NR_UNIX98_PTY_DEFAULT;
447static int pty_limit_min; 447static int pty_limit_min;
448static int pty_limit_max = NR_UNIX98_PTY_MAX; 448static int pty_limit_max = NR_UNIX98_PTY_MAX;
449static int tty_count;
449static int pty_count; 450static int pty_count;
450 451
452static inline void pty_inc_count(void)
453{
454 pty_count = (++tty_count) / 2;
455}
456
457static inline void pty_dec_count(void)
458{
459 pty_count = (--tty_count) / 2;
460}
461
451static struct cdev ptmx_cdev; 462static struct cdev ptmx_cdev;
452 463
453static struct ctl_table pty_table[] = { 464static struct ctl_table pty_table[] = {
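[Editor's note] The two helpers added above keep a raw count of tty structures and derive the advertised pty count from it: each pty install accounts for a master/slave pair, so the pair count is the tty count divided by two. The same arithmetic as a standalone sketch (illustrative names; the driver updates its counters under the tty locks):

    static int sketch_tty_count;
    static int sketch_pty_count;

    static inline void sketch_pty_inc(void)
    {
    	sketch_pty_count = (++sketch_tty_count) / 2;
    }

    static inline void sketch_pty_dec(void)
    {
    	sketch_pty_count = (--sketch_tty_count) / 2;
    }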
@@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
542 553
543static void pty_unix98_shutdown(struct tty_struct *tty) 554static void pty_unix98_shutdown(struct tty_struct *tty)
544{ 555{
556 tty_driver_remove_tty(tty->driver, tty);
545 /* We have our own method as we don't use the tty index */ 557 /* We have our own method as we don't use the tty index */
546 kfree(tty->termios); 558 kfree(tty->termios);
547} 559}
@@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
588 */ 600 */
589 tty_driver_kref_get(driver); 601 tty_driver_kref_get(driver);
590 tty->count++; 602 tty->count++;
591 pty_count++; 603 pty_inc_count(); /* tty */
604 pty_inc_count(); /* tty->link */
592 return 0; 605 return 0;
593err_free_mem: 606err_free_mem:
594 deinitialize_tty_struct(o_tty); 607 deinitialize_tty_struct(o_tty);
@@ -602,7 +615,7 @@ err_free_tty:
602 615
603static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 616static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
604{ 617{
605 pty_count--; 618 pty_dec_count();
606} 619}
607 620
608static const struct tty_operations ptm_unix98_ops = { 621static const struct tty_operations ptm_unix98_ops = {
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index f2dfec82faf8..7f50999eebc2 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)
1819 unsigned int iir, ier = 0, lsr; 1819 unsigned int iir, ier = 0, lsr;
1820 unsigned long flags; 1820 unsigned long flags;
1821 1821
1822 spin_lock_irqsave(&up->port.lock, flags);
1823
1822 /* 1824 /*
1823 * Must disable interrupts or else we risk racing with the interrupt 1825 * Must disable interrupts or else we risk racing with the interrupt
1824 * based handler. 1826 * based handler.
@@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)
1836 * the "Diva" UART used on the management processor on many HP 1838 * the "Diva" UART used on the management processor on many HP
1837 * ia64 and parisc boxes. 1839 * ia64 and parisc boxes.
1838 */ 1840 */
1839 spin_lock_irqsave(&up->port.lock, flags);
1840 lsr = serial_in(up, UART_LSR); 1841 lsr = serial_in(up, UART_LSR);
1841 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1842 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1842 spin_unlock_irqrestore(&up->port.lock, flags);
1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && 1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && 1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
1845 (lsr & UART_LSR_THRE)) { 1845 (lsr & UART_LSR_THRE)) {
@@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)
1848 } 1848 }
1849 1849
1850 if (!(iir & UART_IIR_NO_INT)) 1850 if (!(iir & UART_IIR_NO_INT))
1851 serial8250_handle_port(up); 1851 transmit_chars(up);
1852 1852
1853 if (is_real_interrupt(up->port.irq)) 1853 if (is_real_interrupt(up->port.irq))
1854 serial_out(up, UART_IER, ier); 1854 serial_out(up, UART_IER, ier);
1855 1855
1856 spin_unlock_irqrestore(&up->port.lock, flags);
1857
1856 /* Standard timer interval plus 0.2s to keep the port running */ 1858 /* Standard timer interval plus 0.2s to keep the port running */
1857 mod_timer(&up->timer, 1859 mod_timer(&up->timer,
1858 jiffies + uart_poll_timeout(&up->port) + HZ / 5); 1860 jiffies + uart_poll_timeout(&up->port) + HZ / 5);
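[Editor's note] The 8250 change above widens the critical section so the whole backup-timer poll runs under the port lock with interrupts saved, instead of locking only around the LSR read. The locking shape, sketched with a placeholder body:

    #include <linux/spinlock.h>

    static void sketch_backup_poll(spinlock_t *port_lock)
    {
    	unsigned long flags;

    	spin_lock_irqsave(port_lock, flags);
    	/* read IIR/LSR, kick the transmitter, restore IER ... */
    	spin_unlock_irqrestore(port_lock, flags);
    }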
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 6b887d90a205..3abeca2a2a1b 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1599 .device = 0x800D, 1599 .device = 0x800D,
1600 .init = pci_eg20t_init, 1600 .init = pci_eg20t_init,
1601 }, 1601 },
1602 {
1603 .vendor = 0x10DB,
1604 .device = 0x800D,
1605 .init = pci_eg20t_init,
1606 },
1607 /* 1602 /*
1608 * Cronyx Omega PCI (PLX-chip based) 1603 * Cronyx Omega PCI (PLX-chip based)
1609 */ 1604 */
@@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = {
4021 0, 0, pbn_NETMOS9900_2s_115200 }, 4016 0, 0, pbn_NETMOS9900_2s_115200 },
4022 4017
4023 /* 4018 /*
4024 * Best Connectivity PCI Multi I/O cards 4019 * Best Connectivity and Rosewill PCI Multi I/O cards
4025 */ 4020 */
4026 4021
4027 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4022 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
@@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = {
4029 0, 0, pbn_b0_1_115200 }, 4024 0, 0, pbn_b0_1_115200 },
4030 4025
4031 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4026 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4027 0xA000, 0x3002,
4028 0, 0, pbn_b0_bt_2_115200 },
4029
4030 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4032 0xA000, 0x3004, 4031 0xA000, 0x3004,
4033 0, 0, pbn_b0_bt_4_115200 }, 4032 0, 0, pbn_b0_bt_4_115200 },
4034 /* Intel CE4100 */ 4033 /* Intel CE4100 */
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index fc301f6722e1..a2f236510ff1 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
@@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {
109 /* IBM */ 109 /* IBM */
110 /* IBM Thinkpad 701 Internal Modem Voice */ 110 /* IBM Thinkpad 701 Internal Modem Voice */
111 { "IBM0033", 0 }, 111 { "IBM0033", 0 },
112 /* Intermec */
113 /* Intermec CV60 touchscreen port */
114 { "PNP4972", 0 },
112 /* Intertex */ 115 /* Intertex */
113 /* Intertex 28k8 33k6 Voice EXT PnP */ 116 /* Intertex 28k8 33k6 Voice EXT PnP */
114 { "IXDC801", 0 }, 117 { "IXDC801", 0 },
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index af9b7814965a..b922f5d2e61e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1609,9 +1609,11 @@ static struct console atmel_console = {
1609static int __init atmel_console_init(void) 1609static int __init atmel_console_init(void)
1610{ 1610{
1611 if (atmel_default_console_device) { 1611 if (atmel_default_console_device) {
1612 add_preferred_console(ATMEL_DEVICENAME, 1612 struct atmel_uart_data *pdata =
1613 atmel_default_console_device->id, NULL); 1613 atmel_default_console_device->dev.platform_data;
1614 atmel_init_port(&atmel_ports[atmel_default_console_device->id], 1614
1615 add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL);
1616 atmel_init_port(&atmel_ports[pdata->num],
1615 atmel_default_console_device); 1617 atmel_default_console_device);
1616 register_console(&atmel_console); 1618 register_console(&atmel_console);
1617 } 1619 }
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 225123b37f19..58be715913cd 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
4450 4450
4451#if defined(CONFIG_ETRAX_RS485) 4451#if defined(CONFIG_ETRAX_RS485)
4452#if defined(CONFIG_ETRAX_RS485_ON_PA) 4452#if defined(CONFIG_ETRAX_RS485_ON_PA)
4453 if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, 4453 if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
4454 rs485_pa_bit)) { 4454 rs485_pa_bit)) {
4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4456 "RS485 pin\n"); 4456 "RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
4459 } 4459 }
4460#endif 4460#endif
4461#if defined(CONFIG_ETRAX_RS485_ON_PORT_G) 4461#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
4462 if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, 4462 if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
4463 rs485_port_g_bit)) { 4463 rs485_port_g_bit)) {
4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4465 "RS485 pin\n"); 4465 "RS485 pin\n");
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..d73aadd7a9ad 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -340,5 +340,5 @@ module_exit(max3107_exit);
340 340
341MODULE_DESCRIPTION("MAX3107 driver"); 341MODULE_DESCRIPTION("MAX3107 driver");
342MODULE_AUTHOR("Aavamobile"); 342MODULE_AUTHOR("Aavamobile");
343MODULE_ALIAS("aava-max3107-spi"); 343MODULE_ALIAS("spi:aava-max3107");
344MODULE_LICENSE("GPL v2"); 344MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 750b4f627315..a8164601c0ea 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1209,5 +1209,5 @@ module_exit(max3107_exit);
1209 1209
1210MODULE_DESCRIPTION("MAX3107 driver"); 1210MODULE_DESCRIPTION("MAX3107 driver");
1211MODULE_AUTHOR("Aavamobile"); 1211MODULE_AUTHOR("Aavamobile");
1212MODULE_ALIAS("max3107-spi"); 1212MODULE_ALIAS("spi:max3107");
1213MODULE_LICENSE("GPL v2"); 1213MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index a764bf99743b..23bc743f2a22 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -917,4 +917,4 @@ module_init(serial_m3110_init);
917module_exit(serial_m3110_exit); 917module_exit(serial_m3110_exit);
918 918
919MODULE_LICENSE("GPL v2"); 919MODULE_LICENSE("GPL v2");
920MODULE_ALIAS("max3110-uart"); 920MODULE_ALIAS("spi:max3110-uart");
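[Editor's note] The three MODULE_ALIAS changes above all add the "spi:" prefix: the SPI core reports devices to userspace with a MODALIAS of the form "spi:<name>", so the module alias must carry the same prefix for autoloading to match. Minimal illustration with a hypothetical device name:

    #include <linux/module.h>

    /* Matches the uevent MODALIAS "spi:example-dev" emitted for an
     * SPI device registered with modalias "example-dev". */
    MODULE_ALIAS("spi:example-dev");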
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index c37df8d0fa28..5e713d3ef1f4 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
806 806
807 serial_omap_set_mctrl(&up->port, up->port.mctrl); 807 serial_omap_set_mctrl(&up->port, up->port.mctrl);
808 /* Software Flow Control Configuration */ 808 /* Software Flow Control Configuration */
809 if (termios->c_iflag & (IXON | IXOFF)) 809 serial_omap_configure_xonxoff(up, termios);
810 serial_omap_configure_xonxoff(up, termios);
811 810
812 spin_unlock_irqrestore(&up->port.lock, flags); 811 spin_unlock_irqrestore(&up->port.lock, flags);
813 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); 812 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 846dfcd3ce0d..b46218d679e2 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port)
598 dma_cap_zero(mask); 598 dma_cap_zero(mask);
599 dma_cap_set(DMA_SLAVE, mask); 599 dma_cap_set(DMA_SLAVE, mask);
600 600
601 dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev 601 dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
602 PCI_DEVFN(0xa, 0)); /* Get DMA's dev
602 information */ 603 information */
603 /* Set Tx DMA */ 604 /* Set Tx DMA */
604 param = &priv->param_tx; 605 param = &priv->param_tx;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index afc629423152..6edafb5ace18 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
1225 .suspend = s3c24xx_serial_suspend, 1225 .suspend = s3c24xx_serial_suspend,
1226 .resume = s3c24xx_serial_resume, 1226 .resume = s3c24xx_serial_resume,
1227}; 1227};
1228#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
1229
1228#else /* !CONFIG_PM_SLEEP */ 1230#else /* !CONFIG_PM_SLEEP */
1229#define s3c24xx_serial_pm_ops NULL 1231
1232#define SERIAL_SAMSUNG_PM_OPS NULL
1230#endif /* CONFIG_PM_SLEEP */ 1233#endif /* CONFIG_PM_SLEEP */
1231 1234
1232int s3c24xx_serial_init(struct platform_driver *drv, 1235int s3c24xx_serial_init(struct platform_driver *drv,
1233 struct s3c24xx_uart_info *info) 1236 struct s3c24xx_uart_info *info)
1234{ 1237{
1235 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); 1238 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
1236 drv->driver.pm = &s3c24xx_serial_pm_ops; 1239
1240 drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
1237 1241
1238 return platform_driver_register(drv); 1242 return platform_driver_register(drv);
1239} 1243}
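[Editor's note] The samsung change wraps the dev_pm_ops pointer in a macro so the !CONFIG_PM_SLEEP build assigns NULL rather than referencing ops that were compiled out. The pattern in isolation (assumed names):

    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static const struct dev_pm_ops sketch_pm_ops = {
    	/* .suspend / .resume callbacks go here */
    };
    #define SKETCH_PM_OPS	(&sketch_pm_ops)
    #else
    #define SKETCH_PM_OPS	NULL
    #endif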
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index db7912cb7ae0..a3efbea5dbba 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
200 clear_bit(TTY_IO_ERROR, &tty->flags); 200 clear_bit(TTY_IO_ERROR, &tty->flags);
201 } 201 }
202 202
203 /*
204 * This is to allow setserial on this port. People may want to set
205 * port/irq/type and then reconfigure the port properly if it failed
206 * now.
207 */
203 if (retval && capable(CAP_SYS_ADMIN)) 208 if (retval && capable(CAP_SYS_ADMIN))
204 retval = 0; 209 retval = 0;
205 210
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 2ec57b2fb278..5ea6ec3442e6 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,6 +47,7 @@
47#include <linux/ctype.h> 47#include <linux/ctype.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/dmaengine.h> 49#include <linux/dmaengine.h>
50#include <linux/dma-mapping.h>
50#include <linux/scatterlist.h> 51#include <linux/scatterlist.h>
51#include <linux/slab.h> 52#include <linux/slab.h>
52 53
@@ -95,6 +96,12 @@ struct sci_port {
95#endif 96#endif
96 97
97 struct notifier_block freq_transition; 98 struct notifier_block freq_transition;
99
100#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
101 unsigned short saved_smr;
102 unsigned short saved_fcr;
103 unsigned char saved_brr;
104#endif
98}; 105};
99 106
100/* Function prototypes */ 107/* Function prototypes */
@@ -1076,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
1076 /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 1083 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
1077 and CTS/RTS */ 1084 and CTS/RTS */
1078 1085
1079 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 1086 return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
1080} 1087}
1081 1088
1082#ifdef CONFIG_SERIAL_SH_SCI_DMA 1089#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1633,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1633 return ((freq + 16 * bps) / (32 * bps) - 1); 1640 return ((freq + 16 * bps) / (32 * bps) - 1);
1634} 1641}
1635 1642
1643static void sci_reset(struct uart_port *port)
1644{
1645 unsigned int status;
1646
1647 do {
1648 status = sci_in(port, SCxSR);
1649 } while (!(status & SCxSR_TEND(port)));
1650
1651 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1652
1653 if (port->type != PORT_SCI)
1654 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1655}
1656
1636static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1657static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1637 struct ktermios *old) 1658 struct ktermios *old)
1638{ 1659{
1639 struct sci_port *s = to_sci_port(port); 1660 struct sci_port *s = to_sci_port(port);
1640 unsigned int status, baud, smr_val, max_baud; 1661 unsigned int baud, smr_val, max_baud;
1641 int t = -1; 1662 int t = -1;
1642 u16 scfcr = 0; 1663 u16 scfcr = 0;
1643 1664
@@ -1657,14 +1678,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1657 1678
1658 sci_port_enable(s); 1679 sci_port_enable(s);
1659 1680
1660 do { 1681 sci_reset(port);
1661 status = sci_in(port, SCxSR);
1662 } while (!(status & SCxSR_TEND(port)));
1663
1664 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1665
1666 if (port->type != PORT_SCI)
1667 sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
1668 1682
1669 smr_val = sci_in(port, SCSMR) & 3; 1683 smr_val = sci_in(port, SCSMR) & 3;
1670 1684
@@ -1913,6 +1927,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
1913 1927
1914 port->dev = &dev->dev; 1928 port->dev = &dev->dev;
1915 1929
1930 pm_runtime_irq_safe(&dev->dev);
1916 pm_runtime_enable(&dev->dev); 1931 pm_runtime_enable(&dev->dev);
1917 } 1932 }
1918 1933
@@ -2036,7 +2051,8 @@ static int __devinit serial_console_setup(struct console *co, char *options)
2036 if (options) 2051 if (options)
2037 uart_parse_options(options, &baud, &parity, &bits, &flow); 2052 uart_parse_options(options, &baud, &parity, &bits, &flow);
2038 2053
2039 /* TODO: disable clock */ 2054 sci_port_disable(sci_port);
2055
2040 return uart_set_options(port, co, baud, parity, bits, flow); 2056 return uart_set_options(port, co, baud, parity, bits, flow);
2041} 2057}
2042 2058
@@ -2079,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
2079 return 0; 2095 return 0;
2080} 2096}
2081 2097
2098#define uart_console(port) ((port)->cons->index == (port)->line)
2099
2100static int sci_runtime_suspend(struct device *dev)
2101{
2102 struct sci_port *sci_port = dev_get_drvdata(dev);
2103 struct uart_port *port = &sci_port->port;
2104
2105 if (uart_console(port)) {
2106 sci_port->saved_smr = sci_in(port, SCSMR);
2107 sci_port->saved_brr = sci_in(port, SCBRR);
2108 sci_port->saved_fcr = sci_in(port, SCFCR);
2109 }
2110 return 0;
2111}
2112
2113static int sci_runtime_resume(struct device *dev)
2114{
2115 struct sci_port *sci_port = dev_get_drvdata(dev);
2116 struct uart_port *port = &sci_port->port;
2117
2118 if (uart_console(port)) {
2119 sci_reset(port);
2120 sci_out(port, SCSMR, sci_port->saved_smr);
2121 sci_out(port, SCBRR, sci_port->saved_brr);
2122 sci_out(port, SCFCR, sci_port->saved_fcr);
2123 sci_out(port, SCSCR, sci_port->cfg->scscr);
2124 }
2125 return 0;
2126}
2127
2082#define SCI_CONSOLE (&serial_console) 2128#define SCI_CONSOLE (&serial_console)
2083 2129
2084#else 2130#else
@@ -2088,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
2088} 2134}
2089 2135
2090#define SCI_CONSOLE NULL 2136#define SCI_CONSOLE NULL
2137#define sci_runtime_suspend NULL
2138#define sci_runtime_resume NULL
2091 2139
2092#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2140#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2093 2141
@@ -2203,6 +2251,8 @@ static int sci_resume(struct device *dev)
2203} 2251}
2204 2252
2205static const struct dev_pm_ops sci_dev_pm_ops = { 2253static const struct dev_pm_ops sci_dev_pm_ops = {
2254 .runtime_suspend = sci_runtime_suspend,
2255 .runtime_resume = sci_runtime_resume,
2206 .suspend = sci_suspend, 2256 .suspend = sci_suspend,
2207 .resume = sci_resume, 2257 .resume = sci_resume,
2208}; 2258};
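[Editor's note] With the sh-sci change, runtime PM callbacks sit alongside the system sleep ops in the same dev_pm_ops table. A bare-bones sketch of that wiring, with stub callbacks and assumed names:

    #include <linux/device.h>
    #include <linux/pm.h>

    static int sketch_rt_suspend(struct device *dev) { return 0; }
    static int sketch_rt_resume(struct device *dev)  { return 0; }
    static int sketch_suspend(struct device *dev)    { return 0; }
    static int sketch_resume(struct device *dev)     { return 0; }

    static const struct dev_pm_ops sketch_dev_pm_ops = {
    	.runtime_suspend = sketch_rt_suspend,
    	.runtime_resume	 = sketch_rt_resume,
    	.suspend	 = sketch_suspend,
    	.resume		 = sketch_resume,
    };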
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c327218cad44..9af9f0879a24 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); 235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
236 236
237 /* something nasty happened */ 237 /* something nasty happened */
238 printk(KERN_ERR "%s: addr=%x\n", __func__, addr); 238 printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr);
239 BUG(); 239 BUG();
240 return NULL; 240 return NULL;
241} 241}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 150e4f747c7d..4f1fc81112e6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
1295 * 1295 *
1296 * Locking: tty_mutex for now 1296 * Locking: tty_mutex for now
1297 */ 1297 */
1298static void tty_driver_remove_tty(struct tty_driver *driver, 1298void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
1299 struct tty_struct *tty)
1300{ 1299{
1301 if (driver->ops->remove) 1300 if (driver->ops->remove)
1302 driver->ops->remove(driver, tty); 1301 driver->ops->remove(driver, tty);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8669ba3fe794..73cbbd85219f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1775 struct usb_interface *iface = usb_ifnum_to_if(udev, 1775 struct usb_interface *iface = usb_ifnum_to_if(udev,
1776 cur_alt->desc.bInterfaceNumber); 1776 cur_alt->desc.bInterfaceNumber);
1777 1777
1778 if (!iface)
1779 return -EINVAL;
1778 if (iface->resetting_device) { 1780 if (iface->resetting_device) {
1779 /* 1781 /*
1780 * The USB core just reset the device, so the xHCI host 1782 * The USB core just reset the device, so the xHCI host
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 8f8d3f6cd89e..8f3eab1af885 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
434 config_ep_by_speed(gadget, f, fp->out_ep)) { 434 config_ep_by_speed(gadget, f, fp->out_ep)) {
435 fp->in_ep->desc = NULL; 435 fp->in_ep->desc = NULL;
436 fp->out_ep->desc = NULL; 436 fp->out_ep->desc = NULL;
437 spin_unlock(&port->lock);
437 return -EINVAL; 438 return -EINVAL;
438 } 439 }
439 usb_ep_enable(fp->out_ep); 440 usb_ep_enable(fp->out_ep);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index e051b30c1847..4c32cb19b405 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
343 u32 temp; 343 u32 temp;
344 u32 power_okay; 344 u32 power_okay;
345 int i; 345 int i;
346 u8 resume_needed = 0; 346 unsigned long resume_needed = 0;
347 347
348 if (time_before (jiffies, ehci->next_statechange)) 348 if (time_before (jiffies, ehci->next_statechange))
349 msleep(5); 349 msleep(5);
@@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
416 if (test_bit(i, &ehci->bus_suspended) && 416 if (test_bit(i, &ehci->bus_suspended) &&
417 (temp & PORT_SUSPEND)) { 417 (temp & PORT_SUSPEND)) {
418 temp |= PORT_RESUME; 418 temp |= PORT_RESUME;
419 resume_needed = 1; 419 set_bit(i, &resume_needed);
420 } 420 }
421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
422 } 422 }
@@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
431 i = HCS_N_PORTS (ehci->hcs_params); 431 i = HCS_N_PORTS (ehci->hcs_params);
432 while (i--) { 432 while (i--) {
433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]); 433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
434 if (test_bit(i, &ehci->bus_suspended) && 434 if (test_bit(i, &resume_needed)) {
435 (temp & PORT_SUSPEND)) {
436 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 435 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
437 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 436 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
438 ehci_vdbg (ehci, "resumed port %d\n", i + 1); 437 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
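[Editor's note] The EHCI fix above turns a single yes/no flag into a per-port bitmap, so only ports that were actually put into resume get their resume bits cleared later. The bitmap idiom, sketched with illustrative names:

    #include <linux/bitops.h>
    #include <linux/types.h>

    static void sketch_mark_resumed(unsigned long *resumed, int port)
    {
    	set_bit(port, resumed);
    }

    static bool sketch_needs_finish(const unsigned long *resumed, int port)
    {
    	return test_bit(port, resumed);
    }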
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index b3958b3d3163..9e77f1c8bdbd 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
86 goto fail_hcd; 86 goto fail_hcd;
87 } 87 }
88 88
89 s5p_ehci->hcd = hcd;
89 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); 90 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
90 91
91 if (IS_ERR(s5p_ehci->clk)) { 92 if (IS_ERR(s5p_ehci->clk)) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0be788cc2fdb..723f8231193d 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
463 && (temp & PORT_POWER)) 463 && (temp & PORT_POWER))
464 status |= USB_PORT_STAT_SUSPEND; 464 status |= USB_PORT_STAT_SUSPEND;
465 } 465 }
466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { 466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
467 !DEV_SUPERSPEED(temp)) {
467 if ((temp & PORT_RESET) || !(temp & PORT_PE)) 468 if ((temp & PORT_RESET) || !(temp & PORT_PE))
468 goto error; 469 goto error;
469 if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, 470 if (time_after_eq(jiffies,
470 bus_state->resume_done[wIndex])) { 471 bus_state->resume_done[wIndex])) {
471 xhci_dbg(xhci, "Resume USB2 port %d\n", 472 xhci_dbg(xhci, "Resume USB2 port %d\n",
472 wIndex + 1); 473 wIndex + 1);
473 bus_state->resume_done[wIndex] = 0; 474 bus_state->resume_done[wIndex] = 0;
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
487 xhci_ring_device(xhci, slot_id); 488 xhci_ring_device(xhci, slot_id);
488 bus_state->port_c_suspend |= 1 << wIndex; 489 bus_state->port_c_suspend |= 1 << wIndex;
489 bus_state->suspended_ports &= ~(1 << wIndex); 490 bus_state->suspended_ports &= ~(1 << wIndex);
491 } else {
492 /*
 493 * Resume signaling has been active for less than
 494 * 20 ms. Report the port status as SUSPEND,
495 * let the usbcore check port status again
496 * and clear resume signaling later.
497 */
498 status |= USB_PORT_STAT_SUSPEND;
490 } 499 }
491 } 500 }
492 if ((temp & PORT_PLS_MASK) == XDEV_U0 501 if ((temp & PORT_PLS_MASK) == XDEV_U0
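[Editor's note] The new else branch above keeps reporting the port as suspended until 20 ms of resume signaling have elapsed, checked with the usual jiffies helpers. In sketch form (the deadline variable is assumed to have been stored earlier, as bus_state->resume_done is in this driver):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* True once the resume-signaling window has fully elapsed. */
    static bool sketch_resume_window_done(unsigned long resume_deadline)
    {
    	return time_after_eq(jiffies, resume_deadline);
    }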
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
664 xhci_dbg(xhci, "PORTSC %04x\n", temp); 673 xhci_dbg(xhci, "PORTSC %04x\n", temp);
665 if (temp & PORT_RESET) 674 if (temp & PORT_RESET)
666 goto error; 675 goto error;
667 if (temp & XDEV_U3) { 676 if ((temp & PORT_PLS_MASK) == XDEV_U3) {
668 if ((temp & PORT_PE) == 0) 677 if ((temp & PORT_PE) == 0)
669 goto error; 678 goto error;
670 679
@@ -752,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
752 memset(buf, 0, retval); 761 memset(buf, 0, retval);
753 status = 0; 762 status = 0;
754 763
755 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; 764 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
756 765
757 spin_lock_irqsave(&xhci->lock, flags); 766 spin_lock_irqsave(&xhci->lock, flags);
758 /* For each port, did anything change? If so, set that bit in buf. */ 767 /* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..952e2ded61af 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
514 (unsigned long long) addr); 514 (unsigned long long) addr);
515} 515}
516 516
517/* flip_cycle means flip the cycle bit of all but the first and last TRB.
518 * (The last TRB actually points to the ring enqueue pointer, which is not part
519 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
520 */
517static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 521static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
518 struct xhci_td *cur_td) 522 struct xhci_td *cur_td, bool flip_cycle)
519{ 523{
520 struct xhci_segment *cur_seg; 524 struct xhci_segment *cur_seg;
521 union xhci_trb *cur_trb; 525 union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
528 * leave the pointers intact. 532 * leave the pointers intact.
529 */ 533 */
530 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 534 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
535 /* Flip the cycle bit (link TRBs can't be the first
536 * or last TRB).
537 */
538 if (flip_cycle)
539 cur_trb->generic.field[3] ^=
540 cpu_to_le32(TRB_CYCLE);
531 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 541 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
532 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 542 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
533 "in seg %p (0x%llx dma)\n", 543 "in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
541 cur_trb->generic.field[2] = 0; 551 cur_trb->generic.field[2] = 0;
542 /* Preserve only the cycle bit of this TRB */ 552 /* Preserve only the cycle bit of this TRB */
543 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 553 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
554 /* Flip the cycle bit except on the first or last TRB */
555 if (flip_cycle && cur_trb != cur_td->first_trb &&
556 cur_trb != cur_td->last_trb)
557 cur_trb->generic.field[3] ^=
558 cpu_to_le32(TRB_CYCLE);
544 cur_trb->generic.field[3] |= cpu_to_le32( 559 cur_trb->generic.field[3] |= cpu_to_le32(
545 TRB_TYPE(TRB_TR_NOOP)); 560 TRB_TYPE(TRB_TR_NOOP));
546 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 561 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
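[Editor's note] The flip_cycle handling added to td_to_noop() hands already-queued TRBs back to software by inverting their cycle bits, so the controller's consumer-cycle check skips them. Conceptually (host-endian sketch; the local constant mirrors TRB_CYCLE, bit 0 of the control dword):

    #include <linux/types.h>

    #define SKETCH_TRB_CYCLE	(1u << 0)	/* mirrors TRB_CYCLE */

    static inline u32 sketch_flip_cycle(u32 trb_control)
    {
    	return trb_control ^ SKETCH_TRB_CYCLE;
    }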
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
719 cur_td->urb->stream_id, 734 cur_td->urb->stream_id,
720 cur_td, &deq_state); 735 cur_td, &deq_state);
721 else 736 else
722 td_to_noop(xhci, ep_ring, cur_td); 737 td_to_noop(xhci, ep_ring, cur_td, false);
723remove_finished_td: 738remove_finished_td:
724 /* 739 /*
725 * The event handler won't see a completion for this TD anymore, 740 * The event handler won't see a completion for this TD anymore,
726 * so remove it from the endpoint ring's TD list. Keep it in 741 * so remove it from the endpoint ring's TD list. Keep it in
727 * the cancelled TD list for URB completion later. 742 * the cancelled TD list for URB completion later.
728 */ 743 */
729 list_del(&cur_td->td_list); 744 list_del_init(&cur_td->td_list);
730 } 745 }
731 last_unlinked_td = cur_td; 746 last_unlinked_td = cur_td;
732 xhci_stop_watchdog_timer_in_irq(xhci, ep); 747 xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
754 do { 769 do {
755 cur_td = list_entry(ep->cancelled_td_list.next, 770 cur_td = list_entry(ep->cancelled_td_list.next,
756 struct xhci_td, cancelled_td_list); 771 struct xhci_td, cancelled_td_list);
757 list_del(&cur_td->cancelled_td_list); 772 list_del_init(&cur_td->cancelled_td_list);
758 773
759 /* Clean up the cancelled URB */ 774 /* Clean up the cancelled URB */
760 /* Doesn't matter what we pass for status, since the core will 775 /* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
862 cur_td = list_first_entry(&ring->td_list, 877 cur_td = list_first_entry(&ring->td_list,
863 struct xhci_td, 878 struct xhci_td,
864 td_list); 879 td_list);
865 list_del(&cur_td->td_list); 880 list_del_init(&cur_td->td_list);
866 if (!list_empty(&cur_td->cancelled_td_list)) 881 if (!list_empty(&cur_td->cancelled_td_list))
867 list_del(&cur_td->cancelled_td_list); 882 list_del_init(&cur_td->cancelled_td_list);
868 xhci_giveback_urb_in_irq(xhci, cur_td, 883 xhci_giveback_urb_in_irq(xhci, cur_td,
869 -ESHUTDOWN, "killed"); 884 -ESHUTDOWN, "killed");
870 } 885 }
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
873 &temp_ep->cancelled_td_list, 888 &temp_ep->cancelled_td_list,
874 struct xhci_td, 889 struct xhci_td,
875 cancelled_td_list); 890 cancelled_td_list);
876 list_del(&cur_td->cancelled_td_list); 891 list_del_init(&cur_td->cancelled_td_list);
877 xhci_giveback_urb_in_irq(xhci, cur_td, 892 xhci_giveback_urb_in_irq(xhci, cur_td,
878 -ESHUTDOWN, "killed"); 893 -ESHUTDOWN, "killed");
879 } 894 }
@@ -1565,10 +1580,10 @@ td_cleanup:
1565 else 1580 else
1566 *status = 0; 1581 *status = 0;
1567 } 1582 }
1568 list_del(&td->td_list); 1583 list_del_init(&td->td_list);
1569 /* Was this TD slated to be cancelled but completed anyway? */ 1584 /* Was this TD slated to be cancelled but completed anyway? */
1570 if (!list_empty(&td->cancelled_td_list)) 1585 if (!list_empty(&td->cancelled_td_list))
1571 list_del(&td->cancelled_td_list); 1586 list_del_init(&td->cancelled_td_list);
1572 1587
1573 urb_priv->td_cnt++; 1588 urb_priv->td_cnt++;
1574 /* Giveback the urb when all the tds are completed */ 1589 /* Giveback the urb when all the tds are completed */
@@ -1919,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1919 int status = -EINPROGRESS; 1934 int status = -EINPROGRESS;
1920 struct urb_priv *urb_priv; 1935 struct urb_priv *urb_priv;
1921 struct xhci_ep_ctx *ep_ctx; 1936 struct xhci_ep_ctx *ep_ctx;
1937 struct list_head *tmp;
1922 u32 trb_comp_code; 1938 u32 trb_comp_code;
1923 int ret = 0; 1939 int ret = 0;
1940 int td_num = 0;
1924 1941
1925 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1942 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1926 xdev = xhci->devs[slot_id]; 1943 xdev = xhci->devs[slot_id];
@@ -1942,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1942 return -ENODEV; 1959 return -ENODEV;
1943 } 1960 }
1944 1961
1962 /* Count current td numbers if ep->skip is set */
1963 if (ep->skip) {
1964 list_for_each(tmp, &ep_ring->td_list)
1965 td_num++;
1966 }
1967
1945 event_dma = le64_to_cpu(event->buffer); 1968 event_dma = le64_to_cpu(event->buffer);
1946 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1969 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1947 /* Look for common error cases */ 1970 /* Look for common error cases */
@@ -2053,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2053 goto cleanup; 2076 goto cleanup;
2054 } 2077 }
2055 2078
2079 /* We've skipped all the TDs on the ep ring when ep->skip set */
2080 if (ep->skip && td_num == 0) {
2081 ep->skip = false;
2082 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2083 "Clear skip flag.\n");
2084 ret = 0;
2085 goto cleanup;
2086 }
2087
2056 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2088 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2089 if (ep->skip)
2090 td_num--;
2057 2091
2058 /* Is this a TRB in the currently executing TD? */ 2092 /* Is this a TRB in the currently executing TD? */
2059 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2093 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
@@ -2500,11 +2534,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
2500 2534
2501 if (td_index == 0) { 2535 if (td_index == 0) {
2502 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2536 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2503 if (unlikely(ret)) { 2537 if (unlikely(ret))
2504 xhci_urb_free_priv(xhci, urb_priv);
2505 urb->hcpriv = NULL;
2506 return ret; 2538 return ret;
2507 }
2508 } 2539 }
2509 2540
2510 td->urb = urb; 2541 td->urb = urb;
@@ -2672,6 +2703,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2672{ 2703{
2673 int packets_transferred; 2704 int packets_transferred;
2674 2705
2706 /* One TRB with a zero-length data packet. */
2707 if (running_total == 0 && trb_buff_len == 0)
2708 return 0;
2709
2675 /* All the TRB queueing functions don't count the current TRB in 2710 /* All the TRB queueing functions don't count the current TRB in
2676 * running_total. 2711 * running_total.
2677 */ 2712 */
@@ -3113,20 +3148,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3113 struct urb *urb, int i) 3148 struct urb *urb, int i)
3114{ 3149{
3115 int num_trbs = 0; 3150 int num_trbs = 0;
3116 u64 addr, td_len, running_total; 3151 u64 addr, td_len;
3117 3152
3118 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3153 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3119 td_len = urb->iso_frame_desc[i].length; 3154 td_len = urb->iso_frame_desc[i].length;
3120 3155
3121 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3156 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3122 running_total &= TRB_MAX_BUFF_SIZE - 1; 3157 TRB_MAX_BUFF_SIZE);
3123 if (running_total != 0) 3158 if (num_trbs == 0)
3124 num_trbs++;
3125
3126 while (running_total < td_len) {
3127 num_trbs++; 3159 num_trbs++;
3128 running_total += TRB_MAX_BUFF_SIZE;
3129 }
3130 3160
3131 return num_trbs; 3161 return num_trbs;
3132} 3162}
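[Editor's note] The count_isoc_trbs_needed() rewrite replaces the accumulation loop with a single ceiling division: one TRB per TRB_MAX_BUFF_SIZE chunk, counting the unaligned lead-in before the first boundary, with a zero-length TD still costing one TRB. The arithmetic in isolation (buff_size stands in for TRB_MAX_BUFF_SIZE and is assumed to be a power of two):

    #include <linux/kernel.h>

    static int sketch_count_trbs(u64 addr, u64 td_len, u64 buff_size)
    {
    	int num = DIV_ROUND_UP(td_len + (addr & (buff_size - 1)),
    			       buff_size);

    	return num ? num : 1;	/* zero-length TD still needs one TRB */
    }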
@@ -3226,6 +3256,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3226 start_trb = &ep_ring->enqueue->generic; 3256 start_trb = &ep_ring->enqueue->generic;
3227 start_cycle = ep_ring->cycle_state; 3257 start_cycle = ep_ring->cycle_state;
3228 3258
3259 urb_priv = urb->hcpriv;
3229 /* Queue the first TRB, even if it's zero-length */ 3260 /* Queue the first TRB, even if it's zero-length */
3230 for (i = 0; i < num_tds; i++) { 3261 for (i = 0; i < num_tds; i++) {
3231 unsigned int total_packet_count; 3262 unsigned int total_packet_count;
@@ -3237,9 +3268,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3237 addr = start_addr + urb->iso_frame_desc[i].offset; 3268 addr = start_addr + urb->iso_frame_desc[i].offset;
3238 td_len = urb->iso_frame_desc[i].length; 3269 td_len = urb->iso_frame_desc[i].length;
3239 td_remain_len = td_len; 3270 td_remain_len = td_len;
3240 /* FIXME: Ignoring zero-length packets, can those happen? */
3241 total_packet_count = roundup(td_len, 3271 total_packet_count = roundup(td_len,
3242 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 3272 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3273 /* A zero-length transfer still involves at least one packet. */
3274 if (total_packet_count == 0)
3275 total_packet_count++;
3243 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3276 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3244 total_packet_count); 3277 total_packet_count);
3245 residue = xhci_get_last_burst_packet_count(xhci, 3278 residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3282,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3249 3282
3250 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3283 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3251 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3284 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3252 if (ret < 0) 3285 if (ret < 0) {
3253 return ret; 3286 if (i == 0)
3287 return ret;
3288 goto cleanup;
3289 }
3254 3290
3255 urb_priv = urb->hcpriv;
3256 td = urb_priv->td[i]; 3291 td = urb_priv->td[i];
3257
3258 for (j = 0; j < trbs_per_td; j++) { 3292 for (j = 0; j < trbs_per_td; j++) {
3259 u32 remainder = 0; 3293 u32 remainder = 0;
3260 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3294 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3378,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3344 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3378 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3345 start_cycle, start_trb); 3379 start_cycle, start_trb);
3346 return 0; 3380 return 0;
3381cleanup:
3382 /* Clean up a partially enqueued isoc transfer. */
3383
3384 for (i--; i >= 0; i--)
3385 list_del_init(&urb_priv->td[i]->td_list);
3386
3387 /* Use the first TD as a temporary variable to turn the TDs we've queued
3388 * into No-ops with a software-owned cycle bit. That way the hardware
3389 * won't accidentally start executing bogus TDs when we partially
3390 * overwrite them. td->first_trb and td->start_seg are already set.
3391 */
3392 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3393 /* Every TRB except the first & last will have its cycle bit flipped. */
3394 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3395
3396 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3397 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3398 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3399 ep_ring->cycle_state = start_cycle;
3400 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3401 return ret;
3347} 3402}
3348 3403
3349/* 3404/*
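
The new cleanup label above rolls back a partially enqueued isoc URB: it walks the already-prepared TDs backwards, unlinks them, turns the written TRBs into no-ops via td_to_noop(), and rewinds the ring's enqueue pointer and cycle state to where the transfer started. The reverse walk "for (i--; i >= 0; i--)" is the usual unwind-only-what-succeeded idiom; a minimal stand-alone sketch of that part, using a hypothetical item type rather than the driver's TD structures:

#include <stdbool.h>
#include <stdio.h>

#define NUM_ITEMS 8

/* Hypothetical resource; stands in for a prepared TD. */
static bool prepared[NUM_ITEMS];

static int prepare(int i)
{
	if (i == 5)			/* simulate a failure part-way through */
		return -1;
	prepared[i] = true;
	return 0;
}

static void unprepare(int i)
{
	prepared[i] = false;
}

static int prepare_all(void)
{
	int i, ret = 0;

	for (i = 0; i < NUM_ITEMS; i++) {
		ret = prepare(i);
		if (ret < 0)
			goto cleanup;
	}
	return 0;
cleanup:
	/* Undo only the items that were prepared before the failure. */
	for (i--; i >= 0; i--)
		unprepare(i);
	return ret;
}

int main(void)
{
	printf("prepare_all() = %d\n", prepare_all());
	return 0;
}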
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1c4432d8fc10..3a0f695138f4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1085 if (urb->dev->speed == USB_SPEED_FULL) { 1085 if (urb->dev->speed == USB_SPEED_FULL) {
1086 ret = xhci_check_maxpacket(xhci, slot_id, 1086 ret = xhci_check_maxpacket(xhci, slot_id,
1087 ep_index, urb); 1087 ep_index, urb);
1088 if (ret < 0) 1088 if (ret < 0) {
1089 xhci_urb_free_priv(xhci, urb_priv);
1090 urb->hcpriv = NULL;
1089 return ret; 1091 return ret;
1092 }
1090 } 1093 }
1091 1094
1092 /* We have a spinlock and interrupts disabled, so we must pass 1095 /* We have a spinlock and interrupts disabled, so we must pass
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1097 goto dying; 1100 goto dying;
1098 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 1101 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1099 slot_id, ep_index); 1102 slot_id, ep_index);
1103 if (ret)
1104 goto free_priv;
1100 spin_unlock_irqrestore(&xhci->lock, flags); 1105 spin_unlock_irqrestore(&xhci->lock, flags);
1101 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 1106 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1102 spin_lock_irqsave(&xhci->lock, flags); 1107 spin_lock_irqsave(&xhci->lock, flags);
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1117 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 1122 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1118 slot_id, ep_index); 1123 slot_id, ep_index);
1119 } 1124 }
1125 if (ret)
1126 goto free_priv;
1120 spin_unlock_irqrestore(&xhci->lock, flags); 1127 spin_unlock_irqrestore(&xhci->lock, flags);
1121 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 1128 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1122 spin_lock_irqsave(&xhci->lock, flags); 1129 spin_lock_irqsave(&xhci->lock, flags);
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1124 goto dying; 1131 goto dying;
1125 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 1132 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1126 slot_id, ep_index); 1133 slot_id, ep_index);
1134 if (ret)
1135 goto free_priv;
1127 spin_unlock_irqrestore(&xhci->lock, flags); 1136 spin_unlock_irqrestore(&xhci->lock, flags);
1128 } else { 1137 } else {
1129 spin_lock_irqsave(&xhci->lock, flags); 1138 spin_lock_irqsave(&xhci->lock, flags);
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1131 goto dying; 1140 goto dying;
1132 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, 1141 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1133 slot_id, ep_index); 1142 slot_id, ep_index);
1143 if (ret)
1144 goto free_priv;
1134 spin_unlock_irqrestore(&xhci->lock, flags); 1145 spin_unlock_irqrestore(&xhci->lock, flags);
1135 } 1146 }
1136exit: 1147exit:
1137 return ret; 1148 return ret;
1138dying: 1149dying:
1139 xhci_urb_free_priv(xhci, urb_priv);
1140 urb->hcpriv = NULL;
1141 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " 1150 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1142 "non-responsive xHCI host.\n", 1151 "non-responsive xHCI host.\n",
1143 urb->ep->desc.bEndpointAddress, urb); 1152 urb->ep->desc.bEndpointAddress, urb);
1153 ret = -ESHUTDOWN;
1154free_priv:
1155 xhci_urb_free_priv(xhci, urb_priv);
1156 urb->hcpriv = NULL;
1144 spin_unlock_irqrestore(&xhci->lock, flags); 1157 spin_unlock_irqrestore(&xhci->lock, flags);
1145 return -ESHUTDOWN; 1158 return ret;
1146} 1159}
1147 1160
1148/* Get the right ring for the given URB. 1161/* Get the right ring for the given URB.
@@ -1239,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1239 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1252 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1240 xhci_dbg(xhci, "HW died, freeing TD.\n"); 1253 xhci_dbg(xhci, "HW died, freeing TD.\n");
1241 urb_priv = urb->hcpriv; 1254 urb_priv = urb->hcpriv;
1255 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1256 td = urb_priv->td[i];
1257 if (!list_empty(&td->td_list))
1258 list_del_init(&td->td_list);
1259 if (!list_empty(&td->cancelled_td_list))
1260 list_del_init(&td->cancelled_td_list);
1261 }
1242 1262
1243 usb_hcd_unlink_urb_from_ep(hcd, urb); 1263 usb_hcd_unlink_urb_from_ep(hcd, urb);
1244 spin_unlock_irqrestore(&xhci->lock, flags); 1264 spin_unlock_irqrestore(&xhci->lock, flags);
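
The xhci_urb_enqueue() changes above make every failed submission release urb_priv through one exit: queueing errors now jump to the new free_priv label, the dying path sets -ESHUTDOWN and falls through into it, and the full-speed max-packet check frees explicitly before the lock is taken. A minimal, self-contained sketch of that single-exit goto style, with hypothetical names rather than the xHCI code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct priv { int dummy; };

static int do_queue(int fail)
{
	return fail ? -ENOMEM : 0;	/* pretend to queue a transfer */
}

static int enqueue(int fail, int dead)
{
	struct priv *p;
	int ret;

	p = calloc(1, sizeof(*p));
	if (!p)
		return -ENOMEM;

	if (dead)
		goto dying;

	ret = do_queue(fail);
	if (ret)
		goto free_priv;
	return 0;

dying:
	ret = -ESHUTDOWN;		/* then fall through to the cleanup */
free_priv:
	free(p);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", enqueue(0, 0), enqueue(1, 0), enqueue(0, 1));
	return 0;
}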
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ae8c39617743..5e7cfba5b079 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/prefetch.h>
20 21
21#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
22 23
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 149f3f310a0a..318fb4e8a885 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)
226 struct cppi *controller; 226 struct cppi *controller;
227 void __iomem *tibase; 227 void __iomem *tibase;
228 int i; 228 int i;
229 struct musb *musb;
229 230
230 controller = container_of(c, struct cppi, controller); 231 controller = container_of(c, struct cppi, controller);
232 musb = controller->musb;
231 233
232 tibase = controller->tibase; 234 tibase = controller->tibase;
233 /* DISABLE INDIVIDUAL CHANNEL Interrupts */ 235 /* DISABLE INDIVIDUAL CHANNEL Interrupts */
@@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,
289 u8 index; 291 u8 index;
290 struct cppi_channel *cppi_ch; 292 struct cppi_channel *cppi_ch;
291 void __iomem *tibase; 293 void __iomem *tibase;
294 struct musb *musb;
292 295
293 controller = container_of(c, struct cppi, controller); 296 controller = container_of(c, struct cppi, controller);
294 tibase = controller->tibase; 297 tibase = controller->tibase;
298 musb = controller->musb;
295 299
296 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ 300 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
297 index = ep->epnum - 1; 301 index = ep->epnum - 1;
@@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)
339 c = container_of(channel, struct cppi_channel, channel); 343 c = container_of(channel, struct cppi_channel, channel);
340 tibase = c->controller->tibase; 344 tibase = c->controller->tibase;
341 if (!c->hw_ep) 345 if (!c->hw_ep)
342 dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); 346 dev_dbg(c->controller->musb->controller,
347 "releasing idle DMA channel %p\n", c);
343 else if (!c->transmit) 348 else if (!c->transmit)
344 core_rxirq_enable(tibase, c->index + 1); 349 core_rxirq_enable(tibase, c->index + 1);
345 350
@@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
357 362
358 musb_ep_select(base, c->index + 1); 363 musb_ep_select(base, c->index + 1);
359 364
360 DBG(level, "RX DMA%d%s: %d left, csr %04x, " 365 dev_dbg(c->controller->musb->controller,
361 "%08x H%08x S%08x C%08x, " 366 "RX DMA%d%s: %d left, csr %04x, "
362 "B%08x L%08x %08x .. %08x" 367 "%08x H%08x S%08x C%08x, "
363 "\n", 368 "B%08x L%08x %08x .. %08x"
369 "\n",
364 c->index, tag, 370 c->index, tag,
365 musb_readl(c->controller->tibase, 371 musb_readl(c->controller->tibase,
366 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), 372 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
387 393
388 musb_ep_select(base, c->index + 1); 394 musb_ep_select(base, c->index + 1);
389 395
390 DBG(level, "TX DMA%d%s: csr %04x, " 396 dev_dbg(c->controller->musb->controller,
391 "H%08x S%08x C%08x %08x, " 397 "TX DMA%d%s: csr %04x, "
392 "F%08x L%08x .. %08x" 398 "H%08x S%08x C%08x %08x, "
393 "\n", 399 "F%08x L%08x .. %08x"
400 "\n",
394 c->index, tag, 401 c->index, tag,
395 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 402 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
396 403
@@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1022 int i; 1029 int i;
1023 dma_addr_t safe2ack; 1030 dma_addr_t safe2ack;
1024 void __iomem *regs = rx->hw_ep->regs; 1031 void __iomem *regs = rx->hw_ep->regs;
1032 struct musb *musb = cppi->musb;
1025 1033
1026 cppi_dump_rx(6, rx, "/K"); 1034 cppi_dump_rx(6, rx, "/K");
1027 1035
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 668eeef601ae..b3c065ab9dbc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,7 +172,8 @@ enum musb_g_ep0_state {
172#endif 172#endif
173 173
174/* TUSB mapping: "flat" plus ep0 special cases */ 174/* TUSB mapping: "flat" plus ep0 special cases */
175#if defined(CONFIG_USB_MUSB_TUSB6010) 175#if defined(CONFIG_USB_MUSB_TUSB6010) || \
176 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
176#define musb_ep_select(_mbase, _epnum) \ 177#define musb_ep_select(_mbase, _epnum) \
177 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 178 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
178#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET 179#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -241,7 +242,8 @@ struct musb_hw_ep {
241 void __iomem *fifo; 242 void __iomem *fifo;
242 void __iomem *regs; 243 void __iomem *regs;
243 244
244#ifdef CONFIG_USB_MUSB_TUSB6010 245#if defined(CONFIG_USB_MUSB_TUSB6010) || \
246 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
245 void __iomem *conf; 247 void __iomem *conf;
246#endif 248#endif
247 249
@@ -258,7 +260,8 @@ struct musb_hw_ep {
258 struct dma_channel *tx_channel; 260 struct dma_channel *tx_channel;
259 struct dma_channel *rx_channel; 261 struct dma_channel *rx_channel;
260 262
261#ifdef CONFIG_USB_MUSB_TUSB6010 263#if defined(CONFIG_USB_MUSB_TUSB6010) || \
264 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
262 /* TUSB has "asynchronous" and "synchronous" dma modes */ 265 /* TUSB has "asynchronous" and "synchronous" dma modes */
263 dma_addr_t fifo_async; 266 dma_addr_t fifo_async;
264 dma_addr_t fifo_sync; 267 dma_addr_t fifo_sync;
@@ -356,7 +359,8 @@ struct musb {
356 void __iomem *ctrl_base; 359 void __iomem *ctrl_base;
357 void __iomem *mregs; 360 void __iomem *mregs;
358 361
359#ifdef CONFIG_USB_MUSB_TUSB6010 362#if defined(CONFIG_USB_MUSB_TUSB6010) || \
363 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
360 dma_addr_t async; 364 dma_addr_t async;
361 dma_addr_t sync; 365 dma_addr_t sync;
362 void __iomem *sync_va; 366 void __iomem *sync_va;
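
The musb_core.h changes above (and the matching ones in musb_regs.h below) swap a bare #ifdef CONFIG_USB_MUSB_TUSB6010 for a test of both CONFIG_USB_MUSB_TUSB6010 and CONFIG_USB_MUSB_TUSB6010_MODULE. Kconfig defines the plain symbol only for built-in (=y) code; a tristate option built as a module (=m) defines the _MODULE variant instead, so the old test silently dropped the TUSB-specific fields and register layout from modular builds. A compressed illustration of the idiom (later kernels wrap the same check in IS_ENABLED() from <linux/kconfig.h>):

/* With CONFIG_FOO=y the preprocessor sees CONFIG_FOO;
 * with CONFIG_FOO=m it sees CONFIG_FOO_MODULE instead.
 * Checking both keeps the code present for either kind of build.
 */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
#define FOO_AVAILABLE 1
#else
#define FOO_AVAILABLE 0
#endif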
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 8c41a2e6ea77..e81820370d6f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1856,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb)
1856 1856
1857 return 0; 1857 return 0;
1858err: 1858err:
1859 musb->g.dev.parent = NULL;
1859 device_unregister(&musb->g.dev); 1860 device_unregister(&musb->g.dev);
1860 return status; 1861 return status;
1861} 1862}
@@ -1863,7 +1864,8 @@ err:
1863void musb_gadget_cleanup(struct musb *musb) 1864void musb_gadget_cleanup(struct musb *musb)
1864{ 1865{
1865 usb_del_gadget_udc(&musb->g); 1866 usb_del_gadget_udc(&musb->g);
1866 device_unregister(&musb->g.dev); 1867 if (musb->g.dev.parent)
1868 device_unregister(&musb->g.dev);
1867} 1869}
1868 1870
1869/* 1871/*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 82410703dcd3..03f2655af290 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,8 @@
234#define MUSB_TESTMODE 0x0F /* 8 bit */ 234#define MUSB_TESTMODE 0x0F /* 8 bit */
235 235
236/* Get offset for a given FIFO from musb->mregs */ 236/* Get offset for a given FIFO from musb->mregs */
237#ifdef CONFIG_USB_MUSB_TUSB6010 237#if defined(CONFIG_USB_MUSB_TUSB6010) || \
238 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
238#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) 239#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
239#else 240#else
240#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) 241#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -295,7 +296,8 @@
295#define MUSB_FLAT_OFFSET(_epnum, _offset) \ 296#define MUSB_FLAT_OFFSET(_epnum, _offset) \
296 (0x100 + (0x10*(_epnum)) + (_offset)) 297 (0x100 + (0x10*(_epnum)) + (_offset))
297 298
298#ifdef CONFIG_USB_MUSB_TUSB6010 299#if defined(CONFIG_USB_MUSB_TUSB6010) || \
300 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
299/* TUSB6010 EP0 configuration register is special */ 301/* TUSB6010 EP0 configuration register is special */
300#define MUSB_TUSB_OFFSET(_epnum, _offset) \ 302#define MUSB_TUSB_OFFSET(_epnum, _offset) \
301 (0x10 + _offset) 303 (0x10 + _offset)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 9eec41fbf3a4..ec1480191f78 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/prefetch.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 07c8a73dfe41..b67b4bc596c1 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -20,6 +20,7 @@
20#include <plat/mux.h> 20#include <plat/mux.h>
21 21
22#include "musb_core.h" 22#include "musb_core.h"
23#include "tusb6010.h"
23 24
24#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) 25#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
25 26
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index cecace411832..ef4333f4bbe0 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data)
65 struct musb *musb = hw_ep->musb; 65 struct musb *musb = hw_ep->musb;
66 unsigned long flags; 66 unsigned long flags;
67 67
68 DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); 68 dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
69 hw_ep->epnum);
69 70
70 spin_lock_irqsave(&musb->lock, flags); 71 spin_lock_irqsave(&musb->lock, flags);
71 ux500_channel->channel.actual_len = ux500_channel->cur_len; 72 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data)
84 struct musb *musb = hw_ep->musb; 85 struct musb *musb = hw_ep->musb;
85 unsigned long flags; 86 unsigned long flags;
86 87
87 DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); 88 dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
89 hw_ep->epnum);
88 90
89 spin_lock_irqsave(&musb->lock, flags); 91 spin_lock_irqsave(&musb->lock, flags);
90 ux500_channel->channel.actual_len = ux500_channel->cur_len; 92 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel,
116 enum dma_slave_buswidth addr_width; 118 enum dma_slave_buswidth addr_width;
117 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + 119 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
118 ux500_channel->controller->phy_base); 120 ux500_channel->controller->phy_base);
121 struct musb *musb = ux500_channel->controller->private_data;
119 122
120 DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", 123 dev_dbg(musb->controller,
121 packet_sz, mode, dma_addr, len, ux500_channel->is_tx); 124 "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
125 packet_sz, mode, dma_addr, len, ux500_channel->is_tx);
122 126
123 ux500_channel->cur_len = len; 127 ux500_channel->cur_len = len;
124 128
@@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel,
133 DMA_SLAVE_BUSWIDTH_4_BYTES; 137 DMA_SLAVE_BUSWIDTH_4_BYTES;
134 138
135 slave_conf.direction = direction; 139 slave_conf.direction = direction;
136 if (direction == DMA_FROM_DEVICE) { 140 slave_conf.src_addr = usb_fifo_addr;
137 slave_conf.src_addr = usb_fifo_addr; 141 slave_conf.src_addr_width = addr_width;
138 slave_conf.src_addr_width = addr_width; 142 slave_conf.src_maxburst = 16;
139 slave_conf.src_maxburst = 16; 143 slave_conf.dst_addr = usb_fifo_addr;
140 } else { 144 slave_conf.dst_addr_width = addr_width;
141 slave_conf.dst_addr = usb_fifo_addr; 145 slave_conf.dst_maxburst = 16;
142 slave_conf.dst_addr_width = addr_width; 146
143 slave_conf.dst_maxburst = 16;
144 }
145 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, 147 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
146 (unsigned long) &slave_conf); 148 (unsigned long) &slave_conf);
147 149
@@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
166 struct ux500_dma_controller *controller = container_of(c, 168 struct ux500_dma_controller *controller = container_of(c,
167 struct ux500_dma_controller, controller); 169 struct ux500_dma_controller, controller);
168 struct ux500_dma_channel *ux500_channel = NULL; 170 struct ux500_dma_channel *ux500_channel = NULL;
171 struct musb *musb = controller->private_data;
169 u8 ch_num = hw_ep->epnum - 1; 172 u8 ch_num = hw_ep->epnum - 1;
170 u32 max_ch; 173 u32 max_ch;
171 174
@@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
192 ux500_channel->hw_ep = hw_ep; 195 ux500_channel->hw_ep = hw_ep;
193 ux500_channel->is_allocated = 1; 196 ux500_channel->is_allocated = 1;
194 197
195 DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", 198 dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
196 hw_ep->epnum, is_tx, ch_num); 199 hw_ep->epnum, is_tx, ch_num);
197 200
198 return &(ux500_channel->channel); 201 return &(ux500_channel->channel);
@@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
201static void ux500_dma_channel_release(struct dma_channel *channel) 204static void ux500_dma_channel_release(struct dma_channel *channel)
202{ 205{
203 struct ux500_dma_channel *ux500_channel = channel->private_data; 206 struct ux500_dma_channel *ux500_channel = channel->private_data;
207 struct musb *musb = ux500_channel->controller->private_data;
204 208
205 DBG(7, "channel=%d\n", ux500_channel->ch_num); 209 dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
206 210
207 if (ux500_channel->is_allocated) { 211 if (ux500_channel->is_allocated) {
208 ux500_channel->is_allocated = 0; 212 ux500_channel->is_allocated = 0;
@@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
252 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; 256 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
253 u16 csr; 257 u16 csr;
254 258
255 DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, 259 dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
256 ux500_channel->is_tx); 260 ux500_channel->ch_num, ux500_channel->is_tx);
257 261
258 if (channel->status == MUSB_DMA_STATUS_BUSY) { 262 if (channel->status == MUSB_DMA_STATUS_BUSY) {
259 if (ux500_channel->is_tx) { 263 if (ux500_channel->is_tx) {
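
The cppi_dma and ux500_dma hunks above are part of the musb-wide move from the driver-private DBG() macro to dev_dbg(). dev_dbg() needs a struct device pointer, so each call site now reaches one through the controller (musb->controller), and several functions gain a local struct musb * purely to shorten those expressions. A minimal sketch of the resulting shape, with illustrative names only:

#include <linux/device.h>

/* Hypothetical helper: log a completed transfer against the device that
 * owns it, so the message carries the driver/device prefix and can be
 * switched on per device through dynamic debug.
 */
static void log_dma_done(struct device *dev, int epnum, int is_tx)
{
	dev_dbg(dev, "DMA %s transfer done on hw_ep=%d\n",
		is_tx ? "tx" : "rx", epnum);
}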
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 78a2cf9551cc..5fc13e717911 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial);
101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
102static int ftdi_NDI_device_setup(struct usb_serial *serial); 102static int ftdi_NDI_device_setup(struct usb_serial *serial);
103static int ftdi_stmclite_probe(struct usb_serial *serial); 103static int ftdi_stmclite_probe(struct usb_serial *serial);
104static int ftdi_8u2232c_probe(struct usb_serial *serial);
104static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 105static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
105static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 106static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
106 107
@@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
128 .probe = ftdi_stmclite_probe, 129 .probe = ftdi_stmclite_probe,
129}; 130};
130 131
132static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
133 .probe = ftdi_8u2232c_probe,
134};
135
131/* 136/*
132 * The 8U232AM has the same API as the sio except for: 137 * The 8U232AM has the same API as the sio except for:
133 * - it can support MUCH higher baudrates; up to: 138 * - it can support MUCH higher baudrates; up to:
@@ -178,7 +183,8 @@ static struct usb_device_id id_table_combined [] = {
178 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, 183 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
179 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
181 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 186 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
187 .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
182 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 188 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, 189 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
184 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 190 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
@@ -1737,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1737 return 0; 1743 return 0;
1738} 1744}
1739 1745
1746static int ftdi_8u2232c_probe(struct usb_serial *serial)
1747{
1748 struct usb_device *udev = serial->dev;
1749
1750 dbg("%s", __func__);
1751
1752 if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
1753 return ftdi_jtag_probe(serial);
1754
1755 return 0;
1756}
1757
1740/* 1758/*
1741 * First and second port on STMCLiteadaptors is reserved for JTAG interface 1759 * First and second port on STMCLiteadaptors is reserved for JTAG interface
1742 * and the forth port for pio 1760 * and the forth port for pio
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 815656198914..fe22e90bc879 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -148,6 +148,8 @@ static void option_instat_callback(struct urb *urb);
148#define HUAWEI_PRODUCT_K4505 0x1464 148#define HUAWEI_PRODUCT_K4505 0x1464
149#define HUAWEI_PRODUCT_K3765 0x1465 149#define HUAWEI_PRODUCT_K3765 0x1465
150#define HUAWEI_PRODUCT_E14AC 0x14AC 150#define HUAWEI_PRODUCT_E14AC 0x14AC
151#define HUAWEI_PRODUCT_K3806 0x14AE
152#define HUAWEI_PRODUCT_K4605 0x14C6
151#define HUAWEI_PRODUCT_K3770 0x14C9 153#define HUAWEI_PRODUCT_K3770 0x14C9
152#define HUAWEI_PRODUCT_K3771 0x14CA 154#define HUAWEI_PRODUCT_K3771 0x14CA
153#define HUAWEI_PRODUCT_K4510 0x14CB 155#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -416,6 +418,56 @@ static void option_instat_callback(struct urb *urb);
416#define SAMSUNG_VENDOR_ID 0x04e8 418#define SAMSUNG_VENDOR_ID 0x04e8
417#define SAMSUNG_PRODUCT_GT_B3730 0x6889 419#define SAMSUNG_PRODUCT_GT_B3730 0x6889
418 420
421/* YUGA products www.yuga-info.com*/
422#define YUGA_VENDOR_ID 0x257A
423#define YUGA_PRODUCT_CEM600 0x1601
424#define YUGA_PRODUCT_CEM610 0x1602
425#define YUGA_PRODUCT_CEM500 0x1603
426#define YUGA_PRODUCT_CEM510 0x1604
427#define YUGA_PRODUCT_CEM800 0x1605
428#define YUGA_PRODUCT_CEM900 0x1606
429
430#define YUGA_PRODUCT_CEU818 0x1607
431#define YUGA_PRODUCT_CEU816 0x1608
432#define YUGA_PRODUCT_CEU828 0x1609
433#define YUGA_PRODUCT_CEU826 0x160A
434#define YUGA_PRODUCT_CEU518 0x160B
435#define YUGA_PRODUCT_CEU516 0x160C
436#define YUGA_PRODUCT_CEU528 0x160D
437#define YUGA_PRODUCT_CEU526 0x160F
438
439#define YUGA_PRODUCT_CWM600 0x2601
440#define YUGA_PRODUCT_CWM610 0x2602
441#define YUGA_PRODUCT_CWM500 0x2603
442#define YUGA_PRODUCT_CWM510 0x2604
443#define YUGA_PRODUCT_CWM800 0x2605
444#define YUGA_PRODUCT_CWM900 0x2606
445
446#define YUGA_PRODUCT_CWU718 0x2607
447#define YUGA_PRODUCT_CWU716 0x2608
448#define YUGA_PRODUCT_CWU728 0x2609
449#define YUGA_PRODUCT_CWU726 0x260A
450#define YUGA_PRODUCT_CWU518 0x260B
451#define YUGA_PRODUCT_CWU516 0x260C
452#define YUGA_PRODUCT_CWU528 0x260D
453#define YUGA_PRODUCT_CWU526 0x260F
454
455#define YUGA_PRODUCT_CLM600 0x2601
456#define YUGA_PRODUCT_CLM610 0x2602
457#define YUGA_PRODUCT_CLM500 0x2603
458#define YUGA_PRODUCT_CLM510 0x2604
459#define YUGA_PRODUCT_CLM800 0x2605
460#define YUGA_PRODUCT_CLM900 0x2606
461
462#define YUGA_PRODUCT_CLU718 0x2607
463#define YUGA_PRODUCT_CLU716 0x2608
464#define YUGA_PRODUCT_CLU728 0x2609
465#define YUGA_PRODUCT_CLU726 0x260A
466#define YUGA_PRODUCT_CLU518 0x260B
467#define YUGA_PRODUCT_CLU516 0x260C
468#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F
470
419/* some devices interfaces need special handling due to a number of reasons */ 471/* some devices interfaces need special handling due to a number of reasons */
420enum option_blacklist_reason { 472enum option_blacklist_reason {
421 OPTION_BLACKLIST_NONE = 0, 473 OPTION_BLACKLIST_NONE = 0,
@@ -551,6 +603,8 @@ static const struct usb_device_id option_ids[] = {
551 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 603 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
552 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 604 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
553 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, 605 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
606 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
607 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
554 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, 608 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
555 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, 609 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
556 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, 610 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -1005,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {
1005 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1059 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1006 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 1060 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
1007 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1061 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1062 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1063 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
1064 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
1065 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
1066 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
1067 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
1068 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
1069 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
1070 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
1071 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
1072 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
1073 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
1074 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
1075 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
1076 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
1077 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
1078 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
1079 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
1080 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
1081 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
1082 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
1083 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
1084 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
1085 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
1086 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
1087 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
1088 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
1089 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
1090 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
1091 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
1092 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
1093 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
1094 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
1095 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
1096 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
1097 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
1098 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
1099 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
1100 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
1101 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1102 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1103 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
1008 { } /* Terminating entry */ 1104 { } /* Terminating entry */
1009}; 1105};
1010MODULE_DEVICE_TABLE(usb, option_ids); 1106MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1134,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,
1134 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) 1230 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
1135 return -ENODEV; 1231 return -ENODEV;
1136 1232
1137 /* Don't bind network interfaces on Huawei K3765 & K4505 */ 1233 /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
1138 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && 1234 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
1139 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || 1235 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
1140 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && 1236 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
1141 serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) 1237 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
1238 (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
1239 serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
1142 return -ENODEV; 1240 return -ENODEV;
1143 1241
1144 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ 1242 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 05a8832bb3eb..d06886a2bfb5 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit);
1009MODULE_LICENSE("GPL v2"); 1009MODULE_LICENSE("GPL v2");
1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
1011MODULE_DESCRIPTION("ADP8870 Backlight driver"); 1011MODULE_DESCRIPTION("ADP8870 Backlight driver");
1012MODULE_ALIAS("platform:adp8870-backlight"); 1012MODULE_ALIAS("i2c:adp8870-backlight");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292fb92d8..7363c1b169e8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
22static const char const *backlight_types[] = { 22static const char *const backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw", 23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform", 24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware", 25 [BACKLIGHT_FIRMWARE] = "firmware",
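
The backlight_types fix above corrects a misplaced qualifier: "const char const *" applies const twice to the characters (some compilers warn about the duplicate) while leaving the array of pointers itself writable, whereas the intended "const char *const" makes both the strings and the table entries read-only. In isolation:

/* Array of immutable pointers to immutable strings: neither the
 * characters nor the table entries can be modified at runtime.
 */
static const char *const example_types[] = {
	"raw", "platform", "firmware",
};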
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 9f1e389d51d2..b0582917f0c8 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -11,7 +11,7 @@
11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. 11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
12 */ 12 */
13 13
14 14#include <linux/module.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/fb.h> 17#include <linux/fb.h>
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b8f38ec6eb18..8b5b2a4124c7 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,8 @@ struct pwm_bl_data {
28 unsigned int lth_brightness; 28 unsigned int lth_brightness;
29 int (*notify)(struct device *, 29 int (*notify)(struct device *,
30 int brightness); 30 int brightness);
31 void (*notify_after)(struct device *,
32 int brightness);
31 int (*check_fb)(struct device *, struct fb_info *); 33 int (*check_fb)(struct device *, struct fb_info *);
32}; 34};
33 35
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
55 pwm_config(pb->pwm, brightness, pb->period); 57 pwm_config(pb->pwm, brightness, pb->period);
56 pwm_enable(pb->pwm); 58 pwm_enable(pb->pwm);
57 } 59 }
60
61 if (pb->notify_after)
62 pb->notify_after(pb->dev, brightness);
63
58 return 0; 64 return 0;
59} 65}
60 66
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
105 111
106 pb->period = data->pwm_period_ns; 112 pb->period = data->pwm_period_ns;
107 pb->notify = data->notify; 113 pb->notify = data->notify;
114 pb->notify_after = data->notify_after;
108 pb->check_fb = data->check_fb; 115 pb->check_fb = data->check_fb;
109 pb->lth_brightness = data->lth_brightness * 116 pb->lth_brightness = data->lth_brightness *
110 (data->pwm_period_ns / data->max_brightness); 117 (data->pwm_period_ns / data->max_brightness);
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
172 pb->notify(pb->dev, 0); 179 pb->notify(pb->dev, 0);
173 pwm_config(pb->pwm, 0, pb->period); 180 pwm_config(pb->pwm, 0, pb->period);
174 pwm_disable(pb->pwm); 181 pwm_disable(pb->pwm);
182 if (pb->notify_after)
183 pb->notify_after(pb->dev, 0);
175 return 0; 184 return 0;
176} 185}
177 186
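
The pwm_bl change above adds a notify_after hook that fires once the PWM has been reprogrammed, mirroring the existing notify call that fires beforehand; a board can use the pair to gate an external enable or supply line around the brightness change. A hedged sketch of how platform data might wire it up — the callback signatures come from the hunk above, while the other fields and values here are illustrative assumptions:

#include <linux/pwm_backlight.h>

/* Hypothetical board hooks: notify runs before the PWM is reprogrammed
 * and may clamp the value; notify_after runs once the new duty cycle is
 * active, which is where an external enable signal could be toggled.
 */
static int board_bl_notify(struct device *dev, int brightness)
{
	return brightness;		/* no clamping in this sketch */
}

static void board_bl_notify_after(struct device *dev, int brightness)
{
	/* PWM now reflects 'brightness'; gate panel power here if needed. */
}

static struct platform_pwm_backlight_data board_bl_data = {
	.max_brightness	= 255,
	.dft_brightness	= 200,
	.pwm_period_ns	= 78770,
	.notify		= board_bl_notify,
	.notify_after	= board_bl_notify_after,
};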
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 02bf7bf7160b..b5abaae38e97 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * dscore.c 2 * dscore.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -1024,5 +1024,5 @@ module_init(ds_init);
1024module_exit(ds_fini); 1024module_exit(ds_fini);
1025 1025
1026MODULE_LICENSE("GPL"); 1026MODULE_LICENSE("GPL");
1027MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 1027MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
1028MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); 1028MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 334d1ccf9c92..f667c26b2195 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * matrox_w1.c 2 * matrox_w1.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -39,7 +39,7 @@
39#include "../w1_log.h" 39#include "../w1_log.h"
40 40
41MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
42MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 42MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
43MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); 43MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
44 44
45static struct pci_device_id matrox_w1_tbl[] = { 45static struct pci_device_id matrox_w1_tbl[] = {
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index c37781899d90..7c8cdb8aed26 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl)
373static void w1_f29_remove_slave(struct w1_slave *sl) 373static void w1_f29_remove_slave(struct w1_slave *sl)
374{ 374{
375 int i; 375 int i;
376 for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) 376 for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
377 sysfs_remove_bin_file(&sl->dev.kobj, 377 sysfs_remove_bin_file(&sl->dev.kobj,
378 &(w1_f29_sysfs_bin_files[i])); 378 &(w1_f29_sysfs_bin_files[i]));
379} 379}
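
The one-line w1_ds2408 fix above repairs a reversed loop: with NB_SYSFS_BIN_FILES greater than zero, "for (i = NB_SYSFS_BIN_FILES; i <= 0; --i)" never executes, so none of the sysfs bin files were removed when the slave went away. The corrected bounds count down from the last valid index. A tiny user-space check of the two loop shapes (the file count is illustrative):

#include <stdio.h>

#define NB_SYSFS_BIN_FILES 8		/* illustrative value */

int main(void)
{
	int i, old_iters = 0, new_iters = 0;

	for (i = NB_SYSFS_BIN_FILES; i <= 0; --i)	/* old: condition false at once */
		old_iters++;

	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)	/* new: visits 7..0 */
		new_iters++;

	printf("old removed %d files, new removes %d\n", old_iters, new_iters);
	return 0;
}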
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index cc8c02e92593..84655625c870 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_smem.c 2 * w1_smem.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -32,7 +32,7 @@
32#include "../w1_family.h" 32#include "../w1_family.h"
33 33
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 35MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
36MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); 36MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
37 37
38static struct w1_family w1_smem_family_01 = { 38static struct w1_family w1_smem_family_01 = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 402928b135d1..a1ef9b5b38cf 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_therm.c 2 * w1_therm.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@
34#include "../w1_family.h" 34#include "../w1_family.h"
35 35
36MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 37MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
38MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); 38MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
39 39
40/* Allow the strong pullup to be disabled, but default to enabled. 40/* Allow the strong pullup to be disabled, but default to enabled.
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 6c136c19e982..c37497823851 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1.c 2 * w1.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -42,7 +42,7 @@
42#include "w1_netlink.h" 42#include "w1_netlink.h"
43 43
44MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 45MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); 46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
47 47
48static int w1_timeout = 10; 48static int w1_timeout = 10;
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 1ce23fc6186c..4d012ca3f32c 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1.h 2 * w1.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 4a099041f28a..63359797c8b1 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_family.c 2 * w1_family.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 98a1ac0f4693..490cda2281bc 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_family.h 2 * w1_family.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b50be3f1073d..d220bce2cee4 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_int.c 2 * w1_int.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 4274082d2262..2ad7d4414bed 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_int.h 2 * w1_int.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 8e8b64cfafb6..765b37b62a4f 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_io.c 2 * w1_io.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index e6ab7cf08f88..9c7bd62e6bdc 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_log.h 2 * w1_log.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 55aabd927c60..40788c925d1c 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_netlink.c 2 * w1_netlink.c
3 * 3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 27e950f935b1..b0922dc29658 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_netlink.h 2 * w1_netlink.h
3 * 3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 410fba45378d..809cbda03d7a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
494 asminline_call(&cmn_regs, cru_rom_addr); 494 asminline_call(&cmn_regs, cru_rom_addr);
495 die_nmi_called = 1; 495 die_nmi_called = 1;
496 spin_unlock_irqrestore(&rom_lock, rom_pl); 496 spin_unlock_irqrestore(&rom_lock, rom_pl);
497
498 if (allow_kdump)
499 hpwdt_stop();
500
497 if (!is_icru) { 501 if (!is_icru) {
498 if (cmn_regs.u1.ral == 0) { 502 if (cmn_regs.u1.ral == 0) {
499 printk(KERN_WARNING "hpwdt: An NMI occurred, " 503 panic("An NMI occurred, "
500 "but unable to determine source.\n"); 504 "but unable to determine source.\n");
501 } 505 }
502 } 506 }
503
504 if (allow_kdump)
505 hpwdt_stop();
506 panic("An NMI occurred, please see the Integrated " 507 panic("An NMI occurred, please see the Integrated "
507 "Management Log for details.\n"); 508 "Management Log for details.\n");
508 509
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 7d82adac1cb2..102aed0efbf1 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
51static void 51static void
52ltq_wdt_enable(void) 52ltq_wdt_enable(void)
53{ 53{
54 ltq_wdt_timeout = ltq_wdt_timeout * 54 unsigned long int timeout = ltq_wdt_timeout *
55 (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; 55 (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
56 if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) 56 if (timeout > LTQ_MAX_TIMEOUT)
57 ltq_wdt_timeout = LTQ_MAX_TIMEOUT; 57 timeout = LTQ_MAX_TIMEOUT;
58 58
59 /* write the first password magic */ 59 /* write the first password magic */
60 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); 60 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
61 /* write the second magic plus the configuration and new timeout */ 61 /* write the second magic plus the configuration and new timeout */
62 ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | 62 ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
63 LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); 63 LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
64} 64}
65 65
66static void 66static void
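
The lantiq_wdt change above fixes a compounding bug: ltq_wdt_enable() used to scale the global ltq_wdt_timeout in place, so each subsequent enable multiplied the already-scaled value again and quickly pegged it at the maximum; computing the scaled tick count into a local leaves the user-visible seconds value untouched. A small user-space demonstration of the difference, with made-up clock, divider and limit values:

#include <stdio.h>

#define CLK_RATE	125000000UL	/* illustrative io-region clock */
#define DIVIDER		0x40000		/* illustrative divider */
#define MAX_TIMEOUT	((1 << 16) - 1)	/* illustrative register limit */

static unsigned long wdt_timeout = 30;	/* seconds, as set by the user */

/* Old: rescales the global every call, so the value keeps growing. */
static unsigned long enable_old(void)
{
	wdt_timeout = wdt_timeout * (CLK_RATE / DIVIDER) + 0x1000;
	if (wdt_timeout > MAX_TIMEOUT)
		wdt_timeout = MAX_TIMEOUT;
	return wdt_timeout;
}

/* New: the scaled value lives in a local; the global stays in seconds. */
static unsigned long enable_new(void)
{
	unsigned long timeout = wdt_timeout * (CLK_RATE / DIVIDER) + 0x1000;

	if (timeout > MAX_TIMEOUT)
		timeout = MAX_TIMEOUT;
	return timeout;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long programmed = enable_old();
		printf("old call %d programs %lu ticks (global now %lu)\n",
		       i, programmed, wdt_timeout);
	}

	wdt_timeout = 30;		/* reset for the fixed version */
	for (i = 0; i < 3; i++) {
		unsigned long programmed = enable_new();
		printf("new call %d programs %lu ticks (global still %lu)\n",
		       i, programmed, wdt_timeout);
	}
	return 0;
}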
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 3066a5127ca8..eaca366b7234 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
173 .notifier_call = epx_c3_notify_sys, 173 .notifier_call = epx_c3_notify_sys,
174}; 174};
175 175
176static const char banner[] __initdata = KERN_INFO PFX 176static const char banner[] __initconst = KERN_INFO PFX
177 "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; 177 "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
178 178
179static int __init watchdog_init(void) 179static int __init watchdog_init(void)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index d33520d0b4c9..1199da0f98cf 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
59 59
60static int watchdog_ping(struct watchdog_device *wddev) 60static int watchdog_ping(struct watchdog_device *wddev)
61{ 61{
62 if (test_bit(WDOG_ACTIVE, &wdd->status)) { 62 if (test_bit(WDOG_ACTIVE, &wddev->status)) {
63 if (wddev->ops->ping) 63 if (wddev->ops->ping)
64 return wddev->ops->ping(wddev); /* ping the watchdog */ 64 return wddev->ops->ping(wddev); /* ping the watchdog */
65 else 65 else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
81{ 81{
82 int err; 82 int err;
83 83
84 if (!test_bit(WDOG_ACTIVE, &wdd->status)) { 84 if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
85 err = wddev->ops->start(wddev); 85 err = wddev->ops->start(wddev);
86 if (err < 0) 86 if (err < 0)
87 return err; 87 return err;
88 88
89 set_bit(WDOG_ACTIVE, &wdd->status); 89 set_bit(WDOG_ACTIVE, &wddev->status);
90 } 90 }
91 return 0; 91 return 0;
92} 92}
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
105{ 105{
106 int err = -EBUSY; 106 int err = -EBUSY;
107 107
108 if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { 108 if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
109 pr_info("%s: nowayout prevents watchdog to be stopped!\n", 109 pr_info("%s: nowayout prevents watchdog to be stopped!\n",
110 wdd->info->identity); 110 wddev->info->identity);
111 return err; 111 return err;
112 } 112 }
113 113
114 if (test_bit(WDOG_ACTIVE, &wdd->status)) { 114 if (test_bit(WDOG_ACTIVE, &wddev->status)) {
115 err = wddev->ops->stop(wddev); 115 err = wddev->ops->stop(wddev);
116 if (err < 0) 116 if (err < 0)
117 return err; 117 return err;
118 118
119 clear_bit(WDOG_ACTIVE, &wdd->status); 119 clear_bit(WDOG_ACTIVE, &wddev->status);
120 } 120 }
121 return 0; 121 return 0;
122} 122}
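
The watchdog_dev fixes above replace references to the file-scope wdd pointer with the wddev argument, so watchdog_ping(), watchdog_start() and watchdog_stop() act on the device they were handed rather than through the cached global; with the single device this early core supports the two happen to be the same object, which is why it worked at all. A sketch of the corrected shape, against the watchdog core structures visible in the hunk and not a full driver:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/watchdog.h>

/* Minimal restatement of the fixed helper: every test uses the passed-in
 * device, so nothing depends on module-level state.
 */
static int example_watchdog_ping(struct watchdog_device *wddev)
{
	if (!test_bit(WDOG_ACTIVE, &wddev->status))
		return -EINVAL;			/* illustrative: not running */

	if (wddev->ops->ping)
		return wddev->ops->ping(wddev);	/* ping the watchdog */

	return wddev->ops->start(wddev);	/* else restart it */
}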
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index da70f5c32eb9..7523719bf8a4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
54 * This lock protects updates to the following mapping and reference-count 54 * This lock protects updates to the following mapping and reference-count
55 * arrays. The lock does not need to be acquired to read the mapping tables. 55 * arrays. The lock does not need to be acquired to read the mapping tables.
56 */ 56 */
57static DEFINE_SPINLOCK(irq_mapping_update_lock); 57static DEFINE_MUTEX(irq_mapping_update_lock);
58 58
59static LIST_HEAD(xen_irq_list_head); 59static LIST_HEAD(xen_irq_list_head);
60 60
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
631 int irq = -1; 631 int irq = -1;
632 struct physdev_irq irq_op; 632 struct physdev_irq irq_op;
633 633
634 spin_lock(&irq_mapping_update_lock); 634 mutex_lock(&irq_mapping_update_lock);
635 635
636 irq = find_irq_by_gsi(gsi); 636 irq = find_irq_by_gsi(gsi);
637 if (irq != -1) { 637 if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
684 handle_edge_irq, name); 684 handle_edge_irq, name);
685 685
686out: 686out:
687 spin_unlock(&irq_mapping_update_lock); 687 mutex_unlock(&irq_mapping_update_lock);
688 688
689 return irq; 689 return irq;
690} 690}
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
710{ 710{
711 int irq, ret; 711 int irq, ret;
712 712
713 spin_lock(&irq_mapping_update_lock); 713 mutex_lock(&irq_mapping_update_lock);
714 714
715 irq = xen_allocate_irq_dynamic(); 715 irq = xen_allocate_irq_dynamic();
716 if (irq == -1) 716 if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
724 if (ret < 0) 724 if (ret < 0)
725 goto error_irq; 725 goto error_irq;
726out: 726out:
727 spin_unlock(&irq_mapping_update_lock); 727 mutex_unlock(&irq_mapping_update_lock);
728 return irq; 728 return irq;
729error_irq: 729error_irq:
730 spin_unlock(&irq_mapping_update_lock); 730 mutex_unlock(&irq_mapping_update_lock);
731 xen_free_irq(irq); 731 xen_free_irq(irq);
732 return -1; 732 return -1;
733} 733}
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
740 struct irq_info *info = info_for_irq(irq); 740 struct irq_info *info = info_for_irq(irq);
741 int rc = -ENOENT; 741 int rc = -ENOENT;
742 742
743 spin_lock(&irq_mapping_update_lock); 743 mutex_lock(&irq_mapping_update_lock);
744 744
745 desc = irq_to_desc(irq); 745 desc = irq_to_desc(irq);
746 if (!desc) 746 if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
766 xen_free_irq(irq); 766 xen_free_irq(irq);
767 767
768out: 768out:
769 spin_unlock(&irq_mapping_update_lock); 769 mutex_unlock(&irq_mapping_update_lock);
770 return rc; 770 return rc;
771} 771}
772 772
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
776 776
777 struct irq_info *info; 777 struct irq_info *info;
778 778
779 spin_lock(&irq_mapping_update_lock); 779 mutex_lock(&irq_mapping_update_lock);
780 780
781 list_for_each_entry(info, &xen_irq_list_head, list) { 781 list_for_each_entry(info, &xen_irq_list_head, list) {
782 if (info == NULL || info->type != IRQT_PIRQ) 782 if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
787 } 787 }
788 irq = -1; 788 irq = -1;
789out: 789out:
790 spin_unlock(&irq_mapping_update_lock); 790 mutex_unlock(&irq_mapping_update_lock);
791 791
792 return irq; 792 return irq;
793} 793}
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
802{ 802{
803 int irq; 803 int irq;
804 804
805 spin_lock(&irq_mapping_update_lock); 805 mutex_lock(&irq_mapping_update_lock);
806 806
807 irq = evtchn_to_irq[evtchn]; 807 irq = evtchn_to_irq[evtchn];
808 808
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
818 } 818 }
819 819
820out: 820out:
821 spin_unlock(&irq_mapping_update_lock); 821 mutex_unlock(&irq_mapping_update_lock);
822 822
823 return irq; 823 return irq;
824} 824}
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
829 struct evtchn_bind_ipi bind_ipi; 829 struct evtchn_bind_ipi bind_ipi;
830 int evtchn, irq; 830 int evtchn, irq;
831 831
832 spin_lock(&irq_mapping_update_lock); 832 mutex_lock(&irq_mapping_update_lock);
833 833
834 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 834 irq = per_cpu(ipi_to_irq, cpu)[ipi];
835 835
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
853 } 853 }
854 854
855 out: 855 out:
856 spin_unlock(&irq_mapping_update_lock); 856 mutex_unlock(&irq_mapping_update_lock);
857 return irq; 857 return irq;
858} 858}
859 859
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
878 struct evtchn_bind_virq bind_virq; 878 struct evtchn_bind_virq bind_virq;
879 int evtchn, irq; 879 int evtchn, irq;
880 880
881 spin_lock(&irq_mapping_update_lock); 881 mutex_lock(&irq_mapping_update_lock);
882 882
883 irq = per_cpu(virq_to_irq, cpu)[virq]; 883 irq = per_cpu(virq_to_irq, cpu)[virq];
884 884
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
903 } 903 }
904 904
905out: 905out:
906 spin_unlock(&irq_mapping_update_lock); 906 mutex_unlock(&irq_mapping_update_lock);
907 907
908 return irq; 908 return irq;
909} 909}
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
913 struct evtchn_close close; 913 struct evtchn_close close;
914 int evtchn = evtchn_from_irq(irq); 914 int evtchn = evtchn_from_irq(irq);
915 915
916 spin_lock(&irq_mapping_update_lock); 916 mutex_lock(&irq_mapping_update_lock);
917 917
918 if (VALID_EVTCHN(evtchn)) { 918 if (VALID_EVTCHN(evtchn)) {
919 close.port = evtchn; 919 close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
943 943
944 xen_free_irq(irq); 944 xen_free_irq(irq);
945 945
946 spin_unlock(&irq_mapping_update_lock); 946 mutex_unlock(&irq_mapping_update_lock);
947} 947}
948 948
949int bind_evtchn_to_irqhandler(unsigned int evtchn, 949int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
1279 will also be masked. */ 1279 will also be masked. */
1280 disable_irq(irq); 1280 disable_irq(irq);
1281 1281
1282 spin_lock(&irq_mapping_update_lock); 1282 mutex_lock(&irq_mapping_update_lock);
1283 1283
1284 /* After resume the irq<->evtchn mappings are all cleared out */ 1284 /* After resume the irq<->evtchn mappings are all cleared out */
1285 BUG_ON(evtchn_to_irq[evtchn] != -1); 1285 BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
1289 1289
1290 xen_irq_info_evtchn_init(irq, evtchn); 1290 xen_irq_info_evtchn_init(irq, evtchn);
1291 1291
1292 spin_unlock(&irq_mapping_update_lock); 1292 mutex_unlock(&irq_mapping_update_lock);
1293 1293
1294 /* new event channels are always bound to cpu 0 */ 1294 /* new event channels are always bound to cpu 0 */
1295 irq_set_affinity(irq, cpumask_of(0)); 1295 irq_set_affinity(irq, cpumask_of(0));
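These hunks switch every irq_mapping_update_lock critical section from spin_lock()/spin_unlock() to mutex_lock()/mutex_unlock(). The matching declaration change is not visible in this excerpt; a minimal sketch of what it presumably looks like, assuming the lock remains a file-scope static in drivers/xen/events.c:

#include <linux/mutex.h>

/* Before (assumed): static DEFINE_SPINLOCK(irq_mapping_update_lock); */
static DEFINE_MUTEX(irq_mapping_update_lock);

/* Every binding/unbinding path then brackets its update the same way: */
static void example_update_mapping(void)
{
	mutex_lock(&irq_mapping_update_lock);
	/* ... modify the irq <-> event-channel mapping tables ... */
	mutex_unlock(&irq_mapping_update_lock);
}

The practical consequence is that these sections may now sleep (useful when allocation happens under the lock) but must never be entered from interrupt context.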
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 1b4afd81f872..6ea852e25162 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -70,6 +70,7 @@
70#include <linux/kernel.h> 70#include <linux/kernel.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/mman.h> 72#include <linux/mman.h>
73#include <linux/module.h>
73#include <linux/workqueue.h> 74#include <linux/workqueue.h>
74#include <xen/balloon.h> 75#include <xen/balloon.h>
75#include <xen/tmem.h> 76#include <xen/tmem.h>
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 46ce357ca1ab..410ffd6ceb5f 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
54 54
55struct inode *v9fs_alloc_inode(struct super_block *sb); 55struct inode *v9fs_alloc_inode(struct super_block *sb);
56void v9fs_destroy_inode(struct inode *inode); 56void v9fs_destroy_inode(struct inode *inode);
57struct inode *v9fs_get_inode(struct super_block *sb, int mode); 57struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
58int v9fs_init_inode(struct v9fs_session_info *v9ses, 58int v9fs_init_inode(struct v9fs_session_info *v9ses,
59 struct inode *inode, int mode); 59 struct inode *inode, int mode, dev_t);
60void v9fs_evict_inode(struct inode *inode); 60void v9fs_evict_inode(struct inode *inode);
61ino_t v9fs_qid2ino(struct p9_qid *qid); 61ino_t v9fs_qid2ino(struct p9_qid *qid);
62void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); 62void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
83 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; 83 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
84 return; 84 return;
85} 85}
86
87int v9fs_open_to_dotl_flags(int flags);
86#endif 88#endif
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3c173fcc2c5a..62857a810a79 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
65 v9inode = V9FS_I(inode); 65 v9inode = V9FS_I(inode);
66 v9ses = v9fs_inode2v9ses(inode); 66 v9ses = v9fs_inode2v9ses(inode);
67 if (v9fs_proto_dotl(v9ses)) 67 if (v9fs_proto_dotl(v9ses))
68 omode = file->f_flags; 68 omode = v9fs_open_to_dotl_flags(file->f_flags);
69 else 69 else
70 omode = v9fs_uflags2omode(file->f_flags, 70 omode = v9fs_uflags2omode(file->f_flags,
71 v9fs_proto_dotu(v9ses)); 71 v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
169 169
170 /* convert posix lock to p9 tlock args */ 170 /* convert posix lock to p9 tlock args */
171 memset(&flock, 0, sizeof(flock)); 171 memset(&flock, 0, sizeof(flock));
172 flock.type = fl->fl_type; 172 /* map the lock type */
173 switch (fl->fl_type) {
174 case F_RDLCK:
175 flock.type = P9_LOCK_TYPE_RDLCK;
176 break;
177 case F_WRLCK:
178 flock.type = P9_LOCK_TYPE_WRLCK;
179 break;
180 case F_UNLCK:
181 flock.type = P9_LOCK_TYPE_UNLCK;
182 break;
183 }
173 flock.start = fl->fl_start; 184 flock.start = fl->fl_start;
174 if (fl->fl_end == OFFSET_MAX) 185 if (fl->fl_end == OFFSET_MAX)
175 flock.length = 0; 186 flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
245 256
246 /* convert posix lock to p9 tgetlock args */ 257 /* convert posix lock to p9 tgetlock args */
247 memset(&glock, 0, sizeof(glock)); 258 memset(&glock, 0, sizeof(glock));
248 glock.type = fl->fl_type; 259 glock.type = P9_LOCK_TYPE_UNLCK;
249 glock.start = fl->fl_start; 260 glock.start = fl->fl_start;
250 if (fl->fl_end == OFFSET_MAX) 261 if (fl->fl_end == OFFSET_MAX)
251 glock.length = 0; 262 glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
257 res = p9_client_getlock_dotl(fid, &glock); 268 res = p9_client_getlock_dotl(fid, &glock);
258 if (res < 0) 269 if (res < 0)
259 return res; 270 return res;
260 if (glock.type != F_UNLCK) { 271 /* map 9p lock type to os lock type */
261 fl->fl_type = glock.type; 272 switch (glock.type) {
273 case P9_LOCK_TYPE_RDLCK:
274 fl->fl_type = F_RDLCK;
275 break;
276 case P9_LOCK_TYPE_WRLCK:
277 fl->fl_type = F_WRLCK;
278 break;
279 case P9_LOCK_TYPE_UNLCK:
280 fl->fl_type = F_UNLCK;
281 break;
282 }
283 if (glock.type != P9_LOCK_TYPE_UNLCK) {
262 fl->fl_start = glock.start; 284 fl->fl_start = glock.start;
263 if (glock.length == 0) 285 if (glock.length == 0)
264 fl->fl_end = OFFSET_MAX; 286 fl->fl_end = OFFSET_MAX;
265 else 287 else
266 fl->fl_end = glock.start + glock.length - 1; 288 fl->fl_end = glock.start + glock.length - 1;
267 fl->fl_pid = glock.proc_id; 289 fl->fl_pid = glock.proc_id;
268 } else 290 }
269 fl->fl_type = F_UNLCK;
270
271 return res; 291 return res;
272} 292}
273 293
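The two hunks above stop passing raw F_RDLCK/F_WRLCK/F_UNLCK values over the wire and instead translate them to and from explicit P9_LOCK_TYPE_* constants. A small userspace sketch of the same idea; the P9_LOCK_* values below are illustrative placeholders, not the kernel's definitions:

#include <fcntl.h>
#include <stdio.h>

enum { P9_LOCK_RDLCK = 0, P9_LOCK_WRLCK = 1, P9_LOCK_UNLCK = 2 };

static int posix_to_p9_lock(short fl_type)
{
	switch (fl_type) {
	case F_RDLCK: return P9_LOCK_RDLCK;
	case F_WRLCK: return P9_LOCK_WRLCK;
	case F_UNLCK: return P9_LOCK_UNLCK;
	default:      return -1;	/* unknown lock type */
	}
}

int main(void)
{
	printf("F_RDLCK -> %d\n", posix_to_p9_lock(F_RDLCK));
	printf("F_UNLCK -> %d\n", posix_to_p9_lock(F_UNLCK));
	return 0;
}

Translating at the boundary keeps the protocol encoding stable even though the numeric F_* values are an OS/ABI detail.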
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8bb5507e822f..e3c03db3c788 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
95/** 95/**
96 * p9mode2unixmode- convert plan9 mode bits to unix mode bits 96 * p9mode2unixmode- convert plan9 mode bits to unix mode bits
97 * @v9ses: v9fs session information 97 * @v9ses: v9fs session information
98 * @mode: mode to convert 98 * @stat: p9_wstat from which mode need to be derived
99 * @rdev: major number, minor number in case of device files.
99 * 100 *
100 */ 101 */
101 102static int p9mode2unixmode(struct v9fs_session_info *v9ses,
102static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) 103 struct p9_wstat *stat, dev_t *rdev)
103{ 104{
104 int res; 105 int res;
106 int mode = stat->mode;
105 107
106 res = mode & 0777; 108 res = mode & S_IALLUGO;
109 *rdev = 0;
107 110
108 if ((mode & P9_DMDIR) == P9_DMDIR) 111 if ((mode & P9_DMDIR) == P9_DMDIR)
109 res |= S_IFDIR; 112 res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
116 && (v9ses->nodev == 0)) 119 && (v9ses->nodev == 0))
117 res |= S_IFIFO; 120 res |= S_IFIFO;
118 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) 121 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
119 && (v9ses->nodev == 0)) 122 && (v9ses->nodev == 0)) {
120 res |= S_IFBLK; 123 char type = 0, ext[32];
121 else 124 int major = -1, minor = -1;
125
126 strncpy(ext, stat->extension, sizeof(ext));
127 sscanf(ext, "%c %u %u", &type, &major, &minor);
128 switch (type) {
129 case 'c':
130 res |= S_IFCHR;
131 break;
132 case 'b':
133 res |= S_IFBLK;
134 break;
135 default:
136 P9_DPRINTK(P9_DEBUG_ERROR,
137 "Unknown special type %c %s\n", type,
138 stat->extension);
139 };
140 *rdev = MKDEV(major, minor);
141 } else
122 res |= S_IFREG; 142 res |= S_IFREG;
123 143
124 if (v9fs_proto_dotu(v9ses)) { 144 if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
131 if ((mode & P9_DMSETVTX) == P9_DMSETVTX) 151 if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
132 res |= S_ISVTX; 152 res |= S_ISVTX;
133 } 153 }
134
135 return res; 154 return res;
136} 155}
137 156
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
242} 261}
243 262
244int v9fs_init_inode(struct v9fs_session_info *v9ses, 263int v9fs_init_inode(struct v9fs_session_info *v9ses,
245 struct inode *inode, int mode) 264 struct inode *inode, int mode, dev_t rdev)
246{ 265{
247 int err = 0; 266 int err = 0;
248 267
249 inode_init_owner(inode, NULL, mode); 268 inode_init_owner(inode, NULL, mode);
250 inode->i_blocks = 0; 269 inode->i_blocks = 0;
251 inode->i_rdev = 0; 270 inode->i_rdev = rdev;
252 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 271 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
253 inode->i_mapping->a_ops = &v9fs_addr_operations; 272 inode->i_mapping->a_ops = &v9fs_addr_operations;
254 273
@@ -335,7 +354,7 @@ error:
335 * 354 *
336 */ 355 */
337 356
338struct inode *v9fs_get_inode(struct super_block *sb, int mode) 357struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
339{ 358{
340 int err; 359 int err;
341 struct inode *inode; 360 struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
348 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n"); 367 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
349 return ERR_PTR(-ENOMEM); 368 return ERR_PTR(-ENOMEM);
350 } 369 }
351 err = v9fs_init_inode(v9ses, inode, mode); 370 err = v9fs_init_inode(v9ses, inode, mode, rdev);
352 if (err) { 371 if (err) {
353 iput(inode); 372 iput(inode);
354 return ERR_PTR(err); 373 return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
435static int v9fs_test_inode(struct inode *inode, void *data) 454static int v9fs_test_inode(struct inode *inode, void *data)
436{ 455{
437 int umode; 456 int umode;
457 dev_t rdev;
438 struct v9fs_inode *v9inode = V9FS_I(inode); 458 struct v9fs_inode *v9inode = V9FS_I(inode);
439 struct p9_wstat *st = (struct p9_wstat *)data; 459 struct p9_wstat *st = (struct p9_wstat *)data;
440 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); 460 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
441 461
442 umode = p9mode2unixmode(v9ses, st->mode); 462 umode = p9mode2unixmode(v9ses, st, &rdev);
443 /* don't match inode of different type */ 463 /* don't match inode of different type */
444 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) 464 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
445 return 0; 465 return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
473 struct p9_wstat *st, 493 struct p9_wstat *st,
474 int new) 494 int new)
475{ 495{
496 dev_t rdev;
476 int retval, umode; 497 int retval, umode;
477 unsigned long i_ino; 498 unsigned long i_ino;
478 struct inode *inode; 499 struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
496 * later. 517 * later.
497 */ 518 */
498 inode->i_ino = i_ino; 519 inode->i_ino = i_ino;
499 umode = p9mode2unixmode(v9ses, st->mode); 520 umode = p9mode2unixmode(v9ses, st, &rdev);
500 retval = v9fs_init_inode(v9ses, inode, umode); 521 retval = v9fs_init_inode(v9ses, inode, umode, rdev);
501 if (retval) 522 if (retval)
502 goto error; 523 goto error;
503 524
@@ -532,6 +553,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
532} 553}
533 554
534/** 555/**
556 * v9fs_at_to_dotl_flags- convert Linux specific AT flags to
557 * plan 9 AT flag.
558 * @flags: flags to convert
559 */
560static int v9fs_at_to_dotl_flags(int flags)
561{
562 int rflags = 0;
563 if (flags & AT_REMOVEDIR)
564 rflags |= P9_DOTL_AT_REMOVEDIR;
565 return rflags;
566}
567
568/**
535 * v9fs_remove - helper function to remove files and directories 569 * v9fs_remove - helper function to remove files and directories
536 * @dir: directory inode that is being deleted 570 * @dir: directory inode that is being deleted
537 * @dentry: dentry that is being deleted 571 * @dentry: dentry that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
558 return retval; 592 return retval;
559 } 593 }
560 if (v9fs_proto_dotl(v9ses)) 594 if (v9fs_proto_dotl(v9ses))
561 retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags); 595 retval = p9_client_unlinkat(dfid, dentry->d_name.name,
596 v9fs_at_to_dotl_flags(flags));
562 if (retval == -EOPNOTSUPP) { 597 if (retval == -EOPNOTSUPP) {
563 /* Try the one based on path */ 598 /* Try the one based on path */
564 v9fid = v9fs_fid_clone(dentry); 599 v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
645 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 680 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
646 goto error; 681 goto error;
647 } 682 }
648 d_instantiate(dentry, inode);
649 err = v9fs_fid_add(dentry, fid); 683 err = v9fs_fid_add(dentry, fid);
650 if (err < 0) 684 if (err < 0)
651 goto error; 685 goto error;
652 686 d_instantiate(dentry, inode);
653 return ofid; 687 return ofid;
654
655error: 688error:
656 if (ofid) 689 if (ofid)
657 p9_client_clunk(ofid); 690 p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
792struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, 825struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
793 struct nameidata *nameidata) 826 struct nameidata *nameidata)
794{ 827{
828 struct dentry *res;
795 struct super_block *sb; 829 struct super_block *sb;
796 struct v9fs_session_info *v9ses; 830 struct v9fs_session_info *v9ses;
797 struct p9_fid *dfid, *fid; 831 struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
823 857
824 return ERR_PTR(result); 858 return ERR_PTR(result);
825 } 859 }
826 860 /*
827 inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); 861 * Make sure we don't use a wrong inode due to parallel
 862 * unlink. In cached mode the create path requests a new
 863 * inode, but with caching disabled, lookup must do this.
864 */
865 if (v9ses->cache)
866 inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
867 else
868 inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
828 if (IS_ERR(inode)) { 869 if (IS_ERR(inode)) {
829 result = PTR_ERR(inode); 870 result = PTR_ERR(inode);
830 inode = NULL; 871 inode = NULL;
831 goto error; 872 goto error;
832 } 873 }
833
834 result = v9fs_fid_add(dentry, fid); 874 result = v9fs_fid_add(dentry, fid);
835 if (result < 0) 875 if (result < 0)
836 goto error_iput; 876 goto error_iput;
837
838inst_out: 877inst_out:
839 d_add(dentry, inode); 878 /*
840 return NULL; 879 * If we had a rename on the server and a parallel lookup
841 880 * for the new name, then make sure we instantiate with
881 * the new name. ie look up for a/b, while on server somebody
882 * moved b under k and client parallely did a lookup for
883 * k/b.
884 */
885 res = d_materialise_unique(dentry, inode);
886 if (!IS_ERR(res))
887 return res;
888 result = PTR_ERR(res);
842error_iput: 889error_iput:
843 iput(inode); 890 iput(inode);
844error: 891error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1002 return PTR_ERR(st); 1049 return PTR_ERR(st);
1003 1050
1004 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); 1051 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
1005 generic_fillattr(dentry->d_inode, stat); 1052 generic_fillattr(dentry->d_inode, stat);
1006 1053
1007 p9stat_free(st); 1054 p9stat_free(st);
1008 kfree(st); 1055 kfree(st);
@@ -1086,6 +1133,7 @@ void
1086v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, 1133v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1087 struct super_block *sb) 1134 struct super_block *sb)
1088{ 1135{
1136 mode_t mode;
1089 char ext[32]; 1137 char ext[32];
1090 char tag_name[14]; 1138 char tag_name[14];
1091 unsigned int i_nlink; 1139 unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1121 inode->i_nlink = i_nlink; 1169 inode->i_nlink = i_nlink;
1122 } 1170 }
1123 } 1171 }
1124 inode->i_mode = p9mode2unixmode(v9ses, stat->mode); 1172 mode = stat->mode & S_IALLUGO;
1125 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) { 1173 mode |= inode->i_mode & ~S_IALLUGO;
1126 char type = 0; 1174 inode->i_mode = mode;
1127 int major = -1;
1128 int minor = -1;
1129
1130 strncpy(ext, stat->extension, sizeof(ext));
1131 sscanf(ext, "%c %u %u", &type, &major, &minor);
1132 switch (type) {
1133 case 'c':
1134 inode->i_mode &= ~S_IFBLK;
1135 inode->i_mode |= S_IFCHR;
1136 break;
1137 case 'b':
1138 break;
1139 default:
1140 P9_DPRINTK(P9_DEBUG_ERROR,
1141 "Unknown special type %c %s\n", type,
1142 stat->extension);
1143 };
1144 inode->i_rdev = MKDEV(major, minor);
1145 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1146 } else
1147 inode->i_rdev = 0;
1148
1149 i_size_write(inode, stat->length); 1175 i_size_write(inode, stat->length);
1150 1176
1151 /* not real number of blocks, but 512 byte ones ... */ 1177 /* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1411 1437
1412int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) 1438int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1413{ 1439{
1440 int umode;
1441 dev_t rdev;
1414 loff_t i_size; 1442 loff_t i_size;
1415 struct p9_wstat *st; 1443 struct p9_wstat *st;
1416 struct v9fs_session_info *v9ses; 1444 struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1419 st = p9_client_stat(fid); 1447 st = p9_client_stat(fid);
1420 if (IS_ERR(st)) 1448 if (IS_ERR(st))
1421 return PTR_ERR(st); 1449 return PTR_ERR(st);
1450 /*
1451 * Don't update inode if the file type is different
1452 */
1453 umode = p9mode2unixmode(v9ses, st, &rdev);
1454 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
1455 goto out;
1422 1456
1423 spin_lock(&inode->i_lock); 1457 spin_lock(&inode->i_lock);
1424 /* 1458 /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1430 if (v9ses->cache) 1464 if (v9ses->cache)
1431 inode->i_size = i_size; 1465 inode->i_size = i_size;
1432 spin_unlock(&inode->i_lock); 1466 spin_unlock(&inode->i_lock);
1467out:
1433 p9stat_free(st); 1468 p9stat_free(st);
1434 kfree(st); 1469 kfree(st);
1435 return 0; 1470 return 0;
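p9mode2unixmode() now parses the 9P2000.u stat extension string ("c MAJOR MINOR" or "b MAJOR MINOR") and reports the device number through *rdev, instead of leaving that to v9fs_stat2inode(). A standalone sketch of that parsing, with makedev() standing in for the kernel's MKDEV():

#include <stdio.h>
#include <string.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

static int parse_dev_extension(const char *extension, dev_t *rdev, int *is_char)
{
	char ext[32];
	char type = 0;
	unsigned int major = 0, minor = 0;

	strncpy(ext, extension, sizeof(ext) - 1);
	ext[sizeof(ext) - 1] = '\0';
	if (sscanf(ext, "%c %u %u", &type, &major, &minor) != 3)
		return -1;
	if (type != 'c' && type != 'b')
		return -1;		/* unknown special type */

	*is_char = (type == 'c');
	*rdev = makedev(major, minor);
	return 0;
}

int main(void)
{
	dev_t rdev;
	int is_char;

	if (parse_dev_extension("c 4 64", &rdev, &is_char) == 0)
		printf("char=%d major=%u minor=%u\n", is_char,
		       major(rdev), minor(rdev));
	return 0;
}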
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index b6c8ed205192..aded79fcd5cf 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
153 * later. 153 * later.
154 */ 154 */
155 inode->i_ino = i_ino; 155 inode->i_ino = i_ino;
156 retval = v9fs_init_inode(v9ses, inode, st->st_mode); 156 retval = v9fs_init_inode(v9ses, inode,
157 st->st_mode, new_decode_dev(st->st_rdev));
157 if (retval) 158 if (retval)
158 goto error; 159 goto error;
159 160
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
190 return inode; 191 return inode;
191} 192}
192 193
194struct dotl_openflag_map {
195 int open_flag;
196 int dotl_flag;
197};
198
199static int v9fs_mapped_dotl_flags(int flags)
200{
201 int i;
202 int rflags = 0;
203 struct dotl_openflag_map dotl_oflag_map[] = {
204 { O_CREAT, P9_DOTL_CREATE },
205 { O_EXCL, P9_DOTL_EXCL },
206 { O_NOCTTY, P9_DOTL_NOCTTY },
207 { O_TRUNC, P9_DOTL_TRUNC },
208 { O_APPEND, P9_DOTL_APPEND },
209 { O_NONBLOCK, P9_DOTL_NONBLOCK },
210 { O_DSYNC, P9_DOTL_DSYNC },
211 { FASYNC, P9_DOTL_FASYNC },
212 { O_DIRECT, P9_DOTL_DIRECT },
213 { O_LARGEFILE, P9_DOTL_LARGEFILE },
214 { O_DIRECTORY, P9_DOTL_DIRECTORY },
215 { O_NOFOLLOW, P9_DOTL_NOFOLLOW },
216 { O_NOATIME, P9_DOTL_NOATIME },
217 { O_CLOEXEC, P9_DOTL_CLOEXEC },
218 { O_SYNC, P9_DOTL_SYNC},
219 };
220 for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
221 if (flags & dotl_oflag_map[i].open_flag)
222 rflags |= dotl_oflag_map[i].dotl_flag;
223 }
224 return rflags;
225}
226
227/**
228 * v9fs_open_to_dotl_flags- convert Linux specific open flags to
229 * plan 9 open flag.
230 * @flags: flags to convert
231 */
232int v9fs_open_to_dotl_flags(int flags)
233{
234 int rflags = 0;
235
236 /*
 237 * We have the same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
238 * and P9_DOTL_NOACCESS
239 */
240 rflags |= flags & O_ACCMODE;
241 rflags |= v9fs_mapped_dotl_flags(flags);
242
243 return rflags;
244}
245
193/** 246/**
194 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 247 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
195 * @dir: directory inode that is being created 248 * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
258 "Failed to get acl values in creat %d\n", err); 311 "Failed to get acl values in creat %d\n", err);
259 goto error; 312 goto error;
260 } 313 }
261 err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); 314 err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
315 mode, gid, &qid);
262 if (err < 0) { 316 if (err < 0) {
263 P9_DPRINTK(P9_DEBUG_VFS, 317 P9_DPRINTK(P9_DEBUG_VFS,
264 "p9_client_open_dotl failed in creat %d\n", 318 "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
281 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 335 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
282 goto error; 336 goto error;
283 } 337 }
284 d_instantiate(dentry, inode);
285 err = v9fs_fid_add(dentry, fid); 338 err = v9fs_fid_add(dentry, fid);
286 if (err < 0) 339 if (err < 0)
287 goto error; 340 goto error;
341 d_instantiate(dentry, inode);
288 342
289 /* Now set the ACL based on the default value */ 343 /* Now set the ACL based on the default value */
290 v9fs_set_create_acl(dentry, &dacl, &pacl); 344 v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
403 err); 457 err);
404 goto error; 458 goto error;
405 } 459 }
406 d_instantiate(dentry, inode);
407 err = v9fs_fid_add(dentry, fid); 460 err = v9fs_fid_add(dentry, fid);
408 if (err < 0) 461 if (err < 0)
409 goto error; 462 goto error;
463 d_instantiate(dentry, inode);
410 fid = NULL; 464 fid = NULL;
411 } else { 465 } else {
412 /* 466 /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
414 * inode with stat. We need to get an inode 468 * inode with stat. We need to get an inode
415 * so that we can set the acl with dentry 469 * so that we can set the acl with dentry
416 */ 470 */
417 inode = v9fs_get_inode(dir->i_sb, mode); 471 inode = v9fs_get_inode(dir->i_sb, mode, 0);
418 if (IS_ERR(inode)) { 472 if (IS_ERR(inode)) {
419 err = PTR_ERR(inode); 473 err = PTR_ERR(inode);
420 goto error; 474 goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
540void 594void
541v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) 595v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
542{ 596{
597 mode_t mode;
543 struct v9fs_inode *v9inode = V9FS_I(inode); 598 struct v9fs_inode *v9inode = V9FS_I(inode);
544 599
545 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { 600 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
552 inode->i_uid = stat->st_uid; 607 inode->i_uid = stat->st_uid;
553 inode->i_gid = stat->st_gid; 608 inode->i_gid = stat->st_gid;
554 inode->i_nlink = stat->st_nlink; 609 inode->i_nlink = stat->st_nlink;
555 inode->i_mode = stat->st_mode;
556 inode->i_rdev = new_decode_dev(stat->st_rdev);
557 610
558 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) 611 mode = stat->st_mode & S_IALLUGO;
559 init_special_inode(inode, inode->i_mode, inode->i_rdev); 612 mode |= inode->i_mode & ~S_IALLUGO;
613 inode->i_mode = mode;
560 614
561 i_size_write(inode, stat->st_size); 615 i_size_write(inode, stat->st_size);
562 inode->i_blocks = stat->st_blocks; 616 inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
657 err); 711 err);
658 goto error; 712 goto error;
659 } 713 }
660 d_instantiate(dentry, inode);
661 err = v9fs_fid_add(dentry, fid); 714 err = v9fs_fid_add(dentry, fid);
662 if (err < 0) 715 if (err < 0)
663 goto error; 716 goto error;
717 d_instantiate(dentry, inode);
664 fid = NULL; 718 fid = NULL;
665 } else { 719 } else {
666 /* Not in cached mode. No need to populate inode with stat */ 720 /* Not in cached mode. No need to populate inode with stat */
667 inode = v9fs_get_inode(dir->i_sb, S_IFLNK); 721 inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
668 if (IS_ERR(inode)) { 722 if (IS_ERR(inode)) {
669 err = PTR_ERR(inode); 723 err = PTR_ERR(inode);
670 goto error; 724 goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
810 err); 864 err);
811 goto error; 865 goto error;
812 } 866 }
813 d_instantiate(dentry, inode);
814 err = v9fs_fid_add(dentry, fid); 867 err = v9fs_fid_add(dentry, fid);
815 if (err < 0) 868 if (err < 0)
816 goto error; 869 goto error;
870 d_instantiate(dentry, inode);
817 fid = NULL; 871 fid = NULL;
818 } else { 872 } else {
819 /* 873 /*
820 * Not in cached mode. No need to populate inode with stat. 874 * Not in cached mode. No need to populate inode with stat.
821 * socket syscall returns a fd, so we need instantiate 875 * socket syscall returns a fd, so we need instantiate
822 */ 876 */
823 inode = v9fs_get_inode(dir->i_sb, mode); 877 inode = v9fs_get_inode(dir->i_sb, mode, rdev);
824 if (IS_ERR(inode)) { 878 if (IS_ERR(inode)) {
825 err = PTR_ERR(inode); 879 err = PTR_ERR(inode);
826 goto error; 880 goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
886 st = p9_client_getattr_dotl(fid, P9_STATS_ALL); 940 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
887 if (IS_ERR(st)) 941 if (IS_ERR(st))
888 return PTR_ERR(st); 942 return PTR_ERR(st);
943 /*
944 * Don't update inode if the file type is different
945 */
946 if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
947 goto out;
889 948
890 spin_lock(&inode->i_lock); 949 spin_lock(&inode->i_lock);
891 /* 950 /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
897 if (v9ses->cache) 956 if (v9ses->cache)
898 inode->i_size = i_size; 957 inode->i_size = i_size;
899 spin_unlock(&inode->i_lock); 958 spin_unlock(&inode->i_lock);
959out:
900 kfree(st); 960 kfree(st);
901 return 0; 961 return 0;
902} 962}
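The new v9fs_open_to_dotl_flags()/v9fs_mapped_dotl_flags() pair passes the O_ACCMODE bits straight through and translates every other Linux open flag via a lookup table. A userspace sketch of the same table-driven translation; the DOTL_* values are illustrative placeholders rather than the real 9P2000.L constants:

#include <fcntl.h>
#include <stdio.h>
#include <stddef.h>

#define DOTL_CREAT	0x000040
#define DOTL_TRUNC	0x000200
#define DOTL_APPEND	0x000400

struct oflag_map {
	int open_flag;
	int dotl_flag;
};

static int open_to_dotl_flags(int flags)
{
	static const struct oflag_map map[] = {
		{ O_CREAT,  DOTL_CREAT  },
		{ O_TRUNC,  DOTL_TRUNC  },
		{ O_APPEND, DOTL_APPEND },
	};
	int rflags = flags & O_ACCMODE;	/* read/write mode maps 1:1 */
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (flags & map[i].open_flag)
			rflags |= map[i].dotl_flag;
	return rflags;
}

int main(void)
{
	printf("0x%x\n", open_to_dotl_flags(O_WRONLY | O_CREAT | O_TRUNC));
	return 0;
}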
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index feef6cdc1fd2..c70251d47ed1 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
149 else 149 else
150 sb->s_d_op = &v9fs_dentry_operations; 150 sb->s_d_op = &v9fs_dentry_operations;
151 151
152 inode = v9fs_get_inode(sb, S_IFDIR | mode); 152 inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
153 if (IS_ERR(inode)) { 153 if (IS_ERR(inode)) {
154 retval = PTR_ERR(inode); 154 retval = PTR_ERR(inode);
155 goto release_sb; 155 goto release_sb;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 54b8c28bebc8..720d885e8dca 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -474,17 +474,22 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd)
474 befs_data_stream *data = &befs_ino->i_data.ds; 474 befs_data_stream *data = &befs_ino->i_data.ds;
475 befs_off_t len = data->size; 475 befs_off_t len = data->size;
476 476
477 befs_debug(sb, "Follow long symlink"); 477 if (len == 0) {
478 478 befs_error(sb, "Long symlink with illegal length");
479 link = kmalloc(len, GFP_NOFS);
480 if (!link) {
481 link = ERR_PTR(-ENOMEM);
482 } else if (befs_read_lsymlink(sb, data, link, len) != len) {
483 kfree(link);
484 befs_error(sb, "Failed to read entire long symlink");
485 link = ERR_PTR(-EIO); 479 link = ERR_PTR(-EIO);
486 } else { 480 } else {
487 link[len - 1] = '\0'; 481 befs_debug(sb, "Follow long symlink");
482
483 link = kmalloc(len, GFP_NOFS);
484 if (!link) {
485 link = ERR_PTR(-ENOMEM);
486 } else if (befs_read_lsymlink(sb, data, link, len) != len) {
487 kfree(link);
488 befs_error(sb, "Failed to read entire long symlink");
489 link = ERR_PTR(-EIO);
490 } else {
491 link[len - 1] = '\0';
492 }
488 } 493 }
489 } else { 494 } else {
490 link = befs_ino->i_data.symlink; 495 link = befs_ino->i_data.symlink;
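The befs change rejects a "long" symlink whose data stream claims a length of zero before anything is allocated or read, since the success path writes link[len - 1]. A minimal userspace sketch of that guard, with malloc() standing in for kmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *read_long_symlink(size_t len)
{
	char *link;

	if (len == 0)			/* illegal length: len - 1 would underflow */
		return NULL;

	link = malloc(len);
	if (!link)
		return NULL;
	/* ... read len bytes of the link target from the data stream ... */
	memset(link, 'x', len);
	link[len - 1] = '\0';		/* safe only because len >= 1 */
	return link;
}

int main(void)
{
	printf("len=0 -> %s\n", read_long_symlink(0) ? "buffer" : "rejected");
	return 0;
}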
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ff77262e887c..95f786ec7f08 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1429 WARN_ON_ONCE(bdev->bd_holders); 1429 WARN_ON_ONCE(bdev->bd_holders);
1430 sync_blockdev(bdev); 1430 sync_blockdev(bdev);
1431 kill_bdev(bdev); 1431 kill_bdev(bdev);
1432 /* ->release can cause the old bdi to disappear,
1433 * so must switch it out first
1434 */
1435 bdev_inode_switch_bdi(bdev->bd_inode,
1436 &default_backing_dev_info);
1432 } 1437 }
1433 if (bdev->bd_contains == bdev) { 1438 if (bdev->bd_contains == bdev) {
1434 if (disk->fops->release) 1439 if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1442 disk_put_part(bdev->bd_part); 1447 disk_put_part(bdev->bd_part);
1443 bdev->bd_part = NULL; 1448 bdev->bd_part = NULL;
1444 bdev->bd_disk = NULL; 1449 bdev->bd_disk = NULL;
1445 bdev_inode_switch_bdi(bdev->bd_inode,
1446 &default_backing_dev_info);
1447 if (bdev != bdev->bd_contains) 1450 if (bdev != bdev->bd_contains)
1448 victim = bdev->bd_contains; 1451 victim = bdev->bd_contains;
1449 bdev->bd_contains = NULL; 1452 bdev->bd_contains = NULL;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 502b9e988679..d9f99a16edd6 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -176,7 +176,11 @@ static inline u64 btrfs_ino(struct inode *inode)
176{ 176{
177 u64 ino = BTRFS_I(inode)->location.objectid; 177 u64 ino = BTRFS_I(inode)->location.objectid;
178 178
179 if (ino <= BTRFS_FIRST_FREE_OBJECTID) 179 /*
180 * !ino: btree_inode
181 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
182 */
183 if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY)
180 ino = inode->i_ino; 184 ino = inode->i_ino;
181 return ino; 185 return ino;
182} 186}
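For readability, here is the btrfs_ino() helper as it reads after this hunk, reconstructed from the diff (BTRFS_I(), u64 and the location key are kernel types taken from context):

static inline u64 btrfs_ino(struct inode *inode)
{
	u64 ino = BTRFS_I(inode)->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->i_ino;
	return ino;
}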
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0469263e327e..03912c5c6f49 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1415,17 +1415,15 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
1415#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 1415#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
1416static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 1416static inline u##bits btrfs_##name(struct extent_buffer *eb) \
1417{ \ 1417{ \
1418 type *p = kmap_atomic(eb->first_page, KM_USER0); \ 1418 type *p = page_address(eb->first_page); \
1419 u##bits res = le##bits##_to_cpu(p->member); \ 1419 u##bits res = le##bits##_to_cpu(p->member); \
1420 kunmap_atomic(p, KM_USER0); \
1421 return res; \ 1420 return res; \
1422} \ 1421} \
1423static inline void btrfs_set_##name(struct extent_buffer *eb, \ 1422static inline void btrfs_set_##name(struct extent_buffer *eb, \
1424 u##bits val) \ 1423 u##bits val) \
1425{ \ 1424{ \
1426 type *p = kmap_atomic(eb->first_page, KM_USER0); \ 1425 type *p = page_address(eb->first_page); \
1427 p->member = cpu_to_le##bits(val); \ 1426 p->member = cpu_to_le##bits(val); \
1428 kunmap_atomic(p, KM_USER0); \
1429} 1427}
1430 1428
1431#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 1429#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
@@ -2367,8 +2365,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
2367int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2365int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
2368int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2366int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
2369int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2367int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
2370int btrfs_drop_snapshot(struct btrfs_root *root, 2368void btrfs_drop_snapshot(struct btrfs_root *root,
2371 struct btrfs_block_rsv *block_rsv, int update_ref); 2369 struct btrfs_block_rsv *block_rsv, int update_ref);
2372int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2370int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2373 struct btrfs_root *root, 2371 struct btrfs_root *root,
2374 struct extent_buffer *node, 2372 struct extent_buffer *node,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 66bac226944e..f5be06a2462f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1782,6 +1782,9 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1782 1782
1783 1783
1784 for (i = 0; i < multi->num_stripes; i++, stripe++) { 1784 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1785 if (!stripe->dev->can_discard)
1786 continue;
1787
1785 ret = btrfs_issue_discard(stripe->dev->bdev, 1788 ret = btrfs_issue_discard(stripe->dev->bdev,
1786 stripe->physical, 1789 stripe->physical,
1787 stripe->length); 1790 stripe->length);
@@ -1789,11 +1792,16 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1789 discarded_bytes += stripe->length; 1792 discarded_bytes += stripe->length;
1790 else if (ret != -EOPNOTSUPP) 1793 else if (ret != -EOPNOTSUPP)
1791 break; 1794 break;
1795
1796 /*
1797 * Just in case we get back EOPNOTSUPP for some reason,
1798 * just ignore the return value so we don't screw up
1799 * people calling discard_extent.
1800 */
1801 ret = 0;
1792 } 1802 }
1793 kfree(multi); 1803 kfree(multi);
1794 } 1804 }
1795 if (discarded_bytes && ret == -EOPNOTSUPP)
1796 ret = 0;
1797 1805
1798 if (actual_bytes) 1806 if (actual_bytes)
1799 *actual_bytes = discarded_bytes; 1807 *actual_bytes = discarded_bytes;
@@ -6269,8 +6277,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6269 * also make sure backrefs for the shared block and all lower level 6277 * also make sure backrefs for the shared block and all lower level
6270 * blocks are properly updated. 6278 * blocks are properly updated.
6271 */ 6279 */
6272int btrfs_drop_snapshot(struct btrfs_root *root, 6280void btrfs_drop_snapshot(struct btrfs_root *root,
6273 struct btrfs_block_rsv *block_rsv, int update_ref) 6281 struct btrfs_block_rsv *block_rsv, int update_ref)
6274{ 6282{
6275 struct btrfs_path *path; 6283 struct btrfs_path *path;
6276 struct btrfs_trans_handle *trans; 6284 struct btrfs_trans_handle *trans;
@@ -6283,13 +6291,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6283 int level; 6291 int level;
6284 6292
6285 path = btrfs_alloc_path(); 6293 path = btrfs_alloc_path();
6286 if (!path) 6294 if (!path) {
6287 return -ENOMEM; 6295 err = -ENOMEM;
6296 goto out;
6297 }
6288 6298
6289 wc = kzalloc(sizeof(*wc), GFP_NOFS); 6299 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6290 if (!wc) { 6300 if (!wc) {
6291 btrfs_free_path(path); 6301 btrfs_free_path(path);
6292 return -ENOMEM; 6302 err = -ENOMEM;
6303 goto out;
6293 } 6304 }
6294 6305
6295 trans = btrfs_start_transaction(tree_root, 0); 6306 trans = btrfs_start_transaction(tree_root, 0);
@@ -6318,7 +6329,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6318 path->lowest_level = 0; 6329 path->lowest_level = 0;
6319 if (ret < 0) { 6330 if (ret < 0) {
6320 err = ret; 6331 err = ret;
6321 goto out; 6332 goto out_free;
6322 } 6333 }
6323 WARN_ON(ret > 0); 6334 WARN_ON(ret > 0);
6324 6335
@@ -6425,11 +6436,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6425 free_extent_buffer(root->commit_root); 6436 free_extent_buffer(root->commit_root);
6426 kfree(root); 6437 kfree(root);
6427 } 6438 }
6428out: 6439out_free:
6429 btrfs_end_transaction_throttle(trans, tree_root); 6440 btrfs_end_transaction_throttle(trans, tree_root);
6430 kfree(wc); 6441 kfree(wc);
6431 btrfs_free_path(path); 6442 btrfs_free_path(path);
6432 return err; 6443out:
6444 if (err)
6445 btrfs_std_error(root->fs_info, err);
6446 return;
6433} 6447}
6434 6448
6435/* 6449/*
@@ -6720,6 +6734,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6720 struct btrfs_space_info *space_info; 6734 struct btrfs_space_info *space_info;
6721 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 6735 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6722 struct btrfs_device *device; 6736 struct btrfs_device *device;
6737 u64 min_free;
6738 u64 dev_min = 1;
6739 u64 dev_nr = 0;
6740 int index;
6723 int full = 0; 6741 int full = 0;
6724 int ret = 0; 6742 int ret = 0;
6725 6743
@@ -6729,8 +6747,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6729 if (!block_group) 6747 if (!block_group)
6730 return -1; 6748 return -1;
6731 6749
6750 min_free = btrfs_block_group_used(&block_group->item);
6751
6732 /* no bytes used, we're good */ 6752 /* no bytes used, we're good */
6733 if (!btrfs_block_group_used(&block_group->item)) 6753 if (!min_free)
6734 goto out; 6754 goto out;
6735 6755
6736 space_info = block_group->space_info; 6756 space_info = block_group->space_info;
@@ -6746,10 +6766,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6746 * all of the extents from this block group. If we can, we're good 6766 * all of the extents from this block group. If we can, we're good
6747 */ 6767 */
6748 if ((space_info->total_bytes != block_group->key.offset) && 6768 if ((space_info->total_bytes != block_group->key.offset) &&
6749 (space_info->bytes_used + space_info->bytes_reserved + 6769 (space_info->bytes_used + space_info->bytes_reserved +
6750 space_info->bytes_pinned + space_info->bytes_readonly + 6770 space_info->bytes_pinned + space_info->bytes_readonly +
6751 btrfs_block_group_used(&block_group->item) < 6771 min_free < space_info->total_bytes)) {
6752 space_info->total_bytes)) {
6753 spin_unlock(&space_info->lock); 6772 spin_unlock(&space_info->lock);
6754 goto out; 6773 goto out;
6755 } 6774 }
@@ -6766,9 +6785,31 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6766 if (full) 6785 if (full)
6767 goto out; 6786 goto out;
6768 6787
6788 /*
6789 * index:
6790 * 0: raid10
6791 * 1: raid1
6792 * 2: dup
6793 * 3: raid0
6794 * 4: single
6795 */
6796 index = get_block_group_index(block_group);
6797 if (index == 0) {
6798 dev_min = 4;
6799 /* Divide by 2 */
6800 min_free >>= 1;
6801 } else if (index == 1) {
6802 dev_min = 2;
6803 } else if (index == 2) {
6804 /* Multiply by 2 */
6805 min_free <<= 1;
6806 } else if (index == 3) {
6807 dev_min = fs_devices->rw_devices;
6808 do_div(min_free, dev_min);
6809 }
6810
6769 mutex_lock(&root->fs_info->chunk_mutex); 6811 mutex_lock(&root->fs_info->chunk_mutex);
6770 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 6812 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6771 u64 min_free = btrfs_block_group_used(&block_group->item);
6772 u64 dev_offset; 6813 u64 dev_offset;
6773 6814
6774 /* 6815 /*
@@ -6779,7 +6820,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6779 ret = find_free_dev_extent(NULL, device, min_free, 6820 ret = find_free_dev_extent(NULL, device, min_free,
6780 &dev_offset, NULL); 6821 &dev_offset, NULL);
6781 if (!ret) 6822 if (!ret)
6823 dev_nr++;
6824
6825 if (dev_nr >= dev_min)
6782 break; 6826 break;
6827
6783 ret = -1; 6828 ret = -1;
6784 } 6829 }
6785 } 6830 }
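The btrfs_can_relocate() change sizes the relocation check per RAID profile: min_free is the amount each device must offer and dev_min the number of devices that must offer it. A standalone sketch of that arithmetic; num_rw_devices stands in for fs_devices->rw_devices and the index values follow the comment in the hunk (0 raid10, 1 raid1, 2 dup, 3 raid0, 4 single):

#include <stdio.h>
#include <stdint.h>

static void relocation_requirements(int index, uint64_t used,
				    uint64_t num_rw_devices,
				    uint64_t *min_free, uint64_t *dev_min)
{
	*min_free = used;
	*dev_min = 1;

	switch (index) {
	case 0:			/* raid10: data striped over mirrored pairs */
		*dev_min = 4;
		*min_free >>= 1;
		break;
	case 1:			/* raid1: two full copies */
		*dev_min = 2;
		break;
	case 2:			/* dup: two copies on one device */
		*min_free <<= 1;
		break;
	case 3:			/* raid0: spread evenly over all rw devices */
		*dev_min = num_rw_devices;
		*min_free /= *dev_min;
		break;
	default:		/* single */
		break;
	}
}

int main(void)
{
	uint64_t min_free, dev_min;

	relocation_requirements(0, 1024ULL << 20, 6, &min_free, &dev_min);
	printf("raid10: need %llu bytes free on each of %llu devices\n",
	       (unsigned long long)min_free, (unsigned long long)dev_min);
	return 0;
}

For example, relocating a 1 GiB raid10 block group needs 512 MiB free on each of four devices, while dup needs 2 GiB of free space on a single device.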
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b910694f61ed..a1cb7821becd 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -183,8 +183,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
183 * read from the commit root and sidestep a nasty deadlock 183 * read from the commit root and sidestep a nasty deadlock
184 * between reading the free space cache and updating the csum tree. 184 * between reading the free space cache and updating the csum tree.
185 */ 185 */
186 if (btrfs_is_free_space_inode(root, inode)) 186 if (btrfs_is_free_space_inode(root, inode)) {
187 path->search_commit_root = 1; 187 path->search_commit_root = 1;
188 path->skip_locking = 1;
189 }
188 190
189 disk_bytenr = (u64)bio->bi_sector << 9; 191 disk_bytenr = (u64)bio->bi_sector << 9;
190 if (dio) 192 if (dio)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 658d66959abe..a381cd22f518 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -150,6 +150,8 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
150 spin_lock(&root->fs_info->defrag_inodes_lock); 150 spin_lock(&root->fs_info->defrag_inodes_lock);
151 if (!BTRFS_I(inode)->in_defrag) 151 if (!BTRFS_I(inode)->in_defrag)
152 __btrfs_add_inode_defrag(inode, defrag); 152 __btrfs_add_inode_defrag(inode, defrag);
153 else
154 kfree(defrag);
153 spin_unlock(&root->fs_info->defrag_inodes_lock); 155 spin_unlock(&root->fs_info->defrag_inodes_lock);
154 return 0; 156 return 0;
155} 157}
@@ -1073,12 +1075,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
1073 start_pos = pos & ~((u64)root->sectorsize - 1); 1075 start_pos = pos & ~((u64)root->sectorsize - 1);
1074 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; 1076 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
1075 1077
1076 if (start_pos > inode->i_size) {
1077 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1078 if (err)
1079 return err;
1080 }
1081
1082again: 1078again:
1083 for (i = 0; i < num_pages; i++) { 1079 for (i = 0; i < num_pages; i++) {
1084 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1080 pages[i] = find_or_create_page(inode->i_mapping, index + i,
@@ -1336,6 +1332,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1336 struct inode *inode = fdentry(file)->d_inode; 1332 struct inode *inode = fdentry(file)->d_inode;
1337 struct btrfs_root *root = BTRFS_I(inode)->root; 1333 struct btrfs_root *root = BTRFS_I(inode)->root;
1338 loff_t *ppos = &iocb->ki_pos; 1334 loff_t *ppos = &iocb->ki_pos;
1335 u64 start_pos;
1339 ssize_t num_written = 0; 1336 ssize_t num_written = 0;
1340 ssize_t err = 0; 1337 ssize_t err = 0;
1341 size_t count, ocount; 1338 size_t count, ocount;
@@ -1384,6 +1381,15 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1384 file_update_time(file); 1381 file_update_time(file);
1385 BTRFS_I(inode)->sequence++; 1382 BTRFS_I(inode)->sequence++;
1386 1383
1384 start_pos = round_down(pos, root->sectorsize);
1385 if (start_pos > i_size_read(inode)) {
1386 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1387 if (err) {
1388 mutex_unlock(&inode->i_mutex);
1389 goto out;
1390 }
1391 }
1392
1387 if (unlikely(file->f_flags & O_DIRECT)) { 1393 if (unlikely(file->f_flags & O_DIRECT)) {
1388 num_written = __btrfs_direct_write(iocb, iov, nr_segs, 1394 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1389 pos, ppos, count, ocount); 1395 pos, ppos, count, ocount);
@@ -1638,11 +1644,15 @@ static long btrfs_fallocate(struct file *file, int mode,
1638 1644
1639 cur_offset = alloc_start; 1645 cur_offset = alloc_start;
1640 while (1) { 1646 while (1) {
1647 u64 actual_end;
1648
1641 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 1649 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1642 alloc_end - cur_offset, 0); 1650 alloc_end - cur_offset, 0);
1643 BUG_ON(IS_ERR_OR_NULL(em)); 1651 BUG_ON(IS_ERR_OR_NULL(em));
1644 last_byte = min(extent_map_end(em), alloc_end); 1652 last_byte = min(extent_map_end(em), alloc_end);
1653 actual_end = min_t(u64, extent_map_end(em), offset + len);
1645 last_byte = (last_byte + mask) & ~mask; 1654 last_byte = (last_byte + mask) & ~mask;
1655
1646 if (em->block_start == EXTENT_MAP_HOLE || 1656 if (em->block_start == EXTENT_MAP_HOLE ||
1647 (cur_offset >= inode->i_size && 1657 (cur_offset >= inode->i_size &&
1648 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 1658 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
@@ -1655,6 +1665,16 @@ static long btrfs_fallocate(struct file *file, int mode,
1655 free_extent_map(em); 1665 free_extent_map(em);
1656 break; 1666 break;
1657 } 1667 }
1668 } else if (actual_end > inode->i_size &&
1669 !(mode & FALLOC_FL_KEEP_SIZE)) {
1670 /*
1671 * We didn't need to allocate any more space, but we
1672 * still extended the size of the file so we need to
1673 * update i_size.
1674 */
1675 inode->i_ctime = CURRENT_TIME;
1676 i_size_write(inode, actual_end);
1677 btrfs_ordered_update_i_size(inode, actual_end, NULL);
1658 } 1678 }
1659 free_extent_map(em); 1679 free_extent_map(em);
1660 1680
@@ -1797,6 +1817,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
1797 goto out; 1817 goto out;
1798 case SEEK_DATA: 1818 case SEEK_DATA:
1799 case SEEK_HOLE: 1819 case SEEK_HOLE:
1820 if (offset >= i_size_read(inode)) {
1821 mutex_unlock(&inode->i_mutex);
1822 return -ENXIO;
1823 }
1824
1800 ret = find_desired_extent(inode, &offset, origin); 1825 ret = find_desired_extent(inode, &offset, origin);
1801 if (ret) { 1826 if (ret) {
1802 mutex_unlock(&inode->i_mutex); 1827 mutex_unlock(&inode->i_mutex);
@@ -1804,10 +1829,14 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
1804 } 1829 }
1805 } 1830 }
1806 1831
1807 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) 1832 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
1808 return -EINVAL; 1833 offset = -EINVAL;
1809 if (offset > inode->i_sb->s_maxbytes) 1834 goto out;
1810 return -EINVAL; 1835 }
1836 if (offset > inode->i_sb->s_maxbytes) {
1837 offset = -EINVAL;
1838 goto out;
1839 }
1811 1840
1812 /* Special lock needed here? */ 1841 /* Special lock needed here? */
1813 if (offset != file->f_pos) { 1842 if (offset != file->f_pos) {
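One of the file.c changes moves the beyond-EOF expansion out of prepare_pages() and into btrfs_file_aio_write(), keyed off the write position rounded down to a sector boundary while i_mutex is held. A standalone sketch of that decision; round_down mirrors the kernel macro for power-of-two sizes:

#include <stdio.h>
#include <stdint.h>

#define round_down(x, y)	((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t pos = 20000;		/* byte offset of the write */
	uint64_t sectorsize = 4096;	/* root->sectorsize */
	uint64_t i_size = 8192;		/* current inode size */
	uint64_t start_pos = round_down(pos, sectorsize);

	if (start_pos > i_size)
		printf("expand file from %llu to %llu before writing\n",
		       (unsigned long long)i_size,
		       (unsigned long long)start_pos);
	else
		printf("no expansion needed (start_pos=%llu)\n",
		       (unsigned long long)start_pos);
	return 0;
}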
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6377713f639c..41ac927401d0 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -190,9 +190,11 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
190 struct btrfs_path *path, 190 struct btrfs_path *path,
191 struct inode *inode) 191 struct inode *inode)
192{ 192{
193 struct btrfs_block_rsv *rsv;
193 loff_t oldsize; 194 loff_t oldsize;
194 int ret = 0; 195 int ret = 0;
195 196
197 rsv = trans->block_rsv;
196 trans->block_rsv = root->orphan_block_rsv; 198 trans->block_rsv = root->orphan_block_rsv;
197 ret = btrfs_block_rsv_check(trans, root, 199 ret = btrfs_block_rsv_check(trans, root,
198 root->orphan_block_rsv, 200 root->orphan_block_rsv,
@@ -210,6 +212,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
210 */ 212 */
211 ret = btrfs_truncate_inode_items(trans, root, inode, 213 ret = btrfs_truncate_inode_items(trans, root, inode,
212 0, BTRFS_EXTENT_DATA_KEY); 214 0, BTRFS_EXTENT_DATA_KEY);
215
216 trans->block_rsv = rsv;
213 if (ret) { 217 if (ret) {
214 WARN_ON(1); 218 WARN_ON(1);
215 return ret; 219 return ret;
@@ -1168,9 +1172,9 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1168 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); 1172 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
1169} 1173}
1170 1174
1171static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, 1175static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1172 struct btrfs_free_space *info, u64 offset, 1176 struct btrfs_free_space *info,
1173 u64 bytes) 1177 u64 offset, u64 bytes)
1174{ 1178{
1175 unsigned long start, count; 1179 unsigned long start, count;
1176 1180
@@ -1181,6 +1185,13 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1181 bitmap_clear(info->bitmap, start, count); 1185 bitmap_clear(info->bitmap, start, count);
1182 1186
1183 info->bytes -= bytes; 1187 info->bytes -= bytes;
1188}
1189
1190static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1191 struct btrfs_free_space *info, u64 offset,
1192 u64 bytes)
1193{
1194 __bitmap_clear_bits(ctl, info, offset, bytes);
1184 ctl->free_space -= bytes; 1195 ctl->free_space -= bytes;
1185} 1196}
1186 1197
@@ -1984,7 +1995,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1984 return 0; 1995 return 0;
1985 1996
1986 ret = search_start; 1997 ret = search_start;
1987 bitmap_clear_bits(ctl, entry, ret, bytes); 1998 __bitmap_clear_bits(ctl, entry, ret, bytes);
1988 1999
1989 return ret; 2000 return ret;
1990} 2001}
@@ -2039,7 +2050,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2039 continue; 2050 continue;
2040 } 2051 }
2041 } else { 2052 } else {
2042
2043 ret = entry->offset; 2053 ret = entry->offset;
2044 2054
2045 entry->offset += bytes; 2055 entry->offset += bytes;
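The free-space-cache change splits the bitmap helper in two: the inner __bitmap_clear_bits() only updates the bitmap entry, while the outer bitmap_clear_bits() also maintains ctl->free_space. The cluster allocator calls the inner variant, presumably because cluster space was already removed from the global counter when the cluster was set up. A simplified userspace sketch:

#include <stdint.h>
#include <stdio.h>

struct free_space_ctl { uint64_t free_space; };
struct free_space_info { uint64_t bytes; };

static void __clear_bits(struct free_space_ctl *ctl,
			 struct free_space_info *info, uint64_t bytes)
{
	/* ... clear the corresponding bits in info->bitmap ... */
	info->bytes -= bytes;
	(void)ctl;
}

static void clear_bits(struct free_space_ctl *ctl,
		       struct free_space_info *info, uint64_t bytes)
{
	__clear_bits(ctl, info, bytes);
	ctl->free_space -= bytes;	/* only the outer helper touches this */
}

int main(void)
{
	struct free_space_ctl ctl = { .free_space = 1 << 20 };
	struct free_space_info info = { .bytes = 1 << 16 };

	clear_bits(&ctl, &info, 4096);
	printf("free_space=%llu entry bytes=%llu\n",
	       (unsigned long long)ctl.free_space,
	       (unsigned long long)info.bytes);
	return 0;
}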
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 15fceefbca0a..b2d004ad66a0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1786,7 +1786,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1786 &ordered_extent->list); 1786 &ordered_extent->list);
1787 1787
1788 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1788 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1789 if (!ret) { 1789 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1790 ret = btrfs_update_inode(trans, root, inode); 1790 ret = btrfs_update_inode(trans, root, inode);
1791 BUG_ON(ret); 1791 BUG_ON(ret);
1792 } 1792 }
@@ -3510,15 +3510,19 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3510 err = btrfs_drop_extents(trans, inode, cur_offset, 3510 err = btrfs_drop_extents(trans, inode, cur_offset,
3511 cur_offset + hole_size, 3511 cur_offset + hole_size,
3512 &hint_byte, 1); 3512 &hint_byte, 1);
3513 if (err) 3513 if (err) {
3514 btrfs_end_transaction(trans, root);
3514 break; 3515 break;
3516 }
3515 3517
3516 err = btrfs_insert_file_extent(trans, root, 3518 err = btrfs_insert_file_extent(trans, root,
3517 btrfs_ino(inode), cur_offset, 0, 3519 btrfs_ino(inode), cur_offset, 0,
3518 0, hole_size, 0, hole_size, 3520 0, hole_size, 0, hole_size,
3519 0, 0, 0); 3521 0, 0, 0);
3520 if (err) 3522 if (err) {
3523 btrfs_end_transaction(trans, root);
3521 break; 3524 break;
3525 }
3522 3526
3523 btrfs_drop_extent_cache(inode, hole_start, 3527 btrfs_drop_extent_cache(inode, hole_start,
3524 last_byte - 1, 0); 3528 last_byte - 1, 0);
@@ -3952,7 +3956,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3952 struct btrfs_root *root, int *new) 3956 struct btrfs_root *root, int *new)
3953{ 3957{
3954 struct inode *inode; 3958 struct inode *inode;
3955 int bad_inode = 0;
3956 3959
3957 inode = btrfs_iget_locked(s, location->objectid, root); 3960 inode = btrfs_iget_locked(s, location->objectid, root);
3958 if (!inode) 3961 if (!inode)
@@ -3968,15 +3971,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3968 if (new) 3971 if (new)
3969 *new = 1; 3972 *new = 1;
3970 } else { 3973 } else {
3971 bad_inode = 1; 3974 unlock_new_inode(inode);
3975 iput(inode);
3976 inode = ERR_PTR(-ESTALE);
3972 } 3977 }
3973 } 3978 }
3974 3979
3975 if (bad_inode) {
3976 iput(inode);
3977 inode = ERR_PTR(-ESTALE);
3978 }
3979
3980 return inode; 3980 return inode;
3981} 3981}
3982 3982
@@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4018 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); 4018 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4019 kfree(dentry->d_fsdata); 4019 kfree(dentry->d_fsdata);
4020 dentry->d_fsdata = NULL; 4020 dentry->d_fsdata = NULL;
4021 d_clear_need_lookup(dentry); 4021 /* This thing is hashed, drop it for now */
4022 d_drop(dentry);
4022 } else { 4023 } else {
4023 ret = btrfs_inode_by_name(dir, dentry, &location); 4024 ret = btrfs_inode_by_name(dir, dentry, &location);
4024 } 4025 }
@@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)
4085static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4086static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4086 struct nameidata *nd) 4087 struct nameidata *nd)
4087{ 4088{
4088 return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4089 struct dentry *ret;
4090
4091 ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4092 if (unlikely(d_need_lookup(dentry))) {
4093 spin_lock(&dentry->d_lock);
4094 dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
4095 spin_unlock(&dentry->d_lock);
4096 }
4097 return ret;
4089} 4098}
4090 4099
4091unsigned char btrfs_filetype_table[] = { 4100unsigned char btrfs_filetype_table[] = {
@@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4125 4134
4126 /* special case for "." */ 4135 /* special case for "." */
4127 if (filp->f_pos == 0) { 4136 if (filp->f_pos == 0) {
4128 over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); 4137 over = filldir(dirent, ".", 1,
4138 filp->f_pos, btrfs_ino(inode), DT_DIR);
4129 if (over) 4139 if (over)
4130 return 0; 4140 return 0;
4131 filp->f_pos = 1; 4141 filp->f_pos = 1;
@@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4134 if (filp->f_pos == 1) { 4144 if (filp->f_pos == 1) {
4135 u64 pino = parent_ino(filp->f_path.dentry); 4145 u64 pino = parent_ino(filp->f_path.dentry);
4136 over = filldir(dirent, "..", 2, 4146 over = filldir(dirent, "..", 2,
4137 2, pino, DT_DIR); 4147 filp->f_pos, pino, DT_DIR);
4138 if (over) 4148 if (over)
4139 return 0; 4149 return 0;
4140 filp->f_pos = 2; 4150 filp->f_pos = 2;
@@ -5823,7 +5833,7 @@ again:
5823 5833
5824 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5834 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5825 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5835 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5826 if (!ret) 5836 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5827 btrfs_update_inode(trans, root, inode); 5837 btrfs_update_inode(trans, root, inode);
5828 ret = 0; 5838 ret = 0;
5829out_unlock: 5839out_unlock:
@@ -7354,11 +7364,15 @@ static int btrfs_set_page_dirty(struct page *page)
7354static int btrfs_permission(struct inode *inode, int mask) 7364static int btrfs_permission(struct inode *inode, int mask)
7355{ 7365{
7356 struct btrfs_root *root = BTRFS_I(inode)->root; 7366 struct btrfs_root *root = BTRFS_I(inode)->root;
7367 umode_t mode = inode->i_mode;
7357 7368
7358 if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) 7369 if (mask & MAY_WRITE &&
7359 return -EROFS; 7370 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7360 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) 7371 if (btrfs_root_readonly(root))
7361 return -EACCES; 7372 return -EROFS;
7373 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7374 return -EACCES;
7375 }
7362 return generic_permission(inode, mask); 7376 return generic_permission(inode, mask);
7363} 7377}
7364 7378
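The btrfs_permission() change applies the read-only checks only to regular files, directories and symlinks, so writing to a device node or fifo that merely resides on a read-only subvolume is no longer refused there. A standalone sketch using the <sys/stat.h> mode macros:

#include <stdio.h>
#include <sys/stat.h>
#include <errno.h>

static int check_write(mode_t mode, int fs_readonly, int inode_readonly)
{
	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		if (fs_readonly)
			return -EROFS;
		if (inode_readonly)
			return -EACCES;
	}
	return 0;	/* fall through to the generic permission checks */
}

int main(void)
{
	printf("char device on ro subvol: %d\n", check_write(S_IFCHR, 1, 0));
	printf("regular file on ro subvol: %d\n", check_write(S_IFREG, 1, 0));
	return 0;
}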
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7cf013349941..538f65a79ec5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2177 if (!(src_file->f_mode & FMODE_READ)) 2177 if (!(src_file->f_mode & FMODE_READ))
2178 goto out_fput; 2178 goto out_fput;
2179 2179
2180 /* don't make the dst file partly checksummed */
2181 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
2182 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
2183 goto out_fput;
2184
2180 ret = -EISDIR; 2185 ret = -EISDIR;
2181 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) 2186 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
2182 goto out_fput; 2187 goto out_fput;
@@ -2220,6 +2225,16 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2220 !IS_ALIGNED(destoff, bs)) 2225 !IS_ALIGNED(destoff, bs))
2221 goto out_unlock; 2226 goto out_unlock;
2222 2227
2228 if (destoff > inode->i_size) {
2229 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
2230 if (ret)
2231 goto out_unlock;
2232 }
2233
2234 /* truncate page cache pages from target inode range */
2235 truncate_inode_pages_range(&inode->i_data, destoff,
2236 PAGE_CACHE_ALIGN(destoff + len) - 1);
2237
2223 /* do any pending delalloc/csum calc on src, one way or 2238 /* do any pending delalloc/csum calc on src, one way or
2224 another, and lock file content */ 2239 another, and lock file content */
2225 while (1) { 2240 while (1) {
@@ -2313,7 +2328,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2313 else 2328 else
2314 new_key.offset = destoff; 2329 new_key.offset = destoff;
2315 2330
2316 trans = btrfs_start_transaction(root, 1); 2331 /*
2332 * 1 - adjusting old extent (we may have to split it)
2333 * 1 - add new extent
2334 * 1 - inode update
2335 */
2336 trans = btrfs_start_transaction(root, 3);
2317 if (IS_ERR(trans)) { 2337 if (IS_ERR(trans)) {
2318 ret = PTR_ERR(trans); 2338 ret = PTR_ERR(trans);
2319 goto out; 2339 goto out;
@@ -2321,14 +2341,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2321 2341
2322 if (type == BTRFS_FILE_EXTENT_REG || 2342 if (type == BTRFS_FILE_EXTENT_REG ||
2323 type == BTRFS_FILE_EXTENT_PREALLOC) { 2343 type == BTRFS_FILE_EXTENT_PREALLOC) {
2344 /*
2345 * a | --- range to clone ---| b
2346 * | ------------- extent ------------- |
2347 */
2348
                                           2349                 /* subtract range b */
2350 if (key.offset + datal > off + len)
2351 datal = off + len - key.offset;
2352
                                           2353                 /* subtract range a */
2324 if (off > key.offset) { 2354 if (off > key.offset) {
2325 datao += off - key.offset; 2355 datao += off - key.offset;
2326 datal -= off - key.offset; 2356 datal -= off - key.offset;
2327 } 2357 }
2328 2358
2329 if (key.offset + datal > off + len)
2330 datal = off + len - key.offset;
2331
2332 ret = btrfs_drop_extents(trans, inode, 2359 ret = btrfs_drop_extents(trans, inode,
2333 new_key.offset, 2360 new_key.offset,
2334 new_key.offset + datal, 2361 new_key.offset + datal,
@@ -2425,7 +2452,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2425 if (endoff > inode->i_size) 2452 if (endoff > inode->i_size)
2426 btrfs_i_size_write(inode, endoff); 2453 btrfs_i_size_write(inode, endoff);
2427 2454
2428 BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
2429 ret = btrfs_update_inode(trans, root, inode); 2455 ret = btrfs_update_inode(trans, root, inode);
2430 BUG_ON(ret); 2456 BUG_ON(ret);
2431 btrfs_end_transaction(trans, root); 2457 btrfs_end_transaction(trans, root);
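
In the clone ioctl hunk above, the tail clip ("range b") is moved in front of the head clip ("range a"): with the old order the head clip shrank datal first and the tail comparison then used a length that had already been reduced, so the cloned slice could overshoot the requested range. A small stand-alone sketch of the corrected clipping arithmetic, using illustrative byte offsets rather than real extent items:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t off = 8192, len = 4096;        /* requested clone range        */
	uint64_t key_offset = 4096;             /* file offset of source extent */
	uint64_t datao = 0, datal = 16384;      /* extent data offset / length  */

	/* clip the tail first: the extent runs past off + len */
	if (key_offset + datal > off + len)
		datal = off + len - key_offset;

	/* then clip the head: the extent starts before off */
	if (off > key_offset) {
		datao += off - key_offset;
		datal -= off - key_offset;
	}

	printf("cloned slice: datao=%llu datal=%llu\n",
	       (unsigned long long)datao, (unsigned long long)datal);
	return 0;
}

With these numbers the fixed order yields a 4096-byte slice, matching the requested range; the old order would have produced 8192 bytes.
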
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 7dc36fab4afc..e24b7964a155 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -884,6 +884,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
884 struct btrfs_root *tree_root = fs_info->tree_root; 884 struct btrfs_root *tree_root = fs_info->tree_root;
885 struct btrfs_root *root = pending->root; 885 struct btrfs_root *root = pending->root;
886 struct btrfs_root *parent_root; 886 struct btrfs_root *parent_root;
887 struct btrfs_block_rsv *rsv;
887 struct inode *parent_inode; 888 struct inode *parent_inode;
888 struct dentry *parent; 889 struct dentry *parent;
889 struct dentry *dentry; 890 struct dentry *dentry;
@@ -895,6 +896,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
895 u64 objectid; 896 u64 objectid;
896 u64 root_flags; 897 u64 root_flags;
897 898
899 rsv = trans->block_rsv;
900
898 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); 901 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
899 if (!new_root_item) { 902 if (!new_root_item) {
900 pending->error = -ENOMEM; 903 pending->error = -ENOMEM;
@@ -1002,6 +1005,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1002 btrfs_orphan_post_snapshot(trans, pending); 1005 btrfs_orphan_post_snapshot(trans, pending);
1003fail: 1006fail:
1004 kfree(new_root_item); 1007 kfree(new_root_item);
1008 trans->block_rsv = rsv;
1005 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); 1009 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1006 return 0; 1010 return 0;
1007} 1011}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index babee65f8eda..786639fca067 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -799,14 +799,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
799 struct extent_buffer *eb, int slot, 799 struct extent_buffer *eb, int slot,
800 struct btrfs_key *key) 800 struct btrfs_key *key)
801{ 801{
802 struct inode *dir;
803 int ret;
804 struct btrfs_inode_ref *ref; 802 struct btrfs_inode_ref *ref;
803 struct btrfs_dir_item *di;
804 struct inode *dir;
805 struct inode *inode; 805 struct inode *inode;
806 char *name;
807 int namelen;
808 unsigned long ref_ptr; 806 unsigned long ref_ptr;
809 unsigned long ref_end; 807 unsigned long ref_end;
808 char *name;
809 int namelen;
810 int ret;
810 int search_done = 0; 811 int search_done = 0;
811 812
812 /* 813 /*
@@ -909,6 +910,25 @@ again:
909 } 910 }
910 btrfs_release_path(path); 911 btrfs_release_path(path);
911 912
913 /* look for a conflicting sequence number */
914 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
915 btrfs_inode_ref_index(eb, ref),
916 name, namelen, 0);
917 if (di && !IS_ERR(di)) {
918 ret = drop_one_dir_item(trans, root, path, dir, di);
919 BUG_ON(ret);
920 }
921 btrfs_release_path(path);
922
                                           923         /* look for a conflicting name */
924 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
925 name, namelen, 0);
926 if (di && !IS_ERR(di)) {
927 ret = drop_one_dir_item(trans, root, path, dir, di);
928 BUG_ON(ret);
929 }
930 btrfs_release_path(path);
931
912insert: 932insert:
913 /* insert our name */ 933 /* insert our name */
914 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, 934 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 53875ae73ad4..f2a4cc79da61 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -142,6 +142,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
142 unsigned long limit; 142 unsigned long limit;
143 unsigned long last_waited = 0; 143 unsigned long last_waited = 0;
144 int force_reg = 0; 144 int force_reg = 0;
145 int sync_pending = 0;
145 struct blk_plug plug; 146 struct blk_plug plug;
146 147
147 /* 148 /*
@@ -229,6 +230,22 @@ loop_lock:
229 230
230 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 231 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
231 232
233 /*
234 * if we're doing the sync list, record that our
235 * plug has some sync requests on it
236 *
237 * If we're doing the regular list and there are
238 * sync requests sitting around, unplug before
239 * we add more
240 */
241 if (pending_bios == &device->pending_sync_bios) {
242 sync_pending = 1;
243 } else if (sync_pending) {
244 blk_finish_plug(&plug);
245 blk_start_plug(&plug);
246 sync_pending = 0;
247 }
248
232 submit_bio(cur->bi_rw, cur); 249 submit_bio(cur->bi_rw, cur);
233 num_run++; 250 num_run++;
234 batch_run++; 251 batch_run++;
@@ -500,6 +517,9 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
500 fs_devices->rw_devices--; 517 fs_devices->rw_devices--;
501 } 518 }
502 519
520 if (device->can_discard)
521 fs_devices->num_can_discard--;
522
503 new_device = kmalloc(sizeof(*new_device), GFP_NOFS); 523 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
504 BUG_ON(!new_device); 524 BUG_ON(!new_device);
505 memcpy(new_device, device, sizeof(*new_device)); 525 memcpy(new_device, device, sizeof(*new_device));
@@ -508,6 +528,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
508 new_device->bdev = NULL; 528 new_device->bdev = NULL;
509 new_device->writeable = 0; 529 new_device->writeable = 0;
510 new_device->in_fs_metadata = 0; 530 new_device->in_fs_metadata = 0;
531 new_device->can_discard = 0;
511 list_replace_rcu(&device->dev_list, &new_device->dev_list); 532 list_replace_rcu(&device->dev_list, &new_device->dev_list);
512 533
513 call_rcu(&device->rcu, free_device); 534 call_rcu(&device->rcu, free_device);
@@ -547,6 +568,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
547static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 568static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
548 fmode_t flags, void *holder) 569 fmode_t flags, void *holder)
549{ 570{
571 struct request_queue *q;
550 struct block_device *bdev; 572 struct block_device *bdev;
551 struct list_head *head = &fs_devices->devices; 573 struct list_head *head = &fs_devices->devices;
552 struct btrfs_device *device; 574 struct btrfs_device *device;
@@ -603,6 +625,12 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
603 seeding = 0; 625 seeding = 0;
604 } 626 }
605 627
628 q = bdev_get_queue(bdev);
629 if (blk_queue_discard(q)) {
630 device->can_discard = 1;
631 fs_devices->num_can_discard++;
632 }
633
606 device->bdev = bdev; 634 device->bdev = bdev;
607 device->in_fs_metadata = 0; 635 device->in_fs_metadata = 0;
608 device->mode = flags; 636 device->mode = flags;
@@ -835,6 +863,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
835 863
836 max_hole_start = search_start; 864 max_hole_start = search_start;
837 max_hole_size = 0; 865 max_hole_size = 0;
866 hole_size = 0;
838 867
839 if (search_start >= search_end) { 868 if (search_start >= search_end) {
840 ret = -ENOSPC; 869 ret = -ENOSPC;
@@ -917,7 +946,14 @@ next:
917 cond_resched(); 946 cond_resched();
918 } 947 }
919 948
920 hole_size = search_end- search_start; 949 /*
950 * At this point, search_start should be the end of
951 * allocated dev extents, and when shrinking the device,
952 * search_end may be smaller than search_start.
953 */
954 if (search_end > search_start)
955 hole_size = search_end - search_start;
956
921 if (hole_size > max_hole_size) { 957 if (hole_size > max_hole_size) {
922 max_hole_start = search_start; 958 max_hole_start = search_start;
923 max_hole_size = hole_size; 959 max_hole_size = hole_size;
@@ -1543,6 +1579,7 @@ error:
1543 1579
1544int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 1580int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1545{ 1581{
1582 struct request_queue *q;
1546 struct btrfs_trans_handle *trans; 1583 struct btrfs_trans_handle *trans;
1547 struct btrfs_device *device; 1584 struct btrfs_device *device;
1548 struct block_device *bdev; 1585 struct block_device *bdev;
@@ -1612,6 +1649,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1612 1649
1613 lock_chunks(root); 1650 lock_chunks(root);
1614 1651
1652 q = bdev_get_queue(bdev);
1653 if (blk_queue_discard(q))
1654 device->can_discard = 1;
1615 device->writeable = 1; 1655 device->writeable = 1;
1616 device->work.func = pending_bios_fn; 1656 device->work.func = pending_bios_fn;
1617 generate_random_uuid(device->uuid); 1657 generate_random_uuid(device->uuid);
@@ -1647,6 +1687,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1647 root->fs_info->fs_devices->num_devices++; 1687 root->fs_info->fs_devices->num_devices++;
1648 root->fs_info->fs_devices->open_devices++; 1688 root->fs_info->fs_devices->open_devices++;
1649 root->fs_info->fs_devices->rw_devices++; 1689 root->fs_info->fs_devices->rw_devices++;
1690 if (device->can_discard)
1691 root->fs_info->fs_devices->num_can_discard++;
1650 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 1692 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1651 1693
1652 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 1694 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
@@ -2413,9 +2455,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2413 total_avail = device->total_bytes - device->bytes_used; 2455 total_avail = device->total_bytes - device->bytes_used;
2414 else 2456 else
2415 total_avail = 0; 2457 total_avail = 0;
2416 /* avail is off by max(alloc_start, 1MB), but that is the same 2458
2417 * for all devices, so it doesn't hurt the sorting later on 2459 /* If there is no space on this device, skip it. */
2418 */ 2460 if (total_avail == 0)
2461 continue;
2419 2462
2420 ret = find_free_dev_extent(trans, device, 2463 ret = find_free_dev_extent(trans, device,
2421 max_stripe_size * dev_stripes, 2464 max_stripe_size * dev_stripes,
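
The run_scheduled_bios() change above records when the current plug holds sync requests and flushes it before queueing more regular I/O behind them. The toy program below mimics that decision with a stubbed-out plug; none of the names are the kernel's, and the real code of course submits bios rather than printing.

#include <stdbool.h>
#include <stdio.h>

struct demo_plug { int depth; };

static void plug_start(struct demo_plug *p)  { p->depth++; puts("start plug"); }
static void plug_finish(struct demo_plug *p) { p->depth--; puts("finish plug (flush queued bios)"); }

/* Submit one bio, restarting the plug when regular I/O would otherwise
 * queue up behind sync requests that are already plugged. */
static void demo_submit(bool from_sync_list, bool *sync_pending,
			struct demo_plug *plug)
{
	if (from_sync_list) {
		*sync_pending = true;
	} else if (*sync_pending) {
		plug_finish(plug);
		plug_start(plug);
		*sync_pending = false;
	}
	puts(from_sync_list ? "submit sync bio" : "submit regular bio");
}

int main(void)
{
	struct demo_plug plug = { 0 };
	bool sync_pending = false;

	plug_start(&plug);
	demo_submit(true,  &sync_pending, &plug);   /* sync list          */
	demo_submit(false, &sync_pending, &plug);   /* flushes plug first */
	plug_finish(&plug);
	return 0;
}
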
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 7c12d61ae7ae..6d866db4e177 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -48,6 +48,7 @@ struct btrfs_device {
48 int writeable; 48 int writeable;
49 int in_fs_metadata; 49 int in_fs_metadata;
50 int missing; 50 int missing;
51 int can_discard;
51 52
52 spinlock_t io_lock; 53 spinlock_t io_lock;
53 54
@@ -104,6 +105,7 @@ struct btrfs_fs_devices {
104 u64 rw_devices; 105 u64 rw_devices;
105 u64 missing_devices; 106 u64 missing_devices;
106 u64 total_rw_bytes; 107 u64 total_rw_bytes;
108 u64 num_can_discard;
107 struct block_device *latest_bdev; 109 struct block_device *latest_bdev;
108 110
109 /* all of the devices in the FS, protected by a mutex 111 /* all of the devices in the FS, protected by a mutex
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index d733b9cfea34..69565e5fc6a0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -116,6 +116,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
116 if (ret) 116 if (ret)
117 goto out; 117 goto out;
118 btrfs_release_path(path); 118 btrfs_release_path(path);
119
120 /*
121 * remove the attribute
122 */
123 if (!value)
124 goto out;
119 } 125 }
120 126
121again: 127again:
@@ -158,6 +164,9 @@ out:
158 return ret; 164 return ret;
159} 165}
160 166
167/*
168 * @value: "" makes the attribute to empty, NULL removes it
169 */
161int __btrfs_setxattr(struct btrfs_trans_handle *trans, 170int __btrfs_setxattr(struct btrfs_trans_handle *trans,
162 struct inode *inode, const char *name, 171 struct inode *inode, const char *name,
163 const void *value, size_t size, int flags) 172 const void *value, size_t size, int flags)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fee028b5332e..86c59e16ba74 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1595 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); 1595 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1596 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1596 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1597 *ppath); 1597 *ppath);
1598 } else if (rpath) { 1598 } else if (rpath || rino) {
1599 *ino = rino; 1599 *ino = rino;
1600 *ppath = rpath; 1600 *ppath = rpath;
1601 *pathlen = strlen(rpath); 1601 *pathlen = strlen(rpath);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index d47c5ec7fb1f..88bacaf385d9 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
813 fsc = create_fs_client(fsopt, opt); 813 fsc = create_fs_client(fsopt, opt);
814 if (IS_ERR(fsc)) { 814 if (IS_ERR(fsc)) {
815 res = ERR_CAST(fsc); 815 res = ERR_CAST(fsc);
816 kfree(fsopt); 816 destroy_mount_options(fsopt);
817 kfree(opt); 817 ceph_destroy_options(opt);
818 goto out_final; 818 goto out_final;
819 } 819 }
820 820
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 2fe3cf13b2e9..6d40656e1e29 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -176,7 +176,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
176 176
177#ifdef CONFIG_CIFS_STATS2 177#ifdef CONFIG_CIFS_STATS2
178 seq_printf(m, " In Send: %d In MaxReq Wait: %d", 178 seq_printf(m, " In Send: %d In MaxReq Wait: %d",
179 atomic_read(&server->inSend), 179 atomic_read(&server->in_send),
180 atomic_read(&server->num_waiters)); 180 atomic_read(&server->num_waiters));
181#endif 181#endif
182 182
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 21de1d6d5849..d0f59faefb78 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -991,24 +991,6 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
991 return pntsd; 991 return pntsd;
992} 992}
993 993
994static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
995 struct cifs_ntsd *pnntsd, u32 acllen)
996{
997 int xid, rc;
998 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
999
1000 if (IS_ERR(tlink))
1001 return PTR_ERR(tlink);
1002
1003 xid = GetXid();
1004 rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
1005 FreeXid(xid);
1006 cifs_put_tlink(tlink);
1007
1008 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1009 return rc;
1010}
1011
1012static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, 994static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
1013 struct cifs_ntsd *pnntsd, u32 acllen) 995 struct cifs_ntsd *pnntsd, u32 acllen)
1014{ 996{
@@ -1047,18 +1029,10 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1047 struct inode *inode, const char *path) 1029 struct inode *inode, const char *path)
1048{ 1030{
1049 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1031 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1050 struct cifsFileInfo *open_file;
1051 int rc;
1052 1032
1053 cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); 1033 cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
1054 1034
1055 open_file = find_readable_file(CIFS_I(inode), true); 1035 return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
1056 if (!open_file)
1057 return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
1058
1059 rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen);
1060 cifsFileInfo_put(open_file);
1061 return rc;
1062} 1036}
1063 1037
1064/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ 1038/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e76bfeb68267..30acd22147e1 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -351,9 +351,7 @@ static int
351build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) 351build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
352{ 352{
353 unsigned int dlen; 353 unsigned int dlen;
354 unsigned int wlen; 354 unsigned int size = 2 * sizeof(struct ntlmssp2_name);
355 unsigned int size = 6 * sizeof(struct ntlmssp2_name);
356 __le64 curtime;
357 char *defdmname = "WORKGROUP"; 355 char *defdmname = "WORKGROUP";
358 unsigned char *blobptr; 356 unsigned char *blobptr;
359 struct ntlmssp2_name *attrptr; 357 struct ntlmssp2_name *attrptr;
@@ -365,15 +363,14 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
365 } 363 }
366 364
367 dlen = strlen(ses->domainName); 365 dlen = strlen(ses->domainName);
368 wlen = strlen(ses->server->hostname);
369 366
370 /* The length of this blob is a size which is 367 /*
371 * six times the size of a structure which holds name/size + 368 * The length of this blob is two times the size of a
372 * two times the unicode length of a domain name + 369 * structure (av pair) which holds name/size
373 * two times the unicode length of a server name + 370 * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
374 * size of a timestamp (which is 8 bytes). 371 * unicode length of a netbios domain name
375 */ 372 */
376 ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8; 373 ses->auth_key.len = size + 2 * dlen;
377 ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL); 374 ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
378 if (!ses->auth_key.response) { 375 if (!ses->auth_key.response) {
379 ses->auth_key.len = 0; 376 ses->auth_key.len = 0;
@@ -384,44 +381,15 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
384 blobptr = ses->auth_key.response; 381 blobptr = ses->auth_key.response;
385 attrptr = (struct ntlmssp2_name *) blobptr; 382 attrptr = (struct ntlmssp2_name *) blobptr;
386 383
384 /*
385 * As defined in MS-NTLM 3.3.2, just this av pair field
386 * is sufficient as part of the temp
387 */
387 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); 388 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
388 attrptr->length = cpu_to_le16(2 * dlen); 389 attrptr->length = cpu_to_le16(2 * dlen);
389 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 390 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
390 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); 391 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
391 392
392 blobptr += 2 * dlen;
393 attrptr = (struct ntlmssp2_name *) blobptr;
394
395 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
396 attrptr->length = cpu_to_le16(2 * wlen);
397 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
398 cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
399
400 blobptr += 2 * wlen;
401 attrptr = (struct ntlmssp2_name *) blobptr;
402
403 attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
404 attrptr->length = cpu_to_le16(2 * dlen);
405 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
406 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
407
408 blobptr += 2 * dlen;
409 attrptr = (struct ntlmssp2_name *) blobptr;
410
411 attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
412 attrptr->length = cpu_to_le16(2 * wlen);
413 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
414 cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
415
416 blobptr += 2 * wlen;
417 attrptr = (struct ntlmssp2_name *) blobptr;
418
419 attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
420 attrptr->length = cpu_to_le16(sizeof(__le64));
421 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
422 curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
423 memcpy(blobptr, &curtime, sizeof(__le64));
424
425 return 0; 393 return 0;
426} 394}
427 395
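
The trimmed build_avpair_blob() now emits only the NetBIOS domain-name attribute followed by the EOL terminator, so the allocation is two attribute headers plus the UTF-16 domain string (size + 2 * dlen). A rough layout sketch follows; the 4-byte header struct here is illustrative, not the kernel's ntlmssp2_name definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct av_pair_hdr {
	uint16_t type;
	uint16_t length;	/* payload bytes that follow the header */
};

int main(void)
{
	const char *domain = "WORKGROUP";
	size_t dlen = strlen(domain);

	/* one header for the domain-name pair, one for the EOL pair,
	 * plus the domain name encoded as 2-byte UTF-16 code units */
	size_t blob_len = 2 * sizeof(struct av_pair_hdr) + 2 * dlen;

	printf("target-info blob for \"%s\": %zu bytes\n", domain, blob_len);
	return 0;
}
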
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f93eb948d071..54b8f1e7da94 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -548,6 +548,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
548 struct inode *dir = dentry->d_inode; 548 struct inode *dir = dentry->d_inode;
549 struct dentry *child; 549 struct dentry *child;
550 550
551 if (!dir) {
552 dput(dentry);
553 dentry = ERR_PTR(-ENOENT);
554 break;
555 }
556
551 /* skip separators */ 557 /* skip separators */
552 while (*s == sep) 558 while (*s == sep)
553 s++; 559 s++;
@@ -563,10 +569,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
563 mutex_unlock(&dir->i_mutex); 569 mutex_unlock(&dir->i_mutex);
564 dput(dentry); 570 dput(dentry);
565 dentry = child; 571 dentry = child;
566 if (!dentry->d_inode) {
567 dput(dentry);
568 dentry = ERR_PTR(-ENOENT);
569 }
570 } while (!IS_ERR(dentry)); 572 } while (!IS_ERR(dentry));
571 _FreeXid(xid); 573 _FreeXid(xid);
572 kfree(full_path); 574 kfree(full_path);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index cb71dc1f94d1..95da8027983d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
125extern const struct export_operations cifs_export_ops; 125extern const struct export_operations cifs_export_ops;
126#endif /* CIFS_NFSD_EXPORT */ 126#endif /* CIFS_NFSD_EXPORT */
127 127
128#define CIFS_VERSION "1.74" 128#define CIFS_VERSION "1.75"
129#endif /* _CIFSFS_H */ 129#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 38ce6d44b145..95dad9d14cf1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -291,7 +291,7 @@ struct TCP_Server_Info {
291 struct fscache_cookie *fscache; /* client index cache cookie */ 291 struct fscache_cookie *fscache; /* client index cache cookie */
292#endif 292#endif
293#ifdef CONFIG_CIFS_STATS2 293#ifdef CONFIG_CIFS_STATS2
294 atomic_t inSend; /* requests trying to send */ 294 atomic_t in_send; /* requests trying to send */
295 atomic_t num_waiters; /* blocked waiting to get in sendrecv */ 295 atomic_t num_waiters; /* blocked waiting to get in sendrecv */
296#endif 296#endif
297}; 297};
@@ -672,12 +672,54 @@ struct mid_q_entry {
672 bool multiEnd:1; /* both received */ 672 bool multiEnd:1; /* both received */
673}; 673};
674 674
675struct oplock_q_entry { 675/* Make code in transport.c a little cleaner by moving
676 struct list_head qhead; 676 update of optional stats into function below */
677 struct inode *pinode; 677#ifdef CONFIG_CIFS_STATS2
678 struct cifs_tcon *tcon; 678
679 __u16 netfid; 679static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
680}; 680{
681 atomic_inc(&server->in_send);
682}
683
684static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
685{
686 atomic_dec(&server->in_send);
687}
688
689static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
690{
691 atomic_inc(&server->num_waiters);
692}
693
694static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
695{
696 atomic_dec(&server->num_waiters);
697}
698
699static inline void cifs_save_when_sent(struct mid_q_entry *mid)
700{
701 mid->when_sent = jiffies;
702}
703#else
704static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
705{
706}
707static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
708{
709}
710
711static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
712{
713}
714
715static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
716{
717}
718
719static inline void cifs_save_when_sent(struct mid_q_entry *mid)
720{
721}
722#endif
681 723
682/* for pending dnotify requests */ 724/* for pending dnotify requests */
683struct dir_notify_req { 725struct dir_notify_req {
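
The new cifs_in_send_*, cifs_num_waiters_* and cifs_save_when_sent helpers let transport.c drop its scattered #ifdef CONFIG_CIFS_STATS2 blocks: when the stats option is compiled out the inlines collapse to no-ops and the call sites stay unchanged. A toy version of the same pattern, with invented names and a plain int instead of an atomic counter:

#include <stdio.h>

#define DEMO_STATS 1	/* flip to 0 to compile the counters away */

struct demo_server { int in_send; };

#if DEMO_STATS
static inline void demo_in_send_inc(struct demo_server *s) { s->in_send++; }
static inline void demo_in_send_dec(struct demo_server *s) { s->in_send--; }
#else
static inline void demo_in_send_inc(struct demo_server *s) { (void)s; }
static inline void demo_in_send_dec(struct demo_server *s) { (void)s; }
#endif

static void demo_send(struct demo_server *s)
{
	demo_in_send_inc(s);
	/* ... actually transmit the request here ... */
	demo_in_send_dec(s);
}

int main(void)
{
	struct demo_server srv = { 0 };

	demo_send(&srv);
	printf("in_send after send: %d\n", srv.in_send);
	return 0;
}
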
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index aac37d99a487..a80f7bd97b90 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
4079 T2_FNEXT_RSP_PARMS *parms; 4079 T2_FNEXT_RSP_PARMS *parms;
4080 char *response_data; 4080 char *response_data;
4081 int rc = 0; 4081 int rc = 0;
4082 int bytes_returned, name_len; 4082 int bytes_returned;
4083 unsigned int name_len;
4083 __u16 params, byte_count; 4084 __u16 params, byte_count;
4084 4085
4085 cFYI(1, "In FindNext"); 4086 cFYI(1, "In FindNext");
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 80c2e3add3a2..f4af4cc37500 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1298,7 +1298,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1298 /* ignore */ 1298 /* ignore */
1299 } else if (strnicmp(data, "guest", 5) == 0) { 1299 } else if (strnicmp(data, "guest", 5) == 0) {
1300 /* ignore */ 1300 /* ignore */
1301 } else if (strnicmp(data, "rw", 2) == 0) { 1301 } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
1302 /* ignore */ 1302 /* ignore */
1303 } else if (strnicmp(data, "ro", 2) == 0) { 1303 } else if (strnicmp(data, "ro", 2) == 0) {
1304 /* ignore */ 1304 /* ignore */
@@ -1401,7 +1401,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1401 vol->server_ino = 1; 1401 vol->server_ino = 1;
1402 } else if (strnicmp(data, "noserverino", 9) == 0) { 1402 } else if (strnicmp(data, "noserverino", 9) == 0) {
1403 vol->server_ino = 0; 1403 vol->server_ino = 0;
1404 } else if (strnicmp(data, "rwpidforward", 4) == 0) { 1404 } else if (strnicmp(data, "rwpidforward", 12) == 0) {
1405 vol->rwpidforward = 1; 1405 vol->rwpidforward = 1;
1406 } else if (strnicmp(data, "cifsacl", 7) == 0) { 1406 } else if (strnicmp(data, "cifsacl", 7) == 0) {
1407 vol->cifs_acl = 1; 1407 vol->cifs_acl = 1;
@@ -2878,7 +2878,8 @@ cleanup_volume_info_contents(struct smb_vol *volume_info)
2878 kfree(volume_info->username); 2878 kfree(volume_info->username);
2879 kzfree(volume_info->password); 2879 kzfree(volume_info->password);
2880 kfree(volume_info->UNC); 2880 kfree(volume_info->UNC);
2881 kfree(volume_info->UNCip); 2881 if (volume_info->UNCip != volume_info->UNC + 2)
2882 kfree(volume_info->UNCip);
2882 kfree(volume_info->domainname); 2883 kfree(volume_info->domainname);
2883 kfree(volume_info->iocharset); 2884 kfree(volume_info->iocharset);
2884 kfree(volume_info->prepath); 2885 kfree(volume_info->prepath);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ae576fbb5142..72d448bf96ce 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -105,8 +105,8 @@ cifs_bp_rename_retry:
105 } 105 }
106 rcu_read_unlock(); 106 rcu_read_unlock();
107 if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { 107 if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
108 cERROR(1, "did not end path lookup where expected namelen is %d", 108 cFYI(1, "did not end path lookup where expected. namelen=%d "
109 namelen); 109 "dfsplen=%d", namelen, dfsplen);
110 /* presumably this is only possible if racing with a rename 110 /* presumably this is only possible if racing with a rename
111 of one of the parent directories (we can not lock the dentries 111 of one of the parent directories (we can not lock the dentries
112 above us to prevent this, but retrying should be harmless) */ 112 above us to prevent this, but retrying should be harmless) */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c1b9c4b10739..10ca6b2c26b7 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -266,15 +266,11 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
266 while (1) { 266 while (1) {
267 if (atomic_read(&server->inFlight) >= cifs_max_pending) { 267 if (atomic_read(&server->inFlight) >= cifs_max_pending) {
268 spin_unlock(&GlobalMid_Lock); 268 spin_unlock(&GlobalMid_Lock);
269#ifdef CONFIG_CIFS_STATS2 269 cifs_num_waiters_inc(server);
270 atomic_inc(&server->num_waiters);
271#endif
272 wait_event(server->request_q, 270 wait_event(server->request_q,
273 atomic_read(&server->inFlight) 271 atomic_read(&server->inFlight)
274 < cifs_max_pending); 272 < cifs_max_pending);
275#ifdef CONFIG_CIFS_STATS2 273 cifs_num_waiters_dec(server);
276 atomic_dec(&server->num_waiters);
277#endif
278 spin_lock(&GlobalMid_Lock); 274 spin_lock(&GlobalMid_Lock);
279 } else { 275 } else {
280 if (server->tcpStatus == CifsExiting) { 276 if (server->tcpStatus == CifsExiting) {
@@ -381,15 +377,13 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
381 mid->callback = callback; 377 mid->callback = callback;
382 mid->callback_data = cbdata; 378 mid->callback_data = cbdata;
383 mid->midState = MID_REQUEST_SUBMITTED; 379 mid->midState = MID_REQUEST_SUBMITTED;
384#ifdef CONFIG_CIFS_STATS2 380
385 atomic_inc(&server->inSend); 381 cifs_in_send_inc(server);
386#endif
387 rc = smb_sendv(server, iov, nvec); 382 rc = smb_sendv(server, iov, nvec);
388#ifdef CONFIG_CIFS_STATS2 383 cifs_in_send_dec(server);
389 atomic_dec(&server->inSend); 384 cifs_save_when_sent(mid);
390 mid->when_sent = jiffies;
391#endif
392 mutex_unlock(&server->srv_mutex); 385 mutex_unlock(&server->srv_mutex);
386
393 if (rc) 387 if (rc)
394 goto out_err; 388 goto out_err;
395 389
@@ -575,14 +569,10 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
575 } 569 }
576 570
577 midQ->midState = MID_REQUEST_SUBMITTED; 571 midQ->midState = MID_REQUEST_SUBMITTED;
578#ifdef CONFIG_CIFS_STATS2 572 cifs_in_send_inc(ses->server);
579 atomic_inc(&ses->server->inSend);
580#endif
581 rc = smb_sendv(ses->server, iov, n_vec); 573 rc = smb_sendv(ses->server, iov, n_vec);
582#ifdef CONFIG_CIFS_STATS2 574 cifs_in_send_dec(ses->server);
583 atomic_dec(&ses->server->inSend); 575 cifs_save_when_sent(midQ);
584 midQ->when_sent = jiffies;
585#endif
586 576
587 mutex_unlock(&ses->server->srv_mutex); 577 mutex_unlock(&ses->server->srv_mutex);
588 578
@@ -703,14 +693,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
703 } 693 }
704 694
705 midQ->midState = MID_REQUEST_SUBMITTED; 695 midQ->midState = MID_REQUEST_SUBMITTED;
706#ifdef CONFIG_CIFS_STATS2 696
707 atomic_inc(&ses->server->inSend); 697 cifs_in_send_inc(ses->server);
708#endif
709 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); 698 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
710#ifdef CONFIG_CIFS_STATS2 699 cifs_in_send_dec(ses->server);
711 atomic_dec(&ses->server->inSend); 700 cifs_save_when_sent(midQ);
712 midQ->when_sent = jiffies;
713#endif
714 mutex_unlock(&ses->server->srv_mutex); 701 mutex_unlock(&ses->server->srv_mutex);
715 702
716 if (rc < 0) 703 if (rc < 0)
@@ -843,14 +830,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
843 } 830 }
844 831
845 midQ->midState = MID_REQUEST_SUBMITTED; 832 midQ->midState = MID_REQUEST_SUBMITTED;
846#ifdef CONFIG_CIFS_STATS2 833 cifs_in_send_inc(ses->server);
847 atomic_inc(&ses->server->inSend);
848#endif
849 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); 834 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
850#ifdef CONFIG_CIFS_STATS2 835 cifs_in_send_dec(ses->server);
851 atomic_dec(&ses->server->inSend); 836 cifs_save_when_sent(midQ);
852 midQ->when_sent = jiffies;
853#endif
854 mutex_unlock(&ses->server->srv_mutex); 837 mutex_unlock(&ses->server->srv_mutex);
855 838
856 if (rc < 0) { 839 if (rc < 0) {
diff --git a/fs/compat.c b/fs/compat.c
index 0b48d018e38a..58b1da459893 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1675,11 +1675,6 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
1675} 1675}
1676#endif /* HAVE_SET_RESTORE_SIGMASK */ 1676#endif /* HAVE_SET_RESTORE_SIGMASK */
1677 1677
1678long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
1679{
1680 return sys_ni_syscall();
1681}
1682
1683#ifdef CONFIG_EPOLL 1678#ifdef CONFIG_EPOLL
1684 1679
1685#ifdef HAVE_SET_RESTORE_SIGMASK 1680#ifdef HAVE_SET_RESTORE_SIGMASK
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 04da6acde85d..12661e1deedd 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1134 return bh; 1134 return bh;
1135 if (buffer_uptodate(bh)) 1135 if (buffer_uptodate(bh))
1136 return bh; 1136 return bh;
1137 ll_rw_block(READ_META, 1, &bh); 1137 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
1138 wait_on_buffer(bh); 1138 wait_on_buffer(bh);
1139 if (buffer_uptodate(bh)) 1139 if (buffer_uptodate(bh))
1140 return bh; 1140 return bh;
@@ -2807,7 +2807,7 @@ make_io:
2807 trace_ext3_load_inode(inode); 2807 trace_ext3_load_inode(inode);
2808 get_bh(bh); 2808 get_bh(bh);
2809 bh->b_end_io = end_buffer_read_sync; 2809 bh->b_end_io = end_buffer_read_sync;
2810 submit_bh(READ_META, bh); 2810 submit_bh(READ | REQ_META | REQ_PRIO, bh);
2811 wait_on_buffer(bh); 2811 wait_on_buffer(bh);
2812 if (!buffer_uptodate(bh)) { 2812 if (!buffer_uptodate(bh)) {
2813 ext3_error(inode->i_sb, "ext3_get_inode_loc", 2813 ext3_error(inode->i_sb, "ext3_get_inode_loc",
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 5571708b6a58..0629e09f6511 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -922,7 +922,8 @@ restart:
922 bh = ext3_getblk(NULL, dir, b++, 0, &err); 922 bh = ext3_getblk(NULL, dir, b++, 0, &err);
923 bh_use[ra_max] = bh; 923 bh_use[ra_max] = bh;
924 if (bh) 924 if (bh)
925 ll_rw_block(READ_META, 1, &bh); 925 ll_rw_block(READ | REQ_META | REQ_PRIO,
926 1, &bh);
926 } 927 }
927 } 928 }
928 if ((bh = bh_use[ra_ptr++]) == NULL) 929 if ((bh = bh_use[ra_ptr++]) == NULL)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e717dfd2f2b4..b7d7bd0f066e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -175,6 +175,7 @@ struct mpage_da_data {
175 */ 175 */
176#define EXT4_IO_END_UNWRITTEN 0x0001 176#define EXT4_IO_END_UNWRITTEN 0x0001
177#define EXT4_IO_END_ERROR 0x0002 177#define EXT4_IO_END_ERROR 0x0002
178#define EXT4_IO_END_QUEUED 0x0004
178 179
179struct ext4_io_page { 180struct ext4_io_page {
180 struct page *p_page; 181 struct page *p_page;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index bb85757689b6..5802fa1dab18 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -289,10 +289,10 @@ static inline int ext4_should_order_data(struct inode *inode)
289 289
290static inline int ext4_should_writeback_data(struct inode *inode) 290static inline int ext4_should_writeback_data(struct inode *inode)
291{ 291{
292 if (!S_ISREG(inode->i_mode))
293 return 0;
294 if (EXT4_JOURNAL(inode) == NULL) 292 if (EXT4_JOURNAL(inode) == NULL)
295 return 1; 293 return 1;
294 if (!S_ISREG(inode->i_mode))
295 return 0;
296 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) 296 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
297 return 0; 297 return 0;
298 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 298 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index b8602cde5b5a..0962642119c0 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -800,12 +800,17 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
800 } 800 }
801 801
802retry: 802retry:
803 if (rw == READ && ext4_should_dioread_nolock(inode)) 803 if (rw == READ && ext4_should_dioread_nolock(inode)) {
804 if (unlikely(!list_empty(&ei->i_completed_io_list))) {
805 mutex_lock(&inode->i_mutex);
806 ext4_flush_completed_IO(inode);
807 mutex_unlock(&inode->i_mutex);
808 }
804 ret = __blockdev_direct_IO(rw, iocb, inode, 809 ret = __blockdev_direct_IO(rw, iocb, inode,
805 inode->i_sb->s_bdev, iov, 810 inode->i_sb->s_bdev, iov,
806 offset, nr_segs, 811 offset, nr_segs,
807 ext4_get_block, NULL, NULL, 0); 812 ext4_get_block, NULL, NULL, 0);
808 else { 813 } else {
809 ret = blockdev_direct_IO(rw, iocb, inode, iov, 814 ret = blockdev_direct_IO(rw, iocb, inode, iov,
810 offset, nr_segs, ext4_get_block); 815 offset, nr_segs, ext4_get_block);
811 816
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d47264cafee0..986e2388f031 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -120,6 +120,9 @@ void ext4_evict_inode(struct inode *inode)
120 int err; 120 int err;
121 121
122 trace_ext4_evict_inode(inode); 122 trace_ext4_evict_inode(inode);
123
124 ext4_ioend_wait(inode);
125
123 if (inode->i_nlink) { 126 if (inode->i_nlink) {
124 /* 127 /*
125 * When journalling data dirty buffers are tracked only in the 128 * When journalling data dirty buffers are tracked only in the
@@ -644,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
644 return bh; 647 return bh;
645 if (buffer_uptodate(bh)) 648 if (buffer_uptodate(bh))
646 return bh; 649 return bh;
647 ll_rw_block(READ_META, 1, &bh); 650 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
648 wait_on_buffer(bh); 651 wait_on_buffer(bh);
649 if (buffer_uptodate(bh)) 652 if (buffer_uptodate(bh))
650 return bh; 653 return bh;
@@ -983,6 +986,8 @@ static int ext4_journalled_write_end(struct file *file,
983 from = pos & (PAGE_CACHE_SIZE - 1); 986 from = pos & (PAGE_CACHE_SIZE - 1);
984 to = from + len; 987 to = from + len;
985 988
989 BUG_ON(!ext4_handle_valid(handle));
990
986 if (copied < len) { 991 if (copied < len) {
987 if (!PageUptodate(page)) 992 if (!PageUptodate(page))
988 copied = 0; 993 copied = 0;
@@ -1283,7 +1288,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1283 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 1288 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
1284 err = ext4_bio_write_page(&io_submit, page, 1289 err = ext4_bio_write_page(&io_submit, page,
1285 len, mpd->wbc); 1290 len, mpd->wbc);
1286 else 1291 else if (buffer_uninit(page_bufs)) {
1292 ext4_set_bh_endio(page_bufs, inode);
1293 err = block_write_full_page_endio(page,
1294 noalloc_get_block_write,
1295 mpd->wbc, ext4_end_io_buffer_write);
1296 } else
1287 err = block_write_full_page(page, 1297 err = block_write_full_page(page,
1288 noalloc_get_block_write, mpd->wbc); 1298 noalloc_get_block_write, mpd->wbc);
1289 1299
@@ -1699,6 +1709,8 @@ static int __ext4_journalled_writepage(struct page *page,
1699 goto out; 1709 goto out;
1700 } 1710 }
1701 1711
1712 BUG_ON(!ext4_handle_valid(handle));
1713
1702 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 1714 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1703 do_journal_get_write_access); 1715 do_journal_get_write_access);
1704 1716
@@ -2668,8 +2680,15 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2668 goto out; 2680 goto out;
2669 } 2681 }
2670 2682
2671 io_end->flag = EXT4_IO_END_UNWRITTEN; 2683 /*
2684 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
2685 * but being more careful is always safe for the future change.
2686 */
2672 inode = io_end->inode; 2687 inode = io_end->inode;
2688 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2689 io_end->flag |= EXT4_IO_END_UNWRITTEN;
2690 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
2691 }
2673 2692
2674 /* Add the io_end to per-inode completed io list*/ 2693 /* Add the io_end to per-inode completed io list*/
2675 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 2694 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -3279,7 +3298,7 @@ make_io:
3279 trace_ext4_load_inode(inode); 3298 trace_ext4_load_inode(inode);
3280 get_bh(bh); 3299 get_bh(bh);
3281 bh->b_end_io = end_buffer_read_sync; 3300 bh->b_end_io = end_buffer_read_sync;
3282 submit_bh(READ_META, bh); 3301 submit_bh(READ | REQ_META | REQ_PRIO, bh);
3283 wait_on_buffer(bh); 3302 wait_on_buffer(bh);
3284 if (!buffer_uptodate(bh)) { 3303 if (!buffer_uptodate(bh)) {
3285 EXT4_ERROR_INODE_BLOCK(inode, block, 3304 EXT4_ERROR_INODE_BLOCK(inode, block,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f8068c7bae9f..1c924faeb6c8 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -922,7 +922,8 @@ restart:
922 bh = ext4_getblk(NULL, dir, b++, 0, &err); 922 bh = ext4_getblk(NULL, dir, b++, 0, &err);
923 bh_use[ra_max] = bh; 923 bh_use[ra_max] = bh;
924 if (bh) 924 if (bh)
925 ll_rw_block(READ_META, 1, &bh); 925 ll_rw_block(READ | REQ_META | REQ_PRIO,
926 1, &bh);
926 } 927 }
927 } 928 }
928 if ((bh = bh_use[ra_ptr++]) == NULL) 929 if ((bh = bh_use[ra_ptr++]) == NULL)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 430c401d0895..92f38ee13f8a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -142,7 +142,23 @@ static void ext4_end_io_work(struct work_struct *work)
142 unsigned long flags; 142 unsigned long flags;
143 int ret; 143 int ret;
144 144
145 mutex_lock(&inode->i_mutex); 145 if (!mutex_trylock(&inode->i_mutex)) {
146 /*
147 * Requeue the work instead of waiting so that the work
148 * items queued after this can be processed.
149 */
150 queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
151 /*
                                           152                  * To prevent the ext4-dio-unwritten thread from repeatedly
                                           153                  * requeueing end_io requests and occupying the cpu for too long,
154 * yield the cpu if it sees an end_io request that has already
155 * been requeued.
156 */
157 if (io->flag & EXT4_IO_END_QUEUED)
158 yield();
159 io->flag |= EXT4_IO_END_QUEUED;
160 return;
161 }
146 ret = ext4_end_io_nolock(io); 162 ret = ext4_end_io_nolock(io);
147 if (ret < 0) { 163 if (ret < 0) {
148 mutex_unlock(&inode->i_mutex); 164 mutex_unlock(&inode->i_mutex);
@@ -334,8 +350,10 @@ submit_and_retry:
334 if ((io_end->num_io_pages >= MAX_IO_PAGES) && 350 if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
335 (io_end->pages[io_end->num_io_pages-1] != io_page)) 351 (io_end->pages[io_end->num_io_pages-1] != io_page))
336 goto submit_and_retry; 352 goto submit_and_retry;
337 if (buffer_uninit(bh)) 353 if (buffer_uninit(bh) && !(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
338 io->io_end->flag |= EXT4_IO_END_UNWRITTEN; 354 io_end->flag |= EXT4_IO_END_UNWRITTEN;
355 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
356 }
339 io->io_end->size += bh->b_size; 357 io->io_end->size += bh->b_size;
340 io->io_next_block++; 358 io->io_next_block++;
341 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); 359 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
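
ext4_end_io_work() above switches from mutex_lock() to a trylock: if the inode mutex is busy the work item is requeued (and yields once it has already been requeued) instead of blocking the workqueue thread. A minimal pthread sketch of that pattern; the types and helper names are made up for illustration only.

/* build with: cc -pthread demo.c */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_io_end { bool already_requeued; };

static void demo_requeue(struct demo_io_end *io) { (void)io; puts("requeued"); }

/* Returns true when the completion was processed, false when it had to
 * be requeued because the inode mutex was busy. */
static bool process_io_end(pthread_mutex_t *inode_mutex, struct demo_io_end *io)
{
	if (pthread_mutex_trylock(inode_mutex) != 0) {
		demo_requeue(io);
		if (io->already_requeued)
			sched_yield();	/* don't spin on a persistently busy lock */
		io->already_requeued = true;
		return false;
	}
	/* ... convert unwritten extents under the mutex here ... */
	pthread_mutex_unlock(inode_mutex);
	return true;
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	struct demo_io_end io = { false };

	printf("processed: %d\n", process_io_end(&m, &io));
	return 0;
}
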
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4687fea0c00f..44d0c8db2239 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -919,7 +919,6 @@ static void ext4_i_callback(struct rcu_head *head)
919 919
920static void ext4_destroy_inode(struct inode *inode) 920static void ext4_destroy_inode(struct inode *inode)
921{ 921{
922 ext4_ioend_wait(inode);
923 if (!list_empty(&(EXT4_I(inode)->i_orphan))) { 922 if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
924 ext4_msg(inode->i_sb, KERN_ERR, 923 ext4_msg(inode->i_sb, KERN_ERR,
925 "Inode %lu (%p): orphan list check failed!", 924 "Inode %lu (%p): orphan list check failed!",
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4ad64732cbce..5efbd5d7701a 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1231,7 +1231,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
1231 struct super_block *sb = dir->i_sb; 1231 struct super_block *sb = dir->i_sb;
1232 struct msdos_sb_info *sbi = MSDOS_SB(sb); 1232 struct msdos_sb_info *sbi = MSDOS_SB(sb);
1233 struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ 1233 struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
1234 struct msdos_dir_entry *de; 1234 struct msdos_dir_entry *uninitialized_var(de);
1235 int err, free_slots, i, nr_bhs; 1235 int err, free_slots, i, nr_bhs;
1236 loff_t pos, i_pos; 1236 loff_t pos, i_pos;
1237 1237
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5942fec22c65..1726d7303047 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1188,9 +1188,9 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
1188out: 1188out:
1189 /* UTF-8 doesn't provide FAT semantics */ 1189 /* UTF-8 doesn't provide FAT semantics */
1190 if (!strcmp(opts->iocharset, "utf8")) { 1190 if (!strcmp(opts->iocharset, "utf8")) {
1191 fat_msg(sb, KERN_ERR, "utf8 is not a recommended IO charset" 1191 fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
1192 " for FAT filesystems, filesystem will be " 1192 " for FAT filesystems, filesystem will be "
1193 "case sensitive!\n"); 1193 "case sensitive!");
1194 } 1194 }
1195 1195
1196 /* If user doesn't specify allow_utime, it's initialized from dmask. */ 1196 /* If user doesn't specify allow_utime, it's initialized from dmask. */
@@ -1367,6 +1367,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1367 sbi->free_clusters = -1; /* Don't know yet */ 1367 sbi->free_clusters = -1; /* Don't know yet */
1368 sbi->free_clus_valid = 0; 1368 sbi->free_clus_valid = 0;
1369 sbi->prev_free = FAT_START_ENT; 1369 sbi->prev_free = FAT_START_ENT;
1370 sb->s_maxbytes = 0xffffffff;
1370 1371
1371 if (!sbi->fat_length && b->fat32_length) { 1372 if (!sbi->fat_length && b->fat32_length) {
1372 struct fat_boot_fsinfo *fsinfo; 1373 struct fat_boot_fsinfo *fsinfo;
@@ -1377,8 +1378,6 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1377 sbi->fat_length = le32_to_cpu(b->fat32_length); 1378 sbi->fat_length = le32_to_cpu(b->fat32_length);
1378 sbi->root_cluster = le32_to_cpu(b->root_cluster); 1379 sbi->root_cluster = le32_to_cpu(b->root_cluster);
1379 1380
1380 sb->s_maxbytes = 0xffffffff;
1381
1382 /* MC - if info_sector is 0, don't multiply by 0 */ 1381 /* MC - if info_sector is 0, don't multiply by 0 */
1383 sbi->fsinfo_sector = le16_to_cpu(b->info_sector); 1382 sbi->fsinfo_sector = le16_to_cpu(b->info_sector);
1384 if (sbi->fsinfo_sector == 0) 1383 if (sbi->fsinfo_sector == 0)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 640fc229df10..5cb8614508c3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
258 forget->forget_one.nlookup = nlookup; 258 forget->forget_one.nlookup = nlookup;
259 259
260 spin_lock(&fc->lock); 260 spin_lock(&fc->lock);
261 fc->forget_list_tail->next = forget; 261 if (fc->connected) {
262 fc->forget_list_tail = forget; 262 fc->forget_list_tail->next = forget;
263 wake_up(&fc->waitq); 263 fc->forget_list_tail = forget;
264 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 264 wake_up(&fc->waitq);
265 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
266 } else {
267 kfree(forget);
268 }
265 spin_unlock(&fc->lock); 269 spin_unlock(&fc->lock);
266} 270}
267 271
@@ -1358,6 +1362,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1358 if (outarg.namelen > FUSE_NAME_MAX) 1362 if (outarg.namelen > FUSE_NAME_MAX)
1359 goto err; 1363 goto err;
1360 1364
1365 err = -EINVAL;
1366 if (size != sizeof(outarg) + outarg.namelen + 1)
1367 goto err;
1368
1361 name.name = buf; 1369 name.name = buf;
1362 name.len = outarg.namelen; 1370 name.len = outarg.namelen;
1363 err = fuse_copy_one(cs, buf, outarg.namelen + 1); 1371 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d480d9af46c9..594f07a81c28 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/compat.h> 16#include <linux/compat.h>
17#include <linux/swap.h>
17 18
18static const struct file_operations fuse_direct_io_file_operations; 19static const struct file_operations fuse_direct_io_file_operations;
19 20
@@ -245,6 +246,12 @@ void fuse_release_common(struct file *file, int opcode)
245 req = ff->reserved_req; 246 req = ff->reserved_req;
246 fuse_prepare_release(ff, file->f_flags, opcode); 247 fuse_prepare_release(ff, file->f_flags, opcode);
247 248
249 if (ff->flock) {
250 struct fuse_release_in *inarg = &req->misc.release.in;
251 inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
252 inarg->lock_owner = fuse_lock_owner_id(ff->fc,
253 (fl_owner_t) file);
254 }
248 /* Hold vfsmount and dentry until release is finished */ 255 /* Hold vfsmount and dentry until release is finished */
249 path_get(&file->f_path); 256 path_get(&file->f_path);
250 req->misc.release.path = file->f_path; 257 req->misc.release.path = file->f_path;
@@ -755,18 +762,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
755 return req->misc.write.out.size; 762 return req->misc.write.out.size;
756} 763}
757 764
758static int fuse_write_begin(struct file *file, struct address_space *mapping,
759 loff_t pos, unsigned len, unsigned flags,
760 struct page **pagep, void **fsdata)
761{
762 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
763
764 *pagep = grab_cache_page_write_begin(mapping, index, flags);
765 if (!*pagep)
766 return -ENOMEM;
767 return 0;
768}
769
770void fuse_write_update_size(struct inode *inode, loff_t pos) 765void fuse_write_update_size(struct inode *inode, loff_t pos)
771{ 766{
772 struct fuse_conn *fc = get_fuse_conn(inode); 767 struct fuse_conn *fc = get_fuse_conn(inode);
@@ -779,62 +774,6 @@ void fuse_write_update_size(struct inode *inode, loff_t pos)
779 spin_unlock(&fc->lock); 774 spin_unlock(&fc->lock);
780} 775}
781 776
782static int fuse_buffered_write(struct file *file, struct inode *inode,
783 loff_t pos, unsigned count, struct page *page)
784{
785 int err;
786 size_t nres;
787 struct fuse_conn *fc = get_fuse_conn(inode);
788 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
789 struct fuse_req *req;
790
791 if (is_bad_inode(inode))
792 return -EIO;
793
794 /*
795 * Make sure writepages on the same page are not mixed up with
796 * plain writes.
797 */
798 fuse_wait_on_page_writeback(inode, page->index);
799
800 req = fuse_get_req(fc);
801 if (IS_ERR(req))
802 return PTR_ERR(req);
803
804 req->in.argpages = 1;
805 req->num_pages = 1;
806 req->pages[0] = page;
807 req->page_offset = offset;
808 nres = fuse_send_write(req, file, pos, count, NULL);
809 err = req->out.h.error;
810 fuse_put_request(fc, req);
811 if (!err && !nres)
812 err = -EIO;
813 if (!err) {
814 pos += nres;
815 fuse_write_update_size(inode, pos);
816 if (count == PAGE_CACHE_SIZE)
817 SetPageUptodate(page);
818 }
819 fuse_invalidate_attr(inode);
820 return err ? err : nres;
821}
822
823static int fuse_write_end(struct file *file, struct address_space *mapping,
824 loff_t pos, unsigned len, unsigned copied,
825 struct page *page, void *fsdata)
826{
827 struct inode *inode = mapping->host;
828 int res = 0;
829
830 if (copied)
831 res = fuse_buffered_write(file, inode, pos, copied, page);
832
833 unlock_page(page);
834 page_cache_release(page);
835 return res;
836}
837
838static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, 777static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
839 struct inode *inode, loff_t pos, 778 struct inode *inode, loff_t pos,
840 size_t count) 779 size_t count)
@@ -908,6 +847,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
908 pagefault_enable(); 847 pagefault_enable();
909 flush_dcache_page(page); 848 flush_dcache_page(page);
910 849
850 mark_page_accessed(page);
851
911 if (!tmp) { 852 if (!tmp) {
912 unlock_page(page); 853 unlock_page(page);
913 page_cache_release(page); 854 page_cache_release(page);
@@ -1559,11 +1500,14 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
1559 struct fuse_conn *fc = get_fuse_conn(inode); 1500 struct fuse_conn *fc = get_fuse_conn(inode);
1560 int err; 1501 int err;
1561 1502
1562 if (fc->no_lock) { 1503 if (fc->no_flock) {
1563 err = flock_lock_file_wait(file, fl); 1504 err = flock_lock_file_wait(file, fl);
1564 } else { 1505 } else {
1506 struct fuse_file *ff = file->private_data;
1507
1565 /* emulate flock with POSIX locks */ 1508 /* emulate flock with POSIX locks */
1566 fl->fl_owner = (fl_owner_t) file; 1509 fl->fl_owner = (fl_owner_t) file;
1510 ff->flock = true;
1567 err = fuse_setlk(file, fl, 1); 1511 err = fuse_setlk(file, fl, 1);
1568 } 1512 }
1569 1513
@@ -2201,8 +2145,6 @@ static const struct address_space_operations fuse_file_aops = {
2201 .readpage = fuse_readpage, 2145 .readpage = fuse_readpage,
2202 .writepage = fuse_writepage, 2146 .writepage = fuse_writepage,
2203 .launder_page = fuse_launder_page, 2147 .launder_page = fuse_launder_page,
2204 .write_begin = fuse_write_begin,
2205 .write_end = fuse_write_end,
2206 .readpages = fuse_readpages, 2148 .readpages = fuse_readpages,
2207 .set_page_dirty = __set_page_dirty_nobuffers, 2149 .set_page_dirty = __set_page_dirty_nobuffers,
2208 .bmap = fuse_bmap, 2150 .bmap = fuse_bmap,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index c6aa2d4b8517..cf6db0a93219 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -135,6 +135,9 @@ struct fuse_file {
135 135
136 /** Wait queue head for poll */ 136 /** Wait queue head for poll */
137 wait_queue_head_t poll_wait; 137 wait_queue_head_t poll_wait;
138
139 /** Has flock been performed on this file? */
140 bool flock:1;
138}; 141};
139 142
140/** One input argument of a request */ 143/** One input argument of a request */
@@ -448,7 +451,7 @@ struct fuse_conn {
448 /** Is removexattr not implemented by fs? */ 451 /** Is removexattr not implemented by fs? */
449 unsigned no_removexattr:1; 452 unsigned no_removexattr:1;
450 453
451 /** Are file locking primitives not implemented by fs? */ 454 /** Are posix file locking primitives not implemented by fs? */
452 unsigned no_lock:1; 455 unsigned no_lock:1;
453 456
454 /** Is access not implemented by fs? */ 457 /** Is access not implemented by fs? */
@@ -472,6 +475,9 @@ struct fuse_conn {
472 /** Don't apply umask to creation modes */ 475 /** Don't apply umask to creation modes */
473 unsigned dont_mask:1; 476 unsigned dont_mask:1;
474 477
478 /** Are BSD file locking primitives not implemented by fs? */
479 unsigned no_flock:1;
480
475 /** The number of requests waiting for completion */ 481 /** The number of requests waiting for completion */
476 atomic_t num_waiting; 482 atomic_t num_waiting;
477 483
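Illustrative sketch (not part of the diff): the fuse hunks above split BSD flock support out of the POSIX-lock path — a per-connection no_flock bit selects local-only flock handling, and a new per-file flock bit records that a BSD lock was taken. A minimal stand-alone C model of that dispatch, using made-up names (struct conn, struct ffile, flock_path), might look like this:

    /* Stand-alone model of the flock dispatch added above; names are
     * illustrative, not the kernel's. Build with: cc -o flock_model flock_model.c */
    #include <stdbool.h>
    #include <stdio.h>

    struct conn  { bool no_flock; };          /* server lacks flock support      */
    struct ffile { struct conn *fc; bool flock_taken; };

    /* Returns a label describing which path the kernel-side code would take. */
    static const char *flock_path(struct ffile *ff)
    {
        if (ff->fc->no_flock)
            return "local flock_lock_file_wait() only";
        ff->flock_taken = true;               /* remembered for the release path */
        return "emulated via a POSIX lock request to the server";
    }

    int main(void)
    {
        struct conn old_server = { .no_flock = true };
        struct conn new_server = { .no_flock = false };
        struct ffile a = { .fc = &old_server }, b = { .fc = &new_server };

        printf("old server: %s\n", flock_path(&a));
        printf("new server: %s (flock_taken=%d)\n", flock_path(&b), b.flock_taken);
        return 0;
    }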
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 38f84cd48b67..add96f6ffda5 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -71,7 +71,7 @@ struct fuse_mount_data {
71 unsigned blksize; 71 unsigned blksize;
72}; 72};
73 73
74struct fuse_forget_link *fuse_alloc_forget() 74struct fuse_forget_link *fuse_alloc_forget(void)
75{ 75{
76 return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); 76 return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
77} 77}
@@ -809,6 +809,13 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
809 fc->async_read = 1; 809 fc->async_read = 1;
810 if (!(arg->flags & FUSE_POSIX_LOCKS)) 810 if (!(arg->flags & FUSE_POSIX_LOCKS))
811 fc->no_lock = 1; 811 fc->no_lock = 1;
812 if (arg->minor >= 17) {
813 if (!(arg->flags & FUSE_FLOCK_LOCKS))
814 fc->no_flock = 1;
815 } else {
816 if (!(arg->flags & FUSE_POSIX_LOCKS))
817 fc->no_flock = 1;
818 }
812 if (arg->flags & FUSE_ATOMIC_O_TRUNC) 819 if (arg->flags & FUSE_ATOMIC_O_TRUNC)
813 fc->atomic_o_trunc = 1; 820 fc->atomic_o_trunc = 1;
814 if (arg->minor >= 9) { 821 if (arg->minor >= 9) {
@@ -823,6 +830,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
823 } else { 830 } else {
824 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 831 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
825 fc->no_lock = 1; 832 fc->no_lock = 1;
833 fc->no_flock = 1;
826 } 834 }
827 835
828 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); 836 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
@@ -843,7 +851,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
843 arg->minor = FUSE_KERNEL_MINOR_VERSION; 851 arg->minor = FUSE_KERNEL_MINOR_VERSION;
844 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 852 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
845 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | 853 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
846 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; 854 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
855 FUSE_FLOCK_LOCKS;
847 req->in.h.opcode = FUSE_INIT; 856 req->in.h.opcode = FUSE_INIT;
848 req->in.numargs = 1; 857 req->in.numargs = 1;
849 req->in.args[0].size = sizeof(*arg); 858 req->in.args[0].size = sizeof(*arg);
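Illustrative sketch (not part of the diff): process_init_reply() above gates the new capability on the protocol minor version — servers at minor 17 or later must advertise FUSE_FLOCK_LOCKS explicitly, while older servers are assumed to handle flock whenever they handle POSIX locks. A stand-alone model of that version-gated check, with placeholder flag values rather than the real ABI bits, could be:

    /* Stand-alone sketch of the version-gated capability check; flag values
     * are placeholders, not the fuse ABI's. */
    #include <stdbool.h>
    #include <stdio.h>

    #define POSIX_LOCKS  (1u << 0)
    #define FLOCK_LOCKS  (1u << 1)

    /* Returns true when the kernel must fall back to local-only flock. */
    static bool no_flock(unsigned minor, unsigned flags)
    {
        if (minor >= 17)                     /* new servers advertise it explicitly */
            return !(flags & FLOCK_LOCKS);
        return !(flags & POSIX_LOCKS);       /* old servers: infer from POSIX locks */
    }

    int main(void)
    {
        printf("minor=16 flags=POSIX -> no_flock=%d\n", no_flock(16, POSIX_LOCKS));
        printf("minor=17 flags=POSIX -> no_flock=%d\n", no_flock(17, POSIX_LOCKS));
        printf("minor=17 flags=both  -> no_flock=%d\n",
               no_flock(17, POSIX_LOCKS | FLOCK_LOCKS));
        return 0;
    }

The same fallback (no_flock = 1) is also applied when INIT fails entirely, mirroring the existing no_lock handling.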
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 85c62923ee29..598646434362 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
624 bh->b_end_io = end_buffer_write_sync; 624 bh->b_end_io = end_buffer_write_sync;
625 get_bh(bh); 625 get_bh(bh);
626 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) 626 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
627 submit_bh(WRITE_SYNC | REQ_META, bh); 627 submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
628 else 628 else
629 submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); 629 submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
630 wait_on_buffer(bh); 630 wait_on_buffer(bh);
631 631
632 if (!buffer_uptodate(bh)) 632 if (!buffer_uptodate(bh))
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 747238cd9f96..be29858900f6 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
37{ 37{
38 struct buffer_head *bh, *head; 38 struct buffer_head *bh, *head;
39 int nr_underway = 0; 39 int nr_underway = 0;
40 int write_op = REQ_META | 40 int write_op = REQ_META | REQ_PRIO |
41 (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); 41 (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
42 42
43 BUG_ON(!PageLocked(page)); 43 BUG_ON(!PageLocked(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
225 } 225 }
226 bh->b_end_io = end_buffer_read_sync; 226 bh->b_end_io = end_buffer_read_sync;
227 get_bh(bh); 227 get_bh(bh);
228 submit_bh(READ_SYNC | REQ_META, bh); 228 submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
229 if (!(flags & DIO_WAIT)) 229 if (!(flags & DIO_WAIT))
230 return 0; 230 return 0;
231 231
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
435 if (buffer_uptodate(first_bh)) 435 if (buffer_uptodate(first_bh))
436 goto out; 436 goto out;
437 if (!buffer_locked(first_bh)) 437 if (!buffer_locked(first_bh))
438 ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); 438 ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
439 439
440 dblock++; 440 dblock++;
441 extlen--; 441 extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3bc073a4cf82..079587e53849 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
224 224
225 bio->bi_end_io = end_bio_io_page; 225 bio->bi_end_io = end_bio_io_page;
226 bio->bi_private = page; 226 bio->bi_private = page;
227 submit_bio(READ_SYNC | REQ_META, bio); 227 submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
228 wait_on_page_locked(page); 228 wait_on_page_locked(page);
229 bio_put(bio); 229 bio_put(bio);
230 if (!PageUptodate(page)) { 230 if (!PageUptodate(page)) {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 42e8d23bc047..0e8bb13381e4 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -709,7 +709,7 @@ get_a_page:
709 set_buffer_uptodate(bh); 709 set_buffer_uptodate(bh);
710 710
711 if (!buffer_uptodate(bh)) { 711 if (!buffer_uptodate(bh)) {
712 ll_rw_block(READ_META, 1, &bh); 712 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
713 wait_on_buffer(bh); 713 wait_on_buffer(bh);
714 if (!buffer_uptodate(bh)) 714 if (!buffer_uptodate(bh))
715 goto unlock_out; 715 goto unlock_out;
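Illustrative sketch (not part of the diff): the gfs2 hunks all follow one pattern — metadata reads and writes now carry REQ_PRIO in addition to REQ_META, so the I/O scheduler can boost them. A trivial stand-alone illustration of the flag combination, with placeholder bit values rather than the block layer's:

    /* Placeholder flag bits, not the kernel's REQ_* values. */
    #include <stdio.h>

    #define RW_READ_SYNC  (1u << 0)
    #define RW_META       (1u << 1)   /* marks metadata for accounting/tracing */
    #define RW_PRIO       (1u << 2)   /* asks the I/O scheduler to boost it    */

    static unsigned gfs2_meta_read_flags(void)
    {
        /* Before the patch only the META bit was set; the PRIO bit is what
         * the scheduler now keys its priority decision on. */
        return RW_READ_SYNC | RW_META | RW_PRIO;
    }

    int main(void)
    {
        printf("metadata read flags: 0x%x\n", gfs2_meta_read_flags());
        return 0;
    }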
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index c106ca22e812..d24a9b666a23 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
344 struct inode *root, *inode; 344 struct inode *root, *inode;
345 struct qstr str; 345 struct qstr str;
346 struct nls_table *nls = NULL; 346 struct nls_table *nls = NULL;
347 u64 last_fs_block, last_fs_page;
347 int err; 348 int err;
348 349
349 err = -EINVAL; 350 err = -EINVAL;
@@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
399 if (!sbi->rsrc_clump_blocks) 400 if (!sbi->rsrc_clump_blocks)
400 sbi->rsrc_clump_blocks = 1; 401 sbi->rsrc_clump_blocks = 1;
401 402
402 err = generic_check_addressable(sbi->alloc_blksz_shift, 403 err = -EFBIG;
403 sbi->total_blocks); 404 last_fs_block = sbi->total_blocks - 1;
404 if (err) { 405 last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
406 PAGE_CACHE_SHIFT;
407
408 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
409 (last_fs_page > (pgoff_t)(~0ULL))) {
405 printk(KERN_ERR "hfs: filesystem size too large.\n"); 410 printk(KERN_ERR "hfs: filesystem size too large.\n");
406 goto out_free_vhdr; 411 goto out_free_vhdr;
407 } 412 }
@@ -525,8 +530,8 @@ out_close_cat_tree:
525out_close_ext_tree: 530out_close_ext_tree:
526 hfs_btree_close(sbi->ext_tree); 531 hfs_btree_close(sbi->ext_tree);
527out_free_vhdr: 532out_free_vhdr:
528 kfree(sbi->s_vhdr); 533 kfree(sbi->s_vhdr_buf);
529 kfree(sbi->s_backup_vhdr); 534 kfree(sbi->s_backup_vhdr_buf);
530out_unload_nls: 535out_unload_nls:
531 unload_nls(sbi->nls); 536 unload_nls(sbi->nls);
532 unload_nls(nls); 537 unload_nls(nls);
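Illustrative sketch (not part of the diff): the hfsplus_fill_super() hunk replaces generic_check_addressable() with explicit checks that the last allocation block fits in sector_t (counted in 512-byte sectors) and that the last page offset fits in pgoff_t. A stand-alone model of that size check, assuming 4 KiB pages, a 64-bit sector_t and a 32-bit pgoff_t (as on a 32-bit kernel), could be:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_M 12u                 /* assumed 4 KiB pages            */

    typedef uint64_t sector_t_model;         /* 64-bit sector_t assumed        */
    typedef uint32_t pgoff_t_model;          /* 32-bit pgoff_t, 32-bit kernel  */

    static int fs_too_large(uint64_t total_blocks, unsigned alloc_blksz_shift)
    {
        uint64_t last_block = total_blocks - 1;
        uint64_t last_page  = (last_block << alloc_blksz_shift) >> PAGE_SHIFT_M;

        /* last block, expressed in 512-byte sectors, must fit in sector_t */
        if (last_block > ((sector_t_model)~0ULL >> (alloc_blksz_shift - 9)))
            return 1;
        /* last page index must fit in pgoff_t */
        if (last_page > (pgoff_t_model)~0UL)
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("16 TiB fs (2^32 x 4 KiB blocks): too large = %d\n",
               fs_too_large(1ULL << 32, 12));
        printf("32 TiB fs (2^33 x 4 KiB blocks): too large = %d\n",
               fs_too_large(1ULL << 33, 12));
        return 0;
    }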
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 10e515a0d452..7daf4b852d1c 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -272,9 +272,9 @@ reread:
272 return 0; 272 return 0;
273 273
274out_free_backup_vhdr: 274out_free_backup_vhdr:
275 kfree(sbi->s_backup_vhdr); 275 kfree(sbi->s_backup_vhdr_buf);
276out_free_vhdr: 276out_free_vhdr:
277 kfree(sbi->s_vhdr); 277 kfree(sbi->s_vhdr_buf);
278out: 278out:
279 return error; 279 return error;
280} 280}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 87b6e0421c12..ec889538e5a6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -491,6 +491,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
491 inode->i_op = &page_symlink_inode_operations; 491 inode->i_op = &page_symlink_inode_operations;
492 break; 492 break;
493 } 493 }
494 lockdep_annotate_inode_mutex_key(inode);
494 } 495 }
495 return inode; 496 return inode;
496} 497}
diff --git a/fs/inode.c b/fs/inode.c
index 73920d555c88..ec7924696a13 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -848,16 +848,9 @@ struct inode *new_inode(struct super_block *sb)
848} 848}
849EXPORT_SYMBOL(new_inode); 849EXPORT_SYMBOL(new_inode);
850 850
851/**
852 * unlock_new_inode - clear the I_NEW state and wake up any waiters
853 * @inode: new inode to unlock
854 *
855 * Called when the inode is fully initialised to clear the new state of the
856 * inode and wake up anyone waiting for the inode to finish initialisation.
857 */
858void unlock_new_inode(struct inode *inode)
859{
860#ifdef CONFIG_DEBUG_LOCK_ALLOC 851#ifdef CONFIG_DEBUG_LOCK_ALLOC
852void lockdep_annotate_inode_mutex_key(struct inode *inode)
853{
861 if (S_ISDIR(inode->i_mode)) { 854 if (S_ISDIR(inode->i_mode)) {
862 struct file_system_type *type = inode->i_sb->s_type; 855 struct file_system_type *type = inode->i_sb->s_type;
863 856
@@ -873,7 +866,20 @@ void unlock_new_inode(struct inode *inode)
873 &type->i_mutex_dir_key); 866 &type->i_mutex_dir_key);
874 } 867 }
875 } 868 }
869}
870EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
876#endif 871#endif
872
873/**
874 * unlock_new_inode - clear the I_NEW state and wake up any waiters
875 * @inode: new inode to unlock
876 *
877 * Called when the inode is fully initialised to clear the new state of the
878 * inode and wake up anyone waiting for the inode to finish initialisation.
879 */
880void unlock_new_inode(struct inode *inode)
881{
882 lockdep_annotate_inode_mutex_key(inode);
877 spin_lock(&inode->i_lock); 883 spin_lock(&inode->i_lock);
878 WARN_ON(!(inode->i_state & I_NEW)); 884 WARN_ON(!(inode->i_state & I_NEW));
879 inode->i_state &= ~I_NEW; 885 inode->i_state &= ~I_NEW;
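Illustrative sketch (not part of the diff): the fs/inode.c hunk factors the lockdep annotation out of unlock_new_inode() into lockdep_annotate_inode_mutex_key(), so callers that reuse an already-unlocked inode (hugetlbfs_get_inode() earlier in this series) can apply the same per-filesystem i_mutex class directly. A stand-alone model of the refactor, with made-up names:

    #include <stdbool.h>
    #include <stdio.h>

    struct inode_model { bool is_dir; const char *mutex_class; };

    static void annotate_inode_mutex_key(struct inode_model *inode)
    {
        /* directories get a per-filesystem i_mutex class, others the default */
        inode->mutex_class = inode->is_dir ? "fs->i_mutex_dir_key"
                                           : "fs->i_mutex_key";
    }

    static void unlock_new_inode_model(struct inode_model *inode)
    {
        annotate_inode_mutex_key(inode);     /* same behaviour as before */
        /* ...clear I_NEW and wake waiters... */
    }

    int main(void)
    {
        struct inode_model dir = { .is_dir = true }, reused = { .is_dir = false };

        unlock_new_inode_model(&dir);        /* normal new-inode path          */
        annotate_inode_mutex_key(&reused);   /* direct call, as hugetlbfs does */
        printf("%s / %s\n", dir.mutex_class, reused.mutex_class);
        return 0;
    }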
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
index adcf92d3b603..7971f37534a3 100644
--- a/fs/jfs/jfs_umount.c
+++ b/fs/jfs/jfs_umount.c
@@ -68,7 +68,7 @@ int jfs_umount(struct super_block *sb)
68 /* 68 /*
69 * Wait for outstanding transactions to be written to log: 69 * Wait for outstanding transactions to be written to log:
70 */ 70 */
71 jfs_flush_journal(log, 1); 71 jfs_flush_journal(log, 2);
72 72
73 /* 73 /*
74 * close fileset inode allocation map (aka fileset inode) 74 * close fileset inode allocation map (aka fileset inode)
@@ -146,7 +146,7 @@ int jfs_umount_rw(struct super_block *sb)
146 * 146 *
147 * remove file system from log active file system list. 147 * remove file system from log active file system list.
148 */ 148 */
149 jfs_flush_journal(log, 1); 149 jfs_flush_journal(log, 2);
150 150
151 /* 151 /*
152 * Make sure all metadata makes it to disk 152 * Make sure all metadata makes it to disk
diff --git a/fs/namei.c b/fs/namei.c
index 2826db35dc25..f4788365ea22 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
727 if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) 727 if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
728 return -EISDIR; /* we actually want to stop here */ 728 return -EISDIR; /* we actually want to stop here */
729 729
730 /* 730 /* We don't want to mount if someone's just doing a stat -
731 * We don't want to mount if someone's just doing a stat and they've 731 * unless they're stat'ing a directory and appended a '/' to
732 * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and 732 * the name.
733 * appended a '/' to the name. 733 *
734 * We do, however, want to mount if someone wants to open or
735 * create a file of any type under the mountpoint, wants to
736 * traverse through the mountpoint or wants to open the
737 * mounted directory. Also, autofs may mark negative dentries
738 * as being automount points. These will need the attentions
739 * of the daemon to instantiate them before they can be used.
734 */ 740 */
735 if (!(flags & LOOKUP_FOLLOW)) { 741 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
736 /* We do, however, want to mount if someone wants to open or 742 LOOKUP_OPEN | LOOKUP_CREATE)) &&
737 * create a file of any type under the mountpoint, wants to 743 path->dentry->d_inode)
738 * traverse through the mountpoint or wants to open the mounted 744 return -EISDIR;
739 * directory. 745
740 * Also, autofs may mark negative dentries as being automount
741 * points. These will need the attentions of the daemon to
742 * instantiate them before they can be used.
743 */
744 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
745 LOOKUP_OPEN | LOOKUP_CREATE)) &&
746 path->dentry->d_inode)
747 return -EISDIR;
748 }
749 current->total_link_count++; 746 current->total_link_count++;
750 if (current->total_link_count >= 40) 747 if (current->total_link_count >= 40)
751 return -ELOOP; 748 return -ELOOP;
@@ -2619,6 +2616,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2619 if (!dir->i_op->rmdir) 2616 if (!dir->i_op->rmdir)
2620 return -EPERM; 2617 return -EPERM;
2621 2618
2619 dget(dentry);
2622 mutex_lock(&dentry->d_inode->i_mutex); 2620 mutex_lock(&dentry->d_inode->i_mutex);
2623 2621
2624 error = -EBUSY; 2622 error = -EBUSY;
@@ -2639,6 +2637,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2639 2637
2640out: 2638out:
2641 mutex_unlock(&dentry->d_inode->i_mutex); 2639 mutex_unlock(&dentry->d_inode->i_mutex);
2640 dput(dentry);
2642 if (!error) 2641 if (!error)
2643 d_delete(dentry); 2642 d_delete(dentry);
2644 return error; 2643 return error;
@@ -3028,6 +3027,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3028 if (error) 3027 if (error)
3029 return error; 3028 return error;
3030 3029
3030 dget(new_dentry);
3031 if (target) 3031 if (target)
3032 mutex_lock(&target->i_mutex); 3032 mutex_lock(&target->i_mutex);
3033 3033
@@ -3048,6 +3048,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3048out: 3048out:
3049 if (target) 3049 if (target)
3050 mutex_unlock(&target->i_mutex); 3050 mutex_unlock(&target->i_mutex);
3051 dput(new_dentry);
3051 if (!error) 3052 if (!error)
3052 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3053 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3053 d_move(old_dentry,new_dentry); 3054 d_move(old_dentry,new_dentry);
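Illustrative sketch (not part of the diff): the vfs_rmdir()/vfs_rename_dir() hunks bracket the locked section with dget()/dput() so the victim dentry cannot lose its last reference while its inode mutex is still held by the caller. A stand-alone model of that pin-around-the-operation pattern, with made-up names:

    #include <assert.h>
    #include <stdio.h>

    struct dentry_model { int refcount; };

    static void dget_m(struct dentry_model *d) { d->refcount++; }
    static void dput_m(struct dentry_model *d) { assert(d->refcount > 0); d->refcount--; }

    static int do_rmdir_model(struct dentry_model *victim,
                              int (*fs_rmdir)(struct dentry_model *))
    {
        int err;

        dget_m(victim);               /* pin for the duration of the operation */
        /* mutex_lock(victim's inode) */
        err = fs_rmdir(victim);       /* may drop references of its own        */
        /* mutex_unlock(victim's inode) */
        dput_m(victim);               /* release our pin after unlocking       */
        return err;
    }

    static int fake_fs_rmdir(struct dentry_model *d) { (void)d; return 0; }

    int main(void)
    {
        struct dentry_model d = { .refcount = 1 };
        printf("rmdir -> %d, refcount back to %d\n",
               do_rmdir_model(&d, fake_fs_rmdir), d.refcount);
        return 0;
    }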
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index e56564d2ef95..9561c8fc8bdb 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -36,6 +36,7 @@
36#include <linux/namei.h> 36#include <linux/namei.h>
37#include <linux/bio.h> /* struct bio */ 37#include <linux/bio.h> /* struct bio */
38#include <linux/buffer_head.h> /* various write calls */ 38#include <linux/buffer_head.h> /* various write calls */
39#include <linux/prefetch.h>
39 40
40#include "blocklayout.h" 41#include "blocklayout.h"
41 42
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index b257383bb565..07df5f1d85e5 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -38,6 +38,7 @@ enum nfs4_callback_opnum {
38struct cb_process_state { 38struct cb_process_state {
39 __be32 drc_status; 39 __be32 drc_status;
40 struct nfs_client *clp; 40 struct nfs_client *clp;
41 int slotid;
41}; 42};
42 43
43struct cb_compound_hdr_arg { 44struct cb_compound_hdr_arg {
@@ -166,7 +167,6 @@ extern unsigned nfs4_callback_layoutrecall(
166 void *dummy, struct cb_process_state *cps); 167 void *dummy, struct cb_process_state *cps);
167 168
168extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); 169extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
169extern void nfs4_cb_take_slot(struct nfs_client *clp);
170 170
171struct cb_devicenotifyitem { 171struct cb_devicenotifyitem {
172 uint32_t cbd_notify_type; 172 uint32_t cbd_notify_type;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 74780f9f852c..43926add945b 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -348,7 +348,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
348 /* Normal */ 348 /* Normal */
349 if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { 349 if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
350 slot->seq_nr++; 350 slot->seq_nr++;
351 return htonl(NFS4_OK); 351 goto out_ok;
352 } 352 }
353 353
354 /* Replay */ 354 /* Replay */
@@ -367,11 +367,14 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
367 /* Wraparound */ 367 /* Wraparound */
368 if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { 368 if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
369 slot->seq_nr = 1; 369 slot->seq_nr = 1;
370 return htonl(NFS4_OK); 370 goto out_ok;
371 } 371 }
372 372
373 /* Misordered request */ 373 /* Misordered request */
374 return htonl(NFS4ERR_SEQ_MISORDERED); 374 return htonl(NFS4ERR_SEQ_MISORDERED);
375out_ok:
376 tbl->highest_used_slotid = args->csa_slotid;
377 return htonl(NFS4_OK);
375} 378}
376 379
377/* 380/*
@@ -433,26 +436,37 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
433 struct cb_sequenceres *res, 436 struct cb_sequenceres *res,
434 struct cb_process_state *cps) 437 struct cb_process_state *cps)
435{ 438{
439 struct nfs4_slot_table *tbl;
436 struct nfs_client *clp; 440 struct nfs_client *clp;
437 int i; 441 int i;
438 __be32 status = htonl(NFS4ERR_BADSESSION); 442 __be32 status = htonl(NFS4ERR_BADSESSION);
439 443
440 cps->clp = NULL;
441
442 clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); 444 clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
443 if (clp == NULL) 445 if (clp == NULL)
444 goto out; 446 goto out;
445 447
448 tbl = &clp->cl_session->bc_slot_table;
449
450 spin_lock(&tbl->slot_tbl_lock);
446 /* state manager is resetting the session */ 451 /* state manager is resetting the session */
447 if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { 452 if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
448 status = NFS4ERR_DELAY; 453 spin_unlock(&tbl->slot_tbl_lock);
454 status = htonl(NFS4ERR_DELAY);
455 /* Return NFS4ERR_BADSESSION if we're draining the session
456 * in order to reset it.
457 */
458 if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
459 status = htonl(NFS4ERR_BADSESSION);
449 goto out; 460 goto out;
450 } 461 }
451 462
452 status = validate_seqid(&clp->cl_session->bc_slot_table, args); 463 status = validate_seqid(&clp->cl_session->bc_slot_table, args);
464 spin_unlock(&tbl->slot_tbl_lock);
453 if (status) 465 if (status)
454 goto out; 466 goto out;
455 467
468 cps->slotid = args->csa_slotid;
469
456 /* 470 /*
457 * Check for pending referring calls. If a match is found, a 471 * Check for pending referring calls. If a match is found, a
458 * related callback was received before the response to the original 472 * related callback was received before the response to the original
@@ -469,7 +483,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
469 res->csr_slotid = args->csa_slotid; 483 res->csr_slotid = args->csa_slotid;
470 res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 484 res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
471 res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 485 res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
472 nfs4_cb_take_slot(clp);
473 486
474out: 487out:
475 cps->clp = clp; /* put in nfs4_callback_compound */ 488 cps->clp = clp; /* put in nfs4_callback_compound */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index c6c86a77e043..918ad647afea 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -754,26 +754,15 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
754 * Let the state manager know callback processing done. 754 * Let the state manager know callback processing done.
755 * A single slot, so highest used slotid is either 0 or -1 755 * A single slot, so highest used slotid is either 0 or -1
756 */ 756 */
757 tbl->highest_used_slotid--; 757 tbl->highest_used_slotid = -1;
758 nfs4_check_drain_bc_complete(session); 758 nfs4_check_drain_bc_complete(session);
759 spin_unlock(&tbl->slot_tbl_lock); 759 spin_unlock(&tbl->slot_tbl_lock);
760} 760}
761 761
762static void nfs4_cb_free_slot(struct nfs_client *clp) 762static void nfs4_cb_free_slot(struct cb_process_state *cps)
763{ 763{
764 if (clp && clp->cl_session) 764 if (cps->slotid != -1)
765 nfs4_callback_free_slot(clp->cl_session); 765 nfs4_callback_free_slot(cps->clp->cl_session);
766}
767
768/* A single slot, so highest used slotid is either 0 or -1 */
769void nfs4_cb_take_slot(struct nfs_client *clp)
770{
771 struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
772
773 spin_lock(&tbl->slot_tbl_lock);
774 tbl->highest_used_slotid++;
775 BUG_ON(tbl->highest_used_slotid != 0);
776 spin_unlock(&tbl->slot_tbl_lock);
777} 766}
778 767
779#else /* CONFIG_NFS_V4_1 */ 768#else /* CONFIG_NFS_V4_1 */
@@ -784,7 +773,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
784 return htonl(NFS4ERR_MINOR_VERS_MISMATCH); 773 return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
785} 774}
786 775
787static void nfs4_cb_free_slot(struct nfs_client *clp) 776static void nfs4_cb_free_slot(struct cb_process_state *cps)
788{ 777{
789} 778}
790#endif /* CONFIG_NFS_V4_1 */ 779#endif /* CONFIG_NFS_V4_1 */
@@ -866,6 +855,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
866 struct cb_process_state cps = { 855 struct cb_process_state cps = {
867 .drc_status = 0, 856 .drc_status = 0,
868 .clp = NULL, 857 .clp = NULL,
858 .slotid = -1,
869 }; 859 };
870 unsigned int nops = 0; 860 unsigned int nops = 0;
871 861
@@ -906,7 +896,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
906 896
907 *hdr_res.status = status; 897 *hdr_res.status = status;
908 *hdr_res.nops = htonl(nops); 898 *hdr_res.nops = htonl(nops);
909 nfs4_cb_free_slot(cps.clp); 899 nfs4_cb_free_slot(&cps);
910 nfs_put_client(cps.clp); 900 nfs_put_client(cps.clp);
911 dprintk("%s: done, status = %u\n", __func__, ntohl(status)); 901 dprintk("%s: done, status = %u\n", __func__, ntohl(status));
912 return rpc_success; 902 return rpc_success;
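Illustrative sketch (not part of the diff): the callback hunks above change who owns the single backchannel slot — it is now claimed while the sequence id is validated under the slot table lock, the slot id is recorded in cb_process_state, and it is released only if one was actually taken. A stand-alone model of that single-slot table, with made-up names:

    #include <stdio.h>

    struct slot_tbl_model { int highest_used_slotid; };     /* -1 = free, 0 = in use */
    struct cb_state_model { int slotid; };                   /* -1 = no slot taken    */

    static int validate_and_take(struct slot_tbl_model *tbl,
                                 struct cb_state_model *cps, int csa_slotid)
    {
        /* ...sequence-id checks happen here, under the table lock... */
        tbl->highest_used_slotid = csa_slotid;   /* claim the (only) slot */
        cps->slotid = csa_slotid;
        return 0;                                /* NFS4_OK */
    }

    static void free_slot(struct slot_tbl_model *tbl, struct cb_state_model *cps)
    {
        if (cps->slotid != -1)                   /* nothing to free if never taken */
            tbl->highest_used_slotid = -1;
    }

    int main(void)
    {
        struct slot_tbl_model tbl = { .highest_used_slotid = -1 };
        struct cb_state_model cps = { .slotid = -1 };

        validate_and_take(&tbl, &cps, 0);
        printf("in use: %d\n", tbl.highest_used_slotid);
        free_slot(&tbl, &cps);
        printf("after free: %d\n", tbl.highest_used_slotid);
        return 0;
    }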
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1ec1a85fa71c..3e93e9a1bee1 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -56,6 +56,9 @@ enum nfs4_session_state {
56 NFS4_SESSION_DRAINING, 56 NFS4_SESSION_DRAINING,
57}; 57};
58 58
59#define NFS4_RENEW_TIMEOUT 0x01
60#define NFS4_RENEW_DELEGATION_CB 0x02
61
59struct nfs4_minor_version_ops { 62struct nfs4_minor_version_ops {
60 u32 minor_version; 63 u32 minor_version;
61 64
@@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops {
225}; 228};
226 229
227struct nfs4_state_maintenance_ops { 230struct nfs4_state_maintenance_ops {
228 int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); 231 int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
229 struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *); 232 struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
230 int (*renew_lease)(struct nfs_client *, struct rpc_cred *); 233 int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
231}; 234};
@@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations;
237extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 240extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
238extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 241extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
239extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); 242extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
240extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
241extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
242extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 243extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
243extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 244extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
244extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); 245extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
349extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 350extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
350extern void nfs4_schedule_lease_recovery(struct nfs_client *); 351extern void nfs4_schedule_lease_recovery(struct nfs_client *);
351extern void nfs4_schedule_state_manager(struct nfs_client *); 352extern void nfs4_schedule_state_manager(struct nfs_client *);
353extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
352extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); 354extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
353extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 355extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
354extern void nfs41_handle_recall_slot(struct nfs_client *clp); 356extern void nfs41_handle_recall_slot(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8c77039e7a81..4700fae1ada0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3374 3374
3375 if (task->tk_status < 0) { 3375 if (task->tk_status < 0) {
3376 /* Unless we're shutting down, schedule state recovery! */ 3376 /* Unless we're shutting down, schedule state recovery! */
3377 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) 3377 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3378 return;
3379 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3378 nfs4_schedule_lease_recovery(clp); 3380 nfs4_schedule_lease_recovery(clp);
3379 return; 3381 return;
3382 }
3383 nfs4_schedule_path_down_recovery(clp);
3380 } 3384 }
3381 do_renew_lease(clp, timestamp); 3385 do_renew_lease(clp, timestamp);
3382} 3386}
@@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {
3386 .rpc_release = nfs4_renew_release, 3390 .rpc_release = nfs4_renew_release,
3387}; 3391};
3388 3392
3389int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) 3393static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3390{ 3394{
3391 struct rpc_message msg = { 3395 struct rpc_message msg = {
3392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3396 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
3395 }; 3399 };
3396 struct nfs4_renewdata *data; 3400 struct nfs4_renewdata *data;
3397 3401
3402 if (renew_flags == 0)
3403 return 0;
3398 if (!atomic_inc_not_zero(&clp->cl_count)) 3404 if (!atomic_inc_not_zero(&clp->cl_count))
3399 return -EIO; 3405 return -EIO;
3400 data = kmalloc(sizeof(*data), GFP_KERNEL); 3406 data = kmalloc(sizeof(*data), GFP_NOFS);
3401 if (data == NULL) 3407 if (data == NULL)
3402 return -ENOMEM; 3408 return -ENOMEM;
3403 data->client = clp; 3409 data->client = clp;
@@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
3406 &nfs4_renew_ops, data); 3412 &nfs4_renew_ops, data);
3407} 3413}
3408 3414
3409int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3415static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3410{ 3416{
3411 struct rpc_message msg = { 3417 struct rpc_message msg = {
3412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3418 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
5504 return rpc_run_task(&task_setup_data); 5510 return rpc_run_task(&task_setup_data);
5505} 5511}
5506 5512
5507static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5513static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5508{ 5514{
5509 struct rpc_task *task; 5515 struct rpc_task *task;
5510 int ret = 0; 5516 int ret = 0;
5511 5517
5518 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5519 return 0;
5512 task = _nfs41_proc_sequence(clp, cred); 5520 task = _nfs41_proc_sequence(clp, cred);
5513 if (IS_ERR(task)) 5521 if (IS_ERR(task))
5514 ret = PTR_ERR(task); 5522 ret = PTR_ERR(task);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index df8e7f3ca56d..dc484c0eae7f 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work)
60 struct rpc_cred *cred; 60 struct rpc_cred *cred;
61 long lease; 61 long lease;
62 unsigned long last, now; 62 unsigned long last, now;
63 unsigned renew_flags = 0;
63 64
64 ops = clp->cl_mvops->state_renewal_ops; 65 ops = clp->cl_mvops->state_renewal_ops;
65 dprintk("%s: start\n", __func__); 66 dprintk("%s: start\n", __func__);
@@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work)
72 last = clp->cl_last_renewal; 73 last = clp->cl_last_renewal;
73 now = jiffies; 74 now = jiffies;
74 /* Are we close to a lease timeout? */ 75 /* Are we close to a lease timeout? */
75 if (time_after(now, last + lease/3)) { 76 if (time_after(now, last + lease/3))
77 renew_flags |= NFS4_RENEW_TIMEOUT;
78 if (nfs_delegations_present(clp))
79 renew_flags |= NFS4_RENEW_DELEGATION_CB;
80
81 if (renew_flags != 0) {
76 cred = ops->get_state_renewal_cred_locked(clp); 82 cred = ops->get_state_renewal_cred_locked(clp);
77 spin_unlock(&clp->cl_lock); 83 spin_unlock(&clp->cl_lock);
78 if (cred == NULL) { 84 if (cred == NULL) {
79 if (!nfs_delegations_present(clp)) { 85 if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
80 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 86 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
81 goto out; 87 goto out;
82 } 88 }
83 nfs_expire_all_delegations(clp); 89 nfs_expire_all_delegations(clp);
84 } else { 90 } else {
85 /* Queue an asynchronous RENEW. */ 91 /* Queue an asynchronous RENEW. */
86 ops->sched_state_renewal(clp, cred); 92 ops->sched_state_renewal(clp, cred, renew_flags);
87 put_rpccred(cred); 93 put_rpccred(cred);
88 goto out_exp; 94 goto out_exp;
89 } 95 }
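Illustrative sketch (not part of the diff): nfs4_renew_state() now builds a renew_flags bitmask that distinguishes "the lease is close to timing out" from "we only hold delegations whose callback path needs checking", and the per-minor-version sched_state_renewal hook decides what to send based on it — the v4.1 SEQUENCE-based renewal, for instance, only fires on a real lease timeout. A stand-alone model of that decision, with placeholder flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define RENEW_TIMEOUT        0x01
    #define RENEW_DELEGATION_CB  0x02

    static unsigned compute_renew_flags(bool near_lease_timeout, bool have_delegations)
    {
        unsigned flags = 0;

        if (near_lease_timeout)
            flags |= RENEW_TIMEOUT;
        if (have_delegations)
            flags |= RENEW_DELEGATION_CB;
        return flags;
    }

    /* v4.1 SEQUENCE-based renewal only fires for a real lease timeout. */
    static bool v41_should_send_sequence(unsigned flags)
    {
        return (flags & RENEW_TIMEOUT) != 0;
    }

    int main(void)
    {
        unsigned f = compute_renew_flags(false, true);
        printf("flags=0x%x, send SEQUENCE: %d\n", f, v41_should_send_sequence(f));
        f = compute_renew_flags(true, false);
        printf("flags=0x%x, send SEQUENCE: %d\n", f, v41_should_send_sequence(f));
        return 0;
    }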
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 72ab97ef3d61..39914be40b03 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1038 nfs4_schedule_state_manager(clp); 1038 nfs4_schedule_state_manager(clp);
1039} 1039}
1040 1040
1041void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
1042{
1043 nfs_handle_cb_pathdown(clp);
1044 nfs4_schedule_state_manager(clp);
1045}
1046
1041static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1047static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
1042{ 1048{
1043 1049
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9383ca7245bc..d0cda12fddc3 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -479,7 +479,6 @@ static int _io_check(struct objio_state *ios, bool is_write)
479 for (i = 0; i < ios->numdevs; i++) { 479 for (i = 0; i < ios->numdevs; i++) {
480 struct osd_sense_info osi; 480 struct osd_sense_info osi;
481 struct osd_request *or = ios->per_dev[i].or; 481 struct osd_request *or = ios->per_dev[i].or;
482 unsigned dev;
483 int ret; 482 int ret;
484 483
485 if (!or) 484 if (!or)
@@ -500,9 +499,8 @@ static int _io_check(struct objio_state *ios, bool is_write)
500 499
501 continue; /* we recovered */ 500 continue; /* we recovered */
502 } 501 }
503 dev = ios->per_dev[i].dev; 502 objlayout_io_set_result(&ios->ol_state, i,
504 objlayout_io_set_result(&ios->ol_state, dev, 503 &ios->layout->comps[i].oc_object_id,
505 &ios->layout->comps[dev].oc_object_id,
506 osd_pri_2_pnfs_err(osi.osd_err_pri), 504 osd_pri_2_pnfs_err(osi.osd_err_pri),
507 ios->per_dev[i].offset, 505 ios->per_dev[i].offset,
508 ios->per_dev[i].length, 506 ios->per_dev[i].length,
@@ -589,22 +587,19 @@ static void _calc_stripe_info(struct objio_state *ios, u64 file_offset,
589} 587}
590 588
591static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, 589static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg,
592 unsigned pgbase, struct _objio_per_comp *per_dev, int cur_len, 590 unsigned pgbase, struct _objio_per_comp *per_dev, int len,
593 gfp_t gfp_flags) 591 gfp_t gfp_flags)
594{ 592{
595 unsigned pg = *cur_pg; 593 unsigned pg = *cur_pg;
594 int cur_len = len;
596 struct request_queue *q = 595 struct request_queue *q =
597 osd_request_queue(_io_od(ios, per_dev->dev)); 596 osd_request_queue(_io_od(ios, per_dev->dev));
598 597
599 per_dev->length += cur_len;
600
601 if (per_dev->bio == NULL) { 598 if (per_dev->bio == NULL) {
602 unsigned stripes = ios->layout->num_comps / 599 unsigned pages_in_stripe = ios->layout->group_width *
603 ios->layout->mirrors_p1;
604 unsigned pages_in_stripe = stripes *
605 (ios->layout->stripe_unit / PAGE_SIZE); 600 (ios->layout->stripe_unit / PAGE_SIZE);
606 unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) / 601 unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) /
607 stripes; 602 ios->layout->group_width;
608 603
609 if (BIO_MAX_PAGES_KMALLOC < bio_size) 604 if (BIO_MAX_PAGES_KMALLOC < bio_size)
610 bio_size = BIO_MAX_PAGES_KMALLOC; 605 bio_size = BIO_MAX_PAGES_KMALLOC;
@@ -632,6 +627,7 @@ static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg,
632 } 627 }
633 BUG_ON(cur_len); 628 BUG_ON(cur_len);
634 629
630 per_dev->length += len;
635 *cur_pg = pg; 631 *cur_pg = pg;
636 return 0; 632 return 0;
637} 633}
@@ -650,7 +646,7 @@ static int _prepare_one_group(struct objio_state *ios, u64 length,
650 int ret = 0; 646 int ret = 0;
651 647
652 while (length) { 648 while (length) {
653 struct _objio_per_comp *per_dev = &ios->per_dev[dev]; 649 struct _objio_per_comp *per_dev = &ios->per_dev[dev - first_dev];
654 unsigned cur_len, page_off = 0; 650 unsigned cur_len, page_off = 0;
655 651
656 if (!per_dev->length) { 652 if (!per_dev->length) {
@@ -670,8 +666,8 @@ static int _prepare_one_group(struct objio_state *ios, u64 length,
670 cur_len = stripe_unit; 666 cur_len = stripe_unit;
671 } 667 }
672 668
673 if (max_comp < dev) 669 if (max_comp < dev - first_dev)
674 max_comp = dev; 670 max_comp = dev - first_dev;
675 } else { 671 } else {
676 cur_len = stripe_unit; 672 cur_len = stripe_unit;
677 } 673 }
@@ -806,7 +802,7 @@ static int _read_mirrors(struct objio_state *ios, unsigned cur_comp)
806 struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp]; 802 struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
807 unsigned dev = per_dev->dev; 803 unsigned dev = per_dev->dev;
808 struct pnfs_osd_object_cred *cred = 804 struct pnfs_osd_object_cred *cred =
809 &ios->layout->comps[dev]; 805 &ios->layout->comps[cur_comp];
810 struct osd_obj_id obj = { 806 struct osd_obj_id obj = {
811 .partition = cred->oc_object_id.oid_partition_id, 807 .partition = cred->oc_object_id.oid_partition_id,
812 .id = cred->oc_object_id.oid_object_id, 808 .id = cred->oc_object_id.oid_object_id,
@@ -904,7 +900,7 @@ static int _write_mirrors(struct objio_state *ios, unsigned cur_comp)
904 for (; cur_comp < last_comp; ++cur_comp, ++dev) { 900 for (; cur_comp < last_comp; ++cur_comp, ++dev) {
905 struct osd_request *or = NULL; 901 struct osd_request *or = NULL;
906 struct pnfs_osd_object_cred *cred = 902 struct pnfs_osd_object_cred *cred =
907 &ios->layout->comps[dev]; 903 &ios->layout->comps[cur_comp];
908 struct osd_obj_id obj = { 904 struct osd_obj_id obj = {
909 .partition = cred->oc_object_id.oid_partition_id, 905 .partition = cred->oc_object_id.oid_partition_id,
910 .id = cred->oc_object_id.oid_object_id, 906 .id = cred->oc_object_id.oid_object_id,
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
index 16fc758e9123..b3918f7ac34d 100644
--- a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
+++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
@@ -170,6 +170,9 @@ int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
170 p = _osd_xdr_decode_data_map(p, &layout->olo_map); 170 p = _osd_xdr_decode_data_map(p, &layout->olo_map);
171 layout->olo_comps_index = be32_to_cpup(p++); 171 layout->olo_comps_index = be32_to_cpup(p++);
172 layout->olo_num_comps = be32_to_cpup(p++); 172 layout->olo_num_comps = be32_to_cpup(p++);
173 dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__,
174 layout->olo_comps_index, layout->olo_num_comps);
175
173 iter->total_comps = layout->olo_num_comps; 176 iter->total_comps = layout->olo_num_comps;
174 return 0; 177 return 0;
175} 178}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index b961ceac66b4..9b7dd7013b15 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb)
2035 sb->s_blocksize = nfs_block_bits(server->wsize, 2035 sb->s_blocksize = nfs_block_bits(server->wsize,
2036 &sb->s_blocksize_bits); 2036 &sb->s_blocksize_bits);
2037 2037
2038 if (server->flags & NFS_MOUNT_NOAC)
2039 sb->s_flags |= MS_SYNCHRONOUS;
2040
2041 sb->s_bdi = &server->backing_dev_info; 2038 sb->s_bdi = &server->backing_dev_info;
2042 2039
2043 nfs_super_set_maxbytes(sb, server->maxfilesize); 2040 nfs_super_set_maxbytes(sb, server->maxfilesize);
@@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2249 if (server->flags & NFS_MOUNT_UNSHARED) 2246 if (server->flags & NFS_MOUNT_UNSHARED)
2250 compare_super = NULL; 2247 compare_super = NULL;
2251 2248
2249 /* -o noac implies -o sync */
2250 if (server->flags & NFS_MOUNT_NOAC)
2251 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2252
2252 /* Get a superblock - note that we may end up sharing one that already exists */ 2253 /* Get a superblock - note that we may end up sharing one that already exists */
2253 s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); 2254 s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
2254 if (IS_ERR(s)) { 2255 if (IS_ERR(s)) {
@@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
2361 if (server->flags & NFS_MOUNT_UNSHARED) 2362 if (server->flags & NFS_MOUNT_UNSHARED)
2362 compare_super = NULL; 2363 compare_super = NULL;
2363 2364
2365 /* -o noac implies -o sync */
2366 if (server->flags & NFS_MOUNT_NOAC)
2367 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2368
2364 /* Get a superblock - note that we may end up sharing one that already exists */ 2369 /* Get a superblock - note that we may end up sharing one that already exists */
2365 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2370 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2366 if (IS_ERR(s)) { 2371 if (IS_ERR(s)) {
@@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
2628 if (server->flags & NFS4_MOUNT_UNSHARED) 2633 if (server->flags & NFS4_MOUNT_UNSHARED)
2629 compare_super = NULL; 2634 compare_super = NULL;
2630 2635
2636 /* -o noac implies -o sync */
2637 if (server->flags & NFS_MOUNT_NOAC)
2638 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2639
2631 /* Get a superblock - note that we may end up sharing one that already exists */ 2640 /* Get a superblock - note that we may end up sharing one that already exists */
2632 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2641 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2633 if (IS_ERR(s)) { 2642 if (IS_ERR(s)) {
@@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
2916 if (server->flags & NFS4_MOUNT_UNSHARED) 2925 if (server->flags & NFS4_MOUNT_UNSHARED)
2917 compare_super = NULL; 2926 compare_super = NULL;
2918 2927
2928 /* -o noac implies -o sync */
2929 if (server->flags & NFS_MOUNT_NOAC)
2930 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2931
2919 /* Get a superblock - note that we may end up sharing one that already exists */ 2932 /* Get a superblock - note that we may end up sharing one that already exists */
2920 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2933 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2921 if (IS_ERR(s)) { 2934 if (IS_ERR(s)) {
@@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
3003 if (server->flags & NFS4_MOUNT_UNSHARED) 3016 if (server->flags & NFS4_MOUNT_UNSHARED)
3004 compare_super = NULL; 3017 compare_super = NULL;
3005 3018
3019 /* -o noac implies -o sync */
3020 if (server->flags & NFS_MOUNT_NOAC)
3021 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
3022
3006 /* Get a superblock - note that we may end up sharing one that already exists */ 3023 /* Get a superblock - note that we may end up sharing one that already exists */
3007 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 3024 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
3008 if (IS_ERR(s)) { 3025 if (IS_ERR(s)) {
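Illustrative sketch (not part of the diff): the repeated nfs/super.c hunks move the "-o noac implies -o sync" handling from after superblock creation to before the sget() call, folding MS_SYNCHRONOUS into the mount flags that the superblock lookup compares against, rather than flipping the flag on an already shared superblock. A stand-alone model of that ordering, with placeholder flag bits:

    #include <stdio.h>

    #define MNT_NOAC         0x01
    #define MNT_SYNCHRONOUS  0x02

    static unsigned prepare_mount_flags(unsigned server_flags, unsigned mntflags)
    {
        if (server_flags & MNT_NOAC)          /* no attribute caching ...      */
            mntflags |= MNT_SYNCHRONOUS;      /* ... means write-through       */
        return mntflags;                      /* handed to the sb lookup/share */
    }

    int main(void)
    {
        printf("noac mount flags:    0x%x\n", prepare_mount_flags(MNT_NOAC, 0));
        printf("default mount flags: 0x%x\n", prepare_mount_flags(0, 0));
        return 0;
    }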
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b39b37f80913..c9bd2a6b7d4b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
958 if (!data) 958 if (!data)
959 goto out_bad; 959 goto out_bad;
960 data->pagevec[0] = page; 960 data->pagevec[0] = page;
961 nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); 961 nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
962 list_add(&data->list, res); 962 list_add(&data->list, res);
963 requests++; 963 requests++;
964 nbytes -= len; 964 nbytes -= len;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 25b6a887adb9..5afaa58a8630 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -877,30 +877,54 @@ struct numa_maps_private {
877 struct numa_maps md; 877 struct numa_maps md;
878}; 878};
879 879
880static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty) 880static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
881 unsigned long nr_pages)
881{ 882{
882 int count = page_mapcount(page); 883 int count = page_mapcount(page);
883 884
884 md->pages++; 885 md->pages += nr_pages;
885 if (pte_dirty || PageDirty(page)) 886 if (pte_dirty || PageDirty(page))
886 md->dirty++; 887 md->dirty += nr_pages;
887 888
888 if (PageSwapCache(page)) 889 if (PageSwapCache(page))
889 md->swapcache++; 890 md->swapcache += nr_pages;
890 891
891 if (PageActive(page) || PageUnevictable(page)) 892 if (PageActive(page) || PageUnevictable(page))
892 md->active++; 893 md->active += nr_pages;
893 894
894 if (PageWriteback(page)) 895 if (PageWriteback(page))
895 md->writeback++; 896 md->writeback += nr_pages;
896 897
897 if (PageAnon(page)) 898 if (PageAnon(page))
898 md->anon++; 899 md->anon += nr_pages;
899 900
900 if (count > md->mapcount_max) 901 if (count > md->mapcount_max)
901 md->mapcount_max = count; 902 md->mapcount_max = count;
902 903
903 md->node[page_to_nid(page)]++; 904 md->node[page_to_nid(page)] += nr_pages;
905}
906
907static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
908 unsigned long addr)
909{
910 struct page *page;
911 int nid;
912
913 if (!pte_present(pte))
914 return NULL;
915
916 page = vm_normal_page(vma, addr, pte);
917 if (!page)
918 return NULL;
919
920 if (PageReserved(page))
921 return NULL;
922
923 nid = page_to_nid(page);
924 if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
925 return NULL;
926
927 return page;
904} 928}
905 929
906static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 930static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
@@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
912 pte_t *pte; 936 pte_t *pte;
913 937
914 md = walk->private; 938 md = walk->private;
915 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 939 spin_lock(&walk->mm->page_table_lock);
916 do { 940 if (pmd_trans_huge(*pmd)) {
917 struct page *page; 941 if (pmd_trans_splitting(*pmd)) {
918 int nid; 942 spin_unlock(&walk->mm->page_table_lock);
943 wait_split_huge_page(md->vma->anon_vma, pmd);
944 } else {
945 pte_t huge_pte = *(pte_t *)pmd;
946 struct page *page;
919 947
920 if (!pte_present(*pte)) 948 page = can_gather_numa_stats(huge_pte, md->vma, addr);
921 continue; 949 if (page)
950 gather_stats(page, md, pte_dirty(huge_pte),
951 HPAGE_PMD_SIZE/PAGE_SIZE);
952 spin_unlock(&walk->mm->page_table_lock);
953 return 0;
954 }
955 } else {
956 spin_unlock(&walk->mm->page_table_lock);
957 }
922 958
923 page = vm_normal_page(md->vma, addr, *pte); 959 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
960 do {
961 struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
924 if (!page) 962 if (!page)
925 continue; 963 continue;
926 964 gather_stats(page, md, pte_dirty(*pte), 1);
927 if (PageReserved(page))
928 continue;
929
930 nid = page_to_nid(page);
931 if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
932 continue;
933
934 gather_stats(page, md, pte_dirty(*pte));
935 965
936 } while (pte++, addr += PAGE_SIZE, addr != end); 966 } while (pte++, addr += PAGE_SIZE, addr != end);
937 pte_unmap_unlock(orig_pte, ptl); 967 pte_unmap_unlock(orig_pte, ptl);
@@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
952 return 0; 982 return 0;
953 983
954 md = walk->private; 984 md = walk->private;
955 gather_stats(page, md, pte_dirty(*pte)); 985 gather_stats(page, md, pte_dirty(*pte), 1);
956 return 0; 986 return 0;
957} 987}
958 988
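Illustrative sketch (not part of the diff): the task_mmu.c hunks teach the numa_maps walker about transparent huge pages — gather_stats() gains an nr_pages argument so a single PMD-mapped huge page is accounted once with the number of base pages it covers, instead of being walked PTE by PTE. A stand-alone model of that accounting, assuming 4 KiB base pages and 2 MiB huge pages:

    #include <stdio.h>

    #define PAGE_SIZE_M      4096UL
    #define HPAGE_PMD_SIZE_M (2UL * 1024 * 1024)
    #define MAX_NODES        4

    struct numa_md_model { unsigned long pages; unsigned long node[MAX_NODES]; };

    static void gather_stats_model(struct numa_md_model *md, int nid,
                                   unsigned long nr_pages)
    {
        md->pages     += nr_pages;
        md->node[nid] += nr_pages;
    }

    int main(void)
    {
        struct numa_md_model md = { 0 };

        gather_stats_model(&md, 0, 1);                              /* normal PTE  */
        gather_stats_model(&md, 1, HPAGE_PMD_SIZE_M / PAGE_SIZE_M); /* one THP PMD */
        printf("pages=%lu node0=%lu node1=%lu\n", md.pages, md.node[0], md.node[1]);
        return 0;
    }

The common present/reserved/node checks are likewise hoisted into can_gather_numa_stats() so the huge-page and per-PTE paths share them.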
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 45174b534377..feb361e252ac 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
335#define DBGKEY(key) ((char *)(key)) 335#define DBGKEY(key) ((char *)(key))
336#define DBGKEY1(key) ((char *)(key)) 336#define DBGKEY1(key) ((char *)(key))
337 337
338#define ubifs_dbg_msg(fmt, ...) do { \ 338#define ubifs_dbg_msg(fmt, ...) do { \
339 if (0) \ 339 if (0) \
340 pr_debug(fmt "\n", ##__VA_ARGS__); \ 340 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
341} while (0) 341} while (0)
342 342
343#define dbg_dump_stack() 343#define dbg_dump_stack()
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 75bb316529dd..427a4e82a588 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -16,44 +16,53 @@
16# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17# 17#
18 18
19ccflags-y := -I$(src) -I$(src)/linux-2.6 19ccflags-y += -I$(src) # needed for trace events
20ccflags-$(CONFIG_XFS_DEBUG) += -g
21 20
22XFS_LINUX := linux-2.6 21ccflags-$(CONFIG_XFS_DEBUG) += -g
23 22
24obj-$(CONFIG_XFS_FS) += xfs.o 23obj-$(CONFIG_XFS_FS) += xfs.o
25 24
26xfs-y += linux-2.6/xfs_trace.o 25# this one should be compiled first, as the tracing macros can easily blow up
27 26xfs-y += xfs_trace.o
28xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
29 xfs_dquot.o \
30 xfs_dquot_item.o \
31 xfs_trans_dquot.o \
32 xfs_qm_syscalls.o \
33 xfs_qm_bhv.o \
34 xfs_qm.o)
35xfs-$(CONFIG_XFS_QUOTA) += linux-2.6/xfs_quotaops.o
36
37ifeq ($(CONFIG_XFS_QUOTA),y)
38xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
39endif
40
41xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
42xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o
43xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
44xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
45xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
46 27
28# highlevel code
29xfs-y += xfs_aops.o \
30 xfs_bit.o \
31 xfs_buf.o \
32 xfs_dfrag.o \
33 xfs_discard.o \
34 xfs_error.o \
35 xfs_export.o \
36 xfs_file.o \
37 xfs_filestream.o \
38 xfs_fsops.o \
39 xfs_fs_subr.o \
40 xfs_globals.o \
41 xfs_iget.o \
42 xfs_ioctl.o \
43 xfs_iomap.o \
44 xfs_iops.o \
45 xfs_itable.o \
46 xfs_message.o \
47 xfs_mru_cache.o \
48 xfs_super.o \
49 xfs_sync.o \
50 xfs_xattr.o \
51 xfs_rename.o \
52 xfs_rw.o \
53 xfs_utils.o \
54 xfs_vnodeops.o \
55 kmem.o \
56 uuid.o
47 57
58# code shared with libxfs
48xfs-y += xfs_alloc.o \ 59xfs-y += xfs_alloc.o \
49 xfs_alloc_btree.o \ 60 xfs_alloc_btree.o \
50 xfs_attr.o \ 61 xfs_attr.o \
51 xfs_attr_leaf.o \ 62 xfs_attr_leaf.o \
52 xfs_bit.o \
53 xfs_bmap.o \ 63 xfs_bmap.o \
54 xfs_bmap_btree.o \ 64 xfs_bmap_btree.o \
55 xfs_btree.o \ 65 xfs_btree.o \
56 xfs_buf_item.o \
57 xfs_da_btree.o \ 66 xfs_da_btree.o \
58 xfs_dir2.o \ 67 xfs_dir2.o \
59 xfs_dir2_block.o \ 68 xfs_dir2_block.o \
@@ -61,49 +70,37 @@ xfs-y += xfs_alloc.o \
61 xfs_dir2_leaf.o \ 70 xfs_dir2_leaf.o \
62 xfs_dir2_node.o \ 71 xfs_dir2_node.o \
63 xfs_dir2_sf.o \ 72 xfs_dir2_sf.o \
64 xfs_error.o \
65 xfs_extfree_item.o \
66 xfs_filestream.o \
67 xfs_fsops.o \
68 xfs_ialloc.o \ 73 xfs_ialloc.o \
69 xfs_ialloc_btree.o \ 74 xfs_ialloc_btree.o \
70 xfs_iget.o \
71 xfs_inode.o \ 75 xfs_inode.o \
72 xfs_inode_item.o \
73 xfs_iomap.o \
74 xfs_itable.o \
75 xfs_dfrag.o \
76 xfs_log.o \
77 xfs_log_cil.o \
78 xfs_log_recover.o \ 76 xfs_log_recover.o \
79 xfs_mount.o \ 77 xfs_mount.o \
80 xfs_mru_cache.o \ 78 xfs_trans.o
81 xfs_rename.o \ 79
82 xfs_trans.o \ 80# low-level transaction/log code
81xfs-y += xfs_log.o \
82 xfs_log_cil.o \
83 xfs_buf_item.o \
84 xfs_extfree_item.o \
85 xfs_inode_item.o \
83 xfs_trans_ail.o \ 86 xfs_trans_ail.o \
84 xfs_trans_buf.o \ 87 xfs_trans_buf.o \
85 xfs_trans_extfree.o \ 88 xfs_trans_extfree.o \
86 xfs_trans_inode.o \ 89 xfs_trans_inode.o \
87 xfs_utils.o \
88 xfs_vnodeops.o \
89 xfs_rw.o
90
91# Objects in linux/
92xfs-y += $(addprefix $(XFS_LINUX)/, \
93 kmem.o \
94 xfs_aops.o \
95 xfs_buf.o \
96 xfs_discard.o \
97 xfs_export.o \
98 xfs_file.o \
99 xfs_fs_subr.o \
100 xfs_globals.o \
101 xfs_ioctl.o \
102 xfs_iops.o \
103 xfs_message.o \
104 xfs_super.o \
105 xfs_sync.o \
106 xfs_xattr.o)
107 90
108# Objects in support/ 91# optional features
109xfs-y += support/uuid.o 92xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
93 xfs_dquot_item.o \
94 xfs_trans_dquot.o \
95 xfs_qm_syscalls.o \
96 xfs_qm_bhv.o \
97 xfs_qm.o \
98 xfs_quotaops.o
99ifeq ($(CONFIG_XFS_QUOTA),y)
100xfs-$(CONFIG_PROC_FS) += xfs_qm_stats.o
101endif
102xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
103xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
104xfs-$(CONFIG_PROC_FS) += xfs_stats.o
105xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
106xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/kmem.c
index a907de565db3..a907de565db3 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/kmem.c
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/kmem.h
index f7c8f7a9ea6d..f7c8f7a9ea6d 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/kmem.h
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/mrlock.h
index ff6a19873e5c..ff6a19873e5c 100644
--- a/fs/xfs/linux-2.6/mrlock.h
+++ b/fs/xfs/mrlock.h
diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/time.h
index 387e695a184c..387e695a184c 100644
--- a/fs/xfs/linux-2.6/time.h
+++ b/fs/xfs/time.h
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/uuid.c
index b83f76b6d410..b83f76b6d410 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/uuid.c
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/uuid.h
index 4732d71262cc..4732d71262cc 100644
--- a/fs/xfs/support/uuid.h
+++ b/fs/xfs/uuid.h
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 53ec3ea9a625..d8b11b7f94aa 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -24,5 +24,6 @@
24#define XFS_BUF_LOCK_TRACKING 1 24#define XFS_BUF_LOCK_TRACKING 1
25#endif 25#endif
26 26
27#include <linux-2.6/xfs_linux.h> 27#include "xfs_linux.h"
28
28#endif /* __XFS_H__ */ 29#endif /* __XFS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6c4b3795c4a..b6c4b3795c4a 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/xfs_aops.c
index 63e971e2b837..8c37dde4c521 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1300,6 +1300,7 @@ xfs_end_io_direct_write(
1300 bool is_async) 1300 bool is_async)
1301{ 1301{
1302 struct xfs_ioend *ioend = iocb->private; 1302 struct xfs_ioend *ioend = iocb->private;
1303 struct inode *inode = ioend->io_inode;
1303 1304
1304 /* 1305 /*
1305 * blockdev_direct_IO can return an error even after the I/O 1306 * blockdev_direct_IO can return an error even after the I/O
@@ -1331,7 +1332,7 @@ xfs_end_io_direct_write(
1331 } 1332 }
1332 1333
1333 /* XXX: probably should move into the real I/O completion handler */ 1334 /* XXX: probably should move into the real I/O completion handler */
1334 inode_dio_done(ioend->io_inode); 1335 inode_dio_done(inode);
1335} 1336}
1336 1337
1337STATIC ssize_t 1338STATIC ssize_t
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/xfs_aops.h
index 71f721e1a71f..71f721e1a71f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/xfs_buf.c
index c57836dc778f..c57836dc778f 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/xfs_buf.h
index 620972b8094d..620972b8094d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/xfs_discard.c
index 244e797dae32..244e797dae32 100644
--- a/fs/xfs/linux-2.6/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/xfs_discard.h
index 344879aea646..344879aea646 100644
--- a/fs/xfs/linux-2.6/xfs_discard.h
+++ b/fs/xfs/xfs_discard.h
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index db62959bed13..db62959bed13 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 34b7e945dbfa..34b7e945dbfa 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c8..9e0e2fa3f2c8 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 5acae2ada70b..5acae2ada70b 100644
--- a/fs/xfs/quota/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/xfs_export.c
index 75e5d322e48f..75e5d322e48f 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/xfs_export.c
diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/xfs_export.h
index 3272b6ae7a35..3272b6ae7a35 100644
--- a/fs/xfs/linux-2.6/xfs_export.h
+++ b/fs/xfs/xfs_export.h
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/xfs_file.c
index 7f7b42469ea7..7f7b42469ea7 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/xfs_file.c
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/xfs_fs_subr.c
index ed88ed16811c..ed88ed16811c 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/xfs_fs_subr.c
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/xfs_globals.c
index 76e81cff70b9..76e81cff70b9 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index f7ce7debe14c..f7ce7debe14c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index d56173b34a2a..d56173b34a2a 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 54e623bfbb85..54e623bfbb85 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 80f4060e8970..80f4060e8970 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/xfs_iops.c
index b9c172b3fbbe..673704fab748 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -70,9 +70,8 @@ xfs_synchronize_times(
70} 70}
71 71
72/* 72/*
73 * If the linux inode is valid, mark it dirty. 73 * If the linux inode is valid, mark it dirty, else mark the dirty state
74 * Used when committing a dirty inode into a transaction so that 74 * in the XFS inode to make sure we pick it up when reclaiming the inode.
75 * the inode will get written back by the linux code
76 */ 75 */
77void 76void
78xfs_mark_inode_dirty_sync( 77xfs_mark_inode_dirty_sync(
@@ -82,6 +81,10 @@ xfs_mark_inode_dirty_sync(
82 81
83 if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) 82 if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
84 mark_inode_dirty_sync(inode); 83 mark_inode_dirty_sync(inode);
84 else {
85 barrier();
86 ip->i_update_core = 1;
87 }
85} 88}
86 89
87void 90void
@@ -92,6 +95,11 @@ xfs_mark_inode_dirty(
92 95
93 if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) 96 if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
94 mark_inode_dirty(inode); 97 mark_inode_dirty(inode);
98 else {
99 barrier();
100 ip->i_update_core = 1;
101 }
102
95} 103}
96 104
97/* 105/*
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/xfs_iops.h
index ef41c92ce66e..ef41c92ce66e 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/xfs_linux.h
index d42f814e4d35..1e8a45e74c3e 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -32,13 +32,12 @@
32# define XFS_BIG_INUMS 0 32# define XFS_BIG_INUMS 0
33#endif 33#endif
34 34
35#include <xfs_types.h> 35#include "xfs_types.h"
36 36
37#include <kmem.h> 37#include "kmem.h"
38#include <mrlock.h> 38#include "mrlock.h"
39#include <time.h> 39#include "time.h"
40 40#include "uuid.h"
41#include <support/uuid.h>
42 41
43#include <linux/semaphore.h> 42#include <linux/semaphore.h>
44#include <linux/mm.h> 43#include <linux/mm.h>
@@ -78,14 +77,14 @@
78#include <asm/byteorder.h> 77#include <asm/byteorder.h>
79#include <asm/unaligned.h> 78#include <asm/unaligned.h>
80 79
81#include <xfs_vnode.h> 80#include "xfs_vnode.h"
82#include <xfs_stats.h> 81#include "xfs_stats.h"
83#include <xfs_sysctl.h> 82#include "xfs_sysctl.h"
84#include <xfs_iops.h> 83#include "xfs_iops.h"
85#include <xfs_aops.h> 84#include "xfs_aops.h"
86#include <xfs_super.h> 85#include "xfs_super.h"
87#include <xfs_buf.h> 86#include "xfs_buf.h"
88#include <xfs_message.h> 87#include "xfs_message.h"
89 88
90#ifdef __BIG_ENDIAN 89#ifdef __BIG_ENDIAN
91#define XFS_NATIVE_HOST 1 90#define XFS_NATIVE_HOST 1
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/xfs_message.c
index bd672def95ac..bd672def95ac 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/xfs_message.c
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/xfs_message.h
index 7fb7ea007672..7fb7ea007672 100644
--- a/fs/xfs/linux-2.6/xfs_message.h
+++ b/fs/xfs/xfs_message.h
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/xfs_qm.c
index 9a0aa76facdf..9a0aa76facdf 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/xfs_qm.h
index 43b9abe1052c..43b9abe1052c 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index a0a829addca9..a0a829addca9 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c
index 8671a0b32644..8671a0b32644 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/xfs_qm_stats.c
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/xfs_qm_stats.h
index 5b964fc0dc09..5b964fc0dc09 100644
--- a/fs/xfs/quota/xfs_qm_stats.h
+++ b/fs/xfs/xfs_qm_stats.h
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 609246f42e6c..609246f42e6c 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h
index 94a3d927d716..94a3d927d716 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/xfs_quota_priv.h
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 29b9d642e93d..7e76f537abb7 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -25,7 +25,7 @@
25#include "xfs_trans.h" 25#include "xfs_trans.h"
26#include "xfs_bmap_btree.h" 26#include "xfs_bmap_btree.h"
27#include "xfs_inode.h" 27#include "xfs_inode.h"
28#include "quota/xfs_qm.h" 28#include "xfs_qm.h"
29#include <linux/quota.h> 29#include <linux/quota.h>
30 30
31 31
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/xfs_stats.c
index 76fdc5861932..76fdc5861932 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/xfs_stats.h
index 736854b1ca1a..736854b1ca1a 100644
--- a/fs/xfs/linux-2.6/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/xfs_super.c
index 9a72dda58bd0..2366c54cc4fa 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -356,6 +356,8 @@ xfs_parseargs(
356 mp->m_flags |= XFS_MOUNT_DELAYLOG; 356 mp->m_flags |= XFS_MOUNT_DELAYLOG;
357 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 357 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
358 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 358 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
359 xfs_warn(mp,
360 "nodelaylog is deprecated and will be removed in Linux 3.3");
359 } else if (!strcmp(this_char, MNTOPT_DISCARD)) { 361 } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
360 mp->m_flags |= XFS_MOUNT_DISCARD; 362 mp->m_flags |= XFS_MOUNT_DISCARD;
361 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { 363 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
@@ -877,33 +879,17 @@ xfs_log_inode(
877 struct xfs_trans *tp; 879 struct xfs_trans *tp;
878 int error; 880 int error;
879 881
880 xfs_iunlock(ip, XFS_ILOCK_SHARED);
881 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); 882 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
882 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); 883 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
883
884 if (error) { 884 if (error) {
885 xfs_trans_cancel(tp, 0); 885 xfs_trans_cancel(tp, 0);
886 /* we need to return with the lock hold shared */
887 xfs_ilock(ip, XFS_ILOCK_SHARED);
888 return error; 886 return error;
889 } 887 }
890 888
891 xfs_ilock(ip, XFS_ILOCK_EXCL); 889 xfs_ilock(ip, XFS_ILOCK_EXCL);
892 890 xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
893 /*
894 * Note - it's possible that we might have pushed ourselves out of the
895 * way during trans_reserve which would flush the inode. But there's
896 * no guarantee that the inode buffer has actually gone out yet (it's
897 * delwri). Plus the buffer could be pinned anyway if it's part of
898 * an inode in another recent transaction. So we play it safe and
899 * fire off the transaction anyway.
900 */
901 xfs_trans_ijoin(tp, ip);
902 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 891 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
903 error = xfs_trans_commit(tp, 0); 892 return xfs_trans_commit(tp, 0);
904 xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
905
906 return error;
907} 893}
908 894
909STATIC int 895STATIC int
@@ -918,7 +904,9 @@ xfs_fs_write_inode(
918 trace_xfs_write_inode(ip); 904 trace_xfs_write_inode(ip);
919 905
920 if (XFS_FORCED_SHUTDOWN(mp)) 906 if (XFS_FORCED_SHUTDOWN(mp))
921 return XFS_ERROR(EIO); 907 return -XFS_ERROR(EIO);
908 if (!ip->i_update_core)
909 return 0;
922 910
923 if (wbc->sync_mode == WB_SYNC_ALL) { 911 if (wbc->sync_mode == WB_SYNC_ALL) {
924 /* 912 /*
@@ -929,12 +917,10 @@ xfs_fs_write_inode(
929 * of synchronous log forces dramatically. 917 * of synchronous log forces dramatically.
930 */ 918 */
931 xfs_ioend_wait(ip); 919 xfs_ioend_wait(ip);
932 xfs_ilock(ip, XFS_ILOCK_SHARED); 920 error = xfs_log_inode(ip);
933 if (ip->i_update_core) { 921 if (error)
934 error = xfs_log_inode(ip); 922 goto out;
935 if (error) 923 return 0;
936 goto out_unlock;
937 }
938 } else { 924 } else {
939 /* 925 /*
940 * We make this non-blocking if the inode is contended, return 926 * We make this non-blocking if the inode is contended, return
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/xfs_super.h
index 50a3266c999e..50a3266c999e 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/xfs_super.h
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/xfs_sync.c
index 4604f90f86a3..4604f90f86a3 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/xfs_sync.h
index 941202e7ac6e..941202e7ac6e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index ee2d2adaa438..ee2d2adaa438 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index b9937d450f8e..b9937d450f8e 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/xfs_trace.c
index 88d25d4aa56e..9010ce885e6a 100644
--- a/fs/xfs/linux-2.6/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -43,8 +43,8 @@
43#include "xfs_quota.h" 43#include "xfs_quota.h"
44#include "xfs_iomap.h" 44#include "xfs_iomap.h"
45#include "xfs_aops.h" 45#include "xfs_aops.h"
46#include "quota/xfs_dquot_item.h" 46#include "xfs_dquot_item.h"
47#include "quota/xfs_dquot.h" 47#include "xfs_dquot.h"
48#include "xfs_log_recover.h" 48#include "xfs_log_recover.h"
49#include "xfs_inode_item.h" 49#include "xfs_inode_item.h"
50 50
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/xfs_trace.h
index 690fc7a7bd72..690fc7a7bd72 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 4d00ee67792d..4d00ee67792d 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/xfs_vnode.h
index 7c220b4227bc..7c220b4227bc 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/xfs_vnode.h
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 87d3e03878c8..87d3e03878c8 100644
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index fb2d63f13f4c..aea9e45efce6 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -39,7 +39,7 @@
39}) 39})
40 40
41#define __page_to_pfn(pg) \ 41#define __page_to_pfn(pg) \
42({ struct page *__pg = (pg); \ 42({ const struct page *__pg = (pg); \
43 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ 43 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
44 (unsigned long)(__pg - __pgdat->node_mem_map) + \ 44 (unsigned long)(__pg - __pgdat->node_mem_map) + \
45 __pgdat->node_start_pfn; \ 45 __pgdat->node_start_pfn; \
@@ -57,7 +57,7 @@
57 * section[i].section_mem_map == mem_map's address - start_pfn; 57 * section[i].section_mem_map == mem_map's address - start_pfn;
58 */ 58 */
59#define __page_to_pfn(pg) \ 59#define __page_to_pfn(pg) \
60({ struct page *__pg = (pg); \ 60({ const struct page *__pg = (pg); \
61 int __sec = page_to_section(__pg); \ 61 int __sec = page_to_section(__pg); \
62 (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ 62 (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
63}) 63})
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 4f76959397fa..f4c38d8c6674 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -143,7 +143,7 @@ __SYSCALL(__NR_pivot_root, sys_pivot_root)
143 143
144/* fs/nfsctl.c */ 144/* fs/nfsctl.c */
145#define __NR_nfsservctl 42 145#define __NR_nfsservctl 42
146__SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl) 146__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
147 147
148/* fs/open.c */ 148/* fs/open.c */
149#define __NR3264_statfs 43 149#define __NR3264_statfs 43
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 98999cf107ce..feb912196745 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -63,15 +63,10 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
63 return container_of(gc, struct bgpio_chip, gc); 63 return container_of(gc, struct bgpio_chip, gc);
64} 64}
65 65
66int __devexit bgpio_remove(struct bgpio_chip *bgc); 66int bgpio_remove(struct bgpio_chip *bgc);
67int __devinit bgpio_init(struct bgpio_chip *bgc, 67int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
68 struct device *dev, 68 unsigned long sz, void __iomem *dat, void __iomem *set,
69 unsigned long sz, 69 void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
70 void __iomem *dat, 70 bool big_endian);
71 void __iomem *set,
72 void __iomem *clr,
73 void __iomem *dirout,
74 void __iomem *dirin,
75 bool big_endian);
76 71
77#endif /* __BASIC_MMIO_GPIO_H */ 72#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6395692b2e7a..71fc53bb8f1c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -124,8 +124,13 @@ enum rq_flag_bits {
124 124
125 __REQ_SYNC, /* request is sync (sync write or read) */ 125 __REQ_SYNC, /* request is sync (sync write or read) */
126 __REQ_META, /* metadata io request */ 126 __REQ_META, /* metadata io request */
127 __REQ_PRIO, /* boost priority in cfq */
127 __REQ_DISCARD, /* request to discard sectors */ 128 __REQ_DISCARD, /* request to discard sectors */
129 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
130
128 __REQ_NOIDLE, /* don't anticipate more IO after this one */ 131 __REQ_NOIDLE, /* don't anticipate more IO after this one */
132 __REQ_FUA, /* forced unit access */
133 __REQ_FLUSH, /* request for cache flush */
129 134
130 /* bio only flags */ 135 /* bio only flags */
131 __REQ_RAHEAD, /* read ahead, can fail anytime */ 136 __REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -135,7 +140,6 @@ enum rq_flag_bits {
135 /* request only flags */ 140 /* request only flags */
136 __REQ_SORTED, /* elevator knows about this request */ 141 __REQ_SORTED, /* elevator knows about this request */
137 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 142 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
138 __REQ_FUA, /* forced unit access */
139 __REQ_NOMERGE, /* don't touch this for merging */ 143 __REQ_NOMERGE, /* don't touch this for merging */
140 __REQ_STARTED, /* drive already may have started this one */ 144 __REQ_STARTED, /* drive already may have started this one */
141 __REQ_DONTPREP, /* don't call prep for this one */ 145 __REQ_DONTPREP, /* don't call prep for this one */
@@ -146,11 +150,9 @@ enum rq_flag_bits {
146 __REQ_PREEMPT, /* set for "ide_preempt" requests */ 150 __REQ_PREEMPT, /* set for "ide_preempt" requests */
147 __REQ_ALLOCED, /* request came from our alloc pool */ 151 __REQ_ALLOCED, /* request came from our alloc pool */
148 __REQ_COPY_USER, /* contains copies of user pages */ 152 __REQ_COPY_USER, /* contains copies of user pages */
149 __REQ_FLUSH, /* request for cache flush */
150 __REQ_FLUSH_SEQ, /* request for flush sequence */ 153 __REQ_FLUSH_SEQ, /* request for flush sequence */
151 __REQ_IO_STAT, /* account I/O stat */ 154 __REQ_IO_STAT, /* account I/O stat */
152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 155 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
153 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
154 __REQ_NR_BITS, /* stops here */ 156 __REQ_NR_BITS, /* stops here */
155}; 157};
156 158
@@ -160,14 +162,15 @@ enum rq_flag_bits {
160#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 162#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
161#define REQ_SYNC (1 << __REQ_SYNC) 163#define REQ_SYNC (1 << __REQ_SYNC)
162#define REQ_META (1 << __REQ_META) 164#define REQ_META (1 << __REQ_META)
165#define REQ_PRIO (1 << __REQ_PRIO)
163#define REQ_DISCARD (1 << __REQ_DISCARD) 166#define REQ_DISCARD (1 << __REQ_DISCARD)
164#define REQ_NOIDLE (1 << __REQ_NOIDLE) 167#define REQ_NOIDLE (1 << __REQ_NOIDLE)
165 168
166#define REQ_FAILFAST_MASK \ 169#define REQ_FAILFAST_MASK \
167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 170 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
168#define REQ_COMMON_MASK \ 171#define REQ_COMMON_MASK \
169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ 172 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) 173 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
171#define REQ_CLONE_MASK REQ_COMMON_MASK 174#define REQ_CLONE_MASK REQ_COMMON_MASK
172 175
173#define REQ_RAHEAD (1 << __REQ_RAHEAD) 176#define REQ_RAHEAD (1 << __REQ_RAHEAD)
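The blk_types.h hunk above introduces REQ_PRIO ("boost priority in cfq") next to REQ_META and folds it into REQ_COMMON_MASK; together with the fs.h hunk further down that drops READ_META/WRITE_META, callers now combine the flags themselves. A minimal hedged sketch of a synchronous metadata read using only standard buffer-head calls; the helper name is hypothetical and error handling is trimmed.

/* Hedged sketch: synchronous metadata read tagged for cfq priority. */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int my_read_metadata(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	/* REQ_META marks metadata I/O, REQ_PRIO asks cfq to boost it. */
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}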
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e67c45b3bc9..7fbaa9103344 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -30,6 +30,7 @@ struct request_pm_state;
30struct blk_trace; 30struct blk_trace;
31struct request; 31struct request;
32struct sg_io_hdr; 32struct sg_io_hdr;
33struct bsg_job;
33 34
34#define BLKDEV_MIN_RQ 4 35#define BLKDEV_MIN_RQ 4
35#define BLKDEV_MAX_RQ 128 /* Default maximum */ 36#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -117,6 +118,7 @@ struct request {
117 struct { 118 struct {
118 unsigned int seq; 119 unsigned int seq;
119 struct list_head list; 120 struct list_head list;
121 rq_end_io_fn *saved_end_io;
120 } flush; 122 } flush;
121 }; 123 };
122 124
@@ -209,6 +211,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
209typedef void (softirq_done_fn)(struct request *); 211typedef void (softirq_done_fn)(struct request *);
210typedef int (dma_drain_needed_fn)(struct request *); 212typedef int (dma_drain_needed_fn)(struct request *);
211typedef int (lld_busy_fn) (struct request_queue *q); 213typedef int (lld_busy_fn) (struct request_queue *q);
214typedef int (bsg_job_fn) (struct bsg_job *);
212 215
213enum blk_eh_timer_return { 216enum blk_eh_timer_return {
214 BLK_EH_NOT_HANDLED, 217 BLK_EH_NOT_HANDLED,
@@ -375,6 +378,8 @@ struct request_queue {
375 struct mutex sysfs_lock; 378 struct mutex sysfs_lock;
376 379
377#if defined(CONFIG_BLK_DEV_BSG) 380#if defined(CONFIG_BLK_DEV_BSG)
381 bsg_job_fn *bsg_job_fn;
382 int bsg_job_size;
378 struct bsg_class_device bsg_dev; 383 struct bsg_class_device bsg_dev;
379#endif 384#endif
380 385
@@ -868,7 +873,6 @@ struct blk_plug {
868 struct list_head list; 873 struct list_head list;
869 struct list_head cb_list; 874 struct list_head cb_list;
870 unsigned int should_sort; 875 unsigned int should_sort;
871 unsigned int count;
872}; 876};
873#define BLK_MAX_REQUEST_COUNT 16 877#define BLK_MAX_REQUEST_COUNT 16
874 878
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 8c7c2de7631a..8e9e4bc6d73b 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -14,7 +14,7 @@
14enum blktrace_cat { 14enum blktrace_cat {
15 BLK_TC_READ = 1 << 0, /* reads */ 15 BLK_TC_READ = 1 << 0, /* reads */
16 BLK_TC_WRITE = 1 << 1, /* writes */ 16 BLK_TC_WRITE = 1 << 1, /* writes */
17 BLK_TC_BARRIER = 1 << 2, /* barrier */ 17 BLK_TC_FLUSH = 1 << 2, /* flush */
18 BLK_TC_SYNC = 1 << 3, /* sync IO */ 18 BLK_TC_SYNC = 1 << 3, /* sync IO */
19 BLK_TC_SYNCIO = BLK_TC_SYNC, 19 BLK_TC_SYNCIO = BLK_TC_SYNC,
20 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ 20 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
@@ -28,8 +28,9 @@ enum blktrace_cat {
28 BLK_TC_META = 1 << 12, /* metadata */ 28 BLK_TC_META = 1 << 12, /* metadata */
29 BLK_TC_DISCARD = 1 << 13, /* discard requests */ 29 BLK_TC_DISCARD = 1 << 13, /* discard requests */
30 BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */ 30 BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */
31 BLK_TC_FUA = 1 << 15, /* fua requests */
31 32
32 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ 33 BLK_TC_END = 1 << 15, /* we've run out of bits! */
33}; 34};
34 35
35#define BLK_TC_SHIFT (16) 36#define BLK_TC_SHIFT (16)
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
new file mode 100644
index 000000000000..f55ab8cdc106
--- /dev/null
+++ b/include/linux/bsg-lib.h
@@ -0,0 +1,73 @@
1/*
2 * BSG helper library
3 *
4 * Copyright (C) 2008 James Smart, Emulex Corporation
5 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2011 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23#ifndef _BLK_BSG_
24#define _BLK_BSG_
25
26#include <linux/blkdev.h>
27
28struct request;
29struct device;
30struct scatterlist;
31struct request_queue;
32
33struct bsg_buffer {
34 unsigned int payload_len;
35 int sg_cnt;
36 struct scatterlist *sg_list;
37};
38
39struct bsg_job {
40 struct device *dev;
41 struct request *req;
42
43 /* Transport/driver specific request/reply structs */
44 void *request;
45 void *reply;
46
47 unsigned int request_len;
48 unsigned int reply_len;
49 /*
50 * On entry : reply_len indicates the buffer size allocated for
51 * the reply.
52 *
53 * Upon completion : the message handler must set reply_len
54 * to indicate the size of the reply to be returned to the
55 * caller.
56 */
57
58 /* DMA payloads for the request/response */
59 struct bsg_buffer request_payload;
60 struct bsg_buffer reply_payload;
61
62 void *dd_data; /* Used for driver-specific storage */
63};
64
65void bsg_job_done(struct bsg_job *job, int result,
66 unsigned int reply_payload_rcv_len);
67int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
68 bsg_job_fn *job_fn, int dd_job_size);
69void bsg_request_fn(struct request_queue *q);
70void bsg_remove_queue(struct request_queue *q);
71void bsg_goose_queue(struct request_queue *q);
72
73#endif
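bsg-lib.h is new: it lets transport drivers service bsg requests through a small struct bsg_job abstraction instead of open-coding request parsing. A hedged sketch of the consumer side, using only the functions declared above plus blk_init_queue()/blk_cleanup_queue(); the my_* names are illustrative, and a real LLD would complete jobs asynchronously from its hardware completion path.

/* Hedged sketch of a bsg-lib consumer. */
#include <linux/blkdev.h>
#include <linux/bsg-lib.h>

static char my_bsg_name[] = "my_bsg";	/* illustrative queue name */

static int my_bsg_dispatch(struct bsg_job *job)
{
	/* job->request/job->reply point at the transport-specific
	 * structs; job->request_payload holds the DMA scatterlist.
	 * This sketch completes the job immediately. */
	bsg_job_done(job, 0, job->reply_payload.payload_len);
	return 0;
}

static struct request_queue *my_bsg_attach(struct device *dev)
{
	struct request_queue *q;

	/* bsg_request_fn() (declared above) turns queued requests
	 * into my_bsg_dispatch() calls. */
	q = blk_init_queue(bsg_request_fn, NULL);
	if (!q)
		return NULL;

	if (bsg_setup_queue(dev, q, my_bsg_name, my_bsg_dispatch, 0)) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}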
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 8779405e15a8..c6e7523bf765 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -438,7 +438,6 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
438 struct compat_timespec __user *tsp, 438 struct compat_timespec __user *tsp,
439 const compat_sigset_t __user *sigmask, 439 const compat_sigset_t __user *sigmask,
440 compat_size_t sigsetsize); 440 compat_size_t sigsetsize);
441asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2);
442asmlinkage long compat_sys_signalfd4(int ufd, 441asmlinkage long compat_sys_signalfd4(int ufd,
443 const compat_sigset_t __user *sigmask, 442 const compat_sigset_t __user *sigmask,
444 compat_size_t sigsetsize, int flags); 443 compat_size_t sigsetsize, int flags);
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 0c69ad825b39..3c9c54fd5690 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * connector.h 2 * connector.h
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 178cdb4f1d4a..277f497923a2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -162,10 +162,8 @@ struct inodes_stat_t {
162#define READA RWA_MASK 162#define READA RWA_MASK
163 163
164#define READ_SYNC (READ | REQ_SYNC) 164#define READ_SYNC (READ | REQ_SYNC)
165#define READ_META (READ | REQ_META)
166#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) 165#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
167#define WRITE_ODIRECT (WRITE | REQ_SYNC) 166#define WRITE_ODIRECT (WRITE | REQ_SYNC)
168#define WRITE_META (WRITE | REQ_META)
169#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) 167#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
170#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) 168#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
171#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 169#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
@@ -2318,6 +2316,11 @@ extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*te
2318extern struct inode * iget_locked(struct super_block *, unsigned long); 2316extern struct inode * iget_locked(struct super_block *, unsigned long);
2319extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); 2317extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
2320extern int insert_inode_locked(struct inode *); 2318extern int insert_inode_locked(struct inode *);
2319#ifdef CONFIG_DEBUG_LOCK_ALLOC
2320extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
2321#else
2322static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
2323#endif
2321extern void unlock_new_inode(struct inode *); 2324extern void unlock_new_inode(struct inode *);
2322extern unsigned int get_next_ino(void); 2325extern unsigned int get_next_ino(void);
2323 2326
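Two things change in fs.h: READ_META/WRITE_META go away (filesystems now OR in REQ_META, and optionally the new REQ_PRIO, directly as sketched after the blk_types.h hunk above), and lockdep_annotate_inode_mutex_key() is exported with a no-op stub when lockdep is off. A hedged sketch of the latter for a filesystem path that fills inodes itself; my_fill_inode_from_disk() is a placeholder, and the explicit call is harmless if unlock_new_inode() would have annotated the lock anyway.

/* Hedged sketch: re-key i_mutex once i_mode is known. */
#include <linux/err.h>
#include <linux/fs.h>

static struct inode *my_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		my_fill_inode_from_disk(inode);	/* sets i_mode, i_op, ... */
		/* i_mutex was keyed before i_mode was known; pick the
		 * per-mode lockdep class now. */
		lockdep_annotate_inode_mutex_key(inode);
		unlock_new_inode(inode);
	}
	return inode;
}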
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index d464de53db43..464cff526860 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -47,6 +47,9 @@
47 * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct 47 * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct
48 * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' 48 * fuse_ioctl_iovec' instead of ambiguous 'struct iovec'
49 * - add FUSE_IOCTL_32BIT flag 49 * - add FUSE_IOCTL_32BIT flag
50 *
51 * 7.17
52 * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK
50 */ 53 */
51 54
52#ifndef _LINUX_FUSE_H 55#ifndef _LINUX_FUSE_H
@@ -78,7 +81,7 @@
78#define FUSE_KERNEL_VERSION 7 81#define FUSE_KERNEL_VERSION 7
79 82
80/** Minor version number of this interface */ 83/** Minor version number of this interface */
81#define FUSE_KERNEL_MINOR_VERSION 16 84#define FUSE_KERNEL_MINOR_VERSION 17
82 85
83/** The node ID of the root inode */ 86/** The node ID of the root inode */
84#define FUSE_ROOT_ID 1 87#define FUSE_ROOT_ID 1
@@ -153,8 +156,10 @@ struct fuse_file_lock {
153/** 156/**
154 * INIT request/reply flags 157 * INIT request/reply flags
155 * 158 *
159 * FUSE_POSIX_LOCKS: remote locking for POSIX file locks
156 * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 160 * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
157 * FUSE_DONT_MASK: don't apply umask to file mode on create operations 161 * FUSE_DONT_MASK: don't apply umask to file mode on create operations
162 * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks
158 */ 163 */
159#define FUSE_ASYNC_READ (1 << 0) 164#define FUSE_ASYNC_READ (1 << 0)
160#define FUSE_POSIX_LOCKS (1 << 1) 165#define FUSE_POSIX_LOCKS (1 << 1)
@@ -163,6 +168,7 @@ struct fuse_file_lock {
163#define FUSE_EXPORT_SUPPORT (1 << 4) 168#define FUSE_EXPORT_SUPPORT (1 << 4)
164#define FUSE_BIG_WRITES (1 << 5) 169#define FUSE_BIG_WRITES (1 << 5)
165#define FUSE_DONT_MASK (1 << 6) 170#define FUSE_DONT_MASK (1 << 6)
171#define FUSE_FLOCK_LOCKS (1 << 10)
166 172
167/** 173/**
168 * CUSE INIT request/reply flags 174 * CUSE INIT request/reply flags
@@ -175,6 +181,7 @@ struct fuse_file_lock {
175 * Release flags 181 * Release flags
176 */ 182 */
177#define FUSE_RELEASE_FLUSH (1 << 0) 183#define FUSE_RELEASE_FLUSH (1 << 0)
184#define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1)
178 185
179/** 186/**
180 * Getattr flags 187 * Getattr flags
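Minor version 17 adds flock support: the kernel advertises FUSE_FLOCK_LOCKS during INIT, and a RELEASE may carry FUSE_RELEASE_FLOCK_UNLOCK so the daemon can drop BSD locks held on the file handle. A hedged sketch of the userspace side of that handshake, assuming the usual fuse_init_out/fuse_release_in layouts; the handle_* helpers are hypothetical daemon code.

/* Hedged sketch of the daemon-side flag negotiation. */
#include <linux/fuse.h>

static void handle_init(const struct fuse_init_in *in,
			struct fuse_init_out *out)
{
	out->major = FUSE_KERNEL_VERSION;
	out->minor = FUSE_KERNEL_MINOR_VERSION;	/* 17 after this hunk */
	if (in->flags & FUSE_FLOCK_LOCKS)	/* kernel supports flock */
		out->flags |= FUSE_FLOCK_LOCKS;	/* ...and so does the daemon */
}

static void handle_release(const struct fuse_release_in *in)
{
	if (in->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
		/* drop any BSD locks held via in->fh / in->lock_owner */
	}
}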
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 06d25c189cc5..b80506bdd733 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -63,7 +63,7 @@ static inline u32 hash_32(u32 val, unsigned int bits)
63 return hash >> (32 - bits); 63 return hash >> (32 - bits);
64} 64}
65 65
66static inline unsigned long hash_ptr(void *ptr, unsigned int bits) 66static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
67{ 67{
68 return hash_long((unsigned long)ptr, bits); 68 return hash_long((unsigned long)ptr, bits);
69} 69}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 87a06f345bd2..59517300a315 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -23,6 +23,7 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/wait.h> 25#include <linux/wait.h>
26#include <linux/module.h>
26 27
27#include <asm/irq.h> 28#include <asm/irq.h>
28#include <asm/ptrace.h> 29#include <asm/ptrace.h>
@@ -547,7 +548,15 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
547 return d->msi_desc; 548 return d->msi_desc;
548} 549}
549 550
550int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); 551int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
552 struct module *owner);
553
554static inline int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt,
555 int node)
556{
557 return __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE);
558}
559
551void irq_free_descs(unsigned int irq, unsigned int cnt); 560void irq_free_descs(unsigned int irq, unsigned int cnt);
552int irq_reserve_irqs(unsigned int from, unsigned int cnt); 561int irq_reserve_irqs(unsigned int from, unsigned int cnt);
553 562
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 2d921b35212c..150134ac709a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -66,6 +66,7 @@ struct irq_desc {
66#ifdef CONFIG_PROC_FS 66#ifdef CONFIG_PROC_FS
67 struct proc_dir_entry *dir; 67 struct proc_dir_entry *dir;
68#endif 68#endif
69 struct module *owner;
69 const char *name; 70 const char *name;
70} ____cacheline_internodealigned_in_smp; 71} ____cacheline_internodealigned_in_smp;
71 72
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 66c194e2d9b9..683d69890119 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -64,7 +64,6 @@ struct loop_device {
64 64
65 struct request_queue *lo_queue; 65 struct request_queue *lo_queue;
66 struct gendisk *lo_disk; 66 struct gendisk *lo_disk;
67 struct list_head lo_list;
68}; 67};
69 68
70#endif /* __KERNEL__ */ 69#endif /* __KERNEL__ */
@@ -161,4 +160,8 @@ int loop_unregister_transfer(int number);
161#define LOOP_CHANGE_FD 0x4C06 160#define LOOP_CHANGE_FD 0x4C06
162#define LOOP_SET_CAPACITY 0x4C07 161#define LOOP_SET_CAPACITY 0x4C07
163 162
163/* /dev/loop-control interface */
164#define LOOP_CTL_ADD 0x4C80
165#define LOOP_CTL_REMOVE 0x4C81
166#define LOOP_CTL_GET_FREE 0x4C82
164#endif 167#endif
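The LOOP_CTL_* ioctls back the new /dev/loop-control node (its misc minor, LOOP_CTRL_MINOR, is added in the miscdevice.h hunk below). A small hedged userspace sketch: ask the control node for a free loop device and open it; error handling is minimal.

/* Hedged userspace sketch for the /dev/loop-control interface. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int open_free_loop(void)
{
	char path[32];
	int ctl, nr, fd;

	ctl = open("/dev/loop-control", O_RDWR);
	if (ctl < 0)
		return -1;

	/* Returns the index of the first unused loop device,
	 * creating a new one if none is free. */
	nr = ioctl(ctl, LOOP_CTL_GET_FREE);
	close(ctl);
	if (nr < 0)
		return -1;

	snprintf(path, sizeof(path), "/dev/loop%d", nr);
	fd = open(path, O_RDWR);
	return fd;	/* caller proceeds with LOOP_SET_FD etc. */
}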
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3b535db00a94..343bd7661f2a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -39,16 +39,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
39 struct mem_cgroup *mem_cont, 39 struct mem_cgroup *mem_cont,
40 int active, int file); 40 int active, int file);
41 41
42struct memcg_scanrecord {
43 struct mem_cgroup *mem; /* scanend memory cgroup */
44 struct mem_cgroup *root; /* scan target hierarchy root */
45 int context; /* scanning context (see memcontrol.c) */
46 unsigned long nr_scanned[2]; /* the number of scanned pages */
47 unsigned long nr_rotated[2]; /* the number of rotated pages */
48 unsigned long nr_freed[2]; /* the number of freed pages */
49 unsigned long elapsed; /* nsec of time elapsed while scanning */
50};
51
52#ifdef CONFIG_CGROUP_MEM_RES_CTLR 42#ifdef CONFIG_CGROUP_MEM_RES_CTLR
53/* 43/*
54 * All "charge" functions with gfp_mask should use GFP_KERNEL or 44 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -127,15 +117,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
127extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 117extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
128 struct task_struct *p); 118 struct task_struct *p);
129 119
130extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
131 gfp_t gfp_mask, bool noswap,
132 struct memcg_scanrecord *rec);
133extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
134 gfp_t gfp_mask, bool noswap,
135 struct zone *zone,
136 struct memcg_scanrecord *rec,
137 unsigned long *nr_scanned);
138
139#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 120#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
140extern int do_swap_account; 121extern int do_swap_account;
141#endif 122#endif
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index d12f8d635a81..97cf4f27d647 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
26 struct regulator_init_data *init_data; 26 struct regulator_init_data *init_data;
27}; 27};
28 28
29#define WM8994_CONFIGURE_GPIO 0x8000 29#define WM8994_CONFIGURE_GPIO 0x10000
30 30
31#define WM8994_DRC_REGS 5 31#define WM8994_DRC_REGS 5
32#define WM8994_EQ_REGS 20 32#define WM8994_EQ_REGS 20
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 18fd13028ba1..c309b1ecdc1c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -40,6 +40,7 @@
40#define BTRFS_MINOR 234 40#define BTRFS_MINOR 234
41#define AUTOFS_MINOR 235 41#define AUTOFS_MINOR 235
42#define MAPPER_CTRL_MINOR 236 42#define MAPPER_CTRL_MINOR 236
43#define LOOP_CTRL_MINOR 237
43#define MISC_DYNAMIC_MINOR 255 44#define MISC_DYNAMIC_MINOR 255
44 45
45struct device; 46struct device;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fd599f4bb846..7438071b44aa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -685,7 +685,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
685 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 685 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
686} 686}
687 687
688static inline unsigned long page_to_section(struct page *page) 688static inline unsigned long page_to_section(const struct page *page)
689{ 689{
690 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 690 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
691} 691}
@@ -720,7 +720,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
720 720
721static __always_inline void *lowmem_page_address(const struct page *page) 721static __always_inline void *lowmem_page_address(const struct page *page)
722{ 722{
723 return __va(PFN_PHYS(page_to_pfn((struct page *)page))); 723 return __va(PFN_PHYS(page_to_pfn(page)));
724} 724}
725 725
726#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) 726#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -737,7 +737,7 @@ static __always_inline void *lowmem_page_address(const struct page *page)
737#endif 737#endif
738 738
739#if defined(HASHED_PAGE_VIRTUAL) 739#if defined(HASHED_PAGE_VIRTUAL)
740void *page_address(struct page *page); 740void *page_address(const struct page *page);
741void set_page_address(struct page *page, void *virtual); 741void set_page_address(struct page *page, void *virtual);
742void page_address_init(void); 742void page_address_init(void);
743#endif 743#endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0f83858147a6..1d09562ccf73 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -56,8 +56,6 @@ struct mmc_ios {
56#define MMC_TIMING_UHS_SDR104 4 56#define MMC_TIMING_UHS_SDR104 4
57#define MMC_TIMING_UHS_DDR50 5 57#define MMC_TIMING_UHS_DDR50 5
58 58
59 unsigned char ddr; /* dual data rate used */
60
61#define MMC_SDR_MODE 0 59#define MMC_SDR_MODE 0
62#define MMC_1_2V_DDR_MODE 1 60#define MMC_1_2V_DDR_MODE 1
63#define MMC_1_8V_DDR_MODE 2 61#define MMC_1_8V_DDR_MODE 2
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f27893b3b724..8c230cbcbb48 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -251,7 +251,8 @@ struct pci_dev {
251 u8 revision; /* PCI revision, low byte of class word */ 251 u8 revision; /* PCI revision, low byte of class word */
252 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 252 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
253 u8 pcie_cap; /* PCI-E capability offset */ 253 u8 pcie_cap; /* PCI-E capability offset */
254 u8 pcie_type; /* PCI-E device/port type */ 254 u8 pcie_type:4; /* PCI-E device/port type */
255 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
255 u8 rom_base_reg; /* which config register controls the ROM */ 256 u8 rom_base_reg; /* which config register controls the ROM */
256 u8 pin; /* which interrupt pin this device uses */ 257 u8 pin; /* which interrupt pin this device uses */
257 258
@@ -617,6 +618,16 @@ struct pci_driver {
617/* these external functions are only available when PCI support is enabled */ 618/* these external functions are only available when PCI support is enabled */
618#ifdef CONFIG_PCI 619#ifdef CONFIG_PCI
619 620
621extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
622
623enum pcie_bus_config_types {
624 PCIE_BUS_PERFORMANCE,
625 PCIE_BUS_SAFE,
626 PCIE_BUS_PEER2PEER,
627};
628
629extern enum pcie_bus_config_types pcie_bus_config;
630
620extern struct bus_type pci_bus_type; 631extern struct bus_type pci_bus_type;
621 632
622/* Do NOT directly access these two variables, unless you are arch specific pci 633/* Do NOT directly access these two variables, unless you are arch specific pci
@@ -796,10 +807,13 @@ int pcix_get_mmrbc(struct pci_dev *dev);
796int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 807int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
797int pcie_get_readrq(struct pci_dev *dev); 808int pcie_get_readrq(struct pci_dev *dev);
798int pcie_set_readrq(struct pci_dev *dev, int rq); 809int pcie_set_readrq(struct pci_dev *dev, int rq);
810int pcie_get_mps(struct pci_dev *dev);
811int pcie_set_mps(struct pci_dev *dev, int mps);
799int __pci_reset_function(struct pci_dev *dev); 812int __pci_reset_function(struct pci_dev *dev);
800int pci_reset_function(struct pci_dev *dev); 813int pci_reset_function(struct pci_dev *dev);
801void pci_update_resource(struct pci_dev *dev, int resno); 814void pci_update_resource(struct pci_dev *dev, int resno);
802int __must_check pci_assign_resource(struct pci_dev *dev, int i); 815int __must_check pci_assign_resource(struct pci_dev *dev, int i);
816int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
803int pci_select_bars(struct pci_dev *dev, unsigned long flags); 817int pci_select_bars(struct pci_dev *dev, unsigned long flags);
804 818
805/* ROM control related routines */ 819/* ROM control related routines */
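Besides the pcie_bus_configure_settings()/pcie_bus_config plumbing and the new pcie_mpss field, drivers gain pcie_get_mps()/pcie_set_mps() alongside the existing readrq helpers. A hedged driver sketch that merely keeps the read request size within the negotiated Max Payload Size; the 256-byte floor is illustrative, not a requirement.

/* Hedged sketch: clamp MRRS to the device's current MPS (bytes). */
#include <linux/kernel.h>
#include <linux/pci.h>

static int my_tune_pcie(struct pci_dev *pdev)
{
	int mps = pcie_get_mps(pdev);
	int rq  = pcie_get_readrq(pdev);

	if (mps < 0 || rq < 0)
		return -EINVAL;

	if (rq > mps)
		rq = max(mps, 256);	/* illustrative floor */

	return pcie_set_readrq(pdev, rq);
}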
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 245bafdafd5e..c816075c01ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
944 944
945extern int perf_num_counters(void); 945extern int perf_num_counters(void);
946extern const char *perf_pmu_name(void); 946extern const char *perf_pmu_name(void);
947extern void __perf_event_task_sched_in(struct task_struct *task); 947extern void __perf_event_task_sched_in(struct task_struct *prev,
948extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 948 struct task_struct *task);
949extern void __perf_event_task_sched_out(struct task_struct *prev,
950 struct task_struct *next);
949extern int perf_event_init_task(struct task_struct *child); 951extern int perf_event_init_task(struct task_struct *child);
950extern void perf_event_exit_task(struct task_struct *child); 952extern void perf_event_exit_task(struct task_struct *child);
951extern void perf_event_free_task(struct task_struct *task); 953extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1059 1061
1060extern struct jump_label_key perf_sched_events; 1062extern struct jump_label_key perf_sched_events;
1061 1063
1062static inline void perf_event_task_sched_in(struct task_struct *task) 1064static inline void perf_event_task_sched_in(struct task_struct *prev,
1065 struct task_struct *task)
1063{ 1066{
1064 if (static_branch(&perf_sched_events)) 1067 if (static_branch(&perf_sched_events))
1065 __perf_event_task_sched_in(task); 1068 __perf_event_task_sched_in(prev, task);
1066} 1069}
1067 1070
1068static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) 1071static inline void perf_event_task_sched_out(struct task_struct *prev,
1072 struct task_struct *next)
1069{ 1073{
1070 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1074 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1071 1075
1072 __perf_event_task_sched_out(task, next); 1076 if (static_branch(&perf_sched_events))
1077 __perf_event_task_sched_out(prev, next);
1073} 1078}
1074 1079
1075extern void perf_event_mmap(struct vm_area_struct *vma); 1080extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
1139extern void perf_event_task_tick(void); 1144extern void perf_event_task_tick(void);
1140#else 1145#else
1141static inline void 1146static inline void
1142perf_event_task_sched_in(struct task_struct *task) { } 1147perf_event_task_sched_in(struct task_struct *prev,
1148 struct task_struct *task) { }
1143static inline void 1149static inline void
1144perf_event_task_sched_out(struct task_struct *task, 1150perf_event_task_sched_out(struct task_struct *prev,
1145 struct task_struct *next) { } 1151 struct task_struct *next) { }
1146static inline int perf_event_init_task(struct task_struct *child) { return 0; } 1152static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1147static inline void perf_event_exit_task(struct task_struct *child) { } 1153static inline void perf_event_exit_task(struct task_struct *child) { }
1148static inline void perf_event_free_task(struct task_struct *task) { } 1154static inline void perf_event_free_task(struct task_struct *task) { }
diff --git a/include/linux/personality.h b/include/linux/personality.h
index eec3bae164d4..8fc7dd1a57ff 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -22,6 +22,7 @@ extern int __set_personality(unsigned int);
22 * These occupy the top three bytes. 22 * These occupy the top three bytes.
23 */ 23 */
24enum { 24enum {
25 UNAME26 = 0x0020000,
25 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ 26 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
26 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors 27 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
27 * (signal handling) 28 * (signal handling)
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 21097cb086fe..f9ec1736a116 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -72,8 +72,6 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
72extern void pm_genpd_init(struct generic_pm_domain *genpd, 72extern void pm_genpd_init(struct generic_pm_domain *genpd,
73 struct dev_power_governor *gov, bool is_off); 73 struct dev_power_governor *gov, bool is_off);
74extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 74extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
75extern void pm_genpd_poweroff_unused(void);
76extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
77#else 75#else
78static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 76static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
79 struct device *dev) 77 struct device *dev)
@@ -101,8 +99,14 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
101{ 99{
102 return -ENOSYS; 100 return -ENOSYS;
103} 101}
104static inline void pm_genpd_poweroff_unused(void) {} 102#endif
103
104#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
105extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
106extern void pm_genpd_poweroff_unused(void);
107#else
105static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} 108static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
109static inline void pm_genpd_poweroff_unused(void) {}
106#endif 110#endif
107 111
108#endif /* _LINUX_PM_DOMAIN_H */ 112#endif /* _LINUX_PM_DOMAIN_H */
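The genpd_queue_power_off_work()/pm_genpd_poweroff_unused() declarations move under CONFIG_PM_GENERIC_DOMAINS_RUNTIME with empty stubs otherwise, so platform code can call them unconditionally. A hedged sketch of the usual pattern; my_domain is a made-up domain with no devices or callbacks wired up.

/* Hedged sketch of platform-side generic PM domain setup. */
#include <linux/init.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain my_domain;	/* illustrative only */

static int __init my_pm_domain_init(void)
{
	/* No governor; the domain starts powered on (is_off = false). */
	pm_genpd_init(&my_domain, NULL, false);
	return 0;
}
core_initcall(my_pm_domain_init);

static int __init my_pm_domain_late(void)
{
	/* Power down whatever no driver claimed; compiles to a stub
	 * without CONFIG_PM_GENERIC_DOMAINS_RUNTIME. */
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(my_pm_domain_late);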
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 5e3e25a3c9c3..63d2df43e61a 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -14,6 +14,7 @@ struct platform_pwm_backlight_data {
14 unsigned int pwm_period_ns; 14 unsigned int pwm_period_ns;
15 int (*init)(struct device *dev); 15 int (*init)(struct device *dev);
16 int (*notify)(struct device *dev, int brightness); 16 int (*notify)(struct device *dev, int brightness);
17 void (*notify_after)(struct device *dev, int brightness);
17 void (*exit)(struct device *dev); 18 void (*exit)(struct device *dev);
18 int (*check_fb)(struct device *dev, struct fb_info *info); 19 int (*check_fb)(struct device *dev, struct fb_info *info);
19}; 20};
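notify_after() mirrors notify() but runs after the new duty cycle has been programmed, so a board can gate external circuitry only once the PWM output has actually changed. A hedged board-file sketch showing just the callback wiring; the remaining platform_pwm_backlight_data fields and the values used are illustrative.

/* Hedged sketch of board platform data using the new hook. */
#include <linux/pwm_backlight.h>

static int my_bl_notify(struct device *dev, int brightness)
{
	/* Runs before the PWM is updated; may adjust the value. */
	return brightness;
}

static void my_bl_notify_after(struct device *dev, int brightness)
{
	/* Runs after the PWM is updated, e.g. switch an external
	 * boost converter off once brightness has reached zero. */
}

static struct platform_pwm_backlight_data my_backlight_pdata = {
	.pwm_period_ns	= 1000000,	/* 1 kHz period, illustrative */
	.notify		= my_bl_notify,
	.notify_after	= my_bl_notify_after,
};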
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 26f6ea4444e3..b47771aa5718 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
123 const char *supply; 123 const char *supply;
124 struct regulator *consumer; 124 struct regulator *consumer;
125 125
126 /* Internal use */ 126 /* private: Internal use */
127 int ret; 127 int ret;
128}; 128};
129 129
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 9026b30238f3..218168a2b5e9 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -36,12 +36,12 @@
36#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ 36#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */
37#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ 37#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */
38#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ 38#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */
39#define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */ 39#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */
40#define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */ 40#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */
41#define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */ 41#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */
42#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ 42#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */
43#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ 43#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */
44#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ 44#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */
45#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ 45#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
46#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ 46#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
47#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ 47#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
@@ -102,7 +102,7 @@
102#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ 102#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */
103#define RIO_RT_MAX_DESTID 0x0000ffff 103#define RIO_RT_MAX_DESTID 0x0000ffff
104 104
105#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ 105#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */
106#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ 106#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */
107#define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ 107#define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */
108#define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ 108#define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */
@@ -128,8 +128,8 @@
128#define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ 128#define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */
129#define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ 129#define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */
130 130
131#define RIO_WRITE_PORT_CSR 0x44 /* [I] Write Port CSR */ 131#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */
132#define RIO_DOORBELL_CSR 0x44 /* [II] Doorbell CSR */ 132#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */
133#define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ 133#define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */
134#define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ 134#define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */
135#define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ 135#define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b27ebea25660..93f4d035076b 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -97,6 +97,9 @@ struct rtc_pll_info {
97#define RTC_AF 0x20 /* Alarm interrupt */ 97#define RTC_AF 0x20 /* Alarm interrupt */
98#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ 98#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
99 99
100
101#define RTC_MAX_FREQ 8192
102
100#ifdef __KERNEL__ 103#ifdef __KERNEL__
101 104
102#include <linux/types.h> 105#include <linux/types.h>
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8d426281259d..ac6b05a325cc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -528,6 +528,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
528extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 528extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
529 529
530extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 530extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
531extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
531extern struct sk_buff *skb_clone(struct sk_buff *skb, 532extern struct sk_buff *skb_clone(struct sk_buff *skb,
532 gfp_t priority); 533 gfp_t priority);
533extern struct sk_buff *skb_copy(const struct sk_buff *skb, 534extern struct sk_buff *skb_copy(const struct sk_buff *skb,
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 12b2b18e50c1..e16557a357e5 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -231,6 +231,8 @@ enum
231 LINUX_MIB_TCPDEFERACCEPTDROP, 231 LINUX_MIB_TCPDEFERACCEPTDROP,
232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
234 LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
235 LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
234 __LINUX_MIB_MAX 236 __LINUX_MIB_MAX
235}; 237};
236 238
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 14d62490922e..c71f84bb62ec 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -252,6 +252,12 @@ static inline void lru_cache_add_file(struct page *page)
252extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 252extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
253 gfp_t gfp_mask, nodemask_t *mask); 253 gfp_t gfp_mask, nodemask_t *mask);
254extern int __isolate_lru_page(struct page *page, int mode, int file); 254extern int __isolate_lru_page(struct page *page, int mode, int file);
255extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
256 gfp_t gfp_mask, bool noswap);
257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
258 gfp_t gfp_mask, bool noswap,
259 struct zone *zone,
260 unsigned long *nr_scanned);
255extern unsigned long shrink_all_memory(unsigned long nr_pages); 261extern unsigned long shrink_all_memory(unsigned long nr_pages);
256extern int vm_swappiness; 262extern int vm_swappiness;
257extern int remove_mapping(struct address_space *mapping, struct page *page); 263extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 8c03b98df5f9..1ff0ec2a5e8d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -702,9 +702,6 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
702asmlinkage long sys_sysinfo(struct sysinfo __user *info); 702asmlinkage long sys_sysinfo(struct sysinfo __user *info);
703asmlinkage long sys_sysfs(int option, 703asmlinkage long sys_sysfs(int option,
704 unsigned long arg1, unsigned long arg2); 704 unsigned long arg1, unsigned long arg2);
705asmlinkage long sys_nfsservctl(int cmd,
706 struct nfsctl_arg __user *arg,
707 void __user *res);
708asmlinkage long sys_syslog(int type, char __user *buf, int len); 705asmlinkage long sys_syslog(int type, char __user *buf, int len);
709asmlinkage long sys_uselib(const char __user *library); 706asmlinkage long sys_uselib(const char __user *library);
710asmlinkage long sys_ni_syscall(void); 707asmlinkage long sys_ni_syscall(void);
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index b004e557caa9..2ef4385da6bf 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -410,7 +410,28 @@ struct gps_event_hdr {
410 u16 plen; 410 u16 plen;
411} __attribute__ ((packed)); 411} __attribute__ ((packed));
412 412
413/* platform data */ 413/**
414 * struct ti_st_plat_data - platform data shared between ST driver and
415 * platform specific board file which adds the ST device.
416 * @nshutdown_gpio: Host's GPIO line to which chip's BT_EN is connected.
417 * @dev_name: The UART/TTY name to which chip is interfaced. (eg: /dev/ttyS1)
418 * @flow_cntrl: Should always be 1, since UART's CTS/RTS is used for PM
419 * purposes.
420 * @baud_rate: The baud rate supported by the Host UART controller, this will
421 * be shared across with the chip via a HCI VS command from User-Space Init
422 * Mgr application.
423 * @suspend:
424 * @resume: legacy PM routines hooked to platform specific board file, so as
425 * to take chip-host interface specific action.
426 * @chip_enable:
427 * @chip_disable: Platform/Interface specific mux mode setting, GPIO
428 * configuring, Host side PM disabling etc.. can be done here.
429 * @chip_asleep:
430 * @chip_awake: Chip specific deep sleep state is communicated to the host's
431 * board-xx.c so that it can take actions such as cutting UART clocks while
432 * the chip is asleep or running the host faster while the chip is awake.
433 *
434 */
414struct ti_st_plat_data { 435struct ti_st_plat_data {
415 long nshutdown_gpio; 436 long nshutdown_gpio;
416 unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */ 437 unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */
@@ -418,6 +439,10 @@ struct ti_st_plat_data {
418 unsigned long baud_rate; 439 unsigned long baud_rate;
419 int (*suspend)(struct platform_device *, pm_message_t); 440 int (*suspend)(struct platform_device *, pm_message_t);
420 int (*resume)(struct platform_device *); 441 int (*resume)(struct platform_device *);
442 int (*chip_enable) (struct kim_data_s *);
443 int (*chip_disable) (struct kim_data_s *);
444 int (*chip_asleep) (struct kim_data_s *);
445 int (*chip_awake) (struct kim_data_s *);
421}; 446};
422 447
423#endif /* TI_WILINK_ST_H */ 448#endif /* TI_WILINK_ST_H */
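The kernel-doc block added above documents the platform data, including the four new chip_* hooks. A hedged board-file sketch that fills it in; the GPIO number, TTY name, baud rate and the my_* hooks are illustrative only.

/* Hedged sketch of board data for the TI shared transport driver. */
#include <linux/ti_wilink_st.h>

static int my_chip_enable(struct kim_data_s *kim_data)
{
	/* mux the UART pins, deassert chip reset, etc. */
	return 0;
}

static int my_chip_disable(struct kim_data_s *kim_data)
{
	/* undo whatever my_chip_enable() set up */
	return 0;
}

static struct ti_st_plat_data my_wilink_pdata = {
	.nshutdown_gpio	= 137,			/* BT_EN GPIO, illustrative */
	.dev_name	= "/dev/ttyO1",		/* host UART, illustrative */
	.flow_cntrl	= 1,			/* CTS/RTS, as documented above */
	.baud_rate	= 3000000,
	.chip_enable	= my_chip_enable,
	.chip_disable	= my_chip_disable,
};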
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 44bc0c5617e1..5f2ede82b3d6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -421,6 +421,8 @@ extern void tty_driver_flush_buffer(struct tty_struct *tty);
421extern void tty_throttle(struct tty_struct *tty); 421extern void tty_throttle(struct tty_struct *tty);
422extern void tty_unthrottle(struct tty_struct *tty); 422extern void tty_unthrottle(struct tty_struct *tty);
423extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); 423extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
424extern void tty_driver_remove_tty(struct tty_driver *driver,
425 struct tty_struct *tty);
424extern void tty_shutdown(struct tty_struct *tty); 426extern void tty_shutdown(struct tty_struct *tty);
425extern void tty_free_termios(struct tty_struct *tty); 427extern void tty_free_termios(struct tty_struct *tty);
426extern int is_current_pgrp_orphaned(void); 428extern int is_current_pgrp_orphaned(void);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 9deeac855240..ecdaeb98b293 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -47,6 +47,9 @@
47 * 47 *
48 * This routine is called synchronously when a particular tty device 48 * This routine is called synchronously when a particular tty device
49 * is closed for the last time freeing up the resources. 49 * is closed for the last time freeing up the resources.
50 * Note that tty_shutdown() is not called if ops->shutdown is defined.
51 * This means one is responsible to take care of calling ops->remove (e.g.
52 * via tty_driver_remove_tty) and releasing tty->termios.
50 * 53 *
51 * 54 *
52 * void (*cleanup)(struct tty_struct * tty); 55 * void (*cleanup)(struct tty_struct * tty);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f1bfa12ea246..2b8963ff0f35 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -12,15 +12,6 @@
12 * 12 *
13 * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) 13 * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
14 * 14 *
15 * The 1/16 region above the global dirty limit will be put to maximum pauses:
16 *
17 * (limit, limit + limit/DIRTY_MAXPAUSE_AREA)
18 *
19 * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
20 * to loops:
21 *
22 * (limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
23 *
24 * Further beyond, all dirtier tasks will enter a loop waiting (possibly long 15 * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
25 * time) for the dirty pages to drop, unless written enough pages. 16 * time) for the dirty pages to drop, unless written enough pages.
26 * 17 *
@@ -31,8 +22,6 @@
31 */ 22 */
32#define DIRTY_SCOPE 8 23#define DIRTY_SCOPE 8
33#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) 24#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
34#define DIRTY_MAXPAUSE_AREA 16
35#define DIRTY_PASSGOOD_AREA 8
36 25
37/* 26/*
38 * 4MB minimal write chunk size 27 * 4MB minimal write chunk size
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 342dcf13d039..a6326ef8ade6 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -288,6 +288,35 @@ enum p9_perm_t {
288 P9_DMSETVTX = 0x00010000, 288 P9_DMSETVTX = 0x00010000,
289}; 289};
290 290
291/* 9p2000.L open flags */
292#define P9_DOTL_RDONLY 00000000
293#define P9_DOTL_WRONLY 00000001
294#define P9_DOTL_RDWR 00000002
295#define P9_DOTL_NOACCESS 00000003
296#define P9_DOTL_CREATE 00000100
297#define P9_DOTL_EXCL 00000200
298#define P9_DOTL_NOCTTY 00000400
299#define P9_DOTL_TRUNC 00001000
300#define P9_DOTL_APPEND 00002000
301#define P9_DOTL_NONBLOCK 00004000
302#define P9_DOTL_DSYNC 00010000
303#define P9_DOTL_FASYNC 00020000
304#define P9_DOTL_DIRECT 00040000
305#define P9_DOTL_LARGEFILE 00100000
306#define P9_DOTL_DIRECTORY 00200000
307#define P9_DOTL_NOFOLLOW 00400000
308#define P9_DOTL_NOATIME 01000000
309#define P9_DOTL_CLOEXEC 02000000
310#define P9_DOTL_SYNC 04000000
311
312/* 9p2000.L at flags */
313#define P9_DOTL_AT_REMOVEDIR 0x200
314
315/* 9p2000.L lock type */
316#define P9_LOCK_TYPE_RDLCK 0
317#define P9_LOCK_TYPE_WRLCK 1
318#define P9_LOCK_TYPE_UNLCK 2
319
291/** 320/**
292 * enum p9_qid_t - QID types 321 * enum p9_qid_t - QID types
293 * @P9_QTDIR: directory 322 * @P9_QTDIR: directory
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b42136a61f3a..f4b19b223941 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1942,6 +1942,9 @@ struct wiphy {
1942 * you need use set_wiphy_dev() (see below) */ 1942 * you need use set_wiphy_dev() (see below) */
1943 struct device dev; 1943 struct device dev;
1944 1944
1945 /* protects ->resume, ->suspend sysfs callbacks against unregister hw */
1946 bool registered;
1947
1945 /* dir in debugfs: ieee80211/<wiphyname> */ 1948 /* dir in debugfs: ieee80211/<wiphyname> */
1946 struct dentry *debugfsdir; 1949 struct dentry *debugfsdir;
1947 1950
diff --git a/include/net/flow.h b/include/net/flow.h
index 78113daadd63..a09447749e2d 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -7,6 +7,7 @@
7#ifndef _NET_FLOW_H 7#ifndef _NET_FLOW_H
8#define _NET_FLOW_H 8#define _NET_FLOW_H
9 9
10#include <linux/socket.h>
10#include <linux/in6.h> 11#include <linux/in6.h>
11#include <linux/atomic.h> 12#include <linux/atomic.h>
12 13
@@ -68,7 +69,7 @@ struct flowi4 {
68#define fl4_ipsec_spi uli.spi 69#define fl4_ipsec_spi uli.spi
69#define fl4_mh_type uli.mht.type 70#define fl4_mh_type uli.mht.type
70#define fl4_gre_key uli.gre_key 71#define fl4_gre_key uli.gre_key
71}; 72} __attribute__((__aligned__(BITS_PER_LONG/8)));
72 73
73static inline void flowi4_init_output(struct flowi4 *fl4, int oif, 74static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
74 __u32 mark, __u8 tos, __u8 scope, 75 __u32 mark, __u8 tos, __u8 scope,
@@ -112,7 +113,7 @@ struct flowi6 {
112#define fl6_ipsec_spi uli.spi 113#define fl6_ipsec_spi uli.spi
113#define fl6_mh_type uli.mht.type 114#define fl6_mh_type uli.mht.type
114#define fl6_gre_key uli.gre_key 115#define fl6_gre_key uli.gre_key
115}; 116} __attribute__((__aligned__(BITS_PER_LONG/8)));
116 117
117struct flowidn { 118struct flowidn {
118 struct flowi_common __fl_common; 119 struct flowi_common __fl_common;
@@ -127,7 +128,7 @@ struct flowidn {
127 union flowi_uli uli; 128 union flowi_uli uli;
128#define fld_sport uli.ports.sport 129#define fld_sport uli.ports.sport
129#define fld_dport uli.ports.dport 130#define fld_dport uli.ports.dport
130}; 131} __attribute__((__aligned__(BITS_PER_LONG/8)));
131 132
132struct flowi { 133struct flowi {
133 union { 134 union {
@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
161 return container_of(fldn, struct flowi, u.dn); 162 return container_of(fldn, struct flowi, u.dn);
162} 163}
163 164
165typedef unsigned long flow_compare_t;
166
167static inline size_t flow_key_size(u16 family)
168{
169 switch (family) {
170 case AF_INET:
171 BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
172 return sizeof(struct flowi4) / sizeof(flow_compare_t);
173 case AF_INET6:
174 BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
175 return sizeof(struct flowi6) / sizeof(flow_compare_t);
176 case AF_DECnet:
177 BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
178 return sizeof(struct flowidn) / sizeof(flow_compare_t);
179 }
180 return 0;
181}
182
164#define FLOW_DIR_IN 0 183#define FLOW_DIR_IN 0
165#define FLOW_DIR_OUT 1 184#define FLOW_DIR_OUT 1
166#define FLOW_DIR_FWD 2 185#define FLOW_DIR_FWD 2
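The new flow_key_size() helper above returns the key length in flow_compare_t (unsigned long) words, and the flowi structures gain an aligned attribute so their size is an exact multiple of that word size; a lookup can then compare keys one word at a time. A minimal stand-alone sketch of the same idea, using a hypothetical demo_key struct rather than the real flowi4/flowi6 layout:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long flow_compare_t;

    /* Hypothetical key, aligned like flowi4 so its size is an exact
     * multiple of sizeof(flow_compare_t). */
    struct demo_key {
        unsigned int saddr;
        unsigned int daddr;
        unsigned short sport;
        unsigned short dport;
    } __attribute__((__aligned__(sizeof(flow_compare_t))));

    /* Word-wise comparison, as the flow cache can do once the key size
     * is known to be a whole number of words. */
    static int keys_equal(const struct demo_key *a, const struct demo_key *b)
    {
        const flow_compare_t *p = (const flow_compare_t *)a;
        const flow_compare_t *q = (const flow_compare_t *)b;
        size_t i, words = sizeof(*a) / sizeof(flow_compare_t);

        for (i = 0; i < words; i++)
            if (p[i] != q[i])
                return 0;
        return 1;
    }

    int main(void)
    {
        struct demo_key a, b;

        memset(&a, 0, sizeof(a));       /* zero padding before word compare */
        memset(&b, 0, sizeof(b));
        a.saddr = b.saddr = 0x0a000001;
        a.dport = b.dport = 80;
        printf("equal: %d, words: %zu\n", keys_equal(&a, &b),
               sizeof(a) / sizeof(flow_compare_t));
        return 0;
    }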
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 99e6e19b57c2..4c0766e201e3 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
96 */ 96 */
97struct listen_sock { 97struct listen_sock {
98 u8 max_qlen_log; 98 u8 max_qlen_log;
99 /* 3 bytes hole, try to use */ 99 u8 synflood_warned;
100 /* 2 bytes hole, try to use */
100 int qlen; 101 int qlen;
101 int qlen_young; 102 int qlen_young;
102 int clock_hand; 103 int clock_hand;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6506458ccd33..712b3bebeda7 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -109,6 +109,7 @@ typedef enum {
109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */
110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
111 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ 111 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
112 SCTP_CMD_SET_ASOC, /* Restore association context */
112 SCTP_CMD_LAST 113 SCTP_CMD_LAST
113} sctp_verb_t; 114} sctp_verb_t;
114 115
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d6ca00072cdf..f357befaaa01 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
431extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; 431extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
432extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 432extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
433 struct ip_options *opt); 433 struct ip_options *opt);
434#ifdef CONFIG_SYN_COOKIES
434extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 435extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
435 __u16 *mss); 436 __u16 *mss);
437#else
438static inline __u32 cookie_v4_init_sequence(struct sock *sk,
439 struct sk_buff *skb,
440 __u16 *mss)
441{
442 return 0;
443}
444#endif
436 445
437extern __u32 cookie_init_timestamp(struct request_sock *req); 446extern __u32 cookie_init_timestamp(struct request_sock *req);
438extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *); 447extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
439 448
440/* From net/ipv6/syncookies.c */ 449/* From net/ipv6/syncookies.c */
441extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 450extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
451#ifdef CONFIG_SYN_COOKIES
442extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, 452extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
443 __u16 *mss); 453 __u16 *mss);
444 454#else
455static inline __u32 cookie_v6_init_sequence(struct sock *sk,
456 struct sk_buff *skb,
457 __u16 *mss)
458{
459 return 0;
460}
461#endif
445/* tcp_output.c */ 462/* tcp_output.c */
446 463
447extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 464extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
@@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);
460extern void tcp_send_fin(struct sock *sk); 477extern void tcp_send_fin(struct sock *sk);
461extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 478extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
462extern int tcp_send_synack(struct sock *); 479extern int tcp_send_synack(struct sock *);
480extern int tcp_syn_flood_action(struct sock *sk,
481 const struct sk_buff *skb,
482 const char *proto);
463extern void tcp_push_one(struct sock *, unsigned int mss_now); 483extern void tcp_push_one(struct sock *, unsigned int mss_now);
464extern void tcp_send_ack(struct sock *sk); 484extern void tcp_send_ack(struct sock *sk);
465extern void tcp_send_delayed_ack(struct sock *sk); 485extern void tcp_send_delayed_ack(struct sock *sk);
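The tcp.h hunk wraps cookie_v4_init_sequence()/cookie_v6_init_sequence() in CONFIG_SYN_COOKIES and supplies static inline stubs that return 0 when the option is off, so callers need no #ifdefs of their own. A tiny stand-alone illustration of that config-stub pattern; the macro and function names below are made up, not kernel symbols:

    #include <stdio.h>

    /* #define CONFIG_DEMO_FEATURE 1 */     /* toggle at build time */

    #ifdef CONFIG_DEMO_FEATURE
    static unsigned int demo_init_sequence(unsigned int seed)
    {
        return seed * 2654435761u;           /* "real" implementation */
    }
    #else
    static inline unsigned int demo_init_sequence(unsigned int seed)
    {
        (void)seed;
        return 0;                            /* stub: feature compiled out */
    }
    #endif

    int main(void)
    {
        /* The caller stays #ifdef-free either way. */
        printf("%u\n", demo_init_sequence(42));
        return 0;
    }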
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 5271a741c3a3..498433dd067d 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk,
39 struct sk_buff *skb); 39 struct sk_buff *skb);
40 40
41extern int datagram_send_ctl(struct net *net, 41extern int datagram_send_ctl(struct net *net,
42 struct sock *sk,
42 struct msghdr *msg, 43 struct msghdr *msg,
43 struct flowi6 *fl6, 44 struct flowi6 *fl6,
44 struct ipv6_txoptions *opt, 45 struct ipv6_txoptions *opt,
diff --git a/include/sound/tlv320aic3x.h b/include/sound/tlv320aic3x.h
index 99e0308bf2c2..ffd9bc793105 100644
--- a/include/sound/tlv320aic3x.h
+++ b/include/sound/tlv320aic3x.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Platform data for Texas Instruments TLV320AIC3x codec 2 * Platform data for Texas Instruments TLV320AIC3x codec
3 * 3 *
4 * Author: Jarkko Nikula <jhnikula@gmail.com> 4 * Author: Jarkko Nikula <jarkko.nikula@bitmer.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index 2de8fe907596..126c675f4f14 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -27,6 +27,12 @@ struct target_core_fabric_ops {
27 int (*tpg_check_demo_mode_cache)(struct se_portal_group *); 27 int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
28 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); 28 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
29 int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); 29 int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
30 /*
31 * Optionally used by fabrics to allow demo-mode login, but not
32 * expose any TPG LUNs, and return 'not connected' in standard
33 * inquiry response
34 */
35 int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
30 struct se_node_acl *(*tpg_alloc_fabric_acl)( 36 struct se_node_acl *(*tpg_alloc_fabric_acl)(
31 struct se_portal_group *); 37 struct se_portal_group *);
32 void (*tpg_release_fabric_acl)(struct se_portal_group *, 38 void (*tpg_release_fabric_acl)(struct se_portal_group *,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index bf366547da25..05c5e61f0a7c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -8,6 +8,8 @@
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
10 10
11#define RWBS_LEN 8
12
11DECLARE_EVENT_CLASS(block_rq_with_error, 13DECLARE_EVENT_CLASS(block_rq_with_error,
12 14
13 TP_PROTO(struct request_queue *q, struct request *rq), 15 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -19,7 +21,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
19 __field( sector_t, sector ) 21 __field( sector_t, sector )
20 __field( unsigned int, nr_sector ) 22 __field( unsigned int, nr_sector )
21 __field( int, errors ) 23 __field( int, errors )
22 __array( char, rwbs, 6 ) 24 __array( char, rwbs, RWBS_LEN )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) 25 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ), 26 ),
25 27
@@ -104,7 +106,7 @@ DECLARE_EVENT_CLASS(block_rq,
104 __field( sector_t, sector ) 106 __field( sector_t, sector )
105 __field( unsigned int, nr_sector ) 107 __field( unsigned int, nr_sector )
106 __field( unsigned int, bytes ) 108 __field( unsigned int, bytes )
107 __array( char, rwbs, 6 ) 109 __array( char, rwbs, RWBS_LEN )
108 __array( char, comm, TASK_COMM_LEN ) 110 __array( char, comm, TASK_COMM_LEN )
109 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) 111 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
110 ), 112 ),
@@ -183,7 +185,7 @@ TRACE_EVENT(block_bio_bounce,
183 __field( dev_t, dev ) 185 __field( dev_t, dev )
184 __field( sector_t, sector ) 186 __field( sector_t, sector )
185 __field( unsigned int, nr_sector ) 187 __field( unsigned int, nr_sector )
186 __array( char, rwbs, 6 ) 188 __array( char, rwbs, RWBS_LEN )
187 __array( char, comm, TASK_COMM_LEN ) 189 __array( char, comm, TASK_COMM_LEN )
188 ), 190 ),
189 191
@@ -222,7 +224,7 @@ TRACE_EVENT(block_bio_complete,
222 __field( sector_t, sector ) 224 __field( sector_t, sector )
223 __field( unsigned, nr_sector ) 225 __field( unsigned, nr_sector )
224 __field( int, error ) 226 __field( int, error )
225 __array( char, rwbs, 6 ) 227 __array( char, rwbs, RWBS_LEN)
226 ), 228 ),
227 229
228 TP_fast_assign( 230 TP_fast_assign(
@@ -249,7 +251,7 @@ DECLARE_EVENT_CLASS(block_bio,
249 __field( dev_t, dev ) 251 __field( dev_t, dev )
250 __field( sector_t, sector ) 252 __field( sector_t, sector )
251 __field( unsigned int, nr_sector ) 253 __field( unsigned int, nr_sector )
252 __array( char, rwbs, 6 ) 254 __array( char, rwbs, RWBS_LEN )
253 __array( char, comm, TASK_COMM_LEN ) 255 __array( char, comm, TASK_COMM_LEN )
254 ), 256 ),
255 257
@@ -321,7 +323,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
321 __field( dev_t, dev ) 323 __field( dev_t, dev )
322 __field( sector_t, sector ) 324 __field( sector_t, sector )
323 __field( unsigned int, nr_sector ) 325 __field( unsigned int, nr_sector )
324 __array( char, rwbs, 6 ) 326 __array( char, rwbs, RWBS_LEN )
325 __array( char, comm, TASK_COMM_LEN ) 327 __array( char, comm, TASK_COMM_LEN )
326 ), 328 ),
327 329
@@ -456,7 +458,7 @@ TRACE_EVENT(block_split,
456 __field( dev_t, dev ) 458 __field( dev_t, dev )
457 __field( sector_t, sector ) 459 __field( sector_t, sector )
458 __field( sector_t, new_sector ) 460 __field( sector_t, new_sector )
459 __array( char, rwbs, 6 ) 461 __array( char, rwbs, RWBS_LEN )
460 __array( char, comm, TASK_COMM_LEN ) 462 __array( char, comm, TASK_COMM_LEN )
461 ), 463 ),
462 464
@@ -498,7 +500,7 @@ TRACE_EVENT(block_bio_remap,
498 __field( unsigned int, nr_sector ) 500 __field( unsigned int, nr_sector )
499 __field( dev_t, old_dev ) 501 __field( dev_t, old_dev )
500 __field( sector_t, old_sector ) 502 __field( sector_t, old_sector )
501 __array( char, rwbs, 6 ) 503 __array( char, rwbs, RWBS_LEN)
502 ), 504 ),
503 505
504 TP_fast_assign( 506 TP_fast_assign(
@@ -542,7 +544,7 @@ TRACE_EVENT(block_rq_remap,
542 __field( unsigned int, nr_sector ) 544 __field( unsigned int, nr_sector )
543 __field( dev_t, old_dev ) 545 __field( dev_t, old_dev )
544 __field( sector_t, old_sector ) 546 __field( sector_t, old_sector )
545 __array( char, rwbs, 6 ) 547 __array( char, rwbs, RWBS_LEN)
546 ), 548 ),
547 549
548 TP_fast_assign( 550 TP_fast_assign(
diff --git a/init/main.c b/init/main.c
index 9c51ee7adf3d..2a9b88aa5e76 100644
--- a/init/main.c
+++ b/init/main.c
@@ -209,8 +209,19 @@ early_param("quiet", quiet_kernel);
209 209
210static int __init loglevel(char *str) 210static int __init loglevel(char *str)
211{ 211{
212 get_option(&str, &console_loglevel); 212 int newlevel;
213 return 0; 213
214 /*
 215 * Only update the loglevel when a valid setting was passed, to
 216 * prevent blind crashes (when the loglevel is set to 0) that
 217 * are quite hard to debug
218 */
219 if (get_option(&str, &newlevel)) {
220 console_loglevel = newlevel;
221 return 0;
222 }
223
224 return -EINVAL;
214} 225}
215 226
216early_param("loglevel", loglevel); 227early_param("loglevel", loglevel);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
399 local_irq_restore(flags); 399 local_irq_restore(flags);
400} 400}
401 401
402static inline void perf_cgroup_sched_out(struct task_struct *task) 402static inline void perf_cgroup_sched_out(struct task_struct *task,
403 struct task_struct *next)
403{ 404{
404 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 405 struct perf_cgroup *cgrp1;
406 struct perf_cgroup *cgrp2 = NULL;
407
408 /*
409 * we come here when we know perf_cgroup_events > 0
410 */
411 cgrp1 = perf_cgroup_from_task(task);
412
413 /*
414 * next is NULL when called from perf_event_enable_on_exec()
415 * that will systematically cause a cgroup_switch()
416 */
417 if (next)
418 cgrp2 = perf_cgroup_from_task(next);
419
420 /*
421 * only schedule out current cgroup events if we know
422 * that we are switching to a different cgroup. Otherwise,
 423 * do not touch the cgroup events.
424 */
425 if (cgrp1 != cgrp2)
426 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
405} 427}
406 428
407static inline void perf_cgroup_sched_in(struct task_struct *task) 429static inline void perf_cgroup_sched_in(struct task_struct *prev,
430 struct task_struct *task)
408{ 431{
409 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 432 struct perf_cgroup *cgrp1;
433 struct perf_cgroup *cgrp2 = NULL;
434
435 /*
436 * we come here when we know perf_cgroup_events > 0
437 */
438 cgrp1 = perf_cgroup_from_task(task);
439
440 /* prev can never be NULL */
441 cgrp2 = perf_cgroup_from_task(prev);
442
443 /*
444 * only need to schedule in cgroup events if we are changing
445 * cgroup during ctxsw. Cgroup events were not scheduled
 446 * out during ctxsw if that was not the case.
447 */
448 if (cgrp1 != cgrp2)
449 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
410} 450}
411 451
412static inline int perf_cgroup_connect(int fd, struct perf_event *event, 452static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
518{ 558{
519} 559}
520 560
521static inline void perf_cgroup_sched_out(struct task_struct *task) 561static inline void perf_cgroup_sched_out(struct task_struct *task,
562 struct task_struct *next)
522{ 563{
523} 564}
524 565
525static inline void perf_cgroup_sched_in(struct task_struct *task) 566static inline void perf_cgroup_sched_in(struct task_struct *prev,
567 struct task_struct *task)
526{ 568{
527} 569}
528 570
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
1988 * cgroup event are system-wide mode only 2030 * cgroup event are system-wide mode only
1989 */ 2031 */
1990 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2032 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
1991 perf_cgroup_sched_out(task); 2033 perf_cgroup_sched_out(task, next);
1992} 2034}
1993 2035
1994static void task_ctx_sched_out(struct perf_event_context *ctx) 2036static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2153 * accessing the event control register. If a NMI hits, then it will 2195 * accessing the event control register. If a NMI hits, then it will
2154 * keep the event running. 2196 * keep the event running.
2155 */ 2197 */
2156void __perf_event_task_sched_in(struct task_struct *task) 2198void __perf_event_task_sched_in(struct task_struct *prev,
2199 struct task_struct *task)
2157{ 2200{
2158 struct perf_event_context *ctx; 2201 struct perf_event_context *ctx;
2159 int ctxn; 2202 int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
2171 * cgroup event are system-wide mode only 2214 * cgroup event are system-wide mode only
2172 */ 2215 */
2173 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2216 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2174 perf_cgroup_sched_in(task); 2217 perf_cgroup_sched_in(prev, task);
2175} 2218}
2176 2219
2177static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2220static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2427 * ctxswin cgroup events which are already scheduled 2470 * ctxswin cgroup events which are already scheduled
2428 * in. 2471 * in.
2429 */ 2472 */
2430 perf_cgroup_sched_out(current); 2473 perf_cgroup_sched_out(current, NULL);
2431 2474
2432 raw_spin_lock(&ctx->lock); 2475 raw_spin_lock(&ctx->lock);
2433 task_ctx_sched_out(ctx); 2476 task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
3353} 3396}
3354 3397
3355static void calc_timer_values(struct perf_event *event, 3398static void calc_timer_values(struct perf_event *event,
3356 u64 *running, 3399 u64 *enabled,
3357 u64 *enabled) 3400 u64 *running)
3358{ 3401{
3359 u64 now, ctx_time; 3402 u64 now, ctx_time;
3360 3403
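The perf_cgroup_sched_out()/perf_cgroup_sched_in() changes above look up the perf cgroup of both the outgoing and the incoming task and call perf_cgroup_switch() only when the two differ, so a context switch that stays inside one cgroup does no extra work. A reduced user-space sketch of that comparison; the structs and the switch function are stand-ins, not the kernel API:

    #include <stdio.h>

    struct demo_cgroup { int id; };
    struct demo_task { struct demo_cgroup *cgrp; };

    static int switches;                     /* count expensive switches */

    static void cgroup_switch(struct demo_task *t)
    {
        (void)t;
        switches++;                          /* stands in for perf_cgroup_switch() */
    }

    static void sched_out(struct demo_task *task, struct demo_task *next)
    {
        struct demo_cgroup *cgrp1 = task->cgrp;
        /* next == NULL forces a switch, as in perf_event_enable_on_exec() */
        struct demo_cgroup *cgrp2 = next ? next->cgrp : NULL;

        if (cgrp1 != cgrp2)
            cgroup_switch(task);
    }

    int main(void)
    {
        struct demo_cgroup web = { 1 }, db = { 2 };
        struct demo_task a = { &web }, b = { &web }, c = { &db };

        sched_out(&a, &b);                   /* same cgroup: no switch */
        sched_out(&a, &c);                   /* different cgroup: switch */
        sched_out(&a, NULL);                 /* forced switch */
        printf("switches: %d\n", switches);  /* prints 2 */
        return 0;
    }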
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d5a3009da71a..dc5114b4c16c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
178 desc->depth = 1; 178 desc->depth = 1;
179 if (desc->irq_data.chip->irq_shutdown) 179 if (desc->irq_data.chip->irq_shutdown)
180 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 180 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
181 if (desc->irq_data.chip->irq_disable) 181 else if (desc->irq_data.chip->irq_disable)
182 desc->irq_data.chip->irq_disable(&desc->irq_data); 182 desc->irq_data.chip->irq_disable(&desc->irq_data);
183 else 183 else
184 desc->irq_data.chip->irq_mask(&desc->irq_data); 184 desc->irq_data.chip->irq_mask(&desc->irq_data);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 3a2cab407b93..e38544dddb18 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -246,7 +246,7 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
246 gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); 246 gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
247 247
248 for (i = gc->irq_base; msk; msk >>= 1, i++) { 248 for (i = gc->irq_base; msk; msk >>= 1, i++) {
249 if (!msk & 0x01) 249 if (!(msk & 0x01))
250 continue; 250 continue;
251 251
252 if (flags & IRQ_GC_INIT_NESTED_LOCK) 252 if (flags & IRQ_GC_INIT_NESTED_LOCK)
@@ -301,7 +301,7 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
301 raw_spin_unlock(&gc_lock); 301 raw_spin_unlock(&gc_lock);
302 302
303 for (; msk; msk >>= 1, i++) { 303 for (; msk; msk >>= 1, i++) {
304 if (!msk & 0x01) 304 if (!(msk & 0x01))
305 continue; 305 continue;
306 306
307 /* Remove handler first. That will mask the irq line */ 307 /* Remove handler first. That will mask the irq line */
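Both generic-chip hunks fix an operator-precedence bug: `!msk & 0x01` parses as `(!msk) & 0x01`, which only tests whether the whole mask is zero, not whether its lowest bit is set. A stand-alone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned int msk = 0x6;              /* bit 0 clear, bits 1-2 set */

        /* Buggy form: !msk is 0 for any non-zero mask, so the expression
         * is always 0 and the "continue" never triggers. */
        printf("!msk & 0x01   = %d\n", !msk & 0x01);

        /* Fixed form: actually tests the low bit of the mask. */
        printf("!(msk & 0x01) = %d\n", !(msk & 0x01));
        return 0;
    }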
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 4c60a50e66b2..039b889ea053 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -70,7 +70,8 @@ static inline void desc_smp_init(struct irq_desc *desc, int node) { }
70static inline int desc_node(struct irq_desc *desc) { return 0; } 70static inline int desc_node(struct irq_desc *desc) { return 0; }
71#endif 71#endif
72 72
73static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) 73static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
74 struct module *owner)
74{ 75{
75 int cpu; 76 int cpu;
76 77
@@ -86,6 +87,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
86 desc->irq_count = 0; 87 desc->irq_count = 0;
87 desc->irqs_unhandled = 0; 88 desc->irqs_unhandled = 0;
88 desc->name = NULL; 89 desc->name = NULL;
90 desc->owner = owner;
89 for_each_possible_cpu(cpu) 91 for_each_possible_cpu(cpu)
90 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; 92 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
91 desc_smp_init(desc, node); 93 desc_smp_init(desc, node);
@@ -128,7 +130,7 @@ static void free_masks(struct irq_desc *desc)
128static inline void free_masks(struct irq_desc *desc) { } 130static inline void free_masks(struct irq_desc *desc) { }
129#endif 131#endif
130 132
131static struct irq_desc *alloc_desc(int irq, int node) 133static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
132{ 134{
133 struct irq_desc *desc; 135 struct irq_desc *desc;
134 gfp_t gfp = GFP_KERNEL; 136 gfp_t gfp = GFP_KERNEL;
@@ -147,7 +149,7 @@ static struct irq_desc *alloc_desc(int irq, int node)
147 raw_spin_lock_init(&desc->lock); 149 raw_spin_lock_init(&desc->lock);
148 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 150 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
149 151
150 desc_set_defaults(irq, desc, node); 152 desc_set_defaults(irq, desc, node, owner);
151 153
152 return desc; 154 return desc;
153 155
@@ -173,13 +175,14 @@ static void free_desc(unsigned int irq)
173 kfree(desc); 175 kfree(desc);
174} 176}
175 177
176static int alloc_descs(unsigned int start, unsigned int cnt, int node) 178static int alloc_descs(unsigned int start, unsigned int cnt, int node,
179 struct module *owner)
177{ 180{
178 struct irq_desc *desc; 181 struct irq_desc *desc;
179 int i; 182 int i;
180 183
181 for (i = 0; i < cnt; i++) { 184 for (i = 0; i < cnt; i++) {
182 desc = alloc_desc(start + i, node); 185 desc = alloc_desc(start + i, node, owner);
183 if (!desc) 186 if (!desc)
184 goto err; 187 goto err;
185 mutex_lock(&sparse_irq_lock); 188 mutex_lock(&sparse_irq_lock);
@@ -227,7 +230,7 @@ int __init early_irq_init(void)
227 nr_irqs = initcnt; 230 nr_irqs = initcnt;
228 231
229 for (i = 0; i < initcnt; i++) { 232 for (i = 0; i < initcnt; i++) {
230 desc = alloc_desc(i, node); 233 desc = alloc_desc(i, node, NULL);
231 set_bit(i, allocated_irqs); 234 set_bit(i, allocated_irqs);
232 irq_insert_desc(i, desc); 235 irq_insert_desc(i, desc);
233 } 236 }
@@ -261,7 +264,7 @@ int __init early_irq_init(void)
261 alloc_masks(&desc[i], GFP_KERNEL, node); 264 alloc_masks(&desc[i], GFP_KERNEL, node);
262 raw_spin_lock_init(&desc[i].lock); 265 raw_spin_lock_init(&desc[i].lock);
263 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 266 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
264 desc_set_defaults(i, &desc[i], node); 267 desc_set_defaults(i, &desc[i], node, NULL);
265 } 268 }
266 return arch_early_irq_init(); 269 return arch_early_irq_init();
267} 270}
@@ -276,8 +279,16 @@ static void free_desc(unsigned int irq)
276 dynamic_irq_cleanup(irq); 279 dynamic_irq_cleanup(irq);
277} 280}
278 281
279static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) 282static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
283 struct module *owner)
280{ 284{
285 u32 i;
286
287 for (i = 0; i < cnt; i++) {
288 struct irq_desc *desc = irq_to_desc(start + i);
289
290 desc->owner = owner;
291 }
281 return start; 292 return start;
282} 293}
283 294
@@ -333,11 +344,13 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
333 * @from: Start the search from this irq number 344 * @from: Start the search from this irq number
334 * @cnt: Number of consecutive irqs to allocate. 345 * @cnt: Number of consecutive irqs to allocate.
335 * @node: Preferred node on which the irq descriptor should be allocated 346 * @node: Preferred node on which the irq descriptor should be allocated
347 * @owner: Owning module (can be NULL)
336 * 348 *
337 * Returns the first irq number or error code 349 * Returns the first irq number or error code
338 */ 350 */
339int __ref 351int __ref
340irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) 352__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
353 struct module *owner)
341{ 354{
342 int start, ret; 355 int start, ret;
343 356
@@ -366,13 +379,13 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
366 379
367 bitmap_set(allocated_irqs, start, cnt); 380 bitmap_set(allocated_irqs, start, cnt);
368 mutex_unlock(&sparse_irq_lock); 381 mutex_unlock(&sparse_irq_lock);
369 return alloc_descs(start, cnt, node); 382 return alloc_descs(start, cnt, node, owner);
370 383
371err: 384err:
372 mutex_unlock(&sparse_irq_lock); 385 mutex_unlock(&sparse_irq_lock);
373 return ret; 386 return ret;
374} 387}
375EXPORT_SYMBOL_GPL(irq_alloc_descs); 388EXPORT_SYMBOL_GPL(__irq_alloc_descs);
376 389
377/** 390/**
378 * irq_reserve_irqs - mark irqs allocated 391 * irq_reserve_irqs - mark irqs allocated
@@ -440,7 +453,7 @@ void dynamic_irq_cleanup(unsigned int irq)
440 unsigned long flags; 453 unsigned long flags;
441 454
442 raw_spin_lock_irqsave(&desc->lock, flags); 455 raw_spin_lock_irqsave(&desc->lock, flags);
443 desc_set_defaults(irq, desc, desc_node(desc)); 456 desc_set_defaults(irq, desc, desc_node(desc), NULL);
444 raw_spin_unlock_irqrestore(&desc->lock, flags); 457 raw_spin_unlock_irqrestore(&desc->lock, flags);
445} 458}
446 459
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a7840aeb0fb..9b956fa20308 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -883,6 +883,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
883 883
884 if (desc->irq_data.chip == &no_irq_chip) 884 if (desc->irq_data.chip == &no_irq_chip)
885 return -ENOSYS; 885 return -ENOSYS;
886 if (!try_module_get(desc->owner))
887 return -ENODEV;
886 /* 888 /*
887 * Some drivers like serial.c use request_irq() heavily, 889 * Some drivers like serial.c use request_irq() heavily,
888 * so we have to be careful not to interfere with a 890 * so we have to be careful not to interfere with a
@@ -906,8 +908,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
906 */ 908 */
907 nested = irq_settings_is_nested_thread(desc); 909 nested = irq_settings_is_nested_thread(desc);
908 if (nested) { 910 if (nested) {
909 if (!new->thread_fn) 911 if (!new->thread_fn) {
910 return -EINVAL; 912 ret = -EINVAL;
913 goto out_mput;
914 }
911 /* 915 /*
912 * Replace the primary handler which was provided from 916 * Replace the primary handler which was provided from
913 * the driver for non nested interrupt handling by the 917 * the driver for non nested interrupt handling by the
@@ -929,8 +933,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
929 933
930 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, 934 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
931 new->name); 935 new->name);
932 if (IS_ERR(t)) 936 if (IS_ERR(t)) {
933 return PTR_ERR(t); 937 ret = PTR_ERR(t);
938 goto out_mput;
939 }
934 /* 940 /*
935 * We keep the reference to the task struct even if 941 * We keep the reference to the task struct even if
936 * the thread dies to avoid that the interrupt code 942 * the thread dies to avoid that the interrupt code
@@ -1095,6 +1101,8 @@ out_thread:
1095 kthread_stop(t); 1101 kthread_stop(t);
1096 put_task_struct(t); 1102 put_task_struct(t);
1097 } 1103 }
1104out_mput:
1105 module_put(desc->owner);
1098 return ret; 1106 return ret;
1099} 1107}
1100 1108
@@ -1203,6 +1211,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1203 put_task_struct(action->thread); 1211 put_task_struct(action->thread);
1204 } 1212 }
1205 1213
1214 module_put(desc->owner);
1206 return action; 1215 return action;
1207} 1216}
1208 1217
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8c24294e477f..91d67ce3a8d5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3111,7 +3111,13 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3111 if (!class) 3111 if (!class)
3112 class = look_up_lock_class(lock, 0); 3112 class = look_up_lock_class(lock, 0);
3113 3113
3114 if (DEBUG_LOCKS_WARN_ON(!class)) 3114 /*
3115 * If look_up_lock_class() failed to find a class, we're trying
3116 * to test if we hold a lock that has never yet been acquired.
3117 * Clearly if the lock hasn't been acquired _ever_, we're not
3118 * holding it either, so report failure.
3119 */
3120 if (!class)
3115 return 0; 3121 return 0;
3116 3122
3117 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3123 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b1914cb9095c..3744c594b19b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -231,3 +231,7 @@ config PM_CLK
231config PM_GENERIC_DOMAINS 231config PM_GENERIC_DOMAINS
232 bool 232 bool
233 depends on PM 233 depends on PM
234
235config PM_GENERIC_DOMAINS_RUNTIME
236 def_bool y
237 depends on PM_RUNTIME && PM_GENERIC_DOMAINS
diff --git a/kernel/printk.c b/kernel/printk.c
index 836a2ae0ac31..28a40d8171b8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1604,7 +1604,7 @@ static int __init printk_late_init(void)
1604 struct console *con; 1604 struct console *con;
1605 1605
1606 for_each_console(con) { 1606 for_each_console(con) {
1607 if (con->flags & CON_BOOT) { 1607 if (!keep_bootcon && con->flags & CON_BOOT) {
1608 printk(KERN_INFO "turn off boot console %s%d\n", 1608 printk(KERN_INFO "turn off boot console %s%d\n",
1609 con->name, con->index); 1609 con->name, con->index);
1610 unregister_console(con); 1610 unregister_console(con);
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..ec5f472bc5b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
3065#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3065#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3066 local_irq_disable(); 3066 local_irq_disable();
3067#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 3067#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3068 perf_event_task_sched_in(current); 3068 perf_event_task_sched_in(prev, current);
3069#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3069#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3070 local_irq_enable(); 3070 local_irq_enable();
3071#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 3071#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
4279} 4279}
4280 4280
4281/* 4281/*
4282 * schedule() is the main scheduler function. 4282 * __schedule() is the main scheduler function.
4283 */ 4283 */
4284asmlinkage void __sched schedule(void) 4284static void __sched __schedule(void)
4285{ 4285{
4286 struct task_struct *prev, *next; 4286 struct task_struct *prev, *next;
4287 unsigned long *switch_count; 4287 unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
4322 if (to_wakeup) 4322 if (to_wakeup)
4323 try_to_wake_up_local(to_wakeup); 4323 try_to_wake_up_local(to_wakeup);
4324 } 4324 }
4325
4326 /*
4327 * If we are going to sleep and we have plugged IO
4328 * queued, make sure to submit it to avoid deadlocks.
4329 */
4330 if (blk_needs_flush_plug(prev)) {
4331 raw_spin_unlock(&rq->lock);
4332 blk_schedule_flush_plug(prev);
4333 raw_spin_lock(&rq->lock);
4334 }
4335 } 4325 }
4336 switch_count = &prev->nvcsw; 4326 switch_count = &prev->nvcsw;
4337 } 4327 }
@@ -4369,6 +4359,26 @@ need_resched:
4369 if (need_resched()) 4359 if (need_resched())
4370 goto need_resched; 4360 goto need_resched;
4371} 4361}
4362
4363static inline void sched_submit_work(struct task_struct *tsk)
4364{
4365 if (!tsk->state)
4366 return;
4367 /*
4368 * If we are going to sleep and we have plugged IO queued,
4369 * make sure to submit it to avoid deadlocks.
4370 */
4371 if (blk_needs_flush_plug(tsk))
4372 blk_schedule_flush_plug(tsk);
4373}
4374
4375asmlinkage void schedule(void)
4376{
4377 struct task_struct *tsk = current;
4378
4379 sched_submit_work(tsk);
4380 __schedule();
4381}
4372EXPORT_SYMBOL(schedule); 4382EXPORT_SYMBOL(schedule);
4373 4383
4374#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 4384#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
4435 4445
4436 do { 4446 do {
4437 add_preempt_count_notrace(PREEMPT_ACTIVE); 4447 add_preempt_count_notrace(PREEMPT_ACTIVE);
4438 schedule(); 4448 __schedule();
4439 sub_preempt_count_notrace(PREEMPT_ACTIVE); 4449 sub_preempt_count_notrace(PREEMPT_ACTIVE);
4440 4450
4441 /* 4451 /*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4463 do { 4473 do {
4464 add_preempt_count(PREEMPT_ACTIVE); 4474 add_preempt_count(PREEMPT_ACTIVE);
4465 local_irq_enable(); 4475 local_irq_enable();
4466 schedule(); 4476 __schedule();
4467 local_irq_disable(); 4477 local_irq_disable();
4468 sub_preempt_count(PREEMPT_ACTIVE); 4478 sub_preempt_count(PREEMPT_ACTIVE);
4469 4479
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
5588static void __cond_resched(void) 5598static void __cond_resched(void)
5589{ 5599{
5590 add_preempt_count(PREEMPT_ACTIVE); 5600 add_preempt_count(PREEMPT_ACTIVE);
5591 schedule(); 5601 __schedule();
5592 sub_preempt_count(PREEMPT_ACTIVE); 5602 sub_preempt_count(PREEMPT_ACTIVE);
5593} 5603}
5594 5604
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
7443 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); 7453 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
7444 if (sd && (sd->flags & SD_OVERLAP)) 7454 if (sd && (sd->flags & SD_OVERLAP))
7445 free_sched_groups(sd->groups, 0); 7455 free_sched_groups(sd->groups, 0);
7456 kfree(*per_cpu_ptr(sdd->sd, j));
7446 kfree(*per_cpu_ptr(sdd->sg, j)); 7457 kfree(*per_cpu_ptr(sdd->sg, j));
7447 kfree(*per_cpu_ptr(sdd->sgp, j)); 7458 kfree(*per_cpu_ptr(sdd->sgp, j));
7448 } 7459 }
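The scheduler change splits the old schedule() into an internal __schedule() plus a thin public wrapper: the wrapper runs sched_submit_work(), which flushes plugged block I/O only when the task is about to sleep, and then calls __schedule(), while the preemption paths call __schedule() directly and skip the pre-work. A stripped-down illustration of that wrapper pattern; every name below is a placeholder, not a kernel symbol:

    #include <stdio.h>

    static int task_state;                   /* 0 == runnable, non-zero == sleeping */
    static int io_plugged;                   /* pretend there is queued block I/O   */

    static void flush_plugged_io(void)
    {
        io_plugged = 0;
        printf("flushed plugged I/O\n");
    }

    static void submit_work(void)
    {
        if (!task_state)
            return;                          /* still runnable: nothing to flush */
        if (io_plugged)
            flush_plugged_io();              /* avoid sleeping on our own I/O */
    }

    static void do_schedule(void)            /* stands in for __schedule() */
    {
        printf("context switch\n");
    }

    static void schedule_wrapper(void)       /* stands in for schedule() */
    {
        submit_work();
        do_schedule();
    }

    static void preempt_path(void)           /* preemption skips the pre-work */
    {
        do_schedule();
    }

    int main(void)
    {
        task_state = 1;
        io_plugged = 1;
        schedule_wrapper();                  /* flushes, then switches */
        preempt_path();                      /* just switches */
        return 0;
    }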
diff --git a/kernel/sys.c b/kernel/sys.c
index dd948a1fca4c..18ee1d2f6474 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -37,6 +37,8 @@
37#include <linux/fs_struct.h> 37#include <linux/fs_struct.h>
38#include <linux/gfp.h> 38#include <linux/gfp.h>
39#include <linux/syscore_ops.h> 39#include <linux/syscore_ops.h>
40#include <linux/version.h>
41#include <linux/ctype.h>
40 42
41#include <linux/compat.h> 43#include <linux/compat.h>
42#include <linux/syscalls.h> 44#include <linux/syscalls.h>
@@ -44,6 +46,8 @@
44#include <linux/user_namespace.h> 46#include <linux/user_namespace.h>
45 47
46#include <linux/kmsg_dump.h> 48#include <linux/kmsg_dump.h>
49/* Move somewhere else to avoid recompiling? */
50#include <generated/utsrelease.h>
47 51
48#include <asm/uaccess.h> 52#include <asm/uaccess.h>
49#include <asm/io.h> 53#include <asm/io.h>
@@ -1161,6 +1165,34 @@ DECLARE_RWSEM(uts_sem);
1161#define override_architecture(name) 0 1165#define override_architecture(name) 0
1162#endif 1166#endif
1163 1167
1168/*
1169 * Work around broken programs that cannot handle "Linux 3.0".
1170 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1171 */
1172static int override_release(char __user *release, int len)
1173{
1174 int ret = 0;
1175 char buf[len];
1176
1177 if (current->personality & UNAME26) {
1178 char *rest = UTS_RELEASE;
1179 int ndots = 0;
1180 unsigned v;
1181
1182 while (*rest) {
1183 if (*rest == '.' && ++ndots >= 3)
1184 break;
1185 if (!isdigit(*rest) && *rest != '.')
1186 break;
1187 rest++;
1188 }
1189 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
1190 snprintf(buf, len, "2.6.%u%s", v, rest);
1191 ret = copy_to_user(release, buf, len);
1192 }
1193 return ret;
1194}
1195
1164SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1196SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1165{ 1197{
1166 int errno = 0; 1198 int errno = 0;
@@ -1170,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1170 errno = -EFAULT; 1202 errno = -EFAULT;
1171 up_read(&uts_sem); 1203 up_read(&uts_sem);
1172 1204
1205 if (!errno && override_release(name->release, sizeof(name->release)))
1206 errno = -EFAULT;
1173 if (!errno && override_architecture(name)) 1207 if (!errno && override_architecture(name))
1174 errno = -EFAULT; 1208 errno = -EFAULT;
1175 return errno; 1209 return errno;
@@ -1191,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1191 error = -EFAULT; 1225 error = -EFAULT;
1192 up_read(&uts_sem); 1226 up_read(&uts_sem);
1193 1227
1228 if (!error && override_release(name->release, sizeof(name->release)))
1229 error = -EFAULT;
1194 if (!error && override_architecture(name)) 1230 if (!error && override_architecture(name))
1195 error = -EFAULT; 1231 error = -EFAULT;
1196 return error; 1232 return error;
@@ -1225,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1225 1261
1226 if (!error && override_architecture(name)) 1262 if (!error && override_architecture(name))
1227 error = -EFAULT; 1263 error = -EFAULT;
1264 if (!error && override_release(name->release, sizeof(name->release)))
1265 error = -EFAULT;
1228 return error ? -EFAULT : 0; 1266 return error ? -EFAULT : 0;
1229} 1267}
1230#endif 1268#endif
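override_release() rewrites the reported kernel release for tasks with the UNAME26 personality, mapping "3.x" onto "2.6.40+x" so programs that choke on "Linux 3.0" keep working. A stand-alone sketch of just the string mapping; the patchlevel is passed in here rather than taken from LINUX_VERSION_CODE, and the copy_to_user() handling is omitted:

    #include <stdio.h>
    #include <ctype.h>

    /* Map a "3.x..." release string to "2.6.(40+x)...": "3.0" -> "2.6.40". */
    static void map_release(const char *uts_release, unsigned int patchlevel,
                            char *buf, size_t len)
    {
        const char *rest = uts_release;
        int ndots = 0;

        /* Skip the leading numeric "a.b.c" part, stopping at the third dot
         * or at the first non-numeric character (e.g. "-rc7"). */
        while (*rest) {
            if (*rest == '.' && ++ndots >= 3)
                break;
            if (!isdigit((unsigned char)*rest) && *rest != '.')
                break;
            rest++;
        }
        snprintf(buf, len, "2.6.%u%s", patchlevel + 40, rest);
    }

    int main(void)
    {
        char buf[65];

        map_release("3.0", 0, buf, sizeof(buf));
        printf("%s\n", buf);                 /* prints 2.6.40 */
        map_release("3.1.0-rc7", 1, buf, sizeof(buf));
        printf("%s\n", buf);                 /* prints 2.6.41-rc7 */
        return 0;
    }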
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 62cbc8877fef..a9a5de07c4f1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -16,7 +16,6 @@ asmlinkage long sys_ni_syscall(void)
16 return -ENOSYS; 16 return -ENOSYS;
17} 17}
18 18
19cond_syscall(sys_nfsservctl);
20cond_syscall(sys_quotactl); 19cond_syscall(sys_quotactl);
21cond_syscall(sys32_quotactl); 20cond_syscall(sys32_quotactl);
22cond_syscall(sys_acct); 21cond_syscall(sys_acct);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 3b8e028b9601..e8bffbe2ba4b 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1,6 +1,6 @@
1#include <linux/stat.h> 1#include <linux/stat.h>
2#include <linux/sysctl.h> 2#include <linux/sysctl.h>
3#include "../fs/xfs/linux-2.6/xfs_sysctl.h" 3#include "../fs/xfs/xfs_sysctl.h"
4#include <linux/sunrpc/debug.h> 4#include <linux/sunrpc/debug.h>
5#include <linux/string.h> 5#include <linux/string.h>
6#include <net/ip_vs.h> 6#include <net/ip_vs.h>
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 4e4932a7b360..362da653813d 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1,6 +1,6 @@
1#include <linux/stat.h> 1#include <linux/stat.h>
2#include <linux/sysctl.h> 2#include <linux/sysctl.h>
3#include "../fs/xfs/linux-2.6/xfs_sysctl.h" 3#include "../fs/xfs/xfs_sysctl.h"
4#include <linux/sunrpc/debug.h> 4#include <linux/sunrpc/debug.h>
5#include <linux/string.h> 5#include <linux/string.h>
6#include <net/ip_vs.h> 6#include <net/ip_vs.h>
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e19ce1454ee1..e66046456f4f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
655 .cmd = TASKSTATS_CMD_GET, 655 .cmd = TASKSTATS_CMD_GET,
656 .doit = taskstats_user_cmd, 656 .doit = taskstats_user_cmd,
657 .policy = taskstats_cmd_get_policy, 657 .policy = taskstats_cmd_get_policy,
658 .flags = GENL_ADMIN_PERM,
658}; 659};
659 660
660static struct genl_ops cgroupstats_ops = { 661static struct genl_ops cgroupstats_ops = {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 59f369f98a04..ea5e1a928d5b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
441static void alarm_timer_get(struct k_itimer *timr, 441static void alarm_timer_get(struct k_itimer *timr,
442 struct itimerspec *cur_setting) 442 struct itimerspec *cur_setting)
443{ 443{
444 memset(cur_setting, 0, sizeof(struct itimerspec));
445
444 cur_setting->it_interval = 446 cur_setting->it_interval =
445 ktime_to_timespec(timr->it.alarmtimer.period); 447 ktime_to_timespec(timr->it.alarmtimer.period);
446 cur_setting->it_value = 448 cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
479 if (!rtcdev) 481 if (!rtcdev)
480 return -ENOTSUPP; 482 return -ENOTSUPP;
481 483
482 /* Save old values */ 484 /*
483 old_setting->it_interval = 485 * XXX HACK! Currently we can DOS a system if the interval
484 ktime_to_timespec(timr->it.alarmtimer.period); 486 * period on alarmtimers is too small. Cap the interval here
485 old_setting->it_value = 487 * to 100us and solve this properly in a future patch! -jstultz
486 ktime_to_timespec(timr->it.alarmtimer.node.expires); 488 */
489 if ((new_setting->it_interval.tv_sec == 0) &&
490 (new_setting->it_interval.tv_nsec < 100000))
491 new_setting->it_interval.tv_nsec = 100000;
492
493 if (old_setting)
494 alarm_timer_get(timr, old_setting);
487 495
488 /* If the timer was already set, cancel it */ 496 /* If the timer was already set, cancel it */
489 alarm_cancel(&timr->it.alarmtimer); 497 alarm_cancel(&timr->it.alarmtimer);
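alarm_timer_set() now refuses interval periods below 100 microseconds, bumping anything smaller up to that floor so a tiny interval cannot flood the system with alarm expirations, and old_setting is filled via alarm_timer_get() (which now memsets its output first). The clamp itself, in plain C on a struct timespec:

    #include <stdio.h>
    #include <time.h>

    #define MIN_INTERVAL_NSEC 100000L        /* 100us floor, as in the patch */

    static void clamp_interval(struct timespec *it_interval)
    {
        if (it_interval->tv_sec == 0 &&
            it_interval->tv_nsec < MIN_INTERVAL_NSEC)
            it_interval->tv_nsec = MIN_INTERVAL_NSEC;
    }

    int main(void)
    {
        struct timespec iv = { 0, 5000 };    /* 5us requested */

        clamp_interval(&iv);
        printf("%ld.%09ld\n", (long)iv.tv_sec, iv.tv_nsec);  /* 0.000100000 */
        return 0;
    }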
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6957aa298dfa..7c910a5593a6 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -206,6 +206,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
206 what |= MASK_TC_BIT(rw, RAHEAD); 206 what |= MASK_TC_BIT(rw, RAHEAD);
207 what |= MASK_TC_BIT(rw, META); 207 what |= MASK_TC_BIT(rw, META);
208 what |= MASK_TC_BIT(rw, DISCARD); 208 what |= MASK_TC_BIT(rw, DISCARD);
209 what |= MASK_TC_BIT(rw, FLUSH);
210 what |= MASK_TC_BIT(rw, FUA);
209 211
210 pid = tsk->pid; 212 pid = tsk->pid;
211 if (act_log_check(bt, what, sector, pid)) 213 if (act_log_check(bt, what, sector, pid))
@@ -1054,6 +1056,9 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1054 goto out; 1056 goto out;
1055 } 1057 }
1056 1058
1059 if (tc & BLK_TC_FLUSH)
1060 rwbs[i++] = 'F';
1061
1057 if (tc & BLK_TC_DISCARD) 1062 if (tc & BLK_TC_DISCARD)
1058 rwbs[i++] = 'D'; 1063 rwbs[i++] = 'D';
1059 else if (tc & BLK_TC_WRITE) 1064 else if (tc & BLK_TC_WRITE)
@@ -1063,10 +1068,10 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1063 else 1068 else
1064 rwbs[i++] = 'N'; 1069 rwbs[i++] = 'N';
1065 1070
1071 if (tc & BLK_TC_FUA)
1072 rwbs[i++] = 'F';
1066 if (tc & BLK_TC_AHEAD) 1073 if (tc & BLK_TC_AHEAD)
1067 rwbs[i++] = 'A'; 1074 rwbs[i++] = 'A';
1068 if (tc & BLK_TC_BARRIER)
1069 rwbs[i++] = 'B';
1070 if (tc & BLK_TC_SYNC) 1075 if (tc & BLK_TC_SYNC)
1071 rwbs[i++] = 'S'; 1076 rwbs[i++] = 'S';
1072 if (tc & BLK_TC_META) 1077 if (tc & BLK_TC_META)
@@ -1132,7 +1137,7 @@ typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1132 1137
1133static int blk_log_action_classic(struct trace_iterator *iter, const char *act) 1138static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1134{ 1139{
1135 char rwbs[6]; 1140 char rwbs[RWBS_LEN];
1136 unsigned long long ts = iter->ts; 1141 unsigned long long ts = iter->ts;
1137 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); 1142 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1138 unsigned secs = (unsigned long)ts; 1143 unsigned secs = (unsigned long)ts;
@@ -1148,7 +1153,7 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1148 1153
1149static int blk_log_action(struct trace_iterator *iter, const char *act) 1154static int blk_log_action(struct trace_iterator *iter, const char *act)
1150{ 1155{
1151 char rwbs[6]; 1156 char rwbs[RWBS_LEN];
1152 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1157 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1153 1158
1154 fill_rwbs(rwbs, t); 1159 fill_rwbs(rwbs, t);
@@ -1561,7 +1566,7 @@ static const struct {
1561} mask_maps[] = { 1566} mask_maps[] = {
1562 { BLK_TC_READ, "read" }, 1567 { BLK_TC_READ, "read" },
1563 { BLK_TC_WRITE, "write" }, 1568 { BLK_TC_WRITE, "write" },
1564 { BLK_TC_BARRIER, "barrier" }, 1569 { BLK_TC_FLUSH, "flush" },
1565 { BLK_TC_SYNC, "sync" }, 1570 { BLK_TC_SYNC, "sync" },
1566 { BLK_TC_QUEUE, "queue" }, 1571 { BLK_TC_QUEUE, "queue" },
1567 { BLK_TC_REQUEUE, "requeue" }, 1572 { BLK_TC_REQUEUE, "requeue" },
@@ -1573,6 +1578,7 @@ static const struct {
1573 { BLK_TC_META, "meta" }, 1578 { BLK_TC_META, "meta" },
1574 { BLK_TC_DISCARD, "discard" }, 1579 { BLK_TC_DISCARD, "discard" },
1575 { BLK_TC_DRV_DATA, "drv_data" }, 1580 { BLK_TC_DRV_DATA, "drv_data" },
1581 { BLK_TC_FUA, "fua" },
1576}; 1582};
1577 1583
1578static int blk_trace_str2mask(const char *str) 1584static int blk_trace_str2mask(const char *str)
@@ -1788,6 +1794,9 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1788{ 1794{
1789 int i = 0; 1795 int i = 0;
1790 1796
1797 if (rw & REQ_FLUSH)
1798 rwbs[i++] = 'F';
1799
1791 if (rw & WRITE) 1800 if (rw & WRITE)
1792 rwbs[i++] = 'W'; 1801 rwbs[i++] = 'W';
1793 else if (rw & REQ_DISCARD) 1802 else if (rw & REQ_DISCARD)
@@ -1797,6 +1806,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1797 else 1806 else
1798 rwbs[i++] = 'N'; 1807 rwbs[i++] = 'N';
1799 1808
1809 if (rw & REQ_FUA)
1810 rwbs[i++] = 'F';
1800 if (rw & REQ_RAHEAD) 1811 if (rw & REQ_RAHEAD)
1801 rwbs[i++] = 'A'; 1812 rwbs[i++] = 'A';
1802 if (rw & REQ_SYNC) 1813 if (rw & REQ_SYNC)
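With REQ_FLUSH and REQ_FUA now reported, the rwbs string can carry one character each for flush, the basic direction (W/D/R/N), FUA, read-ahead, sync and meta plus the terminating NUL, which is why the fixed buffers grow from 6 to RWBS_LEN (8) bytes. A user-space rendition of the blk_fill_rwbs() logic with made-up DEMO_* flag values standing in for the real REQ_* bits:

    #include <stdio.h>

    /* Stand-in flag bits; the real REQ_* values live in the kernel headers. */
    #define DEMO_WRITE    (1u << 0)
    #define DEMO_DISCARD  (1u << 1)
    #define DEMO_FLUSH    (1u << 2)
    #define DEMO_FUA      (1u << 3)
    #define DEMO_RAHEAD   (1u << 4)
    #define DEMO_SYNC     (1u << 5)
    #define DEMO_META     (1u << 6)

    #define RWBS_LEN 8

    static void fill_rwbs(char *rwbs, unsigned int rw, int bytes)
    {
        int i = 0;

        if (rw & DEMO_FLUSH)
            rwbs[i++] = 'F';                 /* flush request */

        if (rw & DEMO_WRITE)
            rwbs[i++] = 'W';
        else if (rw & DEMO_DISCARD)
            rwbs[i++] = 'D';
        else if (bytes)
            rwbs[i++] = 'R';
        else
            rwbs[i++] = 'N';

        if (rw & DEMO_FUA)
            rwbs[i++] = 'F';                 /* forced unit access */
        if (rw & DEMO_RAHEAD)
            rwbs[i++] = 'A';
        if (rw & DEMO_SYNC)
            rwbs[i++] = 'S';
        if (rw & DEMO_META)
            rwbs[i++] = 'M';

        rwbs[i] = '\0';                      /* worst case uses 7 of 8 bytes */
    }

    int main(void)
    {
        char rwbs[RWBS_LEN];

        fill_rwbs(rwbs, DEMO_FLUSH | DEMO_WRITE | DEMO_FUA | DEMO_SYNC, 4096);
        printf("%s\n", rwbs);                /* prints FWFS */
        return 0;
    }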
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 24dc60d9fa1f..5bbfac85866e 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
78 78
79#define KB 1024 79#define KB 1024
80#define MB (1024*KB) 80#define MB (1024*KB)
81#define KB_MASK (~(KB-1))
81/* 82/*
82 * fill in extended accounting fields 83 * fill in extended accounting fields
83 */ 84 */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
95 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; 96 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
96 mmput(mm); 97 mmput(mm);
97 } 98 }
98 stats->read_char = p->ioac.rchar; 99 stats->read_char = p->ioac.rchar & KB_MASK;
99 stats->write_char = p->ioac.wchar; 100 stats->write_char = p->ioac.wchar & KB_MASK;
100 stats->read_syscalls = p->ioac.syscr; 101 stats->read_syscalls = p->ioac.syscr & KB_MASK;
101 stats->write_syscalls = p->ioac.syscw; 102 stats->write_syscalls = p->ioac.syscw & KB_MASK;
102#ifdef CONFIG_TASK_IO_ACCOUNTING 103#ifdef CONFIG_TASK_IO_ACCOUNTING
103 stats->read_bytes = p->ioac.read_bytes; 104 stats->read_bytes = p->ioac.read_bytes & KB_MASK;
104 stats->write_bytes = p->ioac.write_bytes; 105 stats->write_bytes = p->ioac.write_bytes & KB_MASK;
105 stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes; 106 stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
106#else 107#else
107 stats->read_bytes = 0; 108 stats->read_bytes = 0;
108 stats->write_bytes = 0; 109 stats->write_bytes = 0;
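KB_MASK is ~(KB-1), so ANDing a byte counter with it clears the low 10 bits and rounds the value down to a whole kilobyte before it is copied into the taskstats reply. A quick demonstration of the mask arithmetic:

    #include <stdio.h>

    #define KB      1024
    #define KB_MASK (~(KB - 1))

    int main(void)
    {
        unsigned long long read_bytes = 5 * KB + 300;    /* 5420 bytes */

        /* Clearing the low 10 bits rounds down to a 1 KiB boundary. */
        printf("%llu -> %llu\n", read_bytes,
               read_bytes & KB_MASK);                    /* 5420 -> 5120 */
        return 0;
    }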
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 25fb1b0e53fa..1783aabc6128 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2412,8 +2412,13 @@ reflush:
2412 2412
2413 for_each_cwq_cpu(cpu, wq) { 2413 for_each_cwq_cpu(cpu, wq) {
2414 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2414 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2415 bool drained;
2415 2416
2416 if (!cwq->nr_active && list_empty(&cwq->delayed_works)) 2417 spin_lock_irq(&cwq->gcwq->lock);
2418 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2419 spin_unlock_irq(&cwq->gcwq->lock);
2420
2421 if (drained)
2417 continue; 2422 continue;
2418 2423
2419 if (++flush_cnt == 10 || 2424 if (++flush_cnt == 10 ||
diff --git a/lib/Makefile b/lib/Makefile
index d5d175c8a6ca..3f5bc6d903e0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o find_next_bit.o 15 is_single_threaded.o plist.o decompress.o
16 16
17lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
18lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
25 bsearch.o find_last_bit.o 25 bsearch.o find_last_bit.o find_next_bit.o
26obj-y += kstrtox.o 26obj-y += kstrtox.o
27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
28 28
diff --git a/lib/sha1.c b/lib/sha1.c
index f33271dd00cb..1de509a159c8 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -8,6 +8,7 @@
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/cryptohash.h>
11#include <asm/unaligned.h> 12#include <asm/unaligned.h>
12 13
13/* 14/*
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index e51e2558ca9d..a768e6d28bbb 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
441 * next filter in the chain. Apply the BCJ filter on the new data 441 * next filter in the chain. Apply the BCJ filter on the new data
442 * in the output buffer. If everything cannot be filtered, copy it 442 * in the output buffer. If everything cannot be filtered, copy it
443 * to temp and rewind the output buffer position accordingly. 443 * to temp and rewind the output buffer position accordingly.
444 *
445 * This needs to be always run when temp.size == 0 to handle a special
446 * case where the output buffer is full and the next filter has no
447 * more output coming but hasn't returned XZ_STREAM_END yet.
444 */ 448 */
445 if (s->temp.size < b->out_size - b->out_pos) { 449 if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
446 out_start = b->out_pos; 450 out_start = b->out_pos;
447 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); 451 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
448 b->out_pos += s->temp.size; 452 b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
465 s->temp.size = b->out_pos - out_start; 469 s->temp.size = b->out_pos - out_start;
466 b->out_pos -= s->temp.size; 470 b->out_pos -= s->temp.size;
467 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); 471 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
472
473 /*
474 * If there wasn't enough input to the next filter to fill
475 * the output buffer with unfiltered data, there's no point
476 * to try decoding more data to temp.
477 */
478 if (b->out_pos + s->temp.size < b->out_size)
479 return XZ_OK;
468 } 480 }
469 481
470 /* 482 /*
471 * If we have unfiltered data in temp, try to fill by decoding more 483 * We have unfiltered data in temp. If the output buffer isn't full
472 * data from the next filter. Apply the BCJ filter on temp. Then we 484 * yet, try to fill the temp buffer by decoding more data from the
473 * hopefully can fill the actual output buffer by copying filtered 485 * next filter. Apply the BCJ filter on temp. Then we hopefully can
474 * data from temp. A mix of filtered and unfiltered data may be left 486 * fill the actual output buffer by copying filtered data from temp.
475 * in temp; it will be taken care on the next call to this function. 487 * A mix of filtered and unfiltered data may be left in temp; it will
488 * be taken care on the next call to this function.
476 */ 489 */
477 if (s->temp.size > 0) { 490 if (b->out_pos < b->out_size) {
478 /* Make b->out{,_pos,_size} temporarily point to s->temp. */ 491 /* Make b->out{,_pos,_size} temporarily point to s->temp. */
479 s->out = b->out; 492 s->out = b->out;
480 s->out_pos = b->out_pos; 493 s->out_pos = b->out_pos;
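
The xz_dec_bcj hunk above is easier to follow in isolation: the decoder keeps a small temp buffer of bytes the BCJ filter could not yet convert, flushes whatever fits into the caller's output buffer, and stashes the still-unfilterable tail back into temp; the fix also lets this path run when temp.size == 0 so a full output buffer downstream still makes progress. A standalone C sketch of that flush-and-rewind bookkeeping (illustrative names and a fake filter, not the kernel code):

/*
 * Self-contained sketch of the temp-buffer flush pattern in
 * xz_dec_bcj_run(): copy whatever fits from `temp` into the caller's
 * output buffer, filter in place, and move any tail that could not be
 * filtered back into `temp` for the next call.  All sizes illustrative.
 */
#include <stdio.h>
#include <string.h>

#define TEMP_MAX 8

struct buf {
	unsigned char data[64];
	size_t pos;		/* next free byte   */
	size_t size;		/* total capacity   */
};

struct temp {
	unsigned char data[TEMP_MAX];
	size_t size;
};

/* Pretend BCJ filter: everything except the last `tail` bytes is filterable. */
static size_t fake_filter(unsigned char *p, size_t len, size_t tail)
{
	size_t ok = len > tail ? len - tail : 0;

	for (size_t i = 0; i < ok; i++)
		p[i] ^= 0x20;	/* stand-in for the real branch conversion */
	return ok;
}

static void flush_temp(struct temp *t, struct buf *out, size_t unfilterable_tail)
{
	/* Mirrors the fixed condition: run even when t->size == 0. */
	if (t->size < out->size - out->pos || t->size == 0) {
		size_t start = out->pos;

		memcpy(out->data + out->pos, t->data, t->size);
		out->pos += t->size;

		/* ...output from the next filter in the chain would land here... */

		size_t filtered = fake_filter(out->data + start,
					      out->pos - start,
					      unfilterable_tail);
		/* Rewind the unfiltered tail back into temp. */
		t->size = (out->pos - start) - filtered;
		out->pos = start + filtered;
		memcpy(t->data, out->data + out->pos, t->size);
	}
}

int main(void)
{
	struct buf out = { .pos = 0, .size = 16 };
	struct temp t = { .data = "abc", .size = 3 };

	flush_temp(&t, &out, 2);
	printf("out_pos=%zu temp.size=%zu\n", out.pos, t.size);
	return 0;
}

Compiled as plain C, the demo leaves one filtered byte in the output buffer and parks the two unfilterable bytes back in temp for the next call.
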
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d6edf8d14f9c..a87da524a4a0 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)
359 return max(5UL * 60 * HZ, interval); 359 return max(5UL * 60 * HZ, interval);
360} 360}
361 361
362/*
363 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
364 * shutdown
365 */
366static void bdi_clear_pending(struct backing_dev_info *bdi)
367{
368 clear_bit(BDI_pending, &bdi->state);
369 smp_mb__after_clear_bit();
370 wake_up_bit(&bdi->state, BDI_pending);
371}
372
362static int bdi_forker_thread(void *ptr) 373static int bdi_forker_thread(void *ptr)
363{ 374{
364 struct bdi_writeback *me = ptr; 375 struct bdi_writeback *me = ptr;
@@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)
390 } 401 }
391 402
392 spin_lock_bh(&bdi_lock); 403 spin_lock_bh(&bdi_lock);
404 /*
405 * In the following loop we are going to check whether we have
406 * some work to do without any synchronization with tasks
407 * waking us up to do work for them. So we have to set task
408 * state already here so that we don't miss wakeups coming
409 * after we verify some condition.
410 */
393 set_current_state(TASK_INTERRUPTIBLE); 411 set_current_state(TASK_INTERRUPTIBLE);
394 412
395 list_for_each_entry(bdi, &bdi_list, bdi_list) { 413 list_for_each_entry(bdi, &bdi_list, bdi_list) {
@@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)
469 spin_unlock_bh(&bdi->wb_lock); 487 spin_unlock_bh(&bdi->wb_lock);
470 wake_up_process(task); 488 wake_up_process(task);
471 } 489 }
490 bdi_clear_pending(bdi);
472 break; 491 break;
473 492
474 case KILL_THREAD: 493 case KILL_THREAD:
475 __set_current_state(TASK_RUNNING); 494 __set_current_state(TASK_RUNNING);
476 kthread_stop(task); 495 kthread_stop(task);
496 bdi_clear_pending(bdi);
477 break; 497 break;
478 498
479 case NO_ACTION: 499 case NO_ACTION:
@@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)
489 else 509 else
490 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10)); 510 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
491 try_to_freeze(); 511 try_to_freeze();
492 /* Back to the main loop */ 512 break;
493 continue;
494 } 513 }
495
496 /*
497 * Clear pending bit and wakeup anybody waiting to tear us down.
498 */
499 clear_bit(BDI_pending, &bdi->state);
500 smp_mb__after_clear_bit();
501 wake_up_bit(&bdi->state, BDI_pending);
502 } 514 }
503 515
504 return 0; 516 return 0;
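
The comment added to bdi_forker_thread() describes a classic lost-wakeup hazard: the thread must mark itself as about to sleep before it re-checks for work, otherwise a wakeup sent between the check and the sleep is dropped. A rough userspace analogue (POSIX threads and C11 atomics standing in for set_current_state()/wake_up_process(); not the kernel primitives):

/*
 * Lost-wakeup demo: the forker announces its intent to sleep before
 * re-checking for pending work; the producer queues work first and only
 * then wakes the thread.  sem_wait/sem_post stand in for schedule() and
 * wake_up_process().
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending;	/* queued work items */
static atomic_int want_sleep;	/* analogue of TASK_INTERRUPTIBLE */
static sem_t wakeup;

static void *forker(void *arg)
{
	(void)arg;
	for (int done = 0; done < 3; ) {
		/* Equivalent of set_current_state(TASK_INTERRUPTIBLE):
		 * announce the intent to sleep *before* checking for work. */
		atomic_store(&want_sleep, 1);

		if (atomic_load(&pending) > 0) {
			atomic_store(&want_sleep, 0);	/* back to "running" */
			atomic_fetch_sub(&pending, 1);
			printf("handled work item %d\n", ++done);
			continue;
		}
		sem_wait(&wakeup);	/* schedule() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&wakeup, 0, 0);
	pthread_create(&t, NULL, forker, NULL);

	for (int i = 0; i < 3; i++) {
		atomic_fetch_add(&pending, 1);		/* queue work first...      */
		if (atomic_exchange(&want_sleep, 0))	/* ...then wake if sleeping */
			sem_post(&wakeup);
		usleep(1000);
	}
	pthread_join(t, NULL);
	return 0;
}

Reversing the two steps in forker() — checking pending before setting want_sleep — reproduces exactly the hang the kernel comment warns about when the producer wins the race.
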
diff --git a/mm/filemap.c b/mm/filemap.c
index 645a080ba4df..7771871fa353 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -827,13 +827,14 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
827{ 827{
828 unsigned int i; 828 unsigned int i;
829 unsigned int ret; 829 unsigned int ret;
830 unsigned int nr_found; 830 unsigned int nr_found, nr_skip;
831 831
832 rcu_read_lock(); 832 rcu_read_lock();
833restart: 833restart:
834 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, 834 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
835 (void ***)pages, NULL, start, nr_pages); 835 (void ***)pages, NULL, start, nr_pages);
836 ret = 0; 836 ret = 0;
837 nr_skip = 0;
837 for (i = 0; i < nr_found; i++) { 838 for (i = 0; i < nr_found; i++) {
838 struct page *page; 839 struct page *page;
839repeat: 840repeat:
@@ -856,6 +857,7 @@ repeat:
856 * here as an exceptional entry: so skip over it - 857 * here as an exceptional entry: so skip over it -
857 * we only reach this from invalidate_mapping_pages(). 858 * we only reach this from invalidate_mapping_pages().
858 */ 859 */
860 nr_skip++;
859 continue; 861 continue;
860 } 862 }
861 863
@@ -876,7 +878,7 @@ repeat:
876 * If all entries were removed before we could secure them, 878 * If all entries were removed before we could secure them,
877 * try again, because callers stop trying once 0 is returned. 879 * try again, because callers stop trying once 0 is returned.
878 */ 880 */
879 if (unlikely(!ret && nr_found)) 881 if (unlikely(!ret && nr_found > nr_skip))
880 goto restart; 882 goto restart;
881 rcu_read_unlock(); 883 rcu_read_unlock();
882 return ret; 884 return ret;
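
The find_get_pages() change is subtle: if the gang lookup returns only exceptional (shadow) entries, ret stays 0 while nr_found is non-zero, and the old retry test would spin forever. Counting the skipped entries and retrying only when nr_found > nr_skip breaks that loop. A minimal sketch of the corrected retry condition, using a plain array instead of the radix tree:

/*
 * Illustrative sketch (not the kernel radix-tree code) of the retry
 * condition fixed in find_get_pages().
 */
#include <stdio.h>
#include <stdbool.h>

struct slot {
	int value;
	bool exceptional;	/* e.g. a shadow/swap entry, not a real page */
};

static unsigned lookup(const struct slot *slots, unsigned nr_slots,
		       int *out, unsigned max, bool *retry)
{
	unsigned ret = 0, nr_found = 0, nr_skip = 0;

	for (unsigned i = 0; i < nr_slots && nr_found < max; i++) {
		nr_found++;
		if (slots[i].exceptional) {
			nr_skip++;	/* the added counter */
			continue;
		}
		out[ret++] = slots[i].value;
	}

	/*
	 * Old check: retry when (!ret && nr_found) -- spins forever if every
	 * entry was exceptional.  New check: retry only when something other
	 * than skipped entries disappeared under us.
	 */
	*retry = (ret == 0 && nr_found > nr_skip);
	return ret;
}

int main(void)
{
	struct slot only_shadows[] = { {1, true}, {2, true} };
	int out[4];
	bool retry;

	unsigned n = lookup(only_shadows, 2, out, 4, &retry);
	printf("found %u pages, retry=%d\n", n, retry);	/* 0 pages, retry=0 */
	return 0;
}

With the old `!ret && nr_found` test, the demo would request a retry even though only shadow entries were found, which in the kernel meant looping indefinitely.
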
diff --git a/mm/highmem.c b/mm/highmem.c
index 693394daa2ed..5ef672c07f75 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -326,7 +326,7 @@ static struct page_address_slot {
326 spinlock_t lock; /* Protect this bucket's list */ 326 spinlock_t lock; /* Protect this bucket's list */
327} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; 327} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
328 328
329static struct page_address_slot *page_slot(struct page *page) 329static struct page_address_slot *page_slot(const struct page *page)
330{ 330{
331 return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; 331 return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
332} 332}
@@ -337,7 +337,7 @@ static struct page_address_slot *page_slot(struct page *page)
337 * 337 *
338 * Returns the page's virtual address. 338 * Returns the page's virtual address.
339 */ 339 */
340void *page_address(struct page *page) 340void *page_address(const struct page *page)
341{ 341{
342 unsigned long flags; 342 unsigned long flags;
343 void *ret; 343 void *ret;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 930de9437271..3508777837c7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -204,50 +204,6 @@ struct mem_cgroup_eventfd_list {
204static void mem_cgroup_threshold(struct mem_cgroup *mem); 204static void mem_cgroup_threshold(struct mem_cgroup *mem);
205static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 205static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
206 206
207enum {
208 SCAN_BY_LIMIT,
209 SCAN_BY_SYSTEM,
210 NR_SCAN_CONTEXT,
211 SCAN_BY_SHRINK, /* not recorded now */
212};
213
214enum {
215 SCAN,
216 SCAN_ANON,
217 SCAN_FILE,
218 ROTATE,
219 ROTATE_ANON,
220 ROTATE_FILE,
221 FREED,
222 FREED_ANON,
223 FREED_FILE,
224 ELAPSED,
225 NR_SCANSTATS,
226};
227
228struct scanstat {
229 spinlock_t lock;
230 unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
231 unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
232};
233
234const char *scanstat_string[NR_SCANSTATS] = {
235 "scanned_pages",
236 "scanned_anon_pages",
237 "scanned_file_pages",
238 "rotated_pages",
239 "rotated_anon_pages",
240 "rotated_file_pages",
241 "freed_pages",
242 "freed_anon_pages",
243 "freed_file_pages",
244 "elapsed_ns",
245};
246#define SCANSTAT_WORD_LIMIT "_by_limit"
247#define SCANSTAT_WORD_SYSTEM "_by_system"
248#define SCANSTAT_WORD_HIERARCHY "_under_hierarchy"
249
250
251/* 207/*
252 * The memory controller data structure. The memory controller controls both 208 * The memory controller data structure. The memory controller controls both
253 * page cache and RSS per cgroup. We would eventually like to provide 209 * page cache and RSS per cgroup. We would eventually like to provide
@@ -313,8 +269,7 @@ struct mem_cgroup {
313 269
314 /* For oom notifier event fd */ 270 /* For oom notifier event fd */
315 struct list_head oom_notify; 271 struct list_head oom_notify;
316 /* For recording LRU-scan statistics */ 272
317 struct scanstat scanstat;
318 /* 273 /*
319 * Should we move charges of a task when a task is moved into this 274 * Should we move charges of a task when a task is moved into this
320 * mem_cgroup ? And what type of charges should we move ? 275 * mem_cgroup ? And what type of charges should we move ?
@@ -1678,44 +1633,6 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
1678} 1633}
1679#endif 1634#endif
1680 1635
1681static void __mem_cgroup_record_scanstat(unsigned long *stats,
1682 struct memcg_scanrecord *rec)
1683{
1684
1685 stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1];
1686 stats[SCAN_ANON] += rec->nr_scanned[0];
1687 stats[SCAN_FILE] += rec->nr_scanned[1];
1688
1689 stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1];
1690 stats[ROTATE_ANON] += rec->nr_rotated[0];
1691 stats[ROTATE_FILE] += rec->nr_rotated[1];
1692
1693 stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1];
1694 stats[FREED_ANON] += rec->nr_freed[0];
1695 stats[FREED_FILE] += rec->nr_freed[1];
1696
1697 stats[ELAPSED] += rec->elapsed;
1698}
1699
1700static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec)
1701{
1702 struct mem_cgroup *mem;
1703 int context = rec->context;
1704
1705 if (context >= NR_SCAN_CONTEXT)
1706 return;
1707
1708 mem = rec->mem;
1709 spin_lock(&mem->scanstat.lock);
1710 __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec);
1711 spin_unlock(&mem->scanstat.lock);
1712
1713 mem = rec->root;
1714 spin_lock(&mem->scanstat.lock);
1715 __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec);
1716 spin_unlock(&mem->scanstat.lock);
1717}
1718
1719/* 1636/*
1720 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1637 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1721 * we reclaimed from, so that we don't end up penalizing one child extensively 1638 * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1740,9 +1657,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1740 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1657 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1741 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1658 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1742 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1659 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1743 struct memcg_scanrecord rec;
1744 unsigned long excess; 1660 unsigned long excess;
1745 unsigned long scanned; 1661 unsigned long nr_scanned;
1746 1662
1747 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1748 1664
@@ -1750,15 +1666,6 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1750 if (!check_soft && !shrink && root_mem->memsw_is_minimum) 1666 if (!check_soft && !shrink && root_mem->memsw_is_minimum)
1751 noswap = true; 1667 noswap = true;
1752 1668
1753 if (shrink)
1754 rec.context = SCAN_BY_SHRINK;
1755 else if (check_soft)
1756 rec.context = SCAN_BY_SYSTEM;
1757 else
1758 rec.context = SCAN_BY_LIMIT;
1759
1760 rec.root = root_mem;
1761
1762 while (1) { 1669 while (1) {
1763 victim = mem_cgroup_select_victim(root_mem); 1670 victim = mem_cgroup_select_victim(root_mem);
1764 if (victim == root_mem) { 1671 if (victim == root_mem) {
@@ -1799,23 +1706,14 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1799 css_put(&victim->css); 1706 css_put(&victim->css);
1800 continue; 1707 continue;
1801 } 1708 }
1802 rec.mem = victim;
1803 rec.nr_scanned[0] = 0;
1804 rec.nr_scanned[1] = 0;
1805 rec.nr_rotated[0] = 0;
1806 rec.nr_rotated[1] = 0;
1807 rec.nr_freed[0] = 0;
1808 rec.nr_freed[1] = 0;
1809 rec.elapsed = 0;
1810 /* we use swappiness of local cgroup */ 1709 /* we use swappiness of local cgroup */
1811 if (check_soft) { 1710 if (check_soft) {
1812 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1711 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1813 noswap, zone, &rec, &scanned); 1712 noswap, zone, &nr_scanned);
1814 *total_scanned += scanned; 1713 *total_scanned += nr_scanned;
1815 } else 1714 } else
1816 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1715 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1817 noswap, &rec); 1716 noswap);
1818 mem_cgroup_record_scanstat(&rec);
1819 css_put(&victim->css); 1717 css_put(&victim->css);
1820 /* 1718 /*
1821 * At shrinking usage, we can't check we should stop here or 1719 * At shrinking usage, we can't check we should stop here or
@@ -1841,29 +1739,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1841 */ 1739 */
1842static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) 1740static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1843{ 1741{
1844 int lock_count = -1;
1845 struct mem_cgroup *iter, *failed = NULL; 1742 struct mem_cgroup *iter, *failed = NULL;
1846 bool cond = true; 1743 bool cond = true;
1847 1744
1848 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1745 for_each_mem_cgroup_tree_cond(iter, mem, cond) {
1849 bool locked = iter->oom_lock; 1746 if (iter->oom_lock) {
1850
1851 iter->oom_lock = true;
1852 if (lock_count == -1)
1853 lock_count = iter->oom_lock;
1854 else if (lock_count != locked) {
1855 /* 1747 /*
1856 * this subtree of our hierarchy is already locked 1748 * this subtree of our hierarchy is already locked
1857 * so we cannot give a lock. 1749 * so we cannot give a lock.
1858 */ 1750 */
1859 lock_count = 0;
1860 failed = iter; 1751 failed = iter;
1861 cond = false; 1752 cond = false;
1862 } 1753 } else
1754 iter->oom_lock = true;
1863 } 1755 }
1864 1756
1865 if (!failed) 1757 if (!failed)
1866 goto done; 1758 return true;
1867 1759
1868 /* 1760 /*
1869 * OK, we failed to lock the whole subtree so we have to clean up 1761 * OK, we failed to lock the whole subtree so we have to clean up
@@ -1877,8 +1769,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1877 } 1769 }
1878 iter->oom_lock = false; 1770 iter->oom_lock = false;
1879 } 1771 }
1880done: 1772 return false;
1881 return lock_count;
1882} 1773}
1883 1774
1884/* 1775/*
@@ -2169,13 +2060,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
2169 2060
2170 /* Notify other cpus that system-wide "drain" is running */ 2061 /* Notify other cpus that system-wide "drain" is running */
2171 get_online_cpus(); 2062 get_online_cpus();
2172 /* 2063 curcpu = get_cpu();
2173 * Get a hint for avoiding draining charges on the current cpu,
2174 * which must be exhausted by our charging. It is not required that
2175 * this be a precise check, so we use raw_smp_processor_id() instead of
2176 * getcpu()/putcpu().
2177 */
2178 curcpu = raw_smp_processor_id();
2179 for_each_online_cpu(cpu) { 2064 for_each_online_cpu(cpu) {
2180 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2065 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2181 struct mem_cgroup *mem; 2066 struct mem_cgroup *mem;
@@ -2192,6 +2077,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
2192 schedule_work_on(cpu, &stock->work); 2077 schedule_work_on(cpu, &stock->work);
2193 } 2078 }
2194 } 2079 }
2080 put_cpu();
2195 2081
2196 if (!sync) 2082 if (!sync)
2197 goto out; 2083 goto out;
@@ -3866,18 +3752,14 @@ try_to_free:
3866 /* try to free all pages in this cgroup */ 3752 /* try to free all pages in this cgroup */
3867 shrink = 1; 3753 shrink = 1;
3868 while (nr_retries && mem->res.usage > 0) { 3754 while (nr_retries && mem->res.usage > 0) {
3869 struct memcg_scanrecord rec;
3870 int progress; 3755 int progress;
3871 3756
3872 if (signal_pending(current)) { 3757 if (signal_pending(current)) {
3873 ret = -EINTR; 3758 ret = -EINTR;
3874 goto out; 3759 goto out;
3875 } 3760 }
3876 rec.context = SCAN_BY_SHRINK;
3877 rec.mem = mem;
3878 rec.root = mem;
3879 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3761 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3880 false, &rec); 3762 false);
3881 if (!progress) { 3763 if (!progress) {
3882 nr_retries--; 3764 nr_retries--;
3883 /* maybe some writeback is necessary */ 3765 /* maybe some writeback is necessary */
@@ -4721,54 +4603,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4721} 4603}
4722#endif /* CONFIG_NUMA */ 4604#endif /* CONFIG_NUMA */
4723 4605
4724static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
4725 struct cftype *cft,
4726 struct cgroup_map_cb *cb)
4727{
4728 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4729 char string[64];
4730 int i;
4731
4732 for (i = 0; i < NR_SCANSTATS; i++) {
4733 strcpy(string, scanstat_string[i]);
4734 strcat(string, SCANSTAT_WORD_LIMIT);
4735 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]);
4736 }
4737
4738 for (i = 0; i < NR_SCANSTATS; i++) {
4739 strcpy(string, scanstat_string[i]);
4740 strcat(string, SCANSTAT_WORD_SYSTEM);
4741 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
4742 }
4743
4744 for (i = 0; i < NR_SCANSTATS; i++) {
4745 strcpy(string, scanstat_string[i]);
4746 strcat(string, SCANSTAT_WORD_LIMIT);
4747 strcat(string, SCANSTAT_WORD_HIERARCHY);
4748 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
4749 }
4750 for (i = 0; i < NR_SCANSTATS; i++) {
4751 strcpy(string, scanstat_string[i]);
4752 strcat(string, SCANSTAT_WORD_SYSTEM);
4753 strcat(string, SCANSTAT_WORD_HIERARCHY);
4754 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
4755 }
4756 return 0;
4757}
4758
4759static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
4760 unsigned int event)
4761{
4762 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4763
4764 spin_lock(&mem->scanstat.lock);
4765 memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
4766 memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
4767 spin_unlock(&mem->scanstat.lock);
4768 return 0;
4769}
4770
4771
4772static struct cftype mem_cgroup_files[] = { 4606static struct cftype mem_cgroup_files[] = {
4773 { 4607 {
4774 .name = "usage_in_bytes", 4608 .name = "usage_in_bytes",
@@ -4839,11 +4673,6 @@ static struct cftype mem_cgroup_files[] = {
4839 .mode = S_IRUGO, 4673 .mode = S_IRUGO,
4840 }, 4674 },
4841#endif 4675#endif
4842 {
4843 .name = "vmscan_stat",
4844 .read_map = mem_cgroup_vmscan_stat_read,
4845 .trigger = mem_cgroup_reset_vmscan_stat,
4846 },
4847}; 4676};
4848 4677
4849#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4678#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -5107,7 +4936,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
5107 atomic_set(&mem->refcnt, 1); 4936 atomic_set(&mem->refcnt, 1);
5108 mem->move_charge_at_immigrate = 0; 4937 mem->move_charge_at_immigrate = 0;
5109 mutex_init(&mem->thresholds_lock); 4938 mutex_init(&mem->thresholds_lock);
5110 spin_lock_init(&mem->scanstat.lock);
5111 return &mem->css; 4939 return &mem->css;
5112free_out: 4940free_out:
5113 __mem_cgroup_free(mem); 4941 __mem_cgroup_free(mem);
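
Among the memcg changes above, the mem_cgroup_oom_lock() rewrite is the interesting control flow: walk the hierarchy, take the per-group oom_lock where it is free, and on hitting an already-locked group remember it as the failure point and roll back only the locks taken in this pass. A simplified, self-contained version over a flat array (the kernel walks the cgroup tree with for_each_mem_cgroup_tree_cond):

/*
 * Sketch of the simplified hierarchical OOM-lock logic: lock every free
 * node; if a node is already locked, stop and undo only the locks taken
 * before the failure point.
 */
#include <stdio.h>
#include <stdbool.h>

struct node {
	const char *name;
	bool oom_lock;
};

static bool oom_lock_hierarchy(struct node *nodes, int n)
{
	struct node *failed = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (nodes[i].oom_lock) {
			/* This subtree is already locked by someone else. */
			failed = &nodes[i];
			break;
		}
		nodes[i].oom_lock = true;
	}

	if (!failed)
		return true;

	/* Roll back the locks taken in this call, up to the failure point. */
	for (i = 0; &nodes[i] != failed; i++)
		nodes[i].oom_lock = false;
	return false;
}

int main(void)
{
	struct node tree[] = { {"root", false}, {"a", true}, {"a/b", false} };

	bool ok = oom_lock_hierarchy(tree, 3);
	printf("locked=%d root=%d a=%d a/b=%d\n",
	       ok, tree[0].oom_lock, tree[1].oom_lock, tree[2].oom_lock);
	return 0;
}

The demo fails on "a", and only "root" — the lock actually taken in this pass — gets released, mirroring the `cond = false` early stop and the cleanup loop in the patched kernel function.
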
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8b57173c1dd5..9c51f9f58cac 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
636 struct vm_area_struct *prev; 636 struct vm_area_struct *prev;
637 struct vm_area_struct *vma; 637 struct vm_area_struct *vma;
638 int err = 0; 638 int err = 0;
639 pgoff_t pgoff;
640 unsigned long vmstart; 639 unsigned long vmstart;
641 unsigned long vmend; 640 unsigned long vmend;
642 641
@@ -649,9 +648,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
649 vmstart = max(start, vma->vm_start); 648 vmstart = max(start, vma->vm_start);
650 vmend = min(end, vma->vm_end); 649 vmend = min(end, vma->vm_end);
651 650
652 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
653 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 651 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
654 vma->anon_vma, vma->vm_file, pgoff, new_pol); 652 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
653 new_pol);
655 if (prev) { 654 if (prev) {
656 vma = prev; 655 vma = prev;
657 next = vma->vm_next; 656 next = vma->vm_next;
@@ -1412,7 +1411,9 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1412 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 1411 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1413 1412
1414 if (!err && nmask) { 1413 if (!err && nmask) {
1415 err = copy_from_user(bm, nm, alloc_size); 1414 unsigned long copy_size;
1415 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1416 err = copy_from_user(bm, nm, copy_size);
1416 /* ensure entire bitmap is zeroed */ 1417 /* ensure entire bitmap is zeroed */
1417 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 1418 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1418 err |= compat_put_bitmap(nmask, bm, nr_bits); 1419 err |= compat_put_bitmap(nmask, bm, nr_bits);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d1960744f881..0e309cd1b5b9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -754,21 +754,10 @@ static void balance_dirty_pages(struct address_space *mapping,
754 * 200ms is typically more than enough to curb heavy dirtiers; 754 * 200ms is typically more than enough to curb heavy dirtiers;
755 * (b) the pause time limit makes the dirtiers more responsive. 755 * (b) the pause time limit makes the dirtiers more responsive.
756 */ 756 */
757 if (nr_dirty < dirty_thresh + 757 if (nr_dirty < dirty_thresh &&
758 dirty_thresh / DIRTY_MAXPAUSE_AREA && 758 bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
759 time_after(jiffies, start_time + MAX_PAUSE)) 759 time_after(jiffies, start_time + MAX_PAUSE))
760 break; 760 break;
761 /*
762 * pass-good area. When some bdi gets blocked (eg. NFS server
763 * not responding), or write bandwidth dropped dramatically due
764 * to concurrent reads, or dirty threshold suddenly dropped and
765 * the dirty pages cannot be brought down anytime soon (eg. on
766 * slow USB stick), at least let go of the good bdi's.
767 */
768 if (nr_dirty < dirty_thresh +
769 dirty_thresh / DIRTY_PASSGOOD_AREA &&
770 bdi_dirty < bdi_thresh)
771 break;
772 761
773 /* 762 /*
774 * Increase the delay for each loop, up to our previous 763 * Increase the delay for each loop, up to our previous
diff --git a/mm/slub.c b/mm/slub.c
index 9f662d70eb47..7c54fe83a90c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2377 */ 2377 */
2378 if (unlikely(!prior)) { 2378 if (unlikely(!prior)) {
2379 remove_full(s, page); 2379 remove_full(s, page);
2380 add_partial(n, page, 0); 2380 add_partial(n, page, 1);
2381 stat(s, FREE_ADD_PARTIAL); 2381 stat(s, FREE_ADD_PARTIAL);
2382 } 2382 }
2383 } 2383 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 464621d18eb2..5016f19e1661 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -725,9 +725,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
725#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 725#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
726#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 726#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
727#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 727#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
728#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 728#define VMAP_BBMAP_BITS \
729 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 729 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
730 VMALLOC_PAGES / NR_CPUS / 16)) 730 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
731 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
731 732
732#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 733#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
733 734
@@ -2139,6 +2140,14 @@ struct vm_struct *alloc_vm_area(size_t size)
2139 return NULL; 2140 return NULL;
2140 } 2141 }
2141 2142
2143 /*
2144 * If the allocated address space is passed to a hypercall
2145 * before being used then we cannot rely on a page fault to
2146 * trigger an update of the page tables. So sync all the page
2147 * tables here.
2148 */
2149 vmalloc_sync_all();
2150
2142 return area; 2151 return area;
2143} 2152}
2144EXPORT_SYMBOL_GPL(alloc_vm_area); 2153EXPORT_SYMBOL_GPL(alloc_vm_area);
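
The VMAP_BBMAP_BITS change divides by roundup_pow_of_two(NR_CPUS) instead of NR_CPUS so the middle term of the clamp stays a power of two; with, say, six CPUs the old expression yields a bitmap size that is not a power of two. A small demo of the difference (illustrative page counts and bounds, with a local stand-in for the kernel's roundup_pow_of_two()):

/*
 * Demo of why the divisor is rounded up to a power of two: clamping
 * between two power-of-two bounds only gives a power-of-two result when
 * the middle term is one as well.
 */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))

int main(void)
{
	/* Illustrative numbers, not any machine's real VMALLOC_PAGES. */
	unsigned long pages = 1UL << 18, lo = 128, hi = 4096, ncpus = 6;

	unsigned long before = VMAP_MIN(hi, VMAP_MAX(lo, pages / ncpus / 16));
	unsigned long after  = VMAP_MIN(hi, VMAP_MAX(lo,
				pages / roundup_pow_of_two(ncpus) / 16));

	printf("old divisor: %lu bits\n", before);	/* 2730, not a power of two */
	printf("new divisor: %lu bits\n", after);	/* 2048, power of two       */
	return 0;
}

With six CPUs the unrounded divide gives 2730 bits; rounding the CPU count up to eight keeps the per-block bitmap at 2048 bits, so VMAP_BLOCK_SIZE stays a power-of-two multiple of PAGE_SIZE.
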
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ef69124fa3e..b55699cd9067 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -105,7 +105,6 @@ struct scan_control {
105 105
106 /* Which cgroup do we reclaim from */ 106 /* Which cgroup do we reclaim from */
107 struct mem_cgroup *mem_cgroup; 107 struct mem_cgroup *mem_cgroup;
108 struct memcg_scanrecord *memcg_record;
109 108
110 /* 109 /*
111 * Nodemask of nodes allowed by the caller. If NULL, all nodes 110 * Nodemask of nodes allowed by the caller. If NULL, all nodes
@@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
1349 int file = is_file_lru(lru); 1348 int file = is_file_lru(lru);
1350 int numpages = hpage_nr_pages(page); 1349 int numpages = hpage_nr_pages(page);
1351 reclaim_stat->recent_rotated[file] += numpages; 1350 reclaim_stat->recent_rotated[file] += numpages;
1352 if (!scanning_global_lru(sc))
1353 sc->memcg_record->nr_rotated[file] += numpages;
1354 } 1351 }
1355 if (!pagevec_add(&pvec, page)) { 1352 if (!pagevec_add(&pvec, page)) {
1356 spin_unlock_irq(&zone->lru_lock); 1353 spin_unlock_irq(&zone->lru_lock);
@@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
1394 1391
1395 reclaim_stat->recent_scanned[0] += *nr_anon; 1392 reclaim_stat->recent_scanned[0] += *nr_anon;
1396 reclaim_stat->recent_scanned[1] += *nr_file; 1393 reclaim_stat->recent_scanned[1] += *nr_file;
1397 if (!scanning_global_lru(sc)) {
1398 sc->memcg_record->nr_scanned[0] += *nr_anon;
1399 sc->memcg_record->nr_scanned[1] += *nr_file;
1400 }
1401} 1394}
1402 1395
1403/* 1396/*
@@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1511 nr_reclaimed += shrink_page_list(&page_list, zone, sc); 1504 nr_reclaimed += shrink_page_list(&page_list, zone, sc);
1512 } 1505 }
1513 1506
1514 if (!scanning_global_lru(sc))
1515 sc->memcg_record->nr_freed[file] += nr_reclaimed;
1516
1517 local_irq_disable(); 1507 local_irq_disable();
1518 if (current_is_kswapd()) 1508 if (current_is_kswapd())
1519 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); 1509 __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
@@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1613 } 1603 }
1614 1604
1615 reclaim_stat->recent_scanned[file] += nr_taken; 1605 reclaim_stat->recent_scanned[file] += nr_taken;
1616 if (!scanning_global_lru(sc))
1617 sc->memcg_record->nr_scanned[file] += nr_taken;
1618 1606
1619 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1607 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1620 if (file) 1608 if (file)
@@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1666 * get_scan_ratio. 1654 * get_scan_ratio.
1667 */ 1655 */
1668 reclaim_stat->recent_rotated[file] += nr_rotated; 1656 reclaim_stat->recent_rotated[file] += nr_rotated;
1669 if (!scanning_global_lru(sc))
1670 sc->memcg_record->nr_rotated[file] += nr_rotated;
1671 1657
1672 move_active_pages_to_lru(zone, &l_active, 1658 move_active_pages_to_lru(zone, &l_active,
1673 LRU_ACTIVE + file * LRU_FILE); 1659 LRU_ACTIVE + file * LRU_FILE);
@@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1808 u64 fraction[2], denominator; 1794 u64 fraction[2], denominator;
1809 enum lru_list l; 1795 enum lru_list l;
1810 int noswap = 0; 1796 int noswap = 0;
1811 int force_scan = 0; 1797 bool force_scan = false;
1812 unsigned long nr_force_scan[2]; 1798 unsigned long nr_force_scan[2];
1813 1799
1814 1800 /* kswapd does zone balancing and needs to scan this zone */
1815 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1801 if (scanning_global_lru(sc) && current_is_kswapd())
1816 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1802 force_scan = true;
1817 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1803 /* memcg may have small limit and need to avoid priority drop */
1818 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1804 if (!scanning_global_lru(sc))
1819 1805 force_scan = true;
1820 if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
1821 /* kswapd does zone balancing and need to scan this zone */
1822 if (scanning_global_lru(sc) && current_is_kswapd())
1823 force_scan = 1;
1824 /* memcg may have small limit and need to avoid priority drop */
1825 if (!scanning_global_lru(sc))
1826 force_scan = 1;
1827 }
1828 1806
1829 /* If we have no swap space, do not bother scanning anon pages. */ 1807 /* If we have no swap space, do not bother scanning anon pages. */
1830 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1808 if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1837 goto out; 1815 goto out;
1838 } 1816 }
1839 1817
1818 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1819 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1820 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1821 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1822
1840 if (scanning_global_lru(sc)) { 1823 if (scanning_global_lru(sc)) {
1841 free = zone_page_state(zone, NR_FREE_PAGES); 1824 free = zone_page_state(zone, NR_FREE_PAGES);
1842 /* If we have very few page cache pages, 1825 /* If we have very few page cache pages,
@@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2268#ifdef CONFIG_CGROUP_MEM_RES_CTLR 2251#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2269 2252
2270unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2253unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2271 gfp_t gfp_mask, bool noswap, 2254 gfp_t gfp_mask, bool noswap,
2272 struct zone *zone, 2255 struct zone *zone,
2273 struct memcg_scanrecord *rec, 2256 unsigned long *nr_scanned)
2274 unsigned long *scanned)
2275{ 2257{
2276 struct scan_control sc = { 2258 struct scan_control sc = {
2277 .nr_scanned = 0, 2259 .nr_scanned = 0,
@@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2281 .may_swap = !noswap, 2263 .may_swap = !noswap,
2282 .order = 0, 2264 .order = 0,
2283 .mem_cgroup = mem, 2265 .mem_cgroup = mem,
2284 .memcg_record = rec,
2285 }; 2266 };
2286 unsigned long start, end;
2287 2267
2288 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2268 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2289 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2269 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2292 sc.may_writepage, 2272 sc.may_writepage,
2293 sc.gfp_mask); 2273 sc.gfp_mask);
2294 2274
2295 start = sched_clock();
2296 /* 2275 /*
2297 * NOTE: Although we can get the priority field, using it 2276 * NOTE: Although we can get the priority field, using it
2298 * here is not a good idea, since it limits the pages we can scan. 2277 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2301 * the priority and make it zero. 2280 * the priority and make it zero.
2302 */ 2281 */
2303 shrink_zone(0, zone, &sc); 2282 shrink_zone(0, zone, &sc);
2304 end = sched_clock();
2305
2306 if (rec)
2307 rec->elapsed += end - start;
2308 *scanned = sc.nr_scanned;
2309 2283
2310 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2284 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2311 2285
2286 *nr_scanned = sc.nr_scanned;
2312 return sc.nr_reclaimed; 2287 return sc.nr_reclaimed;
2313} 2288}
2314 2289
2315unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2290unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2316 gfp_t gfp_mask, 2291 gfp_t gfp_mask,
2317 bool noswap, 2292 bool noswap)
2318 struct memcg_scanrecord *rec)
2319{ 2293{
2320 struct zonelist *zonelist; 2294 struct zonelist *zonelist;
2321 unsigned long nr_reclaimed; 2295 unsigned long nr_reclaimed;
2322 unsigned long start, end;
2323 int nid; 2296 int nid;
2324 struct scan_control sc = { 2297 struct scan_control sc = {
2325 .may_writepage = !laptop_mode, 2298 .may_writepage = !laptop_mode,
@@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2328 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2301 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2329 .order = 0, 2302 .order = 0,
2330 .mem_cgroup = mem_cont, 2303 .mem_cgroup = mem_cont,
2331 .memcg_record = rec,
2332 .nodemask = NULL, /* we don't care the placement */ 2304 .nodemask = NULL, /* we don't care the placement */
2333 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2305 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2334 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2306 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2337 .gfp_mask = sc.gfp_mask, 2309 .gfp_mask = sc.gfp_mask,
2338 }; 2310 };
2339 2311
2340 start = sched_clock();
2341 /* 2312 /*
2342 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2313 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2343 * take care of from where we get pages. So the node where we start the 2314 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2352 sc.gfp_mask); 2323 sc.gfp_mask);
2353 2324
2354 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2325 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2355 end = sched_clock();
2356 if (rec)
2357 rec->elapsed += end - start;
2358 2326
2359 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2327 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2360 2328
@@ -2529,6 +2497,9 @@ loop_again:
2529 high_wmark_pages(zone), 0, 0)) { 2497 high_wmark_pages(zone), 0, 0)) {
2530 end_zone = i; 2498 end_zone = i;
2531 break; 2499 break;
2500 } else {
2501 /* If balanced, clear the congested flag */
2502 zone_clear_flag(zone, ZONE_CONGESTED);
2532 } 2503 }
2533 } 2504 }
2534 if (i < 0) 2505 if (i < 0)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c18b7694b2..d52b13d28e8f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
659} 659}
660#endif 660#endif
661 661
662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) 662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
663#ifdef CONFIG_ZONE_DMA 663#ifdef CONFIG_ZONE_DMA
664#define TEXT_FOR_DMA(xx) xx "_dma", 664#define TEXT_FOR_DMA(xx) xx "_dma",
665#else 665#else
@@ -788,7 +788,7 @@ const char * const vmstat_text[] = {
788 788
789#endif /* CONFIG_VM_EVENTS_COUNTERS */ 789#endif /* CONFIG_VM_EVENTS_COUNTERS */
790}; 790};
791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ 791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
792 792
793 793
794#ifdef CONFIG_PROC_FS 794#ifdef CONFIG_PROC_FS
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 175b5135bdcf..e317583fcc73 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
263{ 263{
264 int in, out, inp, outp; 264 int in, out, inp, outp;
265 struct virtio_chan *chan = client->trans; 265 struct virtio_chan *chan = client->trans;
266 char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
267 unsigned long flags; 266 unsigned long flags;
268 size_t pdata_off = 0; 267 size_t pdata_off = 0;
269 struct trans_rpage_info *rpinfo = NULL; 268 struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
346 * Arrange in such a way that server places header in the 345 * Arrange in such a way that server places header in the
347 * alloced memory and payload onto the user buffer. 346 * alloced memory and payload onto the user buffer.
348 */ 347 */
349 inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); 348 inp = pack_sg_list(chan->sg, out,
349 VIRTQUEUE_NUM, req->rc->sdata, 11);
350 /* 350 /*
351 * Running executables in the filesystem may result in 351 * Running executables in the filesystem may result in
352 * a read request with kernel buffer as opposed to user buffer. 352 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
366 } 366 }
367 in += inp; 367 in += inp;
368 } else { 368 } else {
369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
370 req->rc->capacity); 370 req->rc->sdata, req->rc->capacity);
371 } 371 }
372 372
373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); 373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
592 .close = p9_virtio_close, 592 .close = p9_virtio_close,
593 .request = p9_virtio_request, 593 .request = p9_virtio_request,
594 .cancel = p9_virtio_cancel, 594 .cancel = p9_virtio_cancel,
595 .maxsize = PAGE_SIZE*VIRTQUEUE_NUM, 595

596 /*
597 * We leave one entry for input and one entry for response
 598 headers. We also skip one more entry to accommodate addresses
 599 that are not at a page boundary, which can result in an extra
600 * page in zero copy.
601 */
602 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
596 .pref = P9_TRANS_PREF_PAYLOAD_SEP, 603 .pref = P9_TRANS_PREF_PAYLOAD_SEP,
597 .def = 0, 604 .def = 0,
598 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 52cfd0c3ea71..d07223c834af 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
558 spin_unlock_irqrestore(&rq->lock, flags); 558 spin_unlock_irqrestore(&rq->lock, flags);
559 559
560 skb_queue_walk_safe(&queue, skb, tmp) { 560 skb_queue_walk_safe(&queue, skb, tmp) {
561 struct net_device *dev = skb->dev; 561 struct net_device *dev;
562
563 br2684_push(atmvcc, skb);
564 dev = skb->dev;
562 565
563 dev->stats.rx_bytes -= skb->len; 566 dev->stats.rx_bytes -= skb->len;
564 dev->stats.rx_packets--; 567 dev->stats.rx_packets--;
565
566 br2684_push(atmvcc, skb);
567 } 568 }
568 569
569 /* initialize netdev carrier state */ 570 /* initialize netdev carrier state */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b499912..117e0d161780 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -494,9 +494,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
494 BT_DBG("sk %p", sk); 494 BT_DBG("sk %p", sk);
495 495
496 add_wait_queue(sk_sleep(sk), &wait); 496 add_wait_queue(sk_sleep(sk), &wait);
497 set_current_state(TASK_INTERRUPTIBLE);
497 while (sk->sk_state != state) { 498 while (sk->sk_state != state) {
498 set_current_state(TASK_INTERRUPTIBLE);
499
500 if (!timeo) { 499 if (!timeo) {
501 err = -EINPROGRESS; 500 err = -EINPROGRESS;
502 break; 501 break;
@@ -510,12 +509,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
510 release_sock(sk); 509 release_sock(sk);
511 timeo = schedule_timeout(timeo); 510 timeo = schedule_timeout(timeo);
512 lock_sock(sk); 511 lock_sock(sk);
512 set_current_state(TASK_INTERRUPTIBLE);
513 513
514 err = sock_error(sk); 514 err = sock_error(sk);
515 if (err) 515 if (err)
516 break; 516 break;
517 } 517 }
518 set_current_state(TASK_RUNNING); 518 __set_current_state(TASK_RUNNING);
519 remove_wait_queue(sk_sleep(sk), &wait); 519 remove_wait_queue(sk_sleep(sk), &wait);
520 return err; 520 return err;
521} 521}
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8e..e7ee5314f39a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
155 unsigned int role; 155 unsigned int role;
156 unsigned long state; 156 unsigned long state;
157 unsigned long flags; 157 unsigned long flags;
158 atomic_t terminate;
158 struct task_struct *task; 159 struct task_struct *task;
159 160
160 struct ethhdr eh; 161 struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010ce..d9edfe8bf9d6 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
484 484
485 init_waitqueue_entry(&wait, current); 485 init_waitqueue_entry(&wait, current);
486 add_wait_queue(sk_sleep(sk), &wait); 486 add_wait_queue(sk_sleep(sk), &wait);
487 while (!kthread_should_stop()) { 487 while (1) {
488 set_current_state(TASK_INTERRUPTIBLE); 488 set_current_state(TASK_INTERRUPTIBLE);
489 489
490 if (atomic_read(&s->terminate))
491 break;
490 /* RX */ 492 /* RX */
491 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
492 skb_orphan(skb); 494 skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
504 506
505 schedule(); 507 schedule();
506 } 508 }
507 set_current_state(TASK_RUNNING); 509 __set_current_state(TASK_RUNNING);
508 remove_wait_queue(sk_sleep(sk), &wait); 510 remove_wait_queue(sk_sleep(sk), &wait);
509 511
510 /* Cleanup session */ 512 /* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
640 down_read(&bnep_session_sem); 642 down_read(&bnep_session_sem);
641 643
642 s = __bnep_get_session(req->dst); 644 s = __bnep_get_session(req->dst);
643 if (s) 645 if (s) {
644 kthread_stop(s->task); 646 atomic_inc(&s->terminate);
645 else 647 wake_up_process(s->task);
648 } else
646 err = -ENOENT; 649 err = -ENOENT;
647 650
648 up_read(&bnep_session_sem); 651 up_read(&bnep_session_sem);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 040f67b12978..50f0d135eb8f 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -386,7 +386,8 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
386 386
387 capi_ctr_down(ctrl); 387 capi_ctr_down(ctrl);
388 388
389 kthread_stop(session->task); 389 atomic_inc(&session->terminate);
390 wake_up_process(session->task);
390} 391}
391 392
392static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) 393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index db43b54ac9af..c32638dddbf9 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -81,6 +81,7 @@ struct cmtp_session {
81 81
82 char name[BTNAMSIZ]; 82 char name[BTNAMSIZ];
83 83
84 atomic_t terminate;
84 struct task_struct *task; 85 struct task_struct *task;
85 86
86 wait_queue_head_t wait; 87 wait_queue_head_t wait;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index c5b11af908be..521baa4fe835 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -292,9 +292,11 @@ static int cmtp_session(void *arg)
292 292
293 init_waitqueue_entry(&wait, current); 293 init_waitqueue_entry(&wait, current);
294 add_wait_queue(sk_sleep(sk), &wait); 294 add_wait_queue(sk_sleep(sk), &wait);
295 while (!kthread_should_stop()) { 295 while (1) {
296 set_current_state(TASK_INTERRUPTIBLE); 296 set_current_state(TASK_INTERRUPTIBLE);
297 297
298 if (atomic_read(&session->terminate))
299 break;
298 if (sk->sk_state != BT_CONNECTED) 300 if (sk->sk_state != BT_CONNECTED)
299 break; 301 break;
300 302
@@ -307,7 +309,7 @@ static int cmtp_session(void *arg)
307 309
308 schedule(); 310 schedule();
309 } 311 }
310 set_current_state(TASK_RUNNING); 312 __set_current_state(TASK_RUNNING);
311 remove_wait_queue(sk_sleep(sk), &wait); 313 remove_wait_queue(sk_sleep(sk), &wait);
312 314
313 down_write(&cmtp_session_sem); 315 down_write(&cmtp_session_sem);
@@ -380,16 +382,17 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
380 382
381 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 383 if (!(session->flags & (1 << CMTP_LOOPBACK))) {
382 err = cmtp_attach_device(session); 384 err = cmtp_attach_device(session);
383 if (err < 0) 385 if (err < 0) {
384 goto detach; 386 atomic_inc(&session->terminate);
387 wake_up_process(session->task);
388 up_write(&cmtp_session_sem);
389 return err;
390 }
385 } 391 }
386 392
387 up_write(&cmtp_session_sem); 393 up_write(&cmtp_session_sem);
388 return 0; 394 return 0;
389 395
390detach:
391 cmtp_detach_device(session);
392
393unlink: 396unlink:
394 __cmtp_unlink_session(session); 397 __cmtp_unlink_session(session);
395 398
@@ -414,7 +417,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
414 skb_queue_purge(&session->transmit); 417 skb_queue_purge(&session->transmit);
415 418
416 /* Stop session thread */ 419 /* Stop session thread */
417 kthread_stop(session->task); 420 atomic_inc(&session->terminate);
421 wake_up_process(session->task);
418 } else 422 } else
419 err = -ENOENT; 423 err = -ENOENT;
420 424
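
The BNEP and CMTP hunks replace kthread_should_stop()/kthread_stop() with a per-session atomic terminate flag plus wake_up_process(), since these session threads can exit on their own and free their own state. A rough pthreads analogue of that teardown protocol (condition variable standing in for the kernel wakeup; not the kernel API):

/*
 * Userspace analogue of the session-thread teardown: whoever deletes the
 * connection sets an atomic `terminate` flag and wakes the thread; the
 * thread notices the flag at the top of its loop and cleans itself up.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct session {
	atomic_int terminate;
	pthread_mutex_t lock;
	pthread_cond_t wait;
	pthread_t task;
};

static void *session_thread(void *arg)
{
	struct session *s = arg;

	pthread_mutex_lock(&s->lock);
	while (!atomic_load(&s->terminate)) {
		/* ...process queued frames here... */
		pthread_cond_wait(&s->wait, &s->lock);
	}
	pthread_mutex_unlock(&s->lock);

	printf("session thread: terminating and cleaning up\n");
	/* The real threads free their own session struct at this point,
	 * which is why kthread_stop() was unsafe to use on them. */
	return NULL;
}

static void session_del(struct session *s)
{
	pthread_mutex_lock(&s->lock);
	atomic_store(&s->terminate, 1);		/* atomic_inc(&s->terminate) */
	pthread_cond_signal(&s->wait);		/* wake_up_process(s->task)  */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct session s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	pthread_create(&s.task, NULL, session_thread, &s);
	session_del(&s);
	pthread_join(s.task, NULL);
	return 0;
}
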
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ec0bc3f60f2e..56943add45cc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1209,7 +1209,6 @@ static void hci_cmd_timer(unsigned long arg)
1209 1209
1210 BT_ERR("%s command tx timeout", hdev->name); 1210 BT_ERR("%s command tx timeout", hdev->name);
1211 atomic_set(&hdev->cmd_cnt, 1); 1211 atomic_set(&hdev->cmd_cnt, 1);
1212 clear_bit(HCI_RESET, &hdev->flags);
1213 tasklet_schedule(&hdev->cmd_task); 1212 tasklet_schedule(&hdev->cmd_task);
1214} 1213}
1215 1214
@@ -1327,7 +1326,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1327 1326
1328 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1329 if (!entry) { 1328 if (!entry) {
1330 return -ENOMEM; 1329 err = -ENOMEM;
1331 goto err; 1330 goto err;
1332 } 1331 }
1333 1332
@@ -2408,7 +2407,10 @@ static void hci_cmd_task(unsigned long arg)
2408 if (hdev->sent_cmd) { 2407 if (hdev->sent_cmd) {
2409 atomic_dec(&hdev->cmd_cnt); 2408 atomic_dec(&hdev->cmd_cnt);
2410 hci_send_frame(skb); 2409 hci_send_frame(skb);
2411 mod_timer(&hdev->cmd_timer, 2410 if (test_bit(HCI_RESET, &hdev->flags))
2411 del_timer(&hdev->cmd_timer);
2412 else
2413 mod_timer(&hdev->cmd_timer,
2412 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); 2414 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2413 } else { 2415 } else {
2414 skb_queue_head(&hdev->cmd_q, skb); 2416 skb_queue_head(&hdev->cmd_q, skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a40170e022e8..7ef4eb4435fb 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
58 if (status) 58 if (status)
59 return; 59 return;
60 60
61 if (test_bit(HCI_MGMT, &hdev->flags) && 61 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
62 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 62 test_bit(HCI_MGMT, &hdev->flags))
63 mgmt_discovering(hdev->id, 0); 63 mgmt_discovering(hdev->id, 0);
64 64
65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
76 if (status) 76 if (status)
77 return; 77 return;
78 78
79 if (test_bit(HCI_MGMT, &hdev->flags) && 79 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
80 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 80 test_bit(HCI_MGMT, &hdev->flags))
81 mgmt_discovering(hdev->id, 0); 81 mgmt_discovering(hdev->id, 0);
82 82
83 hci_conn_check_pending(hdev); 83 hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
959 return; 959 return;
960 } 960 }
961 961
962 if (test_bit(HCI_MGMT, &hdev->flags) && 962 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
963 !test_and_set_bit(HCI_INQUIRY, 963 test_bit(HCI_MGMT, &hdev->flags))
964 &hdev->flags))
965 mgmt_discovering(hdev->id, 1); 964 mgmt_discovering(hdev->id, 1);
966} 965}
967 966
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
1340 1339
1341 BT_DBG("%s status %d", hdev->name, status); 1340 BT_DBG("%s status %d", hdev->name, status);
1342 1341
1343 if (test_bit(HCI_MGMT, &hdev->flags) && 1342 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
1344 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1343 test_bit(HCI_MGMT, &hdev->flags))
1345 mgmt_discovering(hdev->id, 0); 1344 mgmt_discovering(hdev->id, 0);
1346 1345
1347 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1346 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7cc..fb68f344c34a 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
764 764
765 up_write(&hidp_session_sem); 765 up_write(&hidp_session_sem);
766 766
767 kfree(session->rd_data);
767 kfree(session); 768 kfree(session);
768 return 0; 769 return 0;
769} 770}
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
841 842
842 err = input_register_device(input); 843 err = input_register_device(input);
843 if (err < 0) { 844 if (err < 0) {
844 hci_conn_put_device(session->conn); 845 input_free_device(input);
846 session->input = NULL;
845 return err; 847 return err;
846 } 848 }
847 849
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1044 } 1046 }
1045 1047
1046 err = hid_add_device(session->hid); 1048 err = hid_add_device(session->hid);
1047 if (err < 0) 1049 if (err < 0) {
1048 goto err_add_device; 1050 atomic_inc(&session->terminate);
1051 wake_up_process(session->task);
1052 up_write(&hidp_session_sem);
1053 return err;
1054 }
1049 1055
1050 if (session->input) { 1056 if (session->input) {
1051 hidp_send_ctrl_message(session, 1057 hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1059 up_write(&hidp_session_sem); 1065 up_write(&hidp_session_sem);
1060 return 0; 1066 return 0;
1061 1067
1062err_add_device:
1063 hid_destroy_device(session->hid);
1064 session->hid = NULL;
1065 atomic_inc(&session->terminate);
1066 wake_up_process(session->task);
1067
1068unlink: 1068unlink:
1069 hidp_del_timer(session); 1069 hidp_del_timer(session);
1070 1070
@@ -1090,7 +1090,6 @@ purge:
1090failed: 1090failed:
1091 up_write(&hidp_session_sem); 1091 up_write(&hidp_session_sem);
1092 1092
1093 input_free_device(session->input);
1094 kfree(session); 1093 kfree(session);
1095 return err; 1094 return err;
1096} 1095}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 3204ba8a701c..b3bdb482bbe6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1159,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
1159 int timeo = HZ/5; 1159 int timeo = HZ/5;
1160 1160
1161 add_wait_queue(sk_sleep(sk), &wait); 1161 add_wait_queue(sk_sleep(sk), &wait);
1162 while ((chan->unacked_frames > 0 && chan->conn)) { 1162 set_current_state(TASK_INTERRUPTIBLE);
1163 set_current_state(TASK_INTERRUPTIBLE); 1163 while (chan->unacked_frames > 0 && chan->conn) {
1164
1165 if (!timeo) 1164 if (!timeo)
1166 timeo = HZ/5; 1165 timeo = HZ/5;
1167 1166
@@ -1173,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
1173 release_sock(sk); 1172 release_sock(sk);
1174 timeo = schedule_timeout(timeo); 1173 timeo = schedule_timeout(timeo);
1175 lock_sock(sk); 1174 lock_sock(sk);
1175 set_current_state(TASK_INTERRUPTIBLE);
1176 1176
1177 err = sock_error(sk); 1177 err = sock_error(sk);
1178 if (err) 1178 if (err)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c36b3e8739c..61f1f623091d 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -235,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
235 235
236 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 236 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
237 237
238 if (sk->sk_state != BT_LISTEN) {
239 err = -EBADFD;
240 goto done;
241 }
242
243 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 238 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
244 239
245 BT_DBG("sk %p timeo %ld", sk, timeo); 240 BT_DBG("sk %p timeo %ld", sk, timeo);
246 241
247 /* Wait for an incoming connection. (wake-one). */ 242 /* Wait for an incoming connection. (wake-one). */
248 add_wait_queue_exclusive(sk_sleep(sk), &wait); 243 add_wait_queue_exclusive(sk_sleep(sk), &wait);
249 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 244 while (1) {
250 set_current_state(TASK_INTERRUPTIBLE); 245 set_current_state(TASK_INTERRUPTIBLE);
251 if (!timeo) { 246
252 err = -EAGAIN; 247 if (sk->sk_state != BT_LISTEN) {
248 err = -EBADFD;
253 break; 249 break;
254 } 250 }
255 251
256 release_sock(sk); 252 nsk = bt_accept_dequeue(sk, newsock);
257 timeo = schedule_timeout(timeo); 253 if (nsk)
258 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 254 break;
259 255
260 if (sk->sk_state != BT_LISTEN) { 256 if (!timeo) {
261 err = -EBADFD; 257 err = -EAGAIN;
262 break; 258 break;
263 } 259 }
264 260
@@ -266,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
266 err = sock_intr_errno(timeo); 262 err = sock_intr_errno(timeo);
267 break; 263 break;
268 } 264 }
265
266 release_sock(sk);
267 timeo = schedule_timeout(timeo);
268 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
269 } 269 }
270 set_current_state(TASK_RUNNING); 270 __set_current_state(TASK_RUNNING);
271 remove_wait_queue(sk_sleep(sk), &wait); 271 remove_wait_queue(sk_sleep(sk), &wait);
272 272
273 if (err) 273 if (err)
@@ -993,7 +993,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
993 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 993 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
994 994
995 sk->sk_destruct = l2cap_sock_destruct; 995 sk->sk_destruct = l2cap_sock_destruct;
996 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); 996 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
997 997
998 sock_reset_flag(sk, SOCK_ZAPPED); 998 sock_reset_flag(sk, SOCK_ZAPPED);
999 999
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7054f7..5ba3f6df665c 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
62#define rfcomm_lock() mutex_lock(&rfcomm_mutex) 62#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) 63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
64 64
65static unsigned long rfcomm_event;
66 65
67static LIST_HEAD(session_list); 66static LIST_HEAD(session_list);
68 67
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
120{ 119{
121 if (!rfcomm_thread) 120 if (!rfcomm_thread)
122 return; 121 return;
123 set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
124 wake_up_process(rfcomm_thread); 122 wake_up_process(rfcomm_thread);
125} 123}
126 124
@@ -2038,19 +2036,18 @@ static int rfcomm_run(void *unused)
2038 2036
2039 rfcomm_add_listener(BDADDR_ANY); 2037 rfcomm_add_listener(BDADDR_ANY);
2040 2038
2041 while (!kthread_should_stop()) { 2039 while (1) {
2042 set_current_state(TASK_INTERRUPTIBLE); 2040 set_current_state(TASK_INTERRUPTIBLE);
2043 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { 2041
2044 /* No pending events. Let's sleep. 2042 if (kthread_should_stop())
2045 * Incoming connections and data will wake us up. */ 2043 break;
2046 schedule();
2047 }
2048 set_current_state(TASK_RUNNING);
2049 2044
2050 /* Process stuff */ 2045 /* Process stuff */
2051 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
2052 rfcomm_process_sessions(); 2046 rfcomm_process_sessions();
2047
2048 schedule();
2053 } 2049 }
2050 __set_current_state(TASK_RUNNING);
2054 2051
2055 rfcomm_kill_listener(); 2052 rfcomm_kill_listener();
2056 2053
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8f01e6b11a70..482722bbc7a0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
485 485
486 lock_sock(sk); 486 lock_sock(sk);
487 487
488 if (sk->sk_state != BT_LISTEN) {
489 err = -EBADFD;
490 goto done;
491 }
492
493 if (sk->sk_type != SOCK_STREAM) { 488 if (sk->sk_type != SOCK_STREAM) {
494 err = -EINVAL; 489 err = -EINVAL;
495 goto done; 490 goto done;
@@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
501 496
502 /* Wait for an incoming connection. (wake-one). */ 497 /* Wait for an incoming connection. (wake-one). */
503 add_wait_queue_exclusive(sk_sleep(sk), &wait); 498 add_wait_queue_exclusive(sk_sleep(sk), &wait);
504 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 499 while (1) {
505 set_current_state(TASK_INTERRUPTIBLE); 500 set_current_state(TASK_INTERRUPTIBLE);
506 if (!timeo) { 501
507 err = -EAGAIN; 502 if (sk->sk_state != BT_LISTEN) {
503 err = -EBADFD;
508 break; 504 break;
509 } 505 }
510 506
511 release_sock(sk); 507 nsk = bt_accept_dequeue(sk, newsock);
512 timeo = schedule_timeout(timeo); 508 if (nsk)
513 lock_sock(sk); 509 break;
514 510
515 if (sk->sk_state != BT_LISTEN) { 511 if (!timeo) {
516 err = -EBADFD; 512 err = -EAGAIN;
517 break; 513 break;
518 } 514 }
519 515
@@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
521 err = sock_intr_errno(timeo); 517 err = sock_intr_errno(timeo);
522 break; 518 break;
523 } 519 }
520
521 release_sock(sk);
522 timeo = schedule_timeout(timeo);
523 lock_sock(sk);
524 } 524 }
525 set_current_state(TASK_RUNNING); 525 __set_current_state(TASK_RUNNING);
526 remove_wait_queue(sk_sleep(sk), &wait); 526 remove_wait_queue(sk_sleep(sk), &wait);
527 527
528 if (err) 528 if (err)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 4c3621b5e0aa..8270f05e3f1f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -564,30 +564,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
564 564
565 lock_sock(sk); 565 lock_sock(sk);
566 566
567 if (sk->sk_state != BT_LISTEN) {
568 err = -EBADFD;
569 goto done;
570 }
571
572 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 567 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
573 568
574 BT_DBG("sk %p timeo %ld", sk, timeo); 569 BT_DBG("sk %p timeo %ld", sk, timeo);
575 570
576 /* Wait for an incoming connection. (wake-one). */ 571 /* Wait for an incoming connection. (wake-one). */
577 add_wait_queue_exclusive(sk_sleep(sk), &wait); 572 add_wait_queue_exclusive(sk_sleep(sk), &wait);
578 while (!(ch = bt_accept_dequeue(sk, newsock))) { 573 while (1) {
579 set_current_state(TASK_INTERRUPTIBLE); 574 set_current_state(TASK_INTERRUPTIBLE);
580 if (!timeo) { 575
581 err = -EAGAIN; 576 if (sk->sk_state != BT_LISTEN) {
577 err = -EBADFD;
582 break; 578 break;
583 } 579 }
584 580
585 release_sock(sk); 581 ch = bt_accept_dequeue(sk, newsock);
586 timeo = schedule_timeout(timeo); 582 if (ch)
587 lock_sock(sk); 583 break;
588 584
589 if (sk->sk_state != BT_LISTEN) { 585 if (!timeo) {
590 err = -EBADFD; 586 err = -EAGAIN;
591 break; 587 break;
592 } 588 }
593 589
@@ -595,8 +591,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
595 err = sock_intr_errno(timeo); 591 err = sock_intr_errno(timeo);
596 break; 592 break;
597 } 593 }
594
595 release_sock(sk);
596 timeo = schedule_timeout(timeo);
597 lock_sock(sk);
598 } 598 }
599 set_current_state(TASK_RUNNING); 599 __set_current_state(TASK_RUNNING);
600 remove_wait_queue(sk_sleep(sk), &wait); 600 remove_wait_queue(sk_sleep(sk), &wait);
601 601
602 if (err) 602 if (err)
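
The three Bluetooth accept() paths above (L2CAP, RFCOMM, SCO) are reshaped into the same loop: mark the task interruptible, re-check the listen state, try to dequeue a child socket, test the timeout and pending signals, and only then drop the socket lock and sleep. A minimal userspace sketch of that ordering follows, using a mutex/condvar in place of the socket lock and wait queue; all names and return codes here are illustrative, not taken from the patch.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <time.h>

	struct listener {
		pthread_mutex_t lock;		/* stands in for lock_sock()/release_sock() */
		pthread_cond_t wait;		/* stands in for sk_sleep(sk) */
		bool listening;			/* stands in for sk_state == BT_LISTEN */
		void *pending;			/* stands in for the accept queue */
	};

	/* Returns the accepted item, or NULL with *err set. */
	void *accept_one(struct listener *l, const struct timespec *deadline, int *err)
	{
		void *item = NULL;

		*err = 0;
		pthread_mutex_lock(&l->lock);
		for (;;) {
			if (!l->listening) {	/* -EBADFD in the kernel loop */
				*err = -1;
				break;
			}
			item = l->pending;	/* bt_accept_dequeue() analogue */
			if (item) {
				l->pending = NULL;
				break;
			}
			/* Sleeping releases the lock and re-takes it on wakeup,
			 * like release_sock() + schedule_timeout() + lock_sock();
			 * every condition is re-checked on the next iteration. */
			if (pthread_cond_timedwait(&l->wait, &l->lock, deadline)) {
				*err = -2;	/* -EAGAIN on timeout */
				break;
			}
		}
		pthread_mutex_unlock(&l->lock);
		return item;
	}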
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 043a5eb8cafc..13f34acb2a8e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -229,6 +229,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
229int br_add_bridge(struct net *net, const char *name) 229int br_add_bridge(struct net *net, const char *name)
230{ 230{
231 struct net_device *dev; 231 struct net_device *dev;
232 int res;
232 233
233 dev = alloc_netdev(sizeof(struct net_bridge), name, 234 dev = alloc_netdev(sizeof(struct net_bridge), name,
234 br_dev_setup); 235 br_dev_setup);
@@ -238,7 +239,10 @@ int br_add_bridge(struct net *net, const char *name)
238 239
239 dev_net_set(dev, net); 240 dev_net_set(dev, net);
240 241
241 return register_netdev(dev); 242 res = register_netdev(dev);
243 if (res)
244 free_netdev(dev);
245 return res;
242} 246}
243 247
244int br_del_bridge(struct net *net, const char *name) 248int br_del_bridge(struct net *net, const char *name)
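
The br_add_bridge() change plugs a leak: if register_netdev() fails, the freshly allocated net_device is now freed before the error is returned. A small plain-C sketch of the same allocate/register/free-on-failure shape, with hypothetical names standing in for the netdev calls:

	#include <stdlib.h>

	struct widget { int id; };

	/* Hypothetical registration step that can fail. */
	static int register_widget(struct widget *w) { return w->id ? 0 : -1; }

	int add_widget(int id)
	{
		struct widget *w = malloc(sizeof(*w));
		int res;

		if (!w)
			return -1;
		w->id = id;

		res = register_widget(w);
		if (res)
			free(w);	/* before the fix, this allocation leaked */
		return res;
	}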
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2d85ca7111d3..995cbe0ac0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1456{ 1456{
1457 struct sk_buff *skb2; 1457 struct sk_buff *skb2;
1458 const struct ipv6hdr *ip6h; 1458 const struct ipv6hdr *ip6h;
1459 struct icmp6hdr *icmp6h; 1459 u8 icmp6_type;
1460 u8 nexthdr; 1460 u8 nexthdr;
1461 unsigned len; 1461 unsigned len;
1462 int offset; 1462 int offset;
@@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1502 __skb_pull(skb2, offset); 1502 __skb_pull(skb2, offset);
1503 skb_reset_transport_header(skb2); 1503 skb_reset_transport_header(skb2);
1504 1504
1505 icmp6h = icmp6_hdr(skb2); 1505 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
1506 1506
1507 switch (icmp6h->icmp6_type) { 1507 switch (icmp6_type) {
1508 case ICMPV6_MGM_QUERY: 1508 case ICMPV6_MGM_QUERY:
1509 case ICMPV6_MGM_REPORT: 1509 case ICMPV6_MGM_REPORT:
1510 case ICMPV6_MGM_REDUCTION: 1510 case ICMPV6_MGM_REDUCTION:
@@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1520 err = pskb_trim_rcsum(skb2, len); 1520 err = pskb_trim_rcsum(skb2, len);
1521 if (err) 1521 if (err)
1522 goto out; 1522 goto out;
1523 err = -EINVAL;
1523 } 1524 }
1524 1525
1526 ip6h = ipv6_hdr(skb2);
1527
1525 switch (skb2->ip_summed) { 1528 switch (skb2->ip_summed) {
1526 case CHECKSUM_COMPLETE: 1529 case CHECKSUM_COMPLETE:
1527 if (!csum_fold(skb2->csum)) 1530 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
1531 IPPROTO_ICMPV6, skb2->csum))
1528 break; 1532 break;
1529 /*FALLTHROUGH*/ 1533 /*FALLTHROUGH*/
1530 case CHECKSUM_NONE: 1534 case CHECKSUM_NONE:
1531 skb2->csum = 0; 1535 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
1532 if (skb_checksum_complete(skb2)) 1536 &ip6h->daddr,
1537 skb2->len,
1538 IPPROTO_ICMPV6, 0));
1539 if (__skb_checksum_complete(skb2))
1533 goto out; 1540 goto out;
1534 } 1541 }
1535 1542
@@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1537 1544
1538 BR_INPUT_SKB_CB(skb)->igmp = 1; 1545 BR_INPUT_SKB_CB(skb)->igmp = 1;
1539 1546
1540 switch (icmp6h->icmp6_type) { 1547 switch (icmp6_type) {
1541 case ICMPV6_MGM_REPORT: 1548 case ICMPV6_MGM_REPORT:
1542 { 1549 {
1543 struct mld_msg *mld; 1550 struct mld_msg *mld;
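
The bridge MLD-snooping hunk above stops trusting a folded skb->csum on its own and instead validates the ICMPv6 checksum against the IPv6 pseudo-header (source, destination, payload length, next header), re-reading ip6h after the trim and caching icmp6_type before the header can move. A userspace sketch of the pseudo-header checksum it now relies on; this is the generic one's-complement sum, not the kernel's csum_ipv6_magic() implementation, and the helper names are illustrative.

	#include <stdint.h>
	#include <stddef.h>

	static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
	{
		const uint8_t *p = buf;

		while (len > 1) {
			sum += (uint16_t)((p[0] << 8) | p[1]);
			p += 2;
			len -= 2;
		}
		if (len)
			sum += (uint16_t)(p[0] << 8);
		return sum;
	}

	static uint16_t csum_fold16(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* Pseudo-header fields are summed first, then the ICMPv6 message
	 * (including its checksum field); a result of 0 means it verifies. */
	uint16_t icmpv6_csum(const uint8_t saddr[16], const uint8_t daddr[16],
			     uint32_t len, const void *icmp6)
	{
		uint8_t nh[4] = { 0, 0, 0, 58 };	/* next header = ICMPv6 */
		uint8_t lenbe[4] = { len >> 24, len >> 16, len >> 8, len };
		uint32_t sum = 0;

		sum = csum_add(sum, saddr, 16);
		sum = csum_add(sum, daddr, 16);
		sum = csum_add(sum, lenbe, 4);
		sum = csum_add(sum, nh, 4);
		sum = csum_add(sum, icmp6, len);
		return csum_fold16(sum);
	}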
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index ba6f73eb06c6..a9aff9c7d027 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig BRIDGE_NF_EBTABLES 5menuconfig BRIDGE_NF_EBTABLES
6 tristate "Ethernet Bridge tables (ebtables) support" 6 tristate "Ethernet Bridge tables (ebtables) support"
7 depends on BRIDGE && BRIDGE_NETFILTER 7 depends on BRIDGE && NETFILTER
8 select NETFILTER_XTABLES 8 select NETFILTER_XTABLES
9 help 9 help
10 ebtables is a general, extensible frame/packet identification 10 ebtables is a general, extensible frame/packet identification
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7c2fa0a08148..7f9ac0742d19 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
93 caifdevs = caif_device_list(dev_net(dev)); 93 caifdevs = caif_device_list(dev_net(dev));
94 BUG_ON(!caifdevs); 94 BUG_ON(!caifdevs);
95 95
96 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 96 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
97 if (!caifd) 97 if (!caifd)
98 return NULL; 98 return NULL;
99 caifd->pcpu_refcnt = alloc_percpu(int); 99 caifd->pcpu_refcnt = alloc_percpu(int);
100 if (!caifd->pcpu_refcnt) {
101 kfree(caifd);
102 return NULL;
103 }
100 caifd->netdev = dev; 104 caifd->netdev = dev;
101 dev_hold(dev); 105 dev_hold(dev);
102 return caifd; 106 return caifd;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b9efa944cab9..d1ff5152c657 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
857 struct net_device *dev; 857 struct net_device *dev;
858 858
859 if (stats_timer) 859 if (stats_timer)
860 del_timer(&can_stattimer); 860 del_timer_sync(&can_stattimer);
861 861
862 can_remove_proc(); 862 can_remove_proc();
863 863
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index d5f2d97ac05c..1f4cb30a42c5 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -7,27 +7,37 @@
7 7
8#include <linux/ceph/msgpool.h> 8#include <linux/ceph/msgpool.h>
9 9
10static void *alloc_fn(gfp_t gfp_mask, void *arg) 10static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
11{ 11{
12 struct ceph_msgpool *pool = arg; 12 struct ceph_msgpool *pool = arg;
13 void *p; 13 struct ceph_msg *msg;
14 14
15 p = ceph_msg_new(0, pool->front_len, gfp_mask); 15 msg = ceph_msg_new(0, pool->front_len, gfp_mask);
16 if (!p) 16 if (!msg) {
17 pr_err("msgpool %s alloc failed\n", pool->name); 17 dout("msgpool_alloc %s failed\n", pool->name);
18 return p; 18 } else {
19 dout("msgpool_alloc %s %p\n", pool->name, msg);
20 msg->pool = pool;
21 }
22 return msg;
19} 23}
20 24
21static void free_fn(void *element, void *arg) 25static void msgpool_free(void *element, void *arg)
22{ 26{
23 ceph_msg_put(element); 27 struct ceph_msgpool *pool = arg;
28 struct ceph_msg *msg = element;
29
30 dout("msgpool_release %s %p\n", pool->name, msg);
31 msg->pool = NULL;
32 ceph_msg_put(msg);
24} 33}
25 34
26int ceph_msgpool_init(struct ceph_msgpool *pool, 35int ceph_msgpool_init(struct ceph_msgpool *pool,
27 int front_len, int size, bool blocking, const char *name) 36 int front_len, int size, bool blocking, const char *name)
28{ 37{
38 dout("msgpool %s init\n", name);
29 pool->front_len = front_len; 39 pool->front_len = front_len;
30 pool->pool = mempool_create(size, alloc_fn, free_fn, pool); 40 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
31 if (!pool->pool) 41 if (!pool->pool)
32 return -ENOMEM; 42 return -ENOMEM;
33 pool->name = name; 43 pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
36 46
37void ceph_msgpool_destroy(struct ceph_msgpool *pool) 47void ceph_msgpool_destroy(struct ceph_msgpool *pool)
38{ 48{
49 dout("msgpool %s destroy\n", pool->name);
39 mempool_destroy(pool->pool); 50 mempool_destroy(pool->pool);
40} 51}
41 52
42struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, 53struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
43 int front_len) 54 int front_len)
44{ 55{
56 struct ceph_msg *msg;
57
45 if (front_len > pool->front_len) { 58 if (front_len > pool->front_len) {
46 pr_err("msgpool_get pool %s need front %d, pool size is %d\n", 59 dout("msgpool_get %s need front %d, pool size is %d\n",
47 pool->name, front_len, pool->front_len); 60 pool->name, front_len, pool->front_len);
48 WARN_ON(1); 61 WARN_ON(1);
49 62
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
51 return ceph_msg_new(0, front_len, GFP_NOFS); 64 return ceph_msg_new(0, front_len, GFP_NOFS);
52 } 65 }
53 66
54 return mempool_alloc(pool->pool, GFP_NOFS); 67 msg = mempool_alloc(pool->pool, GFP_NOFS);
68 dout("msgpool_get %s %p\n", pool->name, msg);
69 return msg;
55} 70}
56 71
57void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) 72void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
58{ 73{
74 dout("msgpool_put %s %p\n", pool->name, msg);
75
59 /* reset msg front_len; user may have changed it */ 76 /* reset msg front_len; user may have changed it */
60 msg->front.iov_len = pool->front_len; 77 msg->front.iov_len = pool->front_len;
61 msg->hdr.front_len = cpu_to_le32(pool->front_len); 78 msg->hdr.front_len = cpu_to_le32(pool->front_len);
62 79
63 kref_init(&msg->kref); /* retake single ref */ 80 kref_init(&msg->kref); /* retake single ref */
81 mempool_free(msg, pool->pool);
64} 82}
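
The ceph msgpool rework keeps mempool_create()'s callback scheme but makes the callbacks pool-aware: each allocated message is tagged with its owning pool, and release clears that back-pointer and returns the element to the pool. A self-contained userspace sketch of that callback-driven pool shape; the structures and names are illustrative, not the kernel mempool API.

	#include <stdio.h>
	#include <stdlib.h>

	struct pool;

	struct msg {
		struct pool *pool;	/* set on alloc, cleared on release */
		size_t front_len;
	};

	struct pool {
		const char *name;
		size_t front_len;
		void *(*alloc_fn)(struct pool *p);
		void (*free_fn)(struct pool *p, void *elem);
	};

	static void *msg_alloc(struct pool *p)
	{
		struct msg *m = calloc(1, sizeof(*m));

		if (!m) {
			fprintf(stderr, "pool %s: alloc failed\n", p->name);
			return NULL;
		}
		m->pool = p;			/* tag the element with its pool */
		m->front_len = p->front_len;
		return m;
	}

	static void msg_free(struct pool *p, void *elem)
	{
		struct msg *m = elem;

		m->pool = NULL;			/* drop the back-pointer first */
		free(m);
	}

	int main(void)
	{
		struct pool p = { "osd_op", 512, msg_alloc, msg_free };
		void *m = p.alloc_fn(&p);

		if (m)
			p.free_fn(&p, m);
		return 0;
	}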
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ce310eee708d..16836a7df7a6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
685 put_osd(osd); 685 put_osd(osd);
686} 686}
687 687
688static void remove_all_osds(struct ceph_osd_client *osdc)
689{
690 dout("__remove_old_osds %p\n", osdc);
691 mutex_lock(&osdc->request_mutex);
692 while (!RB_EMPTY_ROOT(&osdc->osds)) {
693 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
694 struct ceph_osd, o_node);
695 __remove_osd(osdc, osd);
696 }
697 mutex_unlock(&osdc->request_mutex);
698}
699
688static void __move_osd_to_lru(struct ceph_osd_client *osdc, 700static void __move_osd_to_lru(struct ceph_osd_client *osdc,
689 struct ceph_osd *osd) 701 struct ceph_osd *osd)
690{ 702{
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
701 list_del_init(&osd->o_osd_lru); 713 list_del_init(&osd->o_osd_lru);
702} 714}
703 715
704static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) 716static void remove_old_osds(struct ceph_osd_client *osdc)
705{ 717{
706 struct ceph_osd *osd, *nosd; 718 struct ceph_osd *osd, *nosd;
707 719
708 dout("__remove_old_osds %p\n", osdc); 720 dout("__remove_old_osds %p\n", osdc);
709 mutex_lock(&osdc->request_mutex); 721 mutex_lock(&osdc->request_mutex);
710 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 722 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
711 if (!remove_all && time_before(jiffies, osd->lru_ttl)) 723 if (time_before(jiffies, osd->lru_ttl))
712 break; 724 break;
713 __remove_osd(osdc, osd); 725 __remove_osd(osdc, osd);
714 } 726 }
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
751 struct rb_node *parent = NULL; 763 struct rb_node *parent = NULL;
752 struct ceph_osd *osd = NULL; 764 struct ceph_osd *osd = NULL;
753 765
766 dout("__insert_osd %p osd%d\n", new, new->o_osd);
754 while (*p) { 767 while (*p) {
755 parent = *p; 768 parent = *p;
756 osd = rb_entry(parent, struct ceph_osd, o_node); 769 osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
1144 1157
1145 dout("osds timeout\n"); 1158 dout("osds timeout\n");
1146 down_read(&osdc->map_sem); 1159 down_read(&osdc->map_sem);
1147 remove_old_osds(osdc, 0); 1160 remove_old_osds(osdc);
1148 up_read(&osdc->map_sem); 1161 up_read(&osdc->map_sem);
1149 1162
1150 schedule_delayed_work(&osdc->osds_timeout_work, 1163 schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
1862 ceph_osdmap_destroy(osdc->osdmap); 1875 ceph_osdmap_destroy(osdc->osdmap);
1863 osdc->osdmap = NULL; 1876 osdc->osdmap = NULL;
1864 } 1877 }
1865 remove_old_osds(osdc, 1); 1878 remove_all_osds(osdc);
1866 WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
1867 mempool_destroy(osdc->req_mempool); 1879 mempool_destroy(osdc->req_mempool);
1868 ceph_msgpool_destroy(&osdc->msgpool_op); 1880 ceph_msgpool_destroy(&osdc->msgpool_op);
1869 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 1881 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4b9981caf06f..bf49a47ddfdb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1517,6 +1517,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
1517 */ 1517 */
1518int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1518int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1519{ 1519{
1520 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1521 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1522 atomic_long_inc(&dev->rx_dropped);
1523 kfree_skb(skb);
1524 return NET_RX_DROP;
1525 }
1526 }
1527
1520 skb_orphan(skb); 1528 skb_orphan(skb);
1521 nf_reset(skb); 1529 nf_reset(skb);
1522 1530
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 67c5c288cd80..38be4744133f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
384 */ 384 */
385 list_for_each_entry(r, &ops->rules_list, list) { 385 list_for_each_entry(r, &ops->rules_list, list) {
386 if (r->action == FR_ACT_GOTO && 386 if (r->action == FR_ACT_GOTO &&
387 r->target == rule->pref) { 387 r->target == rule->pref &&
388 BUG_ON(rtnl_dereference(r->ctarget) != NULL); 388 rtnl_dereference(r->ctarget) == NULL) {
389 rcu_assign_pointer(r->ctarget, rule); 389 rcu_assign_pointer(r->ctarget, rule);
390 if (--ops->unresolved_rules == 0) 390 if (--ops->unresolved_rules == 0)
391 break; 391 break;
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33cad3b..555a456efb07 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
30 struct hlist_node hlist; 30 struct hlist_node hlist;
31 struct list_head gc_list; 31 struct list_head gc_list;
32 } u; 32 } u;
33 struct net *net;
33 u16 family; 34 u16 family;
34 u8 dir; 35 u8 dir;
35 u32 genid; 36 u32 genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
172 173
173static u32 flow_hash_code(struct flow_cache *fc, 174static u32 flow_hash_code(struct flow_cache *fc,
174 struct flow_cache_percpu *fcp, 175 struct flow_cache_percpu *fcp,
175 const struct flowi *key) 176 const struct flowi *key,
177 size_t keysize)
176{ 178{
177 const u32 *k = (const u32 *) key; 179 const u32 *k = (const u32 *) key;
180 const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
178 181
179 return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 182 return jhash2(k, length, fcp->hash_rnd)
180 & (flow_cache_hash_size(fc) - 1); 183 & (flow_cache_hash_size(fc) - 1);
181} 184}
182 185
183typedef unsigned long flow_compare_t;
184
185/* I hear what you're saying, use memcmp. But memcmp cannot make 186/* I hear what you're saying, use memcmp. But memcmp cannot make
186 * important assumptions that we can here, such as alignment and 187 * important assumptions that we can here, such as alignment.
187 * constant size.
188 */ 188 */
189static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) 189static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
190 size_t keysize)
190{ 191{
191 const flow_compare_t *k1, *k1_lim, *k2; 192 const flow_compare_t *k1, *k1_lim, *k2;
192 const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
193
194 BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
195 193
196 k1 = (const flow_compare_t *) key1; 194 k1 = (const flow_compare_t *) key1;
197 k1_lim = k1 + n_elem; 195 k1_lim = k1 + keysize;
198 196
199 k2 = (const flow_compare_t *) key2; 197 k2 = (const flow_compare_t *) key2;
200 198
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
215 struct flow_cache_entry *fle, *tfle; 213 struct flow_cache_entry *fle, *tfle;
216 struct hlist_node *entry; 214 struct hlist_node *entry;
217 struct flow_cache_object *flo; 215 struct flow_cache_object *flo;
216 size_t keysize;
218 unsigned int hash; 217 unsigned int hash;
219 218
220 local_bh_disable(); 219 local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
222 221
223 fle = NULL; 222 fle = NULL;
224 flo = NULL; 223 flo = NULL;
224
225 keysize = flow_key_size(family);
226 if (!keysize)
227 goto nocache;
228
225 /* Packet really early in init? Making flow_cache_init a 229 /* Packet really early in init? Making flow_cache_init a
226 * pre-smp initcall would solve this. --RR */ 230 * pre-smp initcall would solve this. --RR */
227 if (!fcp->hash_table) 231 if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
230 if (fcp->hash_rnd_recalc) 234 if (fcp->hash_rnd_recalc)
231 flow_new_hash_rnd(fc, fcp); 235 flow_new_hash_rnd(fc, fcp);
232 236
233 hash = flow_hash_code(fc, fcp, key); 237 hash = flow_hash_code(fc, fcp, key, keysize);
234 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 238 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
235 if (tfle->family == family && 239 if (tfle->net == net &&
240 tfle->family == family &&
236 tfle->dir == dir && 241 tfle->dir == dir &&
237 flow_key_compare(key, &tfle->key) == 0) { 242 flow_key_compare(key, &tfle->key, keysize) == 0) {
238 fle = tfle; 243 fle = tfle;
239 break; 244 break;
240 } 245 }
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
246 251
247 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 252 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
248 if (fle) { 253 if (fle) {
254 fle->net = net;
249 fle->family = family; 255 fle->family = family;
250 fle->dir = dir; 256 fle->dir = dir;
251 memcpy(&fle->key, key, sizeof(*key)); 257 memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
252 fle->object = NULL; 258 fle->object = NULL;
253 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); 259 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
254 fcp->hash_count++; 260 fcp->hash_count++;
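
The flow-cache hunk above switches from hashing and comparing the full struct flowi to using a per-family key size, and adds the owning network namespace to the match. A userspace sketch of that lookup key, with a stand-in hash instead of jhash2() and assumed per-family word counts; everything here is illustrative.

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	static uint32_t fnv1a(const uint32_t *k, size_t nwords, uint32_t seed)
	{
		uint32_t h = 2166136261u ^ seed;
		size_t i;

		for (i = 0; i < nwords; i++) {
			h ^= k[i];
			h *= 16777619u;
		}
		return h;
	}

	struct flow_entry {
		const void *net;	/* namespace now participates in the match */
		uint16_t family;
		uint8_t dir;
		uint32_t key[16];
	};

	/* Assumed sizes: only the family-specific part of the key is used. */
	static size_t flow_key_words(uint16_t family)
	{
		return family == 2 /* AF_INET */ ? 4 : 12;
	}

	uint32_t flow_hash(uint16_t family, const uint32_t *key, uint32_t rnd)
	{
		return fnv1a(key, flow_key_words(family), rnd);
	}

	int flow_match(const struct flow_entry *e, const void *net,
		       uint16_t family, uint8_t dir, const uint32_t *key)
	{
		size_t n = flow_key_words(family);

		return e->net == net && e->family == family && e->dir == dir &&
		       memcmp(e->key, key, n * sizeof(uint32_t)) == 0;
	}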
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4002261f20d1..43449649cf73 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,11 +1331,15 @@ static void neigh_proxy_process(unsigned long arg)
1331 1331
1332 if (tdif <= 0) { 1332 if (tdif <= 0) {
1333 struct net_device *dev = skb->dev; 1333 struct net_device *dev = skb->dev;
1334
1334 __skb_unlink(skb, &tbl->proxy_queue); 1335 __skb_unlink(skb, &tbl->proxy_queue);
1335 if (tbl->proxy_redo && netif_running(dev)) 1336 if (tbl->proxy_redo && netif_running(dev)) {
1337 rcu_read_lock();
1336 tbl->proxy_redo(skb); 1338 tbl->proxy_redo(skb);
1337 else 1339 rcu_read_unlock();
1340 } else {
1338 kfree_skb(skb); 1341 kfree_skb(skb);
1342 }
1339 1343
1340 dev_put(dev); 1344 dev_put(dev);
1341 } else if (!sched_next || tdif < sched_next) 1345 } else if (!sched_next || tdif < sched_next)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d676a561d983..f57d94627a2a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb)
558 if (skb_shared(skb)) 558 if (skb_shared(skb))
559 goto out; 559 goto out;
560 560
561 iph = (struct iphdr *)skb->data;
562 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 561 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
563 goto out; 562 goto out;
563 iph = (struct iphdr *)skb->data;
564 if (iph->ihl < 5 || iph->version != 4) 564 if (iph->ihl < 5 || iph->version != 4)
565 goto out; 565 goto out;
566 if (!pskb_may_pull(skb, iph->ihl*4)) 566 if (!pskb_may_pull(skb, iph->ihl*4))
567 goto out; 567 goto out;
568 iph = (struct iphdr *)skb->data;
568 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) 569 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
569 goto out; 570 goto out;
570 571
@@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb)
579 if (pskb_trim_rcsum(skb, len)) 580 if (pskb_trim_rcsum(skb, len))
580 goto out; 581 goto out;
581 582
583 iph = (struct iphdr *)skb->data;
582 if (iph->protocol != IPPROTO_UDP) 584 if (iph->protocol != IPPROTO_UDP)
583 goto out; 585 goto out;
584 586
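
The netpoll fix re-loads iph after every pskb_may_pull() and after pskb_trim_rcsum(), because pulling can relocate the packet data and leave any earlier header pointer dangling. The same hazard exists in plain C with realloc(); a short sketch of re-deriving the pointer only after the buffer has settled (names are illustrative):

	#include <stdlib.h>

	struct header { unsigned char version; unsigned char ihl; };

	/* Grow the buffer to `need` bytes if necessary, then take the header
	 * pointer from the (possibly moved) buffer before using it. */
	int check_version(unsigned char **bufp, size_t *cap, size_t need)
	{
		struct header *h;

		if (*cap < need) {
			unsigned char *n = realloc(*bufp, need);

			if (!n)
				return -1;
			*bufp = n;		/* data may have moved */
			*cap = need;
		}
		h = (struct header *)*bufp;	/* re-derive the pointer only now */
		return h->version == 4 ? 0 : -1;
	}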
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 296afd0aa8d2..5b2c5f1d4dba 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -613,8 +613,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
613} 613}
614EXPORT_SYMBOL_GPL(skb_morph); 614EXPORT_SYMBOL_GPL(skb_morph);
615 615
616/* skb frags copy userspace buffers to kernel */ 616/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
617static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 617 * @skb: the skb to modify
618 * @gfp_mask: allocation priority
619 *
620 * This must be called on SKBTX_DEV_ZEROCOPY skb.
621 * It will copy all frags into kernel and drop the reference
622 * to userspace pages.
623 *
624 * If this function is called from an interrupt gfp_mask() must be
625 * %GFP_ATOMIC.
626 *
627 * Returns 0 on success or a negative error code on failure
628 * to allocate kernel memory to copy to.
629 */
630int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
618{ 631{
619 int i; 632 int i;
620 int num_frags = skb_shinfo(skb)->nr_frags; 633 int num_frags = skb_shinfo(skb)->nr_frags;
@@ -654,6 +667,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
654 skb_shinfo(skb)->frags[i - 1].page = head; 667 skb_shinfo(skb)->frags[i - 1].page = head;
655 head = (struct page *)head->private; 668 head = (struct page *)head->private;
656 } 669 }
670
671 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
657 return 0; 672 return 0;
658} 673}
659 674
@@ -679,7 +694,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
679 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 694 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
680 if (skb_copy_ubufs(skb, gfp_mask)) 695 if (skb_copy_ubufs(skb, gfp_mask))
681 return NULL; 696 return NULL;
682 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
683 } 697 }
684 698
685 n = skb + 1; 699 n = skb + 1;
@@ -805,7 +819,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
805 n = NULL; 819 n = NULL;
806 goto out; 820 goto out;
807 } 821 }
808 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
809 } 822 }
810 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
811 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 824 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -898,7 +911,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
898 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 911 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
899 if (skb_copy_ubufs(skb, gfp_mask)) 912 if (skb_copy_ubufs(skb, gfp_mask))
900 goto nofrags; 913 goto nofrags;
901 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
902 } 914 }
903 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 915 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
904 skb_frag_ref(skb, i); 916 skb_frag_ref(skb, i);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 27997d35ebd3..a2468363978e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
340 dev->addr_len = ETH_ALEN; 340 dev->addr_len = ETH_ALEN;
341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */
342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST;
343 dev->priv_flags = IFF_TX_SKB_SHARING; 343 dev->priv_flags |= IFF_TX_SKB_SHARING;
344 344
345 memset(dev->broadcast, 0xFF, ETH_ALEN); 345 memset(dev->broadcast, 0xFF, ETH_ALEN);
346 346
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b745d412cf6..dd2b9478ddd1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
466 goto out; 466 goto out;
467 467
468 if (addr->sin_family != AF_INET) { 468 if (addr->sin_family != AF_INET) {
469 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
470 * only if s_addr is INADDR_ANY.
471 */
469 err = -EAFNOSUPPORT; 472 err = -EAFNOSUPPORT;
470 goto out; 473 if (addr->sin_family != AF_UNSPEC ||
474 addr->sin_addr.s_addr != htonl(INADDR_ANY))
475 goto out;
471 } 476 }
472 477
473 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b7..80106d89d548 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
142}; 142};
143 143
144/* Release a nexthop info record */ 144/* Release a nexthop info record */
145static void free_fib_info_rcu(struct rcu_head *head)
146{
147 struct fib_info *fi = container_of(head, struct fib_info, rcu);
148
149 if (fi->fib_metrics != (u32 *) dst_default_metrics)
150 kfree(fi->fib_metrics);
151 kfree(fi);
152}
145 153
146void free_fib_info(struct fib_info *fi) 154void free_fib_info(struct fib_info *fi)
147{ 155{
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
156 } endfor_nexthops(fi); 164 } endfor_nexthops(fi);
157 fib_info_cnt--; 165 fib_info_cnt--;
158 release_net(fi->fib_net); 166 release_net(fi->fib_net);
159 kfree_rcu(fi, rcu); 167 call_rcu(&fi->rcu, free_fib_info_rcu);
160} 168}
161 169
162void fib_release_info(struct fib_info *fi) 170void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ce57bdee14cb..c7472eff2d51 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
767 break; 767 break;
768 for (i=0; i<nsrcs; i++) { 768 for (i=0; i<nsrcs; i++) {
769 /* skip inactive filters */ 769 /* skip inactive filters */
770 if (pmc->sfcount[MCAST_INCLUDE] || 770 if (psf->sf_count[MCAST_INCLUDE] ||
771 pmc->sfcount[MCAST_EXCLUDE] != 771 pmc->sfcount[MCAST_EXCLUDE] !=
772 psf->sf_count[MCAST_EXCLUDE]) 772 psf->sf_count[MCAST_EXCLUDE])
773 continue; 773 continue;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5c9b9d963918..e59aabd0eae4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
218 return skb; 218 return skb;
219 219
220nlmsg_failure: 220nlmsg_failure:
221 kfree_skb(skb);
221 *errp = -EINVAL; 222 *errp = -EINVAL;
222 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 printk(KERN_ERR "ip_queue: error creating packet message\n");
223 return NULL; 224 return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
313{ 314{
314 struct nf_queue_entry *entry; 315 struct nf_queue_entry *entry;
315 316
316 if (vmsg->value > NF_MAX_VERDICT) 317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
317 return -EINVAL; 318 return -EINVAL;
318 319
319 entry = ipq_find_dequeue_entry(vmsg->id); 320 entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
358 break; 359 break;
359 360
360 case IPQM_VERDICT: 361 case IPQM_VERDICT:
361 if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 362 status = ipq_set_verdict(&pmsg->msg.verdict,
362 status = -EINVAL; 363 len - sizeof(*pmsg));
363 else 364 break;
364 status = ipq_set_verdict(&pmsg->msg.verdict,
365 len - sizeof(*pmsg));
366 break;
367 default: 365 default:
368 status = -EINVAL; 366 status = -EINVAL;
369 } 367 }
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b14ec7d03b6e..4bfad5da94f4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
257 SNMP_MIB_SENTINEL 259 SNMP_MIB_SENTINEL
258}; 260};
259 261
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 385c470195eb..a5d01b183cf7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1124 return 0; 1124 return 0;
1125 1125
1126 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1126 /* ...Then it's D-SACK, and must reside below snd_una completely */
1127 if (!after(end_seq, tp->snd_una)) 1127 if (after(end_seq, tp->snd_una))
1128 return 0; 1128 return 0;
1129 1129
1130 if (!before(start_seq, tp->undo_marker)) 1130 if (!before(start_seq, tp->undo_marker))
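
The tcp_input.c hunk restores the intended D-SACK test: the block is only acceptable when its end sequence does not lie after snd_una, i.e. it sits entirely below already-acknowledged data. A small sketch of the wrap-safe sequence comparison behind before()/after() and the corrected check; function names are illustrative.

	#include <stdint.h>

	static int seq_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;	/* modulo-2^32 comparison */
	}

	static int seq_after(uint32_t a, uint32_t b)
	{
		return seq_before(b, a);
	}

	/* A D-SACK block must reside completely below snd_una. */
	int dsack_end_valid(uint32_t end_seq, uint32_t snd_una)
	{
		return !seq_after(end_seq, snd_una);
	}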
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b3f26114b03e..c29912cd83a0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
808 kfree(inet_rsk(req)->opt); 808 kfree(inet_rsk(req)->opt);
809} 809}
810 810
811static void syn_flood_warning(const struct sk_buff *skb) 811/*
812 * Return 1 if a syncookie should be sent
813 */
814int tcp_syn_flood_action(struct sock *sk,
815 const struct sk_buff *skb,
816 const char *proto)
812{ 817{
813 const char *msg; 818 const char *msg = "Dropping request";
819 int want_cookie = 0;
820 struct listen_sock *lopt;
821
822
814 823
815#ifdef CONFIG_SYN_COOKIES 824#ifdef CONFIG_SYN_COOKIES
816 if (sysctl_tcp_syncookies) 825 if (sysctl_tcp_syncookies) {
817 msg = "Sending cookies"; 826 msg = "Sending cookies";
818 else 827 want_cookie = 1;
828 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
829 } else
819#endif 830#endif
820 msg = "Dropping request"; 831 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
821 832
822 pr_info("TCP: Possible SYN flooding on port %d. %s.\n", 833 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
823 ntohs(tcp_hdr(skb)->dest), msg); 834 if (!lopt->synflood_warned) {
835 lopt->synflood_warned = 1;
836 pr_info("%s: Possible SYN flooding on port %d. %s. "
837 " Check SNMP counters.\n",
838 proto, ntohs(tcp_hdr(skb)->dest), msg);
839 }
840 return want_cookie;
824} 841}
842EXPORT_SYMBOL(tcp_syn_flood_action);
825 843
826/* 844/*
827 * Save and compile IPv4 options into the request_sock if needed. 845 * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1235 __be32 saddr = ip_hdr(skb)->saddr; 1253 __be32 saddr = ip_hdr(skb)->saddr;
1236 __be32 daddr = ip_hdr(skb)->daddr; 1254 __be32 daddr = ip_hdr(skb)->daddr;
1237 __u32 isn = TCP_SKB_CB(skb)->when; 1255 __u32 isn = TCP_SKB_CB(skb)->when;
1238#ifdef CONFIG_SYN_COOKIES
1239 int want_cookie = 0; 1256 int want_cookie = 0;
1240#else
1241#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1242#endif
1243 1257
1244 /* Never answer to SYNs send to broadcast or multicast */ 1258 /* Never answer to SYNs send to broadcast or multicast */
1245 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1259 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1250 * evidently real one. 1264 * evidently real one.
1251 */ 1265 */
1252 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1266 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1253 if (net_ratelimit()) 1267 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1254 syn_flood_warning(skb); 1268 if (!want_cookie)
1255#ifdef CONFIG_SYN_COOKIES 1269 goto drop;
1256 if (sysctl_tcp_syncookies) {
1257 want_cookie = 1;
1258 } else
1259#endif
1260 goto drop;
1261 } 1270 }
1262 1271
1263 /* Accept backlog is full. If we have already queued enough 1272 /* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1303 while (l-- > 0) 1312 while (l-- > 0)
1304 *c++ ^= *hash_location++; 1313 *c++ ^= *hash_location++;
1305 1314
1306#ifdef CONFIG_SYN_COOKIES
1307 want_cookie = 0; /* not our kind of cookie */ 1315 want_cookie = 0; /* not our kind of cookie */
1308#endif
1309 tmp_ext.cookie_out_never = 0; /* false */ 1316 tmp_ext.cookie_out_never = 0; /* false */
1310 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1317 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1311 } else if (!tp->rx_opt.cookie_in_always) { 1318 } else if (!tp->rx_opt.cookie_in_always) {
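
The tcp_ipv4.c hunk folds the old per-protocol warning into a shared tcp_syn_flood_action() helper: it decides whether to fall back to syncookies, bumps the matching SNMP counter, and warns only once per listening socket. A userspace sketch of that helper shape; the counter and flag names stand in for the LINUX_MIB_* counters and the listen_opt field in the patch.

	#include <stdbool.h>
	#include <stdio.h>

	struct listener_stats {
		bool synflood_warned;
		unsigned long do_cookies;
		unsigned long drops;
	};

	bool syn_flood_action(struct listener_stats *st, bool syncookies_enabled,
			      int port, const char *proto)
	{
		bool want_cookie = false;

		if (syncookies_enabled) {
			want_cookie = true;
			st->do_cookies++;
		} else {
			st->drops++;
		}

		if (!st->synflood_warned) {	/* warn once per listener */
			st->synflood_warned = true;
			fprintf(stderr,
				"%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
				proto, port,
				want_cookie ? "Sending cookies" : "Dropping request");
		}
		return want_cookie;
	}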
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3053c685e249..e39239e6426e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
374 "%s(): cannot allocate memory for statistics; dev=%s.\n", 374 "%s(): cannot allocate memory for statistics; dev=%s.\n",
375 __func__, dev->name)); 375 __func__, dev->name));
376 neigh_parms_release(&nd_tbl, ndev->nd_parms); 376 neigh_parms_release(&nd_tbl, ndev->nd_parms);
377 ndev->dead = 1; 377 dev_put(dev);
378 in6_dev_finish_destroy(ndev); 378 kfree(ndev);
379 return NULL; 379 return NULL;
380 } 380 }
381 381
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ef1831746ef..b46e9f88ce37 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
599 return 0; 599 return 0;
600} 600}
601 601
602int datagram_send_ctl(struct net *net, 602int datagram_send_ctl(struct net *net, struct sock *sk,
603 struct msghdr *msg, struct flowi6 *fl6, 603 struct msghdr *msg, struct flowi6 *fl6,
604 struct ipv6_txoptions *opt, 604 struct ipv6_txoptions *opt,
605 int *hlimit, int *tclass, int *dontfrag) 605 int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
658 658
659 if (addr_type != IPV6_ADDR_ANY) { 659 if (addr_type != IPV6_ADDR_ANY) {
660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
661 if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 if (!inet_sk(sk)->transparent &&
662 !ipv6_chk_addr(net, &src_info->ipi6_addr,
662 strict ? dev : NULL, 0)) 663 strict ? dev : NULL, 0))
663 err = -EINVAL; 664 err = -EINVAL;
664 else 665 else
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f3caf1b8d572..543039450193 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
322} 322}
323 323
324static struct ip6_flowlabel * 324static struct ip6_flowlabel *
325fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 325fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
326 int optlen, int *err_p) 326 char __user *optval, int optlen, int *err_p)
327{ 327{
328 struct ip6_flowlabel *fl = NULL; 328 struct ip6_flowlabel *fl = NULL;
329 int olen; 329 int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
360 msg.msg_control = (void*)(fl->opt+1); 360 msg.msg_control = (void*)(fl->opt+1);
361 memset(&flowi6, 0, sizeof(flowi6)); 361 memset(&flowi6, 0, sizeof(flowi6));
362 362
363 err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
364 &junk, &junk); 364 &junk, &junk);
365 if (err) 365 if (err)
366 goto done; 366 goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
529 return -EINVAL; 529 return -EINVAL;
530 530
531 fl = fl_create(net, &freq, optval, optlen, &err); 531 fl = fl_create(net, sk, &freq, optval, optlen, &err);
532 if (fl == NULL) 532 if (fl == NULL)
533 return err; 533 return err;
534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 147ede38ab48..2fbda5fc4cc4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -475,7 +475,7 @@ sticky_done:
475 msg.msg_controllen = optlen; 475 msg.msg_controllen = optlen;
476 msg.msg_control = (void*)(opt+1); 476 msg.msg_control = (void*)(opt+1);
477 477
478 retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
479 &junk); 479 &junk);
480 if (retv) 480 if (retv)
481 goto done; 481 goto done;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3e6ebcdb4779..ee7839f4d6e3 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1059,7 +1059,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1059 break; 1059 break;
1060 for (i=0; i<nsrcs; i++) { 1060 for (i=0; i<nsrcs; i++) {
1061 /* skip inactive filters */ 1061 /* skip inactive filters */
1062 if (pmc->mca_sfcount[MCAST_INCLUDE] || 1062 if (psf->sf_count[MCAST_INCLUDE] ||
1063 pmc->mca_sfcount[MCAST_EXCLUDE] != 1063 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1064 psf->sf_count[MCAST_EXCLUDE]) 1064 psf->sf_count[MCAST_EXCLUDE])
1065 continue; 1065 continue;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 249394863284..e63c3972a739 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
218 return skb; 218 return skb;
219 219
220nlmsg_failure: 220nlmsg_failure:
221 kfree_skb(skb);
221 *errp = -EINVAL; 222 *errp = -EINVAL;
222 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 printk(KERN_ERR "ip6_queue: error creating packet message\n");
223 return NULL; 224 return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
313{ 314{
314 struct nf_queue_entry *entry; 315 struct nf_queue_entry *entry;
315 316
316 if (vmsg->value > NF_MAX_VERDICT) 317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
317 return -EINVAL; 318 return -EINVAL;
318 319
319 entry = ipq_find_dequeue_entry(vmsg->id); 320 entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
358 break; 359 break;
359 360
360 case IPQM_VERDICT: 361 case IPQM_VERDICT:
361 if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 362 status = ipq_set_verdict(&pmsg->msg.verdict,
362 status = -EINVAL; 363 len - sizeof(*pmsg));
363 else 364 break;
364 status = ipq_set_verdict(&pmsg->msg.verdict,
365 len - sizeof(*pmsg));
366 break;
367 default: 365 default:
368 status = -EINVAL; 366 status = -EINVAL;
369 } 367 }
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index f34902f1ba33..3486f62befa3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 817 memset(opt, 0, sizeof(struct ipv6_txoptions));
818 opt->tot_len = sizeof(struct ipv6_txoptions); 818 opt->tot_len = sizeof(struct ipv6_txoptions);
819 819
820 err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 820 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
821 &tclass, &dontfrag); 821 &hlimit, &tclass, &dontfrag);
822 if (err < 0) { 822 if (err < 0) {
823 fl6_sock_release(flowlabel); 823 fl6_sock_release(flowlabel);
824 return err; 824 return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9e69eb0ec6dd..1250f9020670 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
104 struct inet_peer *peer; 104 struct inet_peer *peer;
105 u32 *p = NULL; 105 u32 *p = NULL;
106 106
107 if (!(rt->dst.flags & DST_HOST))
108 return NULL;
109
107 if (!rt->rt6i_peer) 110 if (!rt->rt6i_peer)
108 rt6_bind_peer(rt, 1); 111 rt6_bind_peer(rt, 1);
109 112
@@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
252 struct inet6_dev *idev = rt->rt6i_idev; 255 struct inet6_dev *idev = rt->rt6i_idev;
253 struct inet_peer *peer = rt->rt6i_peer; 256 struct inet_peer *peer = rt->rt6i_peer;
254 257
258 if (!(rt->dst.flags & DST_HOST))
259 dst_destroy_metrics_generic(dst);
260
255 if (idev != NULL) { 261 if (idev != NULL) {
256 rt->rt6i_idev = NULL; 262 rt->rt6i_idev = NULL;
257 in6_dev_put(idev); 263 in6_dev_put(idev);
@@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
723 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 729 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
724 } 730 }
725 731
726 rt->rt6i_dst.plen = 128;
727 rt->rt6i_flags |= RTF_CACHE; 732 rt->rt6i_flags |= RTF_CACHE;
728 rt->dst.flags |= DST_HOST;
729 733
730#ifdef CONFIG_IPV6_SUBTREES 734#ifdef CONFIG_IPV6_SUBTREES
731 if (rt->rt6i_src.plen && saddr) { 735 if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
775 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 779 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
776 780
777 if (rt) { 781 if (rt) {
778 rt->rt6i_dst.plen = 128;
779 rt->rt6i_flags |= RTF_CACHE; 782 rt->rt6i_flags |= RTF_CACHE;
780 rt->dst.flags |= DST_HOST;
781 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); 783 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
782 } 784 }
783 return rt; 785 return rt;
@@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1078 neigh = NULL; 1080 neigh = NULL;
1079 } 1081 }
1080 1082
1081 rt->rt6i_idev = idev; 1083 rt->dst.flags |= DST_HOST;
1084 rt->dst.output = ip6_output;
1082 dst_set_neighbour(&rt->dst, neigh); 1085 dst_set_neighbour(&rt->dst, neigh);
1083 atomic_set(&rt->dst.__refcnt, 1); 1086 atomic_set(&rt->dst.__refcnt, 1);
1084 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1085 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1087 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1086 rt->dst.output = ip6_output; 1088
1089 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1090 rt->rt6i_dst.plen = 128;
1091 rt->rt6i_idev = idev;
1087 1092
1088 spin_lock_bh(&icmp6_dst_lock); 1093 spin_lock_bh(&icmp6_dst_lock);
1089 rt->dst.next = icmp6_dst_gc_list; 1094 rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)
1261 if (rt->rt6i_dst.plen == 128) 1266 if (rt->rt6i_dst.plen == 128)
1262 rt->dst.flags |= DST_HOST; 1267 rt->dst.flags |= DST_HOST;
1263 1268
1269 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1270 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1271 if (!metrics) {
1272 err = -ENOMEM;
1273 goto out;
1274 }
1275 dst_init_metrics(&rt->dst, metrics, 0);
1276 }
1264#ifdef CONFIG_IPV6_SUBTREES 1277#ifdef CONFIG_IPV6_SUBTREES
1265 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1278 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1266 rt->rt6i_src.plen = cfg->fc_src_len; 1279 rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1607 if (on_link) 1620 if (on_link)
1608 nrt->rt6i_flags &= ~RTF_GATEWAY; 1621 nrt->rt6i_flags &= ~RTF_GATEWAY;
1609 1622
1610 nrt->rt6i_dst.plen = 128;
1611 nrt->dst.flags |= DST_HOST;
1612
1613 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1623 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1614 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1624 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1615 1625
@@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1754 if (rt) { 1764 if (rt) {
1755 rt->dst.input = ort->dst.input; 1765 rt->dst.input = ort->dst.input;
1756 rt->dst.output = ort->dst.output; 1766 rt->dst.output = ort->dst.output;
1767 rt->dst.flags |= DST_HOST;
1757 1768
1758 ipv6_addr_copy(&rt->rt6i_dst.addr, dest); 1769 ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
1759 rt->rt6i_dst.plen = ort->rt6i_dst.plen; 1770 rt->rt6i_dst.plen = 128;
1760 dst_copy_metrics(&rt->dst, &ort->dst); 1771 dst_copy_metrics(&rt->dst, &ort->dst);
1761 rt->dst.error = ort->dst.error; 1772 rt->dst.error = ort->dst.error;
1762 rt->rt6i_idev = ort->rt6i_idev; 1773 rt->rt6i_idev = ort->rt6i_idev;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 44a5859535b5..12bdb9af96e5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
531 return tcp_v6_send_synack(sk, req, rvp); 531 return tcp_v6_send_synack(sk, req, rvp);
532} 532}
533 533
534static inline void syn_flood_warning(struct sk_buff *skb)
535{
536#ifdef CONFIG_SYN_COOKIES
537 if (sysctl_tcp_syncookies)
538 printk(KERN_INFO
539 "TCPv6: Possible SYN flooding on port %d. "
540 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
541 else
542#endif
543 printk(KERN_INFO
544 "TCPv6: Possible SYN flooding on port %d. "
545 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
546}
547
548static void tcp_v6_reqsk_destructor(struct request_sock *req) 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
549{ 535{
550 kfree_skb(inet6_rsk(req)->pktopts); 536 kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1179 struct tcp_sock *tp = tcp_sk(sk); 1165 struct tcp_sock *tp = tcp_sk(sk);
1180 __u32 isn = TCP_SKB_CB(skb)->when; 1166 __u32 isn = TCP_SKB_CB(skb)->when;
1181 struct dst_entry *dst = NULL; 1167 struct dst_entry *dst = NULL;
1182#ifdef CONFIG_SYN_COOKIES
1183 int want_cookie = 0; 1168 int want_cookie = 0;
1184#else
1185#define want_cookie 0
1186#endif
1187 1169
1188 if (skb->protocol == htons(ETH_P_IP)) 1170 if (skb->protocol == htons(ETH_P_IP))
1189 return tcp_v4_conn_request(sk, skb); 1171 return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1192 goto drop; 1174 goto drop;
1193 1175
1194 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1176 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1195 if (net_ratelimit()) 1177 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1196 syn_flood_warning(skb); 1178 if (!want_cookie)
1197#ifdef CONFIG_SYN_COOKIES 1179 goto drop;
1198 if (sysctl_tcp_syncookies)
1199 want_cookie = 1;
1200 else
1201#endif
1202 goto drop;
1203 } 1180 }
1204 1181
1205 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1182 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1249 while (l-- > 0) 1226 while (l-- > 0)
1250 *c++ ^= *hash_location++; 1227 *c++ ^= *hash_location++;
1251 1228
1252#ifdef CONFIG_SYN_COOKIES
1253 want_cookie = 0; /* not our kind of cookie */ 1229 want_cookie = 0; /* not our kind of cookie */
1254#endif
1255 tmp_ext.cookie_out_never = 0; /* false */ 1230 tmp_ext.cookie_out_never = 0; /* false */
1256 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1231 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1257 } else if (!tp->rx_opt.cookie_in_always) { 1232 } else if (!tp->rx_opt.cookie_in_always) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 35bbdc42241e..f4ca0a5b3457 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1090 memset(opt, 0, sizeof(struct ipv6_txoptions));
1091 opt->tot_len = sizeof(*opt); 1091 opt->tot_len = sizeof(*opt);
1092 1092
1093 err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1093 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1094 &tclass, &dontfrag); 1094 &hlimit, &tclass, &dontfrag);
1095 if (err < 0) { 1095 if (err < 0) {
1096 fl6_sock_release(flowlabel); 1096 fl6_sock_release(flowlabel);
1097 return err; 1097 return err;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d0b70dadf73b..2615ffc8e785 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
40extern int sysctl_fast_poll_increase; 40extern int sysctl_fast_poll_increase;
41extern char sysctl_devname[]; 41extern char sysctl_devname[];
42extern int sysctl_max_baud_rate; 42extern int sysctl_max_baud_rate;
43extern int sysctl_min_tx_turn_time; 43extern unsigned int sysctl_min_tx_turn_time;
44extern int sysctl_max_tx_data_size; 44extern unsigned int sysctl_max_tx_data_size;
45extern int sysctl_max_tx_window; 45extern unsigned int sysctl_max_tx_window;
46extern int sysctl_max_noreply_time; 46extern int sysctl_max_noreply_time;
47extern int sysctl_warn_noreply_time; 47extern int sysctl_warn_noreply_time;
48extern int sysctl_lap_keepalive_time; 48extern int sysctl_lap_keepalive_time;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 1b51bcf42394..4369f7f41bcb 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
60 * Default is 10us which means using the unmodified value given by the 60 * Default is 10us which means using the unmodified value given by the
61 * peer except if it's 0 (0 is likely a bug in the other stack). 61 * peer except if it's 0 (0 is likely a bug in the other stack).
62 */ 62 */
63unsigned sysctl_min_tx_turn_time = 10; 63unsigned int sysctl_min_tx_turn_time = 10;
64/* 64/*
65 * Maximum data size to be used in transmission in payload of LAP frame. 65 * Maximum data size to be used in transmission in payload of LAP frame.
66 * There is a bit of confusion in the IrDA spec : 66 * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
75 * bytes frames or all negotiated frame sizes, but you can use the sysctl 75 * bytes frames or all negotiated frame sizes, but you can use the sysctl
76 * to play with this value anyway. 76 * to play with this value anyway.
77 * Jean II */ 77 * Jean II */
78unsigned sysctl_max_tx_data_size = 2042; 78unsigned int sysctl_max_tx_data_size = 2042;
79/* 79/*
80 * Maximum transmit window, i.e. number of LAP frames between turn-around. 80 * Maximum transmit window, i.e. number of LAP frames between turn-around.
81 * This allow to override what the peer told us. Some peers are buggy and 81 * This allow to override what the peer told us. Some peers are buggy and
82 * don't always support what they tell us. 82 * don't always support what they tell us.
83 * Jean II */ 83 * Jean II */
84unsigned sysctl_max_tx_window = 7; 84unsigned int sysctl_max_tx_window = 7;
85 85
86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, 87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 104fdd9862bd..a5809a1a6239 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1013,7 +1013,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1013 cancel_work_sync(&local->reconfig_filter); 1013 cancel_work_sync(&local->reconfig_filter);
1014 1014
1015 ieee80211_clear_tx_pending(local); 1015 ieee80211_clear_tx_pending(local);
1016 sta_info_stop(local);
1017 rate_control_deinitialize(local); 1016 rate_control_deinitialize(local);
1018 1017
1019 if (skb_queue_len(&local->skb_queue) || 1018 if (skb_queue_len(&local->skb_queue) ||
@@ -1025,6 +1024,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1025 1024
1026 destroy_workqueue(local->workqueue); 1025 destroy_workqueue(local->workqueue);
1027 wiphy_unregister(local->hw.wiphy); 1026 wiphy_unregister(local->hw.wiphy);
1027 sta_info_stop(local);
1028 ieee80211_wep_free(local); 1028 ieee80211_wep_free(local);
1029 ieee80211_led_exit(local); 1029 ieee80211_led_exit(local);
1030 kfree(local->int_scan_req); 1030 kfree(local->int_scan_req);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 695447e988cb..0a7e0fed3251 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -792,7 +792,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
792 BUG_ON(!sdata->bss); 792 BUG_ON(!sdata->bss);
793 793
794 atomic_dec(&sdata->bss->num_sta_ps); 794 atomic_dec(&sdata->bss->num_sta_ps);
795 __sta_info_clear_tim_bit(sdata->bss, sta); 795 sta_info_clear_tim_bit(sta);
796 } 796 }
797 797
798 local->num_sta--; 798 local->num_sta--;
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 2fd4565144de..31d56b23b9e9 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
364 break; 364 break;
365 365
366 case PPTP_WAN_ERROR_NOTIFY: 366 case PPTP_WAN_ERROR_NOTIFY:
367 case PPTP_SET_LINK_INFO:
367 case PPTP_ECHO_REQUEST: 368 case PPTP_ECHO_REQUEST:
368 case PPTP_ECHO_REPLY: 369 case PPTP_ECHO_REPLY:
369 /* I don't have to explain these ;) */ 370 /* I don't have to explain these ;) */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37bf94394be0..8235b86b4e87 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
409 if (opsize < 2) /* "silly options" */ 409 if (opsize < 2) /* "silly options" */
410 return; 410 return;
411 if (opsize > length) 411 if (opsize > length)
412 break; /* don't parse partial options */ 412 return; /* don't parse partial options */
413 413
414 if (opcode == TCPOPT_SACK_PERM 414 if (opcode == TCPOPT_SACK_PERM
415 && opsize == TCPOLEN_SACK_PERM) 415 && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
447 BUG_ON(ptr == NULL); 447 BUG_ON(ptr == NULL);
448 448
449 /* Fast path for timestamp-only option */ 449 /* Fast path for timestamp-only option */
450 if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 if (length == TCPOLEN_TSTAMP_ALIGNED
451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
452 | (TCPOPT_NOP << 16) 452 | (TCPOPT_NOP << 16)
453 | (TCPOPT_TIMESTAMP << 8) 453 | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
469 if (opsize < 2) /* "silly options" */ 469 if (opsize < 2) /* "silly options" */
470 return; 470 return;
471 if (opsize > length) 471 if (opsize > length)
472 break; /* don't parse partial options */ 472 return; /* don't parse partial options */
473 473
474 if (opcode == TCPOPT_SACK 474 if (opcode == TCPOPT_SACK
475 && opsize >= (TCPOLEN_SACK_BASE 475 && opsize >= (TCPOLEN_SACK_BASE
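The two nf_conntrack_proto_tcp.c hunks above switch "break" to "return" so that an option whose advertised size exceeds the remaining header aborts the entire option walk instead of only the current switch arm, and they fix the timestamp fast path to compare the remaining length against TCPOLEN_TSTAMP_ALIGNED itself. A minimal standalone sketch of that parsing rule follows; the constants and function names are illustrative, not the conntrack code.

/*
 * Walk TCP options and stop the whole walk on a truncated option
 * instead of skipping it.  Layout (kind, length, data) follows RFC 793.
 */
#include <stddef.h>
#include <stdint.h>

#define OPT_EOL  0
#define OPT_NOP  1

void parse_tcp_options(const uint8_t *ptr, size_t length)
{
	while (length > 0) {
		uint8_t opcode = *ptr++;
		uint8_t opsize;

		if (opcode == OPT_EOL)
			return;
		if (opcode == OPT_NOP) {
			length--;
			continue;
		}
		if (length < 2)
			return;		/* no room left for a length byte */
		opsize = *ptr++;
		if (opsize < 2)		/* "silly options" */
			return;
		if (opsize > length)
			return;		/* truncated option: abort, do not skip */

		/* ... interpret SACK, timestamps, window scale here ... */

		ptr += opsize - 2;
		length -= opsize;
	}
}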
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00bd475eab4b..a80b0cb03f17 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
646 return NULL; 646 return NULL;
647 647
648 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); 648 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
649 verdict = ntohl(vhdr->verdict); 649 verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
650 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) 650 if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
651 return NULL; 651 return NULL;
652 return vhdr; 652 return vhdr;
653} 653}
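The rewritten verdicthdr_get() masks off the flag bits before the range check and also rejects NF_STOLEN, which userspace is never allowed to return for a queued packet. Below is a self-contained sketch of the same validation; the constant values are restated from <linux/netfilter.h> only so the snippet stands alone and should be read as assumptions, not authoritative definitions.

#include <stdbool.h>
#include <stdint.h>

#define NF_DROP         0
#define NF_ACCEPT       1
#define NF_STOLEN       2
#define NF_QUEUE        3
#define NF_REPEAT       4
#define NF_STOP         5
#define NF_MAX_VERDICT  NF_STOP
#define NF_VERDICT_MASK 0x000000ff

/* Accept only in-range verdicts, and never NF_STOLEN from userspace. */
bool verdict_is_valid(uint32_t raw)
{
	uint32_t verdict = raw & NF_VERDICT_MASK;

	return verdict <= NF_MAX_VERDICT && verdict != NF_STOLEN;
}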
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 76a083184d8e..ed0db15ab00e 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
78{ 78{
79 struct xt_rateest_match_info *info = par->matchinfo; 79 struct xt_rateest_match_info *info = par->matchinfo;
80 struct xt_rateest *est1, *est2; 80 struct xt_rateest *est1, *est2;
81 int ret = false; 81 int ret = -EINVAL;
82 82
83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
84 XT_RATEEST_MATCH_REL)) != 1) 84 XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
101 if (!est1) 101 if (!est1)
102 goto err1; 102 goto err1;
103 103
104 est2 = NULL;
104 if (info->flags & XT_RATEEST_MATCH_REL) { 105 if (info->flags & XT_RATEEST_MATCH_REL) {
105 est2 = xt_rateest_lookup(info->name2); 106 est2 = xt_rateest_lookup(info->name2);
106 if (!est2) 107 if (!est2)
107 goto err2; 108 goto err2;
108 } else 109 }
109 est2 = NULL;
110
111 110
112 info->est1 = est1; 111 info->est1 = est1;
113 info->est2 = est2; 112 info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
116err2: 115err2:
117 xt_rateest_put(est1); 116 xt_rateest_put(est1);
118err1: 117err1:
119 return -EINVAL; 118 return ret;
120} 119}
121 120
122static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) 121static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a9..b01427924f81 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
425 struct rsvp_filter *f, **fp; 425 struct rsvp_filter *f, **fp;
426 struct rsvp_session *s, **sp; 426 struct rsvp_session *s, **sp;
427 struct tc_rsvp_pinfo *pinfo = NULL; 427 struct tc_rsvp_pinfo *pinfo = NULL;
428 struct nlattr *opt = tca[TCA_OPTIONS-1]; 428 struct nlattr *opt = tca[TCA_OPTIONS];
429 struct nlattr *tb[TCA_RSVP_MAX + 1]; 429 struct nlattr *tb[TCA_RSVP_MAX + 1];
430 struct tcf_exts e; 430 struct tcf_exts e;
431 unsigned int h1, h2; 431 unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
439 if (err < 0) 439 if (err < 0)
440 return err; 440 return err;
441 441
442 err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); 442 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
443 if (err < 0) 443 if (err < 0)
444 return err; 444 return err;
445 445
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
449 449
450 if (f->handle != handle && handle) 450 if (f->handle != handle && handle)
451 goto errout2; 451 goto errout2;
452 if (tb[TCA_RSVP_CLASSID-1]) { 452 if (tb[TCA_RSVP_CLASSID]) {
453 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 453 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
454 tcf_bind_filter(tp, &f->res, base); 454 tcf_bind_filter(tp, &f->res, base);
455 } 455 }
456 456
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
462 err = -EINVAL; 462 err = -EINVAL;
463 if (handle) 463 if (handle)
464 goto errout2; 464 goto errout2;
465 if (tb[TCA_RSVP_DST-1] == NULL) 465 if (tb[TCA_RSVP_DST] == NULL)
466 goto errout2; 466 goto errout2;
467 467
468 err = -ENOBUFS; 468 err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
471 goto errout2; 471 goto errout2;
472 472
473 h2 = 16; 473 h2 = 16;
474 if (tb[TCA_RSVP_SRC-1]) { 474 if (tb[TCA_RSVP_SRC]) {
475 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); 475 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
476 h2 = hash_src(f->src); 476 h2 = hash_src(f->src);
477 } 477 }
478 if (tb[TCA_RSVP_PINFO-1]) { 478 if (tb[TCA_RSVP_PINFO]) {
479 pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); 479 pinfo = nla_data(tb[TCA_RSVP_PINFO]);
480 f->spi = pinfo->spi; 480 f->spi = pinfo->spi;
481 f->tunnelhdr = pinfo->tunnelhdr; 481 f->tunnelhdr = pinfo->tunnelhdr;
482 } 482 }
483 if (tb[TCA_RSVP_CLASSID-1]) 483 if (tb[TCA_RSVP_CLASSID])
484 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 484 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
485 485
486 dst = nla_data(tb[TCA_RSVP_DST-1]); 486 dst = nla_data(tb[TCA_RSVP_DST]);
487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); 487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
488 488
489 err = -ENOMEM; 489 err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
642 return -1; 642 return -1;
643} 643}
644 644
645static struct tcf_proto_ops RSVP_OPS = { 645static struct tcf_proto_ops RSVP_OPS __read_mostly = {
646 .next = NULL,
647 .kind = RSVP_ID, 646 .kind = RSVP_ID,
648 .classify = rsvp_classify, 647 .classify = rsvp_classify,
649 .init = rsvp_init, 648 .init = rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8da..76388b083f28 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1689 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1689 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1690 sctp_asconf_queue_teardown(asoc); 1690 sctp_asconf_queue_teardown(asoc);
1691 break; 1691 break;
1692
1693 case SCTP_CMD_SET_ASOC:
1694 asoc = cmd->obj.asoc;
1695 break;
1696
1692 default: 1697 default:
1693 pr_warn("Impossible command: %u, %p\n", 1698 pr_warn("Impossible command: %u, %p\n",
1694 cmd->verb, cmd->obj.ptr); 1699 cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 73d14fc02606..891f5db8cc31 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
2049 2049
2050 /* Restore association pointer to provide SCTP command interpeter
2051 * with a valid context in case it needs to manipulate
2052 * the queues */
2053 sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
2054 SCTP_ASOC((struct sctp_association *)asoc));
2055
2050 return retval; 2056 return retval;
2051 2057
2052nomem: 2058nomem:
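SCTP_CMD_SET_ASOC exists because the side-effect interpreter runs a whole command list against a single "current association" pointer; once sctp_sf_do_5_2_4_dupcook() has deleted one association it must point the interpreter back at a valid one before later commands touch the queues. A simplified sketch of that interpreter shape (the types and verbs here are invented for illustration):

#include <stddef.h>

struct assoc { int id; };

enum verb { CMD_SET_ASOC, CMD_FLUSH_QUEUE };

struct command {
	enum verb verb;
	struct assoc *asoc;	/* only meaningful for CMD_SET_ASOC */
};

void run_commands(struct command *cmds, size_t n, struct assoc *asoc)
{
	for (size_t i = 0; i < n; i++) {
		switch (cmds[i].verb) {
		case CMD_SET_ASOC:
			/* swap the working context, like SCTP_CMD_SET_ASOC */
			asoc = cmds[i].asoc;
			break;
		case CMD_FLUSH_QUEUE:
			/* operates on 'asoc'; it must not be stale here */
			(void)asoc;
			break;
		}
	}
}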
diff --git a/net/socket.c b/net/socket.c
index 2517e11a5300..2877647f347b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1965 * used_address->name_len is initialized to UINT_MAX so that the first 1965 * used_address->name_len is initialized to UINT_MAX so that the first
1966 * destination address never matches. 1966 * destination address never matches.
1967 */ 1967 */
1968 if (used_address && used_address->name_len == msg_sys->msg_namelen && 1968 if (used_address && msg_sys->msg_name &&
1969 !memcmp(&used_address->name, msg->msg_name, 1969 used_address->name_len == msg_sys->msg_namelen &&
1970 !memcmp(&used_address->name, msg_sys->msg_name,
1970 used_address->name_len)) { 1971 used_address->name_len)) {
1971 err = sock_sendmsg_nosec(sock, msg_sys, total_len); 1972 err = sock_sendmsg_nosec(sock, msg_sys, total_len);
1972 goto out_freectl; 1973 goto out_freectl;
@@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1978 */ 1979 */
1979 if (used_address && err >= 0) { 1980 if (used_address && err >= 0) {
1980 used_address->name_len = msg_sys->msg_namelen; 1981 used_address->name_len = msg_sys->msg_namelen;
1981 memcpy(&used_address->name, msg->msg_name, 1982 if (msg_sys->msg_name)
1982 used_address->name_len); 1983 memcpy(&used_address->name, msg_sys->msg_name,
1984 used_address->name_len);
1983 } 1985 }
1984 1986
1985out_freectl: 1987out_freectl:
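The __sys_sendmsg() change makes the cached-destination fast path tolerate messages without a destination: the cached name is only compared or refreshed when msg_name is non-NULL, and the comparison now uses the kernel copy (msg_sys) rather than the user-supplied msghdr. A small sketch of that caching discipline with simplified stand-in types (not the net/socket.c structures):

#include <stdbool.h>
#include <string.h>

struct cached_address {
	unsigned int len;	/* UINT_MAX means nothing cached yet */
	char name[128];
};

/* True only when a destination is present and matches the cached one. */
bool same_destination(const struct cached_address *cached,
		      const void *name, unsigned int name_len)
{
	return cached && name &&
	       name_len <= sizeof(cached->name) &&
	       cached->len == name_len &&
	       memcmp(cached->name, name, name_len) == 0;
}

/* Remember the destination, copying the name only if one was supplied. */
void remember_destination(struct cached_address *cached,
			  const void *name, unsigned int name_len)
{
	if (!cached || name_len > sizeof(cached->name))
		return;
	cached->len = name_len;
	if (name)
		memcpy(cached->name, name, name_len);
}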
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 44cbebac25e0..220f3bd176f8 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -616,6 +616,9 @@ int wiphy_register(struct wiphy *wiphy)
616 if (res) 616 if (res)
617 goto out_rm_dev; 617 goto out_rm_dev;
618 618
619 rtnl_lock();
620 rdev->wiphy.registered = true;
621 rtnl_unlock();
619 return 0; 622 return 0;
620 623
621out_rm_dev: 624out_rm_dev:
@@ -647,6 +650,10 @@ void wiphy_unregister(struct wiphy *wiphy)
647{ 650{
648 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 651 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
649 652
653 rtnl_lock();
654 rdev->wiphy.registered = false;
655 rtnl_unlock();
656
650 rfkill_unregister(rdev->rfkill); 657 rfkill_unregister(rdev->rfkill);
651 658
652 /* protect the device list */ 659 /* protect the device list */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 18fc37b6f2bd..2520a1b7e7db 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -851,6 +851,7 @@ static void handle_channel(struct wiphy *wiphy,
851 return; 851 return;
852 } 852 }
853 853
854 chan->beacon_found = false;
854 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 855 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
855 chan->max_antenna_gain = min(chan->orig_mag, 856 chan->max_antenna_gain = min(chan->orig_mag,
856 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 857 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be553..dec0fa28372e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
118 i++, j++) 118 i++, j++)
119 request->channels[i] = 119 request->channels[i] =
120 &wdev->wiphy->bands[band]->channels[j]; 120 &wdev->wiphy->bands[band]->channels[j];
121 request->rates[band] =
122 (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
121 } 123 }
122 } 124 }
123 request->n_channels = n_channels; 125 request->n_channels = n_channels;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index c6e4ca6a7d2e..ff574597a854 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,8 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
93 93
94 if (rdev->ops->suspend) { 94 if (rdev->ops->suspend) {
95 rtnl_lock(); 95 rtnl_lock();
96 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); 96 if (rdev->wiphy.registered)
97 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
97 rtnl_unlock(); 98 rtnl_unlock();
98 } 99 }
99 100
@@ -112,7 +113,8 @@ static int wiphy_resume(struct device *dev)
112 113
113 if (rdev->ops->resume) { 114 if (rdev->ops->resume) {
114 rtnl_lock(); 115 rtnl_lock();
115 ret = rdev->ops->resume(&rdev->wiphy); 116 if (rdev->wiphy.registered)
117 ret = rdev->ops->resume(&rdev->wiphy);
116 rtnl_unlock(); 118 rtnl_unlock();
117 } 119 }
118 120
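Together, the core.c and sysfs.c hunks ensure the driver's suspend/resume callbacks can no longer run after wiphy_unregister(): the registered flag is flipped under the RTNL and the PM hooks test it under the same lock. A toy illustration of the guard, using a pthread mutex in place of the RTNL and invented structure names:

#include <pthread.h>
#include <stdbool.h>

struct fake_wiphy {
	bool registered;
	int (*suspend)(struct fake_wiphy *w);
};

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

int wiphy_suspend_sketch(struct fake_wiphy *w)
{
	int ret = 0;

	pthread_mutex_lock(&cfg_lock);
	/* never call into the driver once the device is unregistered */
	if (w->suspend && w->registered)
		ret = w->suspend(w);
	pthread_mutex_unlock(&cfg_lock);
	return ret;
}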
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index a026b0ef2443..54a0dc2e2f8d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,6 +212,11 @@ resume:
212 /* only the first xfrm gets the encap type */ 212 /* only the first xfrm gets the encap type */
213 encap_type = 0; 213 encap_type = 0;
214 214
215 if (async && x->repl->check(x, skb, seq)) {
216 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
217 goto drop_unlock;
218 }
219
215 x->repl->advance(x, seq); 220 x->repl->advance(x, seq);
216 221
217 x->curlft.bytes += skb->len; 222 x->curlft.bytes += skb->len;
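The xfrm_input() hunk repeats the replay check on the asynchronous resume path, since the replay window may have advanced while the packet was off being processed. For reference, here is a minimal sliding-window replay check in the classic 64-bit-bitmap style; it illustrates the concept and is not the kernel's x->repl implementation.

#include <stdbool.h>
#include <stdint.h>

struct replay_state {
	uint32_t top;		/* highest sequence number accepted so far */
	uint64_t bitmap;	/* bit i set: sequence (top - i) already seen */
};

/* Return true if 'seq' is acceptable (new or unseen inside the window). */
bool replay_check(const struct replay_state *rs, uint32_t seq)
{
	if (seq == 0)
		return false;			/* never valid */
	if (seq > rs->top)
		return true;			/* ahead of the window: new */
	if (rs->top - seq >= 64)
		return false;			/* behind the window: too old */
	return !(rs->bitmap & (1ULL << (rs->top - seq)));
}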
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 9d761c95eca2..3dfc47134e51 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2574,7 +2574,8 @@ sub process {
2574 } else { 2574 } else {
2575 $cast = $cast2; 2575 $cast = $cast2;
2576 } 2576 }
2577 WARN("$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); 2577 WARN("MINMAX",
2578 "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr);
2578 } 2579 }
2579 } 2580 }
2580 2581
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index eb2f1e64edf7..4594f3341051 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -1389,7 +1389,7 @@ sub vcs_exists {
1389 warn("$P: No supported VCS found. Add --nogit to options?\n"); 1389 warn("$P: No supported VCS found. Add --nogit to options?\n");
1390 warn("Using a git repository produces better results.\n"); 1390 warn("Using a git repository produces better results.\n");
1391 warn("Try Linus Torvalds' latest git repository using:\n"); 1391 warn("Try Linus Torvalds' latest git repository using:\n");
1392 warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n"); 1392 warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\n");
1393 $printed_novcs = 1; 1393 $printed_novcs = 1;
1394 } 1394 }
1395 return 0; 1395 return 0;
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 3fd1a7e24928..552b97afbca5 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -1073,10 +1073,10 @@ static int aoa_fabric_layout_probe(struct soundbus_dev *sdev)
1073 sdev->pcmid = -1; 1073 sdev->pcmid = -1;
1074 list_del(&ldev->list); 1074 list_del(&ldev->list);
1075 layouts_list_items--; 1075 layouts_list_items--;
1076 kfree(ldev);
1076 outnodev: 1077 outnodev:
1077 of_node_put(sound); 1078 of_node_put(sound);
1078 layout_device = NULL; 1079 layout_device = NULL;
1079 kfree(ldev);
1080 return -ENODEV; 1080 return -ENODEV;
1081} 1081}
1082 1082
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 86d0caf91b35..62e90b862a0d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1761 snd_pcm_uframes_t avail = 0; 1761 snd_pcm_uframes_t avail = 0;
1762 long wait_time, tout; 1762 long wait_time, tout;
1763 1763
1764 init_waitqueue_entry(&wait, current);
1765 set_current_state(TASK_INTERRUPTIBLE);
1766 add_wait_queue(&runtime->tsleep, &wait);
1767
1764 if (runtime->no_period_wakeup) 1768 if (runtime->no_period_wakeup)
1765 wait_time = MAX_SCHEDULE_TIMEOUT; 1769 wait_time = MAX_SCHEDULE_TIMEOUT;
1766 else { 1770 else {
@@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1771 } 1775 }
1772 wait_time = msecs_to_jiffies(wait_time * 1000); 1776 wait_time = msecs_to_jiffies(wait_time * 1000);
1773 } 1777 }
1774 init_waitqueue_entry(&wait, current); 1778
1775 add_wait_queue(&runtime->tsleep, &wait);
1776 for (;;) { 1779 for (;;) {
1777 if (signal_pending(current)) { 1780 if (signal_pending(current)) {
1778 err = -ERESTARTSYS; 1781 err = -ERESTARTSYS;
1779 break; 1782 break;
1780 } 1783 }
1784
1785 /*
1786 * We need to check if space became available already
1787 * (and thus the wakeup happened already) first to close
1788 * the race of space already having become available.
1789 * This check must happen after been added to the waitqueue
1790 * and having current state be INTERRUPTIBLE.
1791 */
1792 if (is_playback)
1793 avail = snd_pcm_playback_avail(runtime);
1794 else
1795 avail = snd_pcm_capture_avail(runtime);
1796 if (avail >= runtime->twake)
1797 break;
1781 snd_pcm_stream_unlock_irq(substream); 1798 snd_pcm_stream_unlock_irq(substream);
1782 tout = schedule_timeout_interruptible(wait_time); 1799
1800 tout = schedule_timeout(wait_time);
1801
1783 snd_pcm_stream_lock_irq(substream); 1802 snd_pcm_stream_lock_irq(substream);
1803 set_current_state(TASK_INTERRUPTIBLE);
1784 switch (runtime->status->state) { 1804 switch (runtime->status->state) {
1785 case SNDRV_PCM_STATE_SUSPENDED: 1805 case SNDRV_PCM_STATE_SUSPENDED:
1786 err = -ESTRPIPE; 1806 err = -ESTRPIPE;
@@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1806 err = -EIO; 1826 err = -EIO;
1807 break; 1827 break;
1808 } 1828 }
1809 if (is_playback)
1810 avail = snd_pcm_playback_avail(runtime);
1811 else
1812 avail = snd_pcm_capture_avail(runtime);
1813 if (avail >= runtime->twake)
1814 break;
1815 } 1829 }
1816 _endloop: 1830 _endloop:
1831 set_current_state(TASK_RUNNING);
1817 remove_wait_queue(&runtime->tsleep, &wait); 1832 remove_wait_queue(&runtime->tsleep, &wait);
1818 *availp = avail; 1833 *availp = avail;
1819 return err; 1834 return err;
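The wait_for_avail() rework closes a lost-wakeup race: the task has to be on the wait queue, with its state set, and must re-check the space condition before it actually sleeps, otherwise a wakeup arriving between the check and the sleep is lost. The same discipline in plain userspace form, using a condition variable whose predicate is re-tested under the lock (a sketch, not the ALSA code):

#include <pthread.h>
#include <stddef.h>

struct ring {
	pthread_mutex_t lock;
	pthread_cond_t more_space;
	size_t avail;		/* frames currently free */
	size_t twake;		/* wake-up threshold */
};

/* Block until at least r->twake frames are available. */
void wait_for_space(struct ring *r)
{
	pthread_mutex_lock(&r->lock);
	while (r->avail < r->twake)	/* re-check after every wakeup */
		pthread_cond_wait(&r->more_space, &r->lock);
	pthread_mutex_unlock(&r->lock);
}

/* Producer side: publish the new space, then signal any waiter. */
void report_space(struct ring *r, size_t frames)
{
	pthread_mutex_lock(&r->lock);
	r->avail += frames;
	pthread_cond_signal(&r->more_space);
	pthread_mutex_unlock(&r->lock);
}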
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 200c9a1d48b7..a872d0a82976 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1909,6 +1909,7 @@ static unsigned int ad1981_jacks_whitelist[] = {
1909 0x103c0944, /* HP nc6220 */ 1909 0x103c0944, /* HP nc6220 */
1910 0x103c0934, /* HP nc8220 */ 1910 0x103c0934, /* HP nc8220 */
1911 0x103c006d, /* HP nx9105 */ 1911 0x103c006d, /* HP nx9105 */
1912 0x103c300d, /* HP Compaq dc5100 SFF(PT003AW) */
1912 0x17340088, /* FSC Scenic-W */ 1913 0x17340088, /* FSC Scenic-W */
1913 0 /* end */ 1914 0 /* end */
1914}; 1915};
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index e4d76a270c9f..579fc0dce128 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2625,16 +2625,19 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2625 int err; 2625 int err;
2626 2626
2627 snd_azf3328_dbgcallenter(); 2627 snd_azf3328_dbgcallenter();
2628 if (dev >= SNDRV_CARDS) 2628 if (dev >= SNDRV_CARDS) {
2629 return -ENODEV; 2629 err = -ENODEV;
2630 goto out;
2631 }
2630 if (!enable[dev]) { 2632 if (!enable[dev]) {
2631 dev++; 2633 dev++;
2632 return -ENOENT; 2634 err = -ENOENT;
2635 goto out;
2633 } 2636 }
2634 2637
2635 err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); 2638 err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
2636 if (err < 0) 2639 if (err < 0)
2637 return err; 2640 goto out;
2638 2641
2639 strcpy(card->driver, "AZF3328"); 2642 strcpy(card->driver, "AZF3328");
2640 strcpy(card->shortname, "Aztech AZF3328 (PCI168)"); 2643 strcpy(card->shortname, "Aztech AZF3328 (PCI168)");
diff --git a/sound/pci/hda/alc268_quirks.c b/sound/pci/hda/alc268_quirks.c
index be58bf2f3aec..2e5876ce71fe 100644
--- a/sound/pci/hda/alc268_quirks.c
+++ b/sound/pci/hda/alc268_quirks.c
@@ -476,8 +476,8 @@ static const struct snd_pci_quirk alc268_ssid_cfg_tbl[] = {
476 476
477static const struct alc_config_preset alc268_presets[] = { 477static const struct alc_config_preset alc268_presets[] = {
478 [ALC267_QUANTA_IL1] = { 478 [ALC267_QUANTA_IL1] = {
479 .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer, 479 .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer },
480 alc268_capture_nosrc_mixer }, 480 .cap_mixer = alc268_capture_nosrc_mixer,
481 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 481 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
482 alc267_quanta_il1_verbs }, 482 alc267_quanta_il1_verbs },
483 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 483 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -492,8 +492,8 @@ static const struct alc_config_preset alc268_presets[] = {
492 .init_hook = alc_inithook, 492 .init_hook = alc_inithook,
493 }, 493 },
494 [ALC268_3ST] = { 494 [ALC268_3ST] = {
495 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 495 .mixers = { alc268_base_mixer, alc268_beep_mixer },
496 alc268_beep_mixer }, 496 .cap_mixer = alc268_capture_alt_mixer,
497 .init_verbs = { alc268_base_init_verbs }, 497 .init_verbs = { alc268_base_init_verbs },
498 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 498 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
499 .dac_nids = alc268_dac_nids, 499 .dac_nids = alc268_dac_nids,
@@ -507,8 +507,8 @@ static const struct alc_config_preset alc268_presets[] = {
507 .input_mux = &alc268_capture_source, 507 .input_mux = &alc268_capture_source,
508 }, 508 },
509 [ALC268_TOSHIBA] = { 509 [ALC268_TOSHIBA] = {
510 .mixers = { alc268_toshiba_mixer, alc268_capture_alt_mixer, 510 .mixers = { alc268_toshiba_mixer, alc268_beep_mixer },
511 alc268_beep_mixer }, 511 .cap_mixer = alc268_capture_alt_mixer,
512 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 512 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
513 alc268_toshiba_verbs }, 513 alc268_toshiba_verbs },
514 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 514 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -525,8 +525,8 @@ static const struct alc_config_preset alc268_presets[] = {
525 .init_hook = alc_inithook, 525 .init_hook = alc_inithook,
526 }, 526 },
527 [ALC268_ACER] = { 527 [ALC268_ACER] = {
528 .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer, 528 .mixers = { alc268_acer_mixer, alc268_beep_mixer },
529 alc268_beep_mixer }, 529 .cap_mixer = alc268_capture_alt_mixer,
530 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 530 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
531 alc268_acer_verbs }, 531 alc268_acer_verbs },
532 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 532 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -543,8 +543,8 @@ static const struct alc_config_preset alc268_presets[] = {
543 .init_hook = alc_inithook, 543 .init_hook = alc_inithook,
544 }, 544 },
545 [ALC268_ACER_DMIC] = { 545 [ALC268_ACER_DMIC] = {
546 .mixers = { alc268_acer_dmic_mixer, alc268_capture_alt_mixer, 546 .mixers = { alc268_acer_dmic_mixer, alc268_beep_mixer },
547 alc268_beep_mixer }, 547 .cap_mixer = alc268_capture_alt_mixer,
548 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 548 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
549 alc268_acer_verbs }, 549 alc268_acer_verbs },
550 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 550 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -561,9 +561,8 @@ static const struct alc_config_preset alc268_presets[] = {
561 .init_hook = alc_inithook, 561 .init_hook = alc_inithook,
562 }, 562 },
563 [ALC268_ACER_ASPIRE_ONE] = { 563 [ALC268_ACER_ASPIRE_ONE] = {
564 .mixers = { alc268_acer_aspire_one_mixer, 564 .mixers = { alc268_acer_aspire_one_mixer, alc268_beep_mixer},
565 alc268_beep_mixer, 565 .cap_mixer = alc268_capture_nosrc_mixer,
566 alc268_capture_nosrc_mixer },
567 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 566 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
568 alc268_acer_aspire_one_verbs }, 567 alc268_acer_aspire_one_verbs },
569 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 568 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -579,8 +578,8 @@ static const struct alc_config_preset alc268_presets[] = {
579 .init_hook = alc_inithook, 578 .init_hook = alc_inithook,
580 }, 579 },
581 [ALC268_DELL] = { 580 [ALC268_DELL] = {
582 .mixers = { alc268_dell_mixer, alc268_beep_mixer, 581 .mixers = { alc268_dell_mixer, alc268_beep_mixer},
583 alc268_capture_nosrc_mixer }, 582 .cap_mixer = alc268_capture_nosrc_mixer,
584 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 583 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
585 alc268_dell_verbs }, 584 alc268_dell_verbs },
586 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 585 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -596,8 +595,8 @@ static const struct alc_config_preset alc268_presets[] = {
596 .init_hook = alc_inithook, 595 .init_hook = alc_inithook,
597 }, 596 },
598 [ALC268_ZEPTO] = { 597 [ALC268_ZEPTO] = {
599 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 598 .mixers = { alc268_base_mixer, alc268_beep_mixer },
600 alc268_beep_mixer }, 599 .cap_mixer = alc268_capture_alt_mixer,
601 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 600 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
602 alc268_toshiba_verbs }, 601 alc268_toshiba_verbs },
603 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 602 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -616,7 +615,8 @@ static const struct alc_config_preset alc268_presets[] = {
616 }, 615 },
617#ifdef CONFIG_SND_DEBUG 616#ifdef CONFIG_SND_DEBUG
618 [ALC268_TEST] = { 617 [ALC268_TEST] = {
619 .mixers = { alc268_test_mixer, alc268_capture_mixer }, 618 .mixers = { alc268_test_mixer },
619 .cap_mixer = alc268_capture_mixer,
620 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 620 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
621 alc268_volume_init_verbs, 621 alc268_volume_init_verbs,
622 alc268_beep_init_verbs }, 622 alc268_beep_init_verbs },
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3e7850c238c3..f3aefef37216 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
579 return -1; 579 return -1;
580 } 580 }
581 recursive++; 581 recursive++;
582 for (i = 0; i < nums; i++) 582 for (i = 0; i < nums; i++) {
583 unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i]));
584 if (type == AC_WID_PIN || type == AC_WID_AUD_OUT)
585 continue;
583 if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0) 586 if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0)
584 return i; 587 return i;
588 }
585 return -1; 589 return -1;
586} 590}
587EXPORT_SYMBOL_HDA(snd_hda_get_conn_index); 591EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 28ce17d09c33..c34f730f4815 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -144,25 +144,17 @@ static int cea_sampling_frequencies[8] = {
144 SNDRV_PCM_RATE_192000, /* 7: 192000Hz */ 144 SNDRV_PCM_RATE_192000, /* 7: 192000Hz */
145}; 145};
146 146
147static unsigned char hdmi_get_eld_byte(struct hda_codec *codec, hda_nid_t nid, 147static unsigned int hdmi_get_eld_data(struct hda_codec *codec, hda_nid_t nid,
148 int byte_index) 148 int byte_index)
149{ 149{
150 unsigned int val; 150 unsigned int val;
151 151
152 val = snd_hda_codec_read(codec, nid, 0, 152 val = snd_hda_codec_read(codec, nid, 0,
153 AC_VERB_GET_HDMI_ELDD, byte_index); 153 AC_VERB_GET_HDMI_ELDD, byte_index);
154
155#ifdef BE_PARANOID 154#ifdef BE_PARANOID
156 printk(KERN_INFO "HDMI: ELD data byte %d: 0x%x\n", byte_index, val); 155 printk(KERN_INFO "HDMI: ELD data byte %d: 0x%x\n", byte_index, val);
157#endif 156#endif
158 157 return val;
159 if ((val & AC_ELDD_ELD_VALID) == 0) {
160 snd_printd(KERN_INFO "HDMI: invalid ELD data byte %d\n",
161 byte_index);
162 val = 0;
163 }
164
165 return val & AC_ELDD_ELD_DATA;
166} 158}
167 159
168#define GRAB_BITS(buf, byte, lowbit, bits) \ 160#define GRAB_BITS(buf, byte, lowbit, bits) \
@@ -344,11 +336,26 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
344 if (!buf) 336 if (!buf)
345 return -ENOMEM; 337 return -ENOMEM;
346 338
347 for (i = 0; i < size; i++) 339 for (i = 0; i < size; i++) {
348 buf[i] = hdmi_get_eld_byte(codec, nid, i); 340 unsigned int val = hdmi_get_eld_data(codec, nid, i);
341 if (!(val & AC_ELDD_ELD_VALID)) {
342 if (!i) {
343 snd_printd(KERN_INFO
344 "HDMI: invalid ELD data\n");
345 ret = -EINVAL;
346 goto error;
347 }
348 snd_printd(KERN_INFO
349 "HDMI: invalid ELD data byte %d\n", i);
350 val = 0;
351 } else
352 val &= AC_ELDD_ELD_DATA;
353 buf[i] = val;
354 }
349 355
350 ret = hdmi_update_eld(eld, buf, size); 356 ret = hdmi_update_eld(eld, buf, size);
351 357
358error:
352 kfree(buf); 359 kfree(buf);
353 return ret; 360 return ret;
354} 361}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 47d6ffc9b5b5..c45f3e69bcf0 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -375,7 +375,7 @@ static int is_ext_mic(struct hda_codec *codec, unsigned int idx)
375static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin, 375static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
376 unsigned int *idxp) 376 unsigned int *idxp)
377{ 377{
378 int i; 378 int i, idx;
379 hda_nid_t nid; 379 hda_nid_t nid;
380 380
381 nid = codec->start_nid; 381 nid = codec->start_nid;
@@ -384,9 +384,11 @@ static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
384 type = get_wcaps_type(get_wcaps(codec, nid)); 384 type = get_wcaps_type(get_wcaps(codec, nid));
385 if (type != AC_WID_AUD_IN) 385 if (type != AC_WID_AUD_IN)
386 continue; 386 continue;
387 *idxp = snd_hda_get_conn_index(codec, nid, pin, false); 387 idx = snd_hda_get_conn_index(codec, nid, pin, false);
388 if (*idxp >= 0) 388 if (idx >= 0) {
389 *idxp = idx;
389 return nid; 390 return nid;
391 }
390 } 392 }
391 return 0; 393 return 0;
392} 394}
@@ -533,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name,
533 int index, unsigned int pval, int dir, 535 int index, unsigned int pval, int dir,
534 struct snd_kcontrol **kctlp) 536 struct snd_kcontrol **kctlp)
535{ 537{
536 char tmp[32]; 538 char tmp[44];
537 struct snd_kcontrol_new knew = 539 struct snd_kcontrol_new knew =
538 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT); 540 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
539 knew.private_value = pval; 541 knew.private_value = pval;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 502fc9499453..7696d05b9356 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3348,6 +3348,8 @@ static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t pin,
3348 3348
3349#define MAX_AUTO_DACS 5 3349#define MAX_AUTO_DACS 5
3350 3350
3351#define DAC_SLAVE_FLAG 0x8000 /* filled dac is a slave */
3352
3351/* fill analog DAC list from the widget tree */ 3353/* fill analog DAC list from the widget tree */
3352static int fill_cx_auto_dacs(struct hda_codec *codec, hda_nid_t *dacs) 3354static int fill_cx_auto_dacs(struct hda_codec *codec, hda_nid_t *dacs)
3353{ 3355{
@@ -3370,16 +3372,26 @@ static int fill_cx_auto_dacs(struct hda_codec *codec, hda_nid_t *dacs)
3370/* fill pin_dac_pair list from the pin and dac list */ 3372/* fill pin_dac_pair list from the pin and dac list */
3371static int fill_dacs_for_pins(struct hda_codec *codec, hda_nid_t *pins, 3373static int fill_dacs_for_pins(struct hda_codec *codec, hda_nid_t *pins,
3372 int num_pins, hda_nid_t *dacs, int *rest, 3374 int num_pins, hda_nid_t *dacs, int *rest,
3373 struct pin_dac_pair *filled, int type) 3375 struct pin_dac_pair *filled, int nums,
3376 int type)
3374{ 3377{
3375 int i, nums; 3378 int i, start = nums;
3376 3379
3377 nums = 0; 3380 for (i = 0; i < num_pins; i++, nums++) {
3378 for (i = 0; i < num_pins; i++) {
3379 filled[nums].pin = pins[i]; 3381 filled[nums].pin = pins[i];
3380 filled[nums].type = type; 3382 filled[nums].type = type;
3381 filled[nums].dac = get_unassigned_dac(codec, pins[i], dacs, rest); 3383 filled[nums].dac = get_unassigned_dac(codec, pins[i], dacs, rest);
3382 nums++; 3384 if (filled[nums].dac)
3385 continue;
3386 if (filled[start].dac && get_connection_index(codec, pins[i], filled[start].dac) >= 0) {
3387 filled[nums].dac = filled[start].dac | DAC_SLAVE_FLAG;
3388 continue;
3389 }
3390 if (filled[0].dac && get_connection_index(codec, pins[i], filled[0].dac) >= 0) {
3391 filled[nums].dac = filled[0].dac | DAC_SLAVE_FLAG;
3392 continue;
3393 }
3394 snd_printdd("Failed to find a DAC for pin 0x%x", pins[i]);
3383 } 3395 }
3384 return nums; 3396 return nums;
3385} 3397}
@@ -3395,19 +3407,19 @@ static void cx_auto_parse_output(struct hda_codec *codec)
3395 rest = fill_cx_auto_dacs(codec, dacs); 3407 rest = fill_cx_auto_dacs(codec, dacs);
3396 /* parse all analog output pins */ 3408 /* parse all analog output pins */
3397 nums = fill_dacs_for_pins(codec, cfg->line_out_pins, cfg->line_outs, 3409 nums = fill_dacs_for_pins(codec, cfg->line_out_pins, cfg->line_outs,
3398 dacs, &rest, spec->dac_info, 3410 dacs, &rest, spec->dac_info, 0,
3399 AUTO_PIN_LINE_OUT); 3411 AUTO_PIN_LINE_OUT);
3400 nums += fill_dacs_for_pins(codec, cfg->hp_pins, cfg->hp_outs, 3412 nums = fill_dacs_for_pins(codec, cfg->hp_pins, cfg->hp_outs,
3401 dacs, &rest, spec->dac_info + nums, 3413 dacs, &rest, spec->dac_info, nums,
3402 AUTO_PIN_HP_OUT); 3414 AUTO_PIN_HP_OUT);
3403 nums += fill_dacs_for_pins(codec, cfg->speaker_pins, cfg->speaker_outs, 3415 nums = fill_dacs_for_pins(codec, cfg->speaker_pins, cfg->speaker_outs,
3404 dacs, &rest, spec->dac_info + nums, 3416 dacs, &rest, spec->dac_info, nums,
3405 AUTO_PIN_SPEAKER_OUT); 3417 AUTO_PIN_SPEAKER_OUT);
3406 spec->dac_info_filled = nums; 3418 spec->dac_info_filled = nums;
3407 /* fill multiout struct */ 3419 /* fill multiout struct */
3408 for (i = 0; i < nums; i++) { 3420 for (i = 0; i < nums; i++) {
3409 hda_nid_t dac = spec->dac_info[i].dac; 3421 hda_nid_t dac = spec->dac_info[i].dac;
3410 if (!dac) 3422 if (!dac || (dac & DAC_SLAVE_FLAG))
3411 continue; 3423 continue;
3412 switch (spec->dac_info[i].type) { 3424 switch (spec->dac_info[i].type) {
3413 case AUTO_PIN_LINE_OUT: 3425 case AUTO_PIN_LINE_OUT:
@@ -3862,7 +3874,7 @@ static void cx_auto_parse_input(struct hda_codec *codec)
3862 } 3874 }
3863 if (imux->num_items >= 2 && cfg->num_inputs == imux->num_items) 3875 if (imux->num_items >= 2 && cfg->num_inputs == imux->num_items)
3864 cx_auto_check_auto_mic(codec); 3876 cx_auto_check_auto_mic(codec);
3865 if (imux->num_items > 1 && !spec->auto_mic) { 3877 if (imux->num_items > 1) {
3866 for (i = 1; i < imux->num_items; i++) { 3878 for (i = 1; i < imux->num_items; i++) {
3867 if (spec->imux_info[i].adc != spec->imux_info[0].adc) { 3879 if (spec->imux_info[i].adc != spec->imux_info[0].adc) {
3868 spec->adc_switching = 1; 3880 spec->adc_switching = 1;
@@ -4035,6 +4047,8 @@ static void cx_auto_init_output(struct hda_codec *codec)
4035 nid = spec->dac_info[i].dac; 4047 nid = spec->dac_info[i].dac;
4036 if (!nid) 4048 if (!nid)
4037 nid = spec->multiout.dac_nids[0]; 4049 nid = spec->multiout.dac_nids[0];
4050 else if (nid & DAC_SLAVE_FLAG)
4051 nid &= ~DAC_SLAVE_FLAG;
4038 select_connection(codec, spec->dac_info[i].pin, nid); 4052 select_connection(codec, spec->dac_info[i].pin, nid);
4039 } 4053 }
4040 if (spec->auto_mute) { 4054 if (spec->auto_mute) {
@@ -4167,9 +4181,11 @@ static int try_add_pb_volume(struct hda_codec *codec, hda_nid_t dac,
4167 hda_nid_t pin, const char *name, int idx) 4181 hda_nid_t pin, const char *name, int idx)
4168{ 4182{
4169 unsigned int caps; 4183 unsigned int caps;
4170 caps = query_amp_caps(codec, dac, HDA_OUTPUT); 4184 if (dac && !(dac & DAC_SLAVE_FLAG)) {
4171 if (caps & AC_AMPCAP_NUM_STEPS) 4185 caps = query_amp_caps(codec, dac, HDA_OUTPUT);
4172 return cx_auto_add_pb_volume(codec, dac, name, idx); 4186 if (caps & AC_AMPCAP_NUM_STEPS)
4187 return cx_auto_add_pb_volume(codec, dac, name, idx);
4188 }
4173 caps = query_amp_caps(codec, pin, HDA_OUTPUT); 4189 caps = query_amp_caps(codec, pin, HDA_OUTPUT);
4174 if (caps & AC_AMPCAP_NUM_STEPS) 4190 if (caps & AC_AMPCAP_NUM_STEPS)
4175 return cx_auto_add_pb_volume(codec, pin, name, idx); 4191 return cx_auto_add_pb_volume(codec, pin, name, idx);
@@ -4191,8 +4207,7 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
4191 for (i = 0; i < spec->dac_info_filled; i++) { 4207 for (i = 0; i < spec->dac_info_filled; i++) {
4192 const char *label; 4208 const char *label;
4193 int idx, type; 4209 int idx, type;
4194 if (!spec->dac_info[i].dac) 4210 hda_nid_t dac = spec->dac_info[i].dac;
4195 continue;
4196 type = spec->dac_info[i].type; 4211 type = spec->dac_info[i].type;
4197 if (type == AUTO_PIN_LINE_OUT) 4212 if (type == AUTO_PIN_LINE_OUT)
4198 type = spec->autocfg.line_out_type; 4213 type = spec->autocfg.line_out_type;
@@ -4211,7 +4226,7 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
4211 idx = num_spk++; 4226 idx = num_spk++;
4212 break; 4227 break;
4213 } 4228 }
4214 err = try_add_pb_volume(codec, spec->dac_info[i].dac, 4229 err = try_add_pb_volume(codec, dac,
4215 spec->dac_info[i].pin, 4230 spec->dac_info[i].pin,
4216 label, idx); 4231 label, idx);
4217 if (err < 0) 4232 if (err < 0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9a1aa09f47fe..0503c999e7d3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -168,7 +168,7 @@ struct alc_spec {
168 unsigned int auto_mic_valid_imux:1; /* valid imux for auto-mic */ 168 unsigned int auto_mic_valid_imux:1; /* valid imux for auto-mic */
169 unsigned int automute:1; /* HP automute enabled */ 169 unsigned int automute:1; /* HP automute enabled */
170 unsigned int detect_line:1; /* Line-out detection enabled */ 170 unsigned int detect_line:1; /* Line-out detection enabled */
171 unsigned int automute_lines:1; /* automute line-out as well */ 171 unsigned int automute_lines:1; /* automute line-out as well; NOP when automute_hp_lo isn't set */
172 unsigned int automute_hp_lo:1; /* both HP and LO available */ 172 unsigned int automute_hp_lo:1; /* both HP and LO available */
173 173
174 /* other flags */ 174 /* other flags */
@@ -551,7 +551,7 @@ static void update_speakers(struct hda_codec *codec)
551 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] || 551 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
552 spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0]) 552 spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
553 return; 553 return;
554 if (!spec->automute_lines || !spec->automute) 554 if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))
555 on = 0; 555 on = 0;
556 else 556 else
557 on = spec->jack_present; 557 on = spec->jack_present;
@@ -565,11 +565,11 @@ static void alc_hp_automute(struct hda_codec *codec)
565{ 565{
566 struct alc_spec *spec = codec->spec; 566 struct alc_spec *spec = codec->spec;
567 567
568 if (!spec->automute)
569 return;
570 spec->jack_present = 568 spec->jack_present =
571 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.hp_pins), 569 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
572 spec->autocfg.hp_pins); 570 spec->autocfg.hp_pins);
571 if (!spec->automute)
572 return;
573 update_speakers(codec); 573 update_speakers(codec);
574} 574}
575 575
@@ -578,11 +578,11 @@ static void alc_line_automute(struct hda_codec *codec)
578{ 578{
579 struct alc_spec *spec = codec->spec; 579 struct alc_spec *spec = codec->spec;
580 580
581 if (!spec->automute || !spec->detect_line)
582 return;
583 spec->line_jack_present = 581 spec->line_jack_present =
584 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), 582 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
585 spec->autocfg.line_out_pins); 583 spec->autocfg.line_out_pins);
584 if (!spec->automute || !spec->detect_line)
585 return;
586 update_speakers(codec); 586 update_speakers(codec);
587} 587}
588 588
@@ -803,7 +803,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
803 unsigned int val; 803 unsigned int val;
804 if (!spec->automute) 804 if (!spec->automute)
805 val = 0; 805 val = 0;
806 else if (!spec->automute_lines) 806 else if (!spec->automute_hp_lo || !spec->automute_lines)
807 val = 1; 807 val = 1;
808 else 808 else
809 val = 2; 809 val = 2;
@@ -824,7 +824,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,
824 spec->automute = 0; 824 spec->automute = 0;
825 break; 825 break;
826 case 1: 826 case 1:
827 if (spec->automute && !spec->automute_lines) 827 if (spec->automute &&
828 (!spec->automute_hp_lo || !spec->automute_lines))
828 return 0; 829 return 0;
829 spec->automute = 1; 830 spec->automute = 1;
830 spec->automute_lines = 0; 831 spec->automute_lines = 0;
@@ -1784,6 +1785,7 @@ static const char * const alc_slave_vols[] = {
1784 "Speaker Playback Volume", 1785 "Speaker Playback Volume",
1785 "Mono Playback Volume", 1786 "Mono Playback Volume",
1786 "Line-Out Playback Volume", 1787 "Line-Out Playback Volume",
1788 "PCM Playback Volume",
1787 NULL, 1789 NULL,
1788}; 1790};
1789 1791
@@ -1798,6 +1800,7 @@ static const char * const alc_slave_sws[] = {
1798 "Mono Playback Switch", 1800 "Mono Playback Switch",
1799 "IEC958 Playback Switch", 1801 "IEC958 Playback Switch",
1800 "Line-Out Playback Switch", 1802 "Line-Out Playback Switch",
1803 "PCM Playback Switch",
1801 NULL, 1804 NULL,
1802}; 1805};
1803 1806
@@ -3081,16 +3084,22 @@ static void alc_auto_init_multi_out(struct hda_codec *codec)
3081static void alc_auto_init_extra_out(struct hda_codec *codec) 3084static void alc_auto_init_extra_out(struct hda_codec *codec)
3082{ 3085{
3083 struct alc_spec *spec = codec->spec; 3086 struct alc_spec *spec = codec->spec;
3084 hda_nid_t pin; 3087 hda_nid_t pin, dac;
3085 3088
3086 pin = spec->autocfg.hp_pins[0]; 3089 pin = spec->autocfg.hp_pins[0];
3087 if (pin) 3090 if (pin) {
3088 alc_auto_set_output_and_unmute(codec, pin, PIN_HP, 3091 dac = spec->multiout.hp_nid;
3089 spec->multiout.hp_nid); 3092 if (!dac)
3093 dac = spec->multiout.dac_nids[0];
3094 alc_auto_set_output_and_unmute(codec, pin, PIN_HP, dac);
3095 }
3090 pin = spec->autocfg.speaker_pins[0]; 3096 pin = spec->autocfg.speaker_pins[0];
3091 if (pin) 3097 if (pin) {
3092 alc_auto_set_output_and_unmute(codec, pin, PIN_OUT, 3098 dac = spec->multiout.extra_out_nid[0];
3093 spec->multiout.extra_out_nid[0]); 3099 if (!dac)
3100 dac = spec->multiout.dac_nids[0];
3101 alc_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac);
3102 }
3094} 3103}
3095 3104
3096/* 3105/*
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index aa376b59c006..1b7c11432aa7 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -673,6 +673,7 @@ static int stac92xx_smux_enum_put(struct snd_kcontrol *kcontrol,
673 return 0; 673 return 0;
674} 674}
675 675
676#ifdef CONFIG_SND_HDA_POWER_SAVE
676static int stac_vrefout_set(struct hda_codec *codec, 677static int stac_vrefout_set(struct hda_codec *codec,
677 hda_nid_t nid, unsigned int new_vref) 678 hda_nid_t nid, unsigned int new_vref)
678{ 679{
@@ -696,6 +697,7 @@ static int stac_vrefout_set(struct hda_codec *codec,
696 697
697 return 1; 698 return 1;
698} 699}
700#endif
699 701
700static unsigned int stac92xx_vref_set(struct hda_codec *codec, 702static unsigned int stac92xx_vref_set(struct hda_codec *codec,
701 hda_nid_t nid, unsigned int new_vref) 703 hda_nid_t nid, unsigned int new_vref)
@@ -6571,6 +6573,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
6571 { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, 6573 { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
6572 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, 6574 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
6573 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, 6575 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
6576 { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},
6574 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, 6577 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
6575 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, 6578 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
6576 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, 6579 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
index d6651c033cb7..5956584ea3a4 100644
--- a/sound/soc/blackfin/bf5xx-ad193x.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -56,7 +56,7 @@ static int bf5xx_ad193x_hw_params(struct snd_pcm_substream *substream,
56 56
57 switch (params_rate(params)) { 57 switch (params_rate(params)) {
58 case 48000: 58 case 48000:
59 clk = 12288000; 59 clk = 24576000;
60 break; 60 break;
61 } 61 }
62 62
@@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
103 .cpu_dai_name = "bfin-tdm.0", 103 .cpu_dai_name = "bfin-tdm.0",
104 .codec_dai_name ="ad193x-hifi", 104 .codec_dai_name ="ad193x-hifi",
105 .platform_name = "bfin-tdm-pcm-audio", 105 .platform_name = "bfin-tdm-pcm-audio",
106 .codec_name = "ad193x.5", 106 .codec_name = "spi0.5",
107 .ops = &bf5xx_ad193x_ops, 107 .ops = &bf5xx_ad193x_ops,
108 }, 108 },
109 { 109 {
@@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
112 .cpu_dai_name = "bfin-tdm.1", 112 .cpu_dai_name = "bfin-tdm.1",
113 .codec_dai_name ="ad193x-hifi", 113 .codec_dai_name ="ad193x-hifi",
114 .platform_name = "bfin-tdm-pcm-audio", 114 .platform_name = "bfin-tdm-pcm-audio",
115 .codec_name = "ad193x.5", 115 .codec_name = "spi0.5",
116 .ops = &bf5xx_ad193x_ops, 116 .ops = &bf5xx_ad193x_ops,
117 }, 117 },
118}; 118};
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 2374ca5ffe68..eedb6f5e5823 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -27,11 +27,6 @@ struct ad193x_priv {
27 int sysclk; 27 int sysclk;
28}; 28};
29 29
30/* ad193x register cache & default register settings */
31static const u8 ad193x_reg[AD193X_NUM_REGS] = {
32 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0,
33};
34
35/* 30/*
36 * AD193X volume/mute/de-emphasis etc. controls 31 * AD193X volume/mute/de-emphasis etc. controls
37 */ 32 */
@@ -307,7 +302,8 @@ static int ad193x_hw_params(struct snd_pcm_substream *substream,
307 snd_soc_write(codec, AD193X_PLL_CLK_CTRL0, reg); 302 snd_soc_write(codec, AD193X_PLL_CLK_CTRL0, reg);
308 303
309 reg = snd_soc_read(codec, AD193X_DAC_CTRL2); 304 reg = snd_soc_read(codec, AD193X_DAC_CTRL2);
310 reg = (reg & (~AD193X_DAC_WORD_LEN_MASK)) | word_len; 305 reg = (reg & (~AD193X_DAC_WORD_LEN_MASK))
306 | (word_len << AD193X_DAC_WORD_LEN_SHFT);
311 snd_soc_write(codec, AD193X_DAC_CTRL2, reg); 307 snd_soc_write(codec, AD193X_DAC_CTRL2, reg);
312 308
313 reg = snd_soc_read(codec, AD193X_ADC_CTRL1); 309 reg = snd_soc_read(codec, AD193X_ADC_CTRL1);
@@ -389,9 +385,6 @@ static int ad193x_probe(struct snd_soc_codec *codec)
389 385
390static struct snd_soc_codec_driver soc_codec_dev_ad193x = { 386static struct snd_soc_codec_driver soc_codec_dev_ad193x = {
391 .probe = ad193x_probe, 387 .probe = ad193x_probe,
392 .reg_cache_default = ad193x_reg,
393 .reg_cache_size = AD193X_NUM_REGS,
394 .reg_word_size = sizeof(u16),
395}; 388};
396 389
397#if defined(CONFIG_SPI_MASTER) 390#if defined(CONFIG_SPI_MASTER)
diff --git a/sound/soc/codecs/ad193x.h b/sound/soc/codecs/ad193x.h
index 9747b5497877..cccc2e8e5fbd 100644
--- a/sound/soc/codecs/ad193x.h
+++ b/sound/soc/codecs/ad193x.h
@@ -34,7 +34,8 @@
34#define AD193X_DAC_LEFT_HIGH (1 << 3) 34#define AD193X_DAC_LEFT_HIGH (1 << 3)
35#define AD193X_DAC_BCLK_INV (1 << 7) 35#define AD193X_DAC_BCLK_INV (1 << 7)
36#define AD193X_DAC_CTRL2 0x804 36#define AD193X_DAC_CTRL2 0x804
37#define AD193X_DAC_WORD_LEN_MASK 0xC 37#define AD193X_DAC_WORD_LEN_SHFT 3
38#define AD193X_DAC_WORD_LEN_MASK 0x18
38#define AD193X_DAC_MASTER_MUTE 1 39#define AD193X_DAC_MASTER_MUTE 1
39#define AD193X_DAC_CHNL_MUTE 0x805 40#define AD193X_DAC_CHNL_MUTE 0x805
40#define AD193X_DACL1_MUTE 0 41#define AD193X_DACL1_MUTE 0
@@ -63,7 +64,7 @@
63#define AD193X_ADC_CTRL1 0x80f 64#define AD193X_ADC_CTRL1 0x80f
64#define AD193X_ADC_SERFMT_MASK 0x60 65#define AD193X_ADC_SERFMT_MASK 0x60
65#define AD193X_ADC_SERFMT_STEREO (0 << 5) 66#define AD193X_ADC_SERFMT_STEREO (0 << 5)
66#define AD193X_ADC_SERFMT_TDM (1 << 2) 67#define AD193X_ADC_SERFMT_TDM (1 << 5)
67#define AD193X_ADC_SERFMT_AUX (2 << 5) 68#define AD193X_ADC_SERFMT_AUX (2 << 5)
68#define AD193X_ADC_WORD_LEN_MASK 0x3 69#define AD193X_ADC_WORD_LEN_MASK 0x3
69#define AD193X_ADC_CTRL2 0x810 70#define AD193X_ADC_CTRL2 0x810
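The ad193x fixes correct both the TDM format bit and the DAC word-length field, which is two bits wide starting at bit 3, so the value has to be shifted by AD193X_DAC_WORD_LEN_SHFT before being merged under the mask. The usual read-modify-write shape, written out as a small standalone helper with the mask and shift values restated here for illustration:

#include <stdint.h>

#define DAC_WORD_LEN_SHFT 3
#define DAC_WORD_LEN_MASK 0x18	/* two bits at positions 3..4 */

static inline uint8_t set_word_len(uint8_t reg, uint8_t word_len)
{
	reg &= (uint8_t)~DAC_WORD_LEN_MASK;	/* clear only this field  */
	reg |= (uint8_t)((word_len << DAC_WORD_LEN_SHFT) & DAC_WORD_LEN_MASK);
	return reg;				/* other bits untouched   */
}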
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 409d89d1f34c..fbd7eb9e61ce 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -857,6 +857,7 @@ static __devinit int sta32x_i2c_probe(struct i2c_client *i2c,
857 ret = snd_soc_register_codec(&i2c->dev, &sta32x_codec, &sta32x_dai, 1); 857 ret = snd_soc_register_codec(&i2c->dev, &sta32x_codec, &sta32x_dai, 1);
858 if (ret != 0) { 858 if (ret != 0) {
859 dev_err(&i2c->dev, "Failed to register codec (%d)\n", ret); 859 dev_err(&i2c->dev, "Failed to register codec (%d)\n", ret);
860 kfree(sta32x);
860 return ret; 861 return ret;
861 } 862 }
862 863
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 38f38fddd190..d0003cc3bcd6 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -778,11 +778,19 @@ static int __devexit wm8750_spi_remove(struct spi_device *spi)
778 return 0; 778 return 0;
779} 779}
780 780
781static const struct spi_device_id wm8750_spi_ids[] = {
782 { "wm8750", 0 },
783 { "wm8987", 0 },
784 { },
785};
786MODULE_DEVICE_TABLE(spi, wm8750_spi_ids);
787
781static struct spi_driver wm8750_spi_driver = { 788static struct spi_driver wm8750_spi_driver = {
782 .driver = { 789 .driver = {
783 .name = "wm8750-codec", 790 .name = "wm8750-codec",
784 .owner = THIS_MODULE, 791 .owner = THIS_MODULE,
785 }, 792 },
793 .id_table = wm8750_spi_ids,
786 .probe = wm8750_spi_probe, 794 .probe = wm8750_spi_probe,
787 .remove = __devexit_p(wm8750_spi_remove), 795 .remove = __devexit_p(wm8750_spi_remove),
788}; 796};
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 43e3d760766f..4ad8ebd290e3 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2046,8 +2046,13 @@ static int wm8903_probe(struct snd_soc_codec *codec)
2046/* power down chip */ 2046/* power down chip */
2047static int wm8903_remove(struct snd_soc_codec *codec) 2047static int wm8903_remove(struct snd_soc_codec *codec)
2048{ 2048{
2049 struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
2050
2049 wm8903_free_gpio(codec); 2051 wm8903_free_gpio(codec);
2050 wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF); 2052 wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
2053 if (wm8903->irq)
2054 free_irq(wm8903->irq, codec);
2055
2051 return 0; 2056 return 0;
2052} 2057}
2053 2058
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 60d740ebeb5b..1725550c293e 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2221,6 +2221,8 @@ static int sysclk_event(struct snd_soc_dapm_widget *w,
2221 switch (event) { 2221 switch (event) {
2222 case SND_SOC_DAPM_PRE_PMU: 2222 case SND_SOC_DAPM_PRE_PMU:
2223 if (fll) { 2223 if (fll) {
2224 try_wait_for_completion(&wm8962->fll_lock);
2225
2224 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1, 2226 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1,
2225 WM8962_FLL_ENA, WM8962_FLL_ENA); 2227 WM8962_FLL_ENA, WM8962_FLL_ENA);
2226 if (wm8962->irq) { 2228 if (wm8962->irq) {
@@ -2927,10 +2929,6 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
2927 WM8962_BIAS_ENA | 0x180); 2929 WM8962_BIAS_ENA | 0x180);
2928 2930
2929 msleep(5); 2931 msleep(5);
2930
2931 snd_soc_update_bits(codec, WM8962_CLOCKING2,
2932 WM8962_CLKREG_OVD,
2933 WM8962_CLKREG_OVD);
2934 } 2932 }
2935 2933
2936 /* VMID 2*250k */ 2934 /* VMID 2*250k */
@@ -3288,6 +3286,8 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
3288 snd_soc_write(codec, WM8962_FLL_CONTROL_7, fll_div.lambda); 3286 snd_soc_write(codec, WM8962_FLL_CONTROL_7, fll_div.lambda);
3289 snd_soc_write(codec, WM8962_FLL_CONTROL_8, fll_div.n); 3287 snd_soc_write(codec, WM8962_FLL_CONTROL_8, fll_div.n);
3290 3288
3289 try_wait_for_completion(&wm8962->fll_lock);
3290
3291 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1, 3291 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1,
3292 WM8962_FLL_FRAC | WM8962_FLL_REFCLK_SRC_MASK | 3292 WM8962_FLL_FRAC | WM8962_FLL_REFCLK_SRC_MASK |
3293 WM8962_FLL_ENA, fll1); 3293 WM8962_FLL_ENA, fll1);
@@ -3868,6 +3868,10 @@ static int wm8962_probe(struct snd_soc_codec *codec)
3868 */ 3868 */
3869 snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0); 3869 snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0);
3870 3870
3871 /* Ensure we have soft control over all registers */
3872 snd_soc_update_bits(codec, WM8962_CLOCKING2,
3873 WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
3874
3871 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); 3875 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
3872 3876
3873 if (pdata) { 3877 if (pdata) {
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 09e680ae88b2..b393f9fac97a 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2981,6 +2981,7 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
2981 wm8994->hubs.dcs_readback_mode = 1; 2981 wm8994->hubs.dcs_readback_mode = 1;
2982 break; 2982 break;
2983 } 2983 }
2984 break;
2984 2985
2985 case WM8958: 2986 case WM8958:
2986 wm8994->hubs.dcs_readback_mode = 1; 2987 wm8994->hubs.dcs_readback_mode = 1;
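The wm8994.c hunk adds the "break" that was missing from the WM8994 case, which previously fell through into the WM8958 branch. A compact reminder of the pitfall, with placeholder chip names:

#include <stdio.h>

enum chip { CHIP_A, CHIP_B };

void configure(enum chip c)
{
	switch (c) {
	case CHIP_A:
		puts("chip A setup");
		break;		/* without this, CHIP_B setup also runs */
	case CHIP_B:
		puts("chip B setup");
		break;
	}
}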
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index ab8e9d1aaff0..0cdb9d105671 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -420,7 +420,7 @@ static const char *sidetone_hpf_text[] = {
420}; 420};
421 421
422static const struct soc_enum sidetone_hpf = 422static const struct soc_enum sidetone_hpf =
423 SOC_ENUM_SINGLE(WM8996_SIDETONE, 7, 6, sidetone_hpf_text); 423 SOC_ENUM_SINGLE(WM8996_SIDETONE, 7, 7, sidetone_hpf_text);
424 424
425static const char *hpf_mode_text[] = { 425static const char *hpf_mode_text[] = {
426 "HiFi", "Custom", "Voice" 426 "HiFi", "Custom", "Voice"
@@ -988,15 +988,10 @@ SND_SOC_DAPM_MICBIAS("MICB1", WM8996_POWER_MANAGEMENT_1, 8, 0),
988SND_SOC_DAPM_PGA("IN1L PGA", WM8996_POWER_MANAGEMENT_2, 5, 0, NULL, 0), 988SND_SOC_DAPM_PGA("IN1L PGA", WM8996_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
989SND_SOC_DAPM_PGA("IN1R PGA", WM8996_POWER_MANAGEMENT_2, 4, 0, NULL, 0), 989SND_SOC_DAPM_PGA("IN1R PGA", WM8996_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
990 990
991SND_SOC_DAPM_MUX("IN1L Mux", SND_SOC_NOPM, 0, 0, &in1_mux), 991SND_SOC_DAPM_MUX("IN1L Mux", WM8996_POWER_MANAGEMENT_7, 2, 0, &in1_mux),
992SND_SOC_DAPM_MUX("IN1R Mux", SND_SOC_NOPM, 0, 0, &in1_mux), 992SND_SOC_DAPM_MUX("IN1R Mux", WM8996_POWER_MANAGEMENT_7, 3, 0, &in1_mux),
993SND_SOC_DAPM_MUX("IN2L Mux", SND_SOC_NOPM, 0, 0, &in2_mux), 993SND_SOC_DAPM_MUX("IN2L Mux", WM8996_POWER_MANAGEMENT_7, 6, 0, &in2_mux),
994SND_SOC_DAPM_MUX("IN2R Mux", SND_SOC_NOPM, 0, 0, &in2_mux), 994SND_SOC_DAPM_MUX("IN2R Mux", WM8996_POWER_MANAGEMENT_7, 7, 0, &in2_mux),
995
996SND_SOC_DAPM_PGA("IN1L", WM8996_POWER_MANAGEMENT_7, 2, 0, NULL, 0),
997SND_SOC_DAPM_PGA("IN1R", WM8996_POWER_MANAGEMENT_7, 3, 0, NULL, 0),
998SND_SOC_DAPM_PGA("IN2L", WM8996_POWER_MANAGEMENT_7, 6, 0, NULL, 0),
999SND_SOC_DAPM_PGA("IN2R", WM8996_POWER_MANAGEMENT_7, 7, 0, NULL, 0),
1000 995
1001SND_SOC_DAPM_SUPPLY("DMIC2", WM8996_POWER_MANAGEMENT_7, 9, 0, NULL, 0), 996SND_SOC_DAPM_SUPPLY("DMIC2", WM8996_POWER_MANAGEMENT_7, 9, 0, NULL, 0),
1002SND_SOC_DAPM_SUPPLY("DMIC1", WM8996_POWER_MANAGEMENT_7, 8, 0, NULL, 0), 997SND_SOC_DAPM_SUPPLY("DMIC1", WM8996_POWER_MANAGEMENT_7, 8, 0, NULL, 0),
@@ -1213,6 +1208,16 @@ static const struct snd_soc_dapm_route wm8996_dapm_routes[] = {
1213 { "AIF2RX0", NULL, "AIFCLK" }, 1208 { "AIF2RX0", NULL, "AIFCLK" },
1214 { "AIF2RX1", NULL, "AIFCLK" }, 1209 { "AIF2RX1", NULL, "AIFCLK" },
1215 1210
1211 { "AIF1TX0", NULL, "AIFCLK" },
1212 { "AIF1TX1", NULL, "AIFCLK" },
1213 { "AIF1TX2", NULL, "AIFCLK" },
1214 { "AIF1TX3", NULL, "AIFCLK" },
1215 { "AIF1TX4", NULL, "AIFCLK" },
1216 { "AIF1TX5", NULL, "AIFCLK" },
1217
1218 { "AIF2TX0", NULL, "AIFCLK" },
1219 { "AIF2TX1", NULL, "AIFCLK" },
1220
1216 { "DSP1RXL", NULL, "SYSDSPCLK" }, 1221 { "DSP1RXL", NULL, "SYSDSPCLK" },
1217 { "DSP1RXR", NULL, "SYSDSPCLK" }, 1222 { "DSP1RXR", NULL, "SYSDSPCLK" },
1218 { "DSP2RXL", NULL, "SYSDSPCLK" }, 1223 { "DSP2RXL", NULL, "SYSDSPCLK" },
@@ -2106,6 +2111,9 @@ static int wm8996_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
2106 2111
2107 snd_soc_write(codec, WM8996_FLL_EFS_1, fll_div.lambda); 2112 snd_soc_write(codec, WM8996_FLL_EFS_1, fll_div.lambda);
2108 2113
2114 /* Clear any pending completions (eg, from failed startups) */
2115 try_wait_for_completion(&wm8996->fll_lock);
2116
2109 snd_soc_update_bits(codec, WM8996_FLL_CONTROL_1, 2117 snd_soc_update_bits(codec, WM8996_FLL_CONTROL_1,
2110 WM8996_FLL_ENA, WM8996_FLL_ENA); 2118 WM8996_FLL_ENA, WM8996_FLL_ENA);
2111 2119
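
The sidetone_hpf change above corrects an enum item count that did not match the sidetone_hpf_text table (the array itself sits just above the quoted context). A hedged sketch, assuming the SOC_ENUM_SINGLE(reg, shift, max, texts) layout used in this era of ASoC, of how sizing the enum from the array avoids the off-by-one; register and strings are illustrative:

	#include <linux/kernel.h>
	#include <sound/soc.h>

	static const char *example_hpf_text[] = {
		"Mode 1", "Mode 2", "Mode 3", "Mode 4", "Mode 5", "Mode 6", "Mode 7",
	};

	/* ARRAY_SIZE() keeps the reported item count in step with the table */
	static const struct soc_enum example_hpf_enum =
		SOC_ENUM_SINGLE(0x20, 7, ARRAY_SIZE(example_hpf_text),
				example_hpf_text);
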
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 56efa0c1c9a9..099614e16651 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -385,14 +385,14 @@ static int ep93xx_i2s_probe(struct platform_device *pdev)
385 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 385 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
386 if (!res) { 386 if (!res) {
387 err = -ENODEV; 387 err = -ENODEV;
388 goto fail; 388 goto fail_free_info;
389 } 389 }
390 390
391 info->mem = request_mem_region(res->start, resource_size(res), 391 info->mem = request_mem_region(res->start, resource_size(res),
392 pdev->name); 392 pdev->name);
393 if (!info->mem) { 393 if (!info->mem) {
394 err = -EBUSY; 394 err = -EBUSY;
395 goto fail; 395 goto fail_free_info;
396 } 396 }
397 397
398 info->regs = ioremap(info->mem->start, resource_size(info->mem)); 398 info->regs = ioremap(info->mem->start, resource_size(info->mem));
@@ -435,6 +435,7 @@ fail_unmap_mem:
435 iounmap(info->regs); 435 iounmap(info->regs);
436fail_release_mem: 436fail_release_mem:
437 release_mem_region(info->mem->start, resource_size(info->mem)); 437 release_mem_region(info->mem->start, resource_size(info->mem));
438fail_free_info:
438 kfree(info); 439 kfree(info);
439fail: 440fail:
440 return err; 441 return err;
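
The new fail_free_info label follows the usual probe() unwind convention: each label undoes exactly the resources acquired before the jump, in reverse order of acquisition, so an early failure no longer skips the kfree(). A minimal sketch of the pattern with an illustrative, hypothetical driver and resource set:

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_info {
		void __iomem *regs;	/* hypothetical driver state */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_info *info;
		struct resource *res;
		int err;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			err = -ENODEV;
			goto fail_free_info;	/* only the allocation to undo */
		}

		info->regs = ioremap(res->start, resource_size(res));
		if (!info->regs) {
			err = -ENXIO;
			goto fail_free_info;
		}

		platform_set_drvdata(pdev, info);
		return 0;

	fail_free_info:
		kfree(info);
		return err;
	}
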
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 732208c8c0b4..cb50598338e9 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -879,10 +879,12 @@ static struct device_node *find_ssi_node(struct device_node *dma_channel_np)
879 * assume that device_node pointers are a valid comparison. 879 * assume that device_node pointers are a valid comparison.
880 */ 880 */
881 np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0); 881 np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0);
882 of_node_put(np);
882 if (np == dma_channel_np) 883 if (np == dma_channel_np)
883 return ssi_np; 884 return ssi_np;
884 885
885 np = of_parse_phandle(ssi_np, "fsl,capture-dma", 0); 886 np = of_parse_phandle(ssi_np, "fsl,capture-dma", 0);
887 of_node_put(np);
886 if (np == dma_channel_np) 888 if (np == dma_channel_np)
887 return ssi_np; 889 return ssi_np;
888 } 890 }
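
of_parse_phandle() takes a reference on the node it returns; when the pointer is only used as a comparison key and never dereferenced, the reference can be dropped immediately, which is what the added of_node_put() calls do. A hedged sketch of that idiom with an illustrative property name:

	#include <linux/of.h>
	#include <linux/types.h>

	static bool node_references(struct device_node *np,
				    struct device_node *target)
	{
		struct device_node *ref;

		ref = of_parse_phandle(np, "example,dma-handle", 0);
		of_node_put(ref);	/* of_node_put(NULL) is a no-op */

		return ref == target;	/* pointer value is only compared */
	}
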
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index fd0dc46afc34..5c6c2457386e 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
369 .pcm_free = &psc_dma_free, 369 .pcm_free = &psc_dma_free,
370}; 370};
371 371
372static int mpc5200_hpcd_probe(struct of_device *op) 372static int mpc5200_hpcd_probe(struct platform_device *op)
373{ 373{
374 phys_addr_t fifo; 374 phys_addr_t fifo;
375 struct psc_dma *psc_dma; 375 struct psc_dma *psc_dma;
@@ -487,7 +487,7 @@ out_unmap:
487 return ret; 487 return ret;
488} 488}
489 489
490static int mpc5200_hpcd_remove(struct of_device *op) 490static int mpc5200_hpcd_remove(struct platform_device *op)
491{ 491{
492 struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); 492 struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);
493 493
@@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
519static struct platform_driver mpc5200_hpcd_of_driver = { 519static struct platform_driver mpc5200_hpcd_of_driver = {
520 .probe = mpc5200_hpcd_probe, 520 .probe = mpc5200_hpcd_probe,
521 .remove = mpc5200_hpcd_remove, 521 .remove = mpc5200_hpcd_remove,
522 .dev = { 522 .driver = {
523 .owner = THIS_MODULE, 523 .owner = THIS_MODULE,
524 .name = "mpc5200-pcm-audio", 524 .name = "mpc5200-pcm-audio",
525 .of_match_table = mpc5200_hpcd_match, 525 .of_match_table = mpc5200_hpcd_match,
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index a19297959587..358f0baaf71b 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -345,8 +345,10 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
345 } 345 }
346 346
347 machine_data = kzalloc(sizeof(struct mpc8610_hpcd_data), GFP_KERNEL); 347 machine_data = kzalloc(sizeof(struct mpc8610_hpcd_data), GFP_KERNEL);
348 if (!machine_data) 348 if (!machine_data) {
349 return -ENOMEM; 349 ret = -ENOMEM;
350 goto error_alloc;
351 }
350 352
351 machine_data->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev); 353 machine_data->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev);
352 machine_data->dai[0].ops = &mpc8610_hpcd_ops; 354 machine_data->dai[0].ops = &mpc8610_hpcd_ops;
@@ -494,7 +496,7 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
494 ret = platform_device_add(sound_device); 496 ret = platform_device_add(sound_device);
495 if (ret) { 497 if (ret) {
496 dev_err(&pdev->dev, "platform device add failed\n"); 498 dev_err(&pdev->dev, "platform device add failed\n");
497 goto error; 499 goto error_sound;
498 } 500 }
499 dev_set_drvdata(&pdev->dev, sound_device); 501 dev_set_drvdata(&pdev->dev, sound_device);
500 502
@@ -502,14 +504,12 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
502 504
503 return 0; 505 return 0;
504 506
507error_sound:
508 platform_device_unregister(sound_device);
505error: 509error:
506 of_node_put(codec_np);
507
508 if (sound_device)
509 platform_device_unregister(sound_device);
510
511 kfree(machine_data); 510 kfree(machine_data);
512 511error_alloc:
512 of_node_put(codec_np);
513 return ret; 513 return ret;
514} 514}
515 515
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 8fa4d5f8eda1..fcb862eb0c73 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -297,8 +297,10 @@ static int get_dma_channel(struct device_node *ssi_np,
297 * dai->platform name should already point to an allocated buffer. 297 * dai->platform name should already point to an allocated buffer.
298 */ 298 */
299 ret = of_address_to_resource(dma_channel_np, 0, &res); 299 ret = of_address_to_resource(dma_channel_np, 0, &res);
300 if (ret) 300 if (ret) {
301 of_node_put(dma_channel_np);
301 return ret; 302 return ret;
303 }
302 snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s", 304 snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s",
303 (unsigned long long) res.start, dma_channel_np->name); 305 (unsigned long long) res.start, dma_channel_np->name);
304 306
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index 309c59e6fb6c..7945625e0e08 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -240,7 +240,6 @@ static int ssi_irq = 0;
240 240
241static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) 241static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
242{ 242{
243 struct snd_card *card = rtd->card->snd_card;
244 struct snd_soc_dai *dai = rtd->cpu_dai; 243 struct snd_soc_dai *dai = rtd->cpu_dai;
245 struct snd_pcm *pcm = rtd->pcm; 244 struct snd_pcm *pcm = rtd->pcm;
246 int ret; 245 int ret;
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index a33fc51f363b..d0bcf3fcea01 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
424 if (!priv->mem) { 424 if (!priv->mem) {
425 dev_err(&pdev->dev, "request_mem_region failed\n"); 425 dev_err(&pdev->dev, "request_mem_region failed\n");
426 err = -EBUSY; 426 err = -EBUSY;
427 goto error; 427 goto err_alloc;
428 } 428 }
429 429
430 priv->io = ioremap(priv->mem->start, SZ_16K); 430 priv->io = ioremap(priv->mem->start, SZ_16K);
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 30fe0d0efe1c..0aa475f92efa 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -514,7 +514,7 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
514 } 514 }
515 515
516 /* Set codec bias level */ 516 /* Set codec bias level */
517 ams_delta_set_bias_level(card, SND_SOC_BIAS_STANDBY); 517 ams_delta_set_bias_level(card, dapm, SND_SOC_BIAS_STANDBY);
518 518
519 /* Add hook switch - can be used to control the codec from userspace 519 /* Add hook switch - can be used to control the codec from userspace
520 * even if line discipline fails */ 520 * even if line discipline fails */
@@ -649,7 +649,9 @@ static void __exit ams_delta_module_exit(void)
649 ams_delta_hook_switch_gpios); 649 ams_delta_hook_switch_gpios);
650 650
651 /* Keep modem power on */ 651 /* Keep modem power on */
652 ams_delta_set_bias_level(&ams_delta_audio_card, SND_SOC_BIAS_STANDBY); 652 ams_delta_set_bias_level(&ams_delta_audio_card,
653 &ams_delta_audio_card.rtd[0].codec->dapm,
654 SND_SOC_BIAS_STANDBY);
653 655
654 platform_device_unregister(cx20442_platform_device); 656 platform_device_unregister(cx20442_platform_device);
655 platform_device_unregister(ams_delta_audio_platform_device); 657 platform_device_unregister(ams_delta_audio_platform_device);
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 83d213bfd3d1..62e292f49313 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Jarkko Nikula <jhnikula@gmail.com> 6 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -402,6 +402,6 @@ static void __exit n810_soc_exit(void)
402module_init(n810_soc_init); 402module_init(n810_soc_init);
403module_exit(n810_soc_exit); 403module_exit(n810_soc_exit);
404 404
405MODULE_AUTHOR("Jarkko Nikula <jhnikula@gmail.com>"); 405MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
406MODULE_DESCRIPTION("ALSA SoC Nokia N810"); 406MODULE_DESCRIPTION("ALSA SoC Nokia N810");
407MODULE_LICENSE("GPL"); 407MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 07b772357244..ebcc2d4d2b18 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Jarkko Nikula <jhnikula@gmail.com> 6 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
7 * Peter Ujfalusi <peter.ujfalusi@ti.com> 7 * Peter Ujfalusi <peter.ujfalusi@ti.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -780,6 +780,6 @@ static void __exit snd_omap_mcbsp_exit(void)
780} 780}
781module_exit(snd_omap_mcbsp_exit); 781module_exit(snd_omap_mcbsp_exit);
782 782
783MODULE_AUTHOR("Jarkko Nikula <jhnikula@gmail.com>"); 783MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
784MODULE_DESCRIPTION("OMAP I2S SoC Interface"); 784MODULE_DESCRIPTION("OMAP I2S SoC Interface");
785MODULE_LICENSE("GPL"); 785MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index 9a7dedd6f5a9..65cde9d3807b 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Jarkko Nikula <jhnikula@gmail.com> 6 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
7 * Peter Ujfalusi <peter.ujfalusi@ti.com> 7 * Peter Ujfalusi <peter.ujfalusi@ti.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index b2f5751edae3..9b5c88ac35b9 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Jarkko Nikula <jhnikula@gmail.com> 6 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
7 * Peter Ujfalusi <peter.ujfalusi@ti.com> 7 * Peter Ujfalusi <peter.ujfalusi@ti.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -436,6 +436,6 @@ static void __exit snd_omap_pcm_exit(void)
436} 436}
437module_exit(snd_omap_pcm_exit); 437module_exit(snd_omap_pcm_exit);
438 438
439MODULE_AUTHOR("Jarkko Nikula <jhnikula@gmail.com>"); 439MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
440MODULE_DESCRIPTION("OMAP PCM DMA module"); 440MODULE_DESCRIPTION("OMAP PCM DMA module");
441MODULE_LICENSE("GPL"); 441MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-pcm.h b/sound/soc/omap/omap-pcm.h
index a0ed1dbb52d6..f95fe3064172 100644
--- a/sound/soc/omap/omap-pcm.h
+++ b/sound/soc/omap/omap-pcm.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Jarkko Nikula <jhnikula@gmail.com> 6 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
7 * Peter Ujfalusi <peter.ujfalusi@ti.com> 7 * Peter Ujfalusi <peter.ujfalusi@ti.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 0aae998b6540..893300a53bab 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Contact: Peter Ujfalusi <peter.ujfalusi@ti.com> 6 * Contact: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 * Eduardo Valentin <eduardo.valentin@nokia.com> 7 * Eduardo Valentin <eduardo.valentin@nokia.com>
8 * Jarkko Nikula <jhnikula@gmail.com> 8 * Jarkko Nikula <jarkko.nikula@bitmer.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index b99091fc34eb..65f980ef2870 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -185,6 +185,7 @@ config SND_SOC_SPEYSIDE
185 select SND_SAMSUNG_I2S 185 select SND_SAMSUNG_I2S
186 select SND_SOC_WM8996 186 select SND_SOC_WM8996
187 select SND_SOC_WM9081 187 select SND_SOC_WM9081
188 select SND_SOC_WM1250_EV1
188 189
189config SND_SOC_SPEYSIDE_WM8962 190config SND_SOC_SPEYSIDE_WM8962
190 tristate "Audio support for Wolfson Speyside with WM8962" 191 tristate "Audio support for Wolfson Speyside with WM8962"
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 9eb3b12eb72f..8509d3c4366e 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -1,5 +1,6 @@
1# S3c24XX Platform Support 1# S3c24XX Platform Support
2snd-soc-s3c24xx-objs := dma.o 2snd-soc-s3c24xx-objs := dma.o
3snd-soc-idma-objs := idma.o
3snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o 4snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
4snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o 5snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
5snd-soc-ac97-objs := ac97.o 6snd-soc-ac97-objs := ac97.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
16obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o 17obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
17obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o 18obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o
18obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o 19obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o
20obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-idma.o
19 21
20# S3C24XX Machine Support 22# S3C24XX Machine Support
21snd-soc-jive-wm8750-objs := jive_wm8750.o 23snd-soc-jive-wm8750-objs := jive_wm8750.o
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index 241f55d00660..c6c65892294e 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -13,6 +13,7 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/types.h>
16#include <linux/gpio.h> 17#include <linux/gpio.h>
17 18
18#include <sound/soc.h> 19#include <sound/soc.h>
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
new file mode 100644
index 000000000000..ebde0740ab19
--- /dev/null
+++ b/sound/soc/samsung/idma.c
@@ -0,0 +1,453 @@
1/*
2 * sound/soc/samsung/idma.c
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * I2S0's Internal DMA driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/interrupt.h>
15#include <linux/platform_device.h>
16#include <linux/dma-mapping.h>
17#include <linux/slab.h>
18#include <sound/pcm.h>
19#include <sound/pcm_params.h>
20#include <sound/soc.h>
21
22#include "i2s.h"
23#include "idma.h"
24#include "dma.h"
25#include "i2s-regs.h"
26
27#define ST_RUNNING (1<<0)
28#define ST_OPENED (1<<1)
29
30static const struct snd_pcm_hardware idma_hardware = {
31 .info = SNDRV_PCM_INFO_INTERLEAVED |
32 SNDRV_PCM_INFO_BLOCK_TRANSFER |
33 SNDRV_PCM_INFO_MMAP |
34 SNDRV_PCM_INFO_MMAP_VALID |
35 SNDRV_PCM_INFO_PAUSE |
36 SNDRV_PCM_INFO_RESUME,
37 .formats = SNDRV_PCM_FMTBIT_S16_LE |
38 SNDRV_PCM_FMTBIT_U16_LE |
39 SNDRV_PCM_FMTBIT_S24_LE |
40 SNDRV_PCM_FMTBIT_U24_LE |
41 SNDRV_PCM_FMTBIT_U8 |
42 SNDRV_PCM_FMTBIT_S8,
43 .channels_min = 2,
44 .channels_max = 2,
45 .buffer_bytes_max = MAX_IDMA_BUFFER,
46 .period_bytes_min = 128,
47 .period_bytes_max = MAX_IDMA_PERIOD,
48 .periods_min = 1,
49 .periods_max = 2,
50};
51
52struct idma_ctrl {
53 spinlock_t lock;
54 int state;
55 dma_addr_t start;
56 dma_addr_t pos;
57 dma_addr_t end;
58 dma_addr_t period;
59 dma_addr_t periodsz;
60 void *token;
61 void (*cb)(void *dt, int bytes_xfer);
62};
63
64static struct idma_info {
65 spinlock_t lock;
66 void __iomem *regs;
67 dma_addr_t lp_tx_addr;
68} idma;
69
70static void idma_getpos(dma_addr_t *src)
71{
72 *src = idma.lp_tx_addr +
73 (readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
74}
75
76static int idma_enqueue(struct snd_pcm_substream *substream)
77{
78 struct snd_pcm_runtime *runtime = substream->runtime;
79 struct idma_ctrl *prtd = substream->runtime->private_data;
80 u32 val;
81
82 spin_lock(&prtd->lock);
83 prtd->token = (void *) substream;
84 spin_unlock(&prtd->lock);
85
86 /* Internal DMA Level0 Interrupt Address */
87 val = idma.lp_tx_addr + prtd->periodsz;
88 writel(val, idma.regs + I2SLVL0ADDR);
89
90 /* Start address0 of I2S internal DMA operation. */
91 val = idma.lp_tx_addr;
92 writel(val, idma.regs + I2SSTR0);
93
94 /*
95 * Transfer block size for I2S internal DMA.
 96 * The transfer size must be decided before starting the DMA operation
97 */
98 val = readl(idma.regs + I2SSIZE);
99 val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
100 val |= (((runtime->dma_bytes >> 2) &
101 I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
102 writel(val, idma.regs + I2SSIZE);
103
104 val = readl(idma.regs + I2SAHB);
105 val |= AHB_INTENLVL0;
106 writel(val, idma.regs + I2SAHB);
107
108 return 0;
109}
110
111static void idma_setcallbk(struct snd_pcm_substream *substream,
112 void (*cb)(void *, int))
113{
114 struct idma_ctrl *prtd = substream->runtime->private_data;
115
116 spin_lock(&prtd->lock);
117 prtd->cb = cb;
118 spin_unlock(&prtd->lock);
119}
120
121static void idma_control(int op)
122{
123 u32 val = readl(idma.regs + I2SAHB);
124
125 spin_lock(&idma.lock);
126
127 switch (op) {
128 case LPAM_DMA_START:
129 val |= (AHB_INTENLVL0 | AHB_DMAEN);
130 break;
131 case LPAM_DMA_STOP:
132 val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
133 break;
134 default:
135 spin_unlock(&idma.lock);
136 return;
137 }
138
139 writel(val, idma.regs + I2SAHB);
140 spin_unlock(&idma.lock);
141}
142
143static void idma_done(void *id, int bytes_xfer)
144{
145 struct snd_pcm_substream *substream = id;
146 struct idma_ctrl *prtd = substream->runtime->private_data;
147
148 if (prtd && (prtd->state & ST_RUNNING))
149 snd_pcm_period_elapsed(substream);
150}
151
152static int idma_hw_params(struct snd_pcm_substream *substream,
153 struct snd_pcm_hw_params *params)
154{
155 struct snd_pcm_runtime *runtime = substream->runtime;
156 struct idma_ctrl *prtd = substream->runtime->private_data;
157 u32 mod = readl(idma.regs + I2SMOD);
158 u32 ahb = readl(idma.regs + I2SAHB);
159
160 ahb |= (AHB_DMARLD | AHB_INTMASK);
161 mod |= MOD_TXS_IDMA;
162 writel(ahb, idma.regs + I2SAHB);
163 writel(mod, idma.regs + I2SMOD);
164
165 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
166 runtime->dma_bytes = params_buffer_bytes(params);
167
168 prtd->start = prtd->pos = runtime->dma_addr;
169 prtd->period = params_periods(params);
170 prtd->periodsz = params_period_bytes(params);
171 prtd->end = runtime->dma_addr + runtime->dma_bytes;
172
173 idma_setcallbk(substream, idma_done);
174
175 return 0;
176}
177
178static int idma_hw_free(struct snd_pcm_substream *substream)
179{
180 snd_pcm_set_runtime_buffer(substream, NULL);
181
182 return 0;
183}
184
185static int idma_prepare(struct snd_pcm_substream *substream)
186{
187 struct idma_ctrl *prtd = substream->runtime->private_data;
188
189 prtd->pos = prtd->start;
190
191 /* flush the DMA channel */
192 idma_control(LPAM_DMA_STOP);
193 idma_enqueue(substream);
194
195 return 0;
196}
197
198static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
199{
200 struct idma_ctrl *prtd = substream->runtime->private_data;
201 int ret = 0;
202
203 spin_lock(&prtd->lock);
204
205 switch (cmd) {
206 case SNDRV_PCM_TRIGGER_RESUME:
207 case SNDRV_PCM_TRIGGER_START:
208 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
209 prtd->state |= ST_RUNNING;
210 idma_control(LPAM_DMA_START);
211 break;
212
213 case SNDRV_PCM_TRIGGER_SUSPEND:
214 case SNDRV_PCM_TRIGGER_STOP:
215 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
216 prtd->state &= ~ST_RUNNING;
217 idma_control(LPAM_DMA_STOP);
218 break;
219
220 default:
221 ret = -EINVAL;
222 break;
223 }
224
225 spin_unlock(&prtd->lock);
226
227 return ret;
228}
229
230static snd_pcm_uframes_t
231 idma_pointer(struct snd_pcm_substream *substream)
232{
233 struct snd_pcm_runtime *runtime = substream->runtime;
234 struct idma_ctrl *prtd = runtime->private_data;
235 dma_addr_t src;
236 unsigned long res;
237
238 spin_lock(&prtd->lock);
239
240 idma_getpos(&src);
241 res = src - prtd->start;
242
243 spin_unlock(&prtd->lock);
244
245 return bytes_to_frames(substream->runtime, res);
246}
247
248static int idma_mmap(struct snd_pcm_substream *substream,
249 struct vm_area_struct *vma)
250{
251 struct snd_pcm_runtime *runtime = substream->runtime;
252 unsigned long size, offset;
253 int ret;
254
255 /* From snd_pcm_lib_mmap_iomem */
256 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
257 vma->vm_flags |= VM_IO;
258 size = vma->vm_end - vma->vm_start;
259 offset = vma->vm_pgoff << PAGE_SHIFT;
260 ret = io_remap_pfn_range(vma, vma->vm_start,
261 (runtime->dma_addr + offset) >> PAGE_SHIFT,
262 size, vma->vm_page_prot);
263
264 return ret;
265}
266
267static irqreturn_t iis_irq(int irqno, void *dev_id)
268{
269 struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
270 u32 iiscon, iisahb, val, addr;
271
272 iisahb = readl(idma.regs + I2SAHB);
273 iiscon = readl(idma.regs + I2SCON);
274
275 val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;
276
277 if (val) {
278 iisahb |= val;
279 writel(iisahb, idma.regs + I2SAHB);
280
281 addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
282 addr += prtd->periodsz;
283 addr %= (prtd->end - prtd->start);
284 addr += idma.lp_tx_addr;
285
286 writel(addr, idma.regs + I2SLVL0ADDR);
287
288 if (prtd->cb)
289 prtd->cb(prtd->token, prtd->period);
290 }
291
292 return IRQ_HANDLED;
293}
294
295static int idma_open(struct snd_pcm_substream *substream)
296{
297 struct snd_pcm_runtime *runtime = substream->runtime;
298 struct idma_ctrl *prtd;
299 int ret;
300
301 snd_soc_set_runtime_hwparams(substream, &idma_hardware);
302
303 prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
304 if (prtd == NULL)
305 return -ENOMEM;
306
307 ret = request_irq(IRQ_I2S0, iis_irq, 0, "i2s", prtd);
308 if (ret < 0) {
 309 pr_err("failed to claim i2s irq, ret = %d\n", ret);
310 kfree(prtd);
311 return ret;
312 }
313
314 spin_lock_init(&prtd->lock);
315
316 runtime->private_data = prtd;
317
318 return 0;
319}
320
321static int idma_close(struct snd_pcm_substream *substream)
322{
323 struct snd_pcm_runtime *runtime = substream->runtime;
324 struct idma_ctrl *prtd = runtime->private_data;
325
326 free_irq(IRQ_I2S0, prtd);
327
328 if (!prtd)
329 pr_err("idma_close called with prtd == NULL\n");
330
331 kfree(prtd);
332
333 return 0;
334}
335
336static struct snd_pcm_ops idma_ops = {
337 .open = idma_open,
338 .close = idma_close,
339 .ioctl = snd_pcm_lib_ioctl,
340 .trigger = idma_trigger,
341 .pointer = idma_pointer,
342 .mmap = idma_mmap,
343 .hw_params = idma_hw_params,
344 .hw_free = idma_hw_free,
345 .prepare = idma_prepare,
346};
347
348static void idma_free(struct snd_pcm *pcm)
349{
350 struct snd_pcm_substream *substream;
351 struct snd_dma_buffer *buf;
352
353 substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
354 if (!substream)
355 return;
356
357 buf = &substream->dma_buffer;
358 if (!buf->area)
359 return;
360
361 iounmap(buf->area);
362
363 buf->area = NULL;
364 buf->addr = 0;
365}
366
367static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
368{
369 struct snd_pcm_substream *substream = pcm->streams[stream].substream;
370 struct snd_dma_buffer *buf = &substream->dma_buffer;
371
372 buf->dev.dev = pcm->card->dev;
373 buf->private_data = NULL;
374
375 /* Assign PCM buffer pointers */
376 buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
377 buf->addr = idma.lp_tx_addr;
378 buf->bytes = idma_hardware.buffer_bytes_max;
379 buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes);
380
381 return 0;
382}
383
384static u64 idma_mask = DMA_BIT_MASK(32);
385
386static int idma_new(struct snd_soc_pcm_runtime *rtd)
387{
388 struct snd_card *card = rtd->card->snd_card;
389 struct snd_soc_dai *dai = rtd->cpu_dai;
390 struct snd_pcm *pcm = rtd->pcm;
391 int ret = 0;
392
393 if (!card->dev->dma_mask)
394 card->dev->dma_mask = &idma_mask;
395 if (!card->dev->coherent_dma_mask)
396 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
397
398 if (dai->driver->playback.channels_min)
399 ret = preallocate_idma_buffer(pcm,
400 SNDRV_PCM_STREAM_PLAYBACK);
401
402 return ret;
403}
404
405void idma_reg_addr_init(void *regs, dma_addr_t addr)
406{
407 spin_lock_init(&idma.lock);
408 idma.regs = regs;
409 idma.lp_tx_addr = addr;
410}
411
412struct snd_soc_platform_driver asoc_idma_platform = {
413 .ops = &idma_ops,
414 .pcm_new = idma_new,
415 .pcm_free = idma_free,
416};
417
418static int __devinit asoc_idma_platform_probe(struct platform_device *pdev)
419{
420 return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
421}
422
423static int __devexit asoc_idma_platform_remove(struct platform_device *pdev)
424{
425 snd_soc_unregister_platform(&pdev->dev);
426 return 0;
427}
428
429static struct platform_driver asoc_idma_driver = {
430 .driver = {
431 .name = "samsung-idma",
432 .owner = THIS_MODULE,
433 },
434
435 .probe = asoc_idma_platform_probe,
436 .remove = __devexit_p(asoc_idma_platform_remove),
437};
438
439static int __init asoc_idma_init(void)
440{
441 return platform_driver_register(&asoc_idma_driver);
442}
443module_init(asoc_idma_init);
444
445static void __exit asoc_idma_exit(void)
446{
447 platform_driver_unregister(&asoc_idma_driver);
448}
449module_exit(asoc_idma_exit);
450
451MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
452MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
453MODULE_LICENSE("GPL");
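
In iis_irq() above, the next level-0 interrupt address is computed by converting the current address to a buffer offset, advancing one period, and wrapping modulo the buffer size before rebasing onto lp_tx_addr. A plain-C sketch of that arithmetic (unsigned long stands in for dma_addr_t; values are illustrative):

	/* minimal sketch of the wrap-around step performed in iis_irq() */
	static unsigned long next_lvl0_addr(unsigned long lvl0,
					    unsigned long lp_tx_addr,
					    unsigned long buf_bytes,
					    unsigned long period)
	{
		unsigned long off = lvl0 - lp_tx_addr;	/* offset into buffer */

		off = (off + period) % buf_bytes;	/* advance and wrap */

		return lp_tx_addr + off;
	}
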
diff --git a/sound/soc/samsung/idma.h b/sound/soc/samsung/idma.h
new file mode 100644
index 000000000000..48273216166e
--- /dev/null
+++ b/sound/soc/samsung/idma.h
@@ -0,0 +1,26 @@
1/*
2 * sound/soc/samsung/idma.h
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd
5 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#ifndef __SND_SOC_SAMSUNG_IDMA_H_
15#define __SND_SOC_SAMSUNG_IDMA_H_
16
17extern void idma_reg_addr_init(void *regs, dma_addr_t addr);
18
19/* dma_state */
20#define LPAM_DMA_STOP 0
21#define LPAM_DMA_START 1
22
23#define MAX_IDMA_PERIOD (128 * 1024)
24#define MAX_IDMA_BUFFER (160 * 1024)
25
26#endif /* __SND_SOC_SAMSUNG_IDMA_H_ */
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 3b53ad54bc33..14eb6ea69e7c 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -131,7 +131,7 @@ static struct snd_soc_dai_link jive_dai = {
131 .cpu_dai_name = "s3c2412-i2s", 131 .cpu_dai_name = "s3c2412-i2s",
132 .codec_dai_name = "wm8750-hifi", 132 .codec_dai_name = "wm8750-hifi",
133 .platform_name = "samsung-audio", 133 .platform_name = "samsung-audio",
134 .codec_name = "wm8750-codec.0-0x1a", 134 .codec_name = "wm8750-codec.0-001a",
135 .init = jive_wm8750_init, 135 .init = jive_wm8750_init,
136 .ops = &jive_ops, 136 .ops = &jive_ops,
137}; 137};
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 1e574a5d440d..bc8c1676459f 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/types.h>
20#include <linux/gpio.h> 21#include <linux/gpio.h>
21 22
22#include <sound/soc.h> 23#include <sound/soc.h>
diff --git a/sound/soc/samsung/speyside_wm8962.c b/sound/soc/samsung/speyside_wm8962.c
index 8ac42bf82090..72535f2daaf2 100644
--- a/sound/soc/samsung/speyside_wm8962.c
+++ b/sound/soc/samsung/speyside_wm8962.c
@@ -23,6 +23,9 @@ static int speyside_wm8962_set_bias_level(struct snd_soc_card *card,
23 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 23 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
24 int ret; 24 int ret;
25 25
26 if (dapm->dev != codec_dai->dev)
27 return 0;
28
26 switch (level) { 29 switch (level) {
27 case SND_SOC_BIAS_PREPARE: 30 case SND_SOC_BIAS_PREPARE:
28 if (dapm->bias_level == SND_SOC_BIAS_STANDBY) { 31 if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
@@ -37,7 +40,7 @@ static int speyside_wm8962_set_bias_level(struct snd_soc_card *card,
37 44100 * 256, 40 44100 * 256,
38 SND_SOC_CLOCK_IN); 41 SND_SOC_CLOCK_IN);
39 if (ret < 0) { 42 if (ret < 0) {
40 pr_err("Failed to set SYSCLK: %d\n"); 43 pr_err("Failed to set SYSCLK: %d\n", ret);
41 return ret; 44 return ret;
42 } 45 }
43 } 46 }
@@ -57,6 +60,9 @@ static int speyside_wm8962_set_bias_level_post(struct snd_soc_card *card,
57 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 60 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
58 int ret; 61 int ret;
59 62
63 if (dapm->dev != codec_dai->dev)
64 return 0;
65
60 switch (level) { 66 switch (level) {
61 case SND_SOC_BIAS_STANDBY: 67 case SND_SOC_BIAS_STANDBY:
62 ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK, 68 ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK,
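
The dapm->dev checks above are needed because card-level bias callbacks are now invoked for each DAPM context on the card; a machine driver that only manages the CODEC's clocking must return early for the other contexts. A hedged sketch of that filter, using the same card/rtd accessors as the code above (the body comment marks where the real handling would go):

	#include <sound/soc.h>
	#include <sound/soc-dapm.h>

	static int example_set_bias_level(struct snd_soc_card *card,
					  struct snd_soc_dapm_context *dapm,
					  enum snd_soc_bias_level level)
	{
		struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;

		if (dapm->dev != codec_dai->dev)
			return 0;	/* another device's context */

		/* CODEC-specific clock/bias handling would follow here */
		return 0;
	}
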
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d9f8aded51f3..20b7f3b003a3 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); 203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
204 for (i = 0; i < rbnode->blklen; ++i) { 204 for (i = 0; i < rbnode->blklen; ++i) {
205 regtmp = rbnode->base_reg + i; 205 regtmp = rbnode->base_reg + i;
206 WARN_ON(codec->writable_register &&
207 codec->writable_register(codec, regtmp));
208 val = snd_soc_rbtree_get_register(rbnode, i); 206 val = snd_soc_rbtree_get_register(rbnode, i);
209 def = snd_soc_get_cache_val(codec->reg_def_copy, i, 207 def = snd_soc_get_cache_val(codec->reg_def_copy, i,
210 rbnode->word_size); 208 rbnode->word_size);
211 if (val == def) 209 if (val == def)
212 continue; 210 continue;
213 211
212 WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
213
214 codec->cache_bypass = 1; 214 codec->cache_bypass = 1;
215 ret = snd_soc_write(codec, regtmp, val); 215 ret = snd_soc_write(codec, regtmp, val);
216 codec->cache_bypass = 0; 216 codec->cache_bypass = 0;
@@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
563 563
564 lzo_blocks = codec->reg_cache; 564 lzo_blocks = codec->reg_cache;
565 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { 565 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
566 WARN_ON(codec->writable_register && 566 WARN_ON(!snd_soc_codec_writable_register(codec, i));
567 codec->writable_register(codec, i));
568 ret = snd_soc_cache_read(codec, i, &val); 567 ret = snd_soc_cache_read(codec, i, &val);
569 if (ret) 568 if (ret)
570 return ret; 569 return ret;
@@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
823 822
824 codec_drv = codec->driver; 823 codec_drv = codec->driver;
825 for (i = 0; i < codec_drv->reg_cache_size; ++i) { 824 for (i = 0; i < codec_drv->reg_cache_size; ++i) {
826 WARN_ON(codec->writable_register &&
827 codec->writable_register(codec, i));
828 ret = snd_soc_cache_read(codec, i, &val); 825 ret = snd_soc_cache_read(codec, i, &val);
829 if (ret) 826 if (ret)
830 return ret; 827 return ret;
@@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
832 if (snd_soc_get_cache_val(codec->reg_def_copy, 829 if (snd_soc_get_cache_val(codec->reg_def_copy,
833 i, codec_drv->reg_word_size) == val) 830 i, codec_drv->reg_word_size) == val)
834 continue; 831 continue;
832
833 WARN_ON(!snd_soc_codec_writable_register(codec, i));
834
835 ret = snd_soc_write(codec, i, val); 835 ret = snd_soc_write(codec, i, val);
836 if (ret) 836 if (ret)
837 return ret; 837 return ret;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 83ad8ca27490..d2ef014af215 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1633,7 +1633,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
1633 if (codec->readable_register) 1633 if (codec->readable_register)
1634 return codec->readable_register(codec, reg); 1634 return codec->readable_register(codec, reg);
1635 else 1635 else
1636 return 0; 1636 return 1;
1637} 1637}
1638EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); 1638EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);
1639 1639
@@ -1651,7 +1651,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
1651 if (codec->writable_register) 1651 if (codec->writable_register)
1652 return codec->writable_register(codec, reg); 1652 return codec->writable_register(codec, reg);
1653 else 1653 else
1654 return 0; 1654 return 1;
1655} 1655}
1656EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); 1656EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);
1657 1657
@@ -1913,7 +1913,7 @@ struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
1913 1913
1914 if (prefix) { 1914 if (prefix) {
1915 name_len = strlen(long_name) + strlen(prefix) + 2; 1915 name_len = strlen(long_name) + strlen(prefix) + 2;
1916 name = kmalloc(name_len, GFP_ATOMIC); 1916 name = kmalloc(name_len, GFP_KERNEL);
1917 if (!name) 1917 if (!name)
1918 return NULL; 1918 return NULL;
1919 1919
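
The GFP_ATOMIC to GFP_KERNEL change in snd_soc_cnew() reflects that control creation runs in process context, where a sleeping allocation is allowed and more likely to succeed. A small sketch of the prefixed-name construction under that assumption (names illustrative, not the driver's helper):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static char *build_prefixed_name(const char *prefix, const char *name)
	{
		size_t len = strlen(prefix) + strlen(name) + 2;	/* space + NUL */
		char *full = kmalloc(len, GFP_KERNEL);	/* process context */

		if (!full)
			return NULL;

		snprintf(full, len, "%s %s", prefix, name);
		return full;
	}
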
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e15914b3633..d67c637557a7 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
2763 2763
2764/** 2764/**
2765 * snd_soc_dapm_free - free dapm resources 2765 * snd_soc_dapm_free - free dapm resources
2766 * @card: SoC device 2766 * @dapm: DAPM context
2767 * 2767 *
2768 * Free all dapm widgets and resources. 2768 * Free all dapm widgets and resources.
2769 */ 2769 */
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index cca490c80589..a62f7dd4ba96 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -205,6 +205,25 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
205#define snd_soc_16_8_read_i2c NULL 205#define snd_soc_16_8_read_i2c NULL
206#endif 206#endif
207 207
208#if defined(CONFIG_SPI_MASTER)
209static unsigned int snd_soc_16_8_read_spi(struct snd_soc_codec *codec,
210 unsigned int r)
211{
212 struct spi_device *spi = codec->control_data;
213
214 const u16 reg = cpu_to_be16(r | 0x100);
215 u8 data;
216 int ret;
217
218 ret = spi_write_then_read(spi, &reg, 2, &data, 1);
219 if (ret < 0)
220 return 0;
221 return data;
222}
223#else
224#define snd_soc_16_8_read_spi NULL
225#endif
226
208static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, 227static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
209 unsigned int value) 228 unsigned int value)
210{ 229{
@@ -295,6 +314,7 @@ static struct {
295 int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int); 314 int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
296 unsigned int (*read)(struct snd_soc_codec *, unsigned int); 315 unsigned int (*read)(struct snd_soc_codec *, unsigned int);
297 unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int); 316 unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
317 unsigned int (*spi_read)(struct snd_soc_codec *, unsigned int);
298} io_types[] = { 318} io_types[] = {
299 { 319 {
300 .addr_bits = 4, .data_bits = 12, 320 .addr_bits = 4, .data_bits = 12,
@@ -318,6 +338,7 @@ static struct {
318 .addr_bits = 16, .data_bits = 8, 338 .addr_bits = 16, .data_bits = 8,
319 .write = snd_soc_16_8_write, 339 .write = snd_soc_16_8_write,
320 .i2c_read = snd_soc_16_8_read_i2c, 340 .i2c_read = snd_soc_16_8_read_i2c,
341 .spi_read = snd_soc_16_8_read_spi,
321 }, 342 },
322 { 343 {
323 .addr_bits = 16, .data_bits = 16, 344 .addr_bits = 16, .data_bits = 16,
@@ -383,6 +404,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
383#ifdef CONFIG_SPI_MASTER 404#ifdef CONFIG_SPI_MASTER
384 codec->hw_write = do_spi_write; 405 codec->hw_write = do_spi_write;
385#endif 406#endif
407 if (io_types[i].spi_read)
408 codec->hw_read = io_types[i].spi_read;
386 409
387 codec->control_data = container_of(codec->dev, 410 codec->control_data = container_of(codec->dev,
388 struct spi_device, 411 struct spi_device,
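
The new snd_soc_16_8_read_spi() pairs a big-endian 16-bit register address, with a read flag in bit 8 (a convention of this codec family rather than a general rule), with a single data byte clocked back. A hedged sketch of that framing using the same SPI helper:

	#include <asm/byteorder.h>
	#include <linux/spi/spi.h>
	#include <linux/types.h>

	static int spi_reg_read_16_8(struct spi_device *spi, unsigned int reg,
				     u8 *val)
	{
		__be16 addr = cpu_to_be16(reg | 0x100);	/* address + read flag */

		return spi_write_then_read(spi, &addr, sizeof(addr), val, 1);
	}
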
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 7c17b98d5846..fa31d9c2abd8 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
105 105
106 snd_soc_dapm_sync(dapm); 106 snd_soc_dapm_sync(dapm);
107 107
108 snd_jack_report(jack->jack, status); 108 snd_jack_report(jack->jack, jack->status);
109 109
110out: 110out:
111 mutex_unlock(&codec->mutex); 111 mutex_unlock(&codec->mutex);
@@ -327,7 +327,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
327 IRQF_TRIGGER_FALLING, 327 IRQF_TRIGGER_FALLING,
328 gpios[i].name, 328 gpios[i].name,
329 &gpios[i]); 329 &gpios[i]);
330 if (ret) 330 if (ret < 0)
331 goto err; 331 goto err;
332 332
333 if (gpios[i].wake) { 333 if (gpios[i].wake) {
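
The `ret < 0` change matters because, assuming the request here is made with request_any_context_irq() as ASoC jack GPIO code of this period did, success is signalled by a non-negative IRQC_* value; `if (ret)` would therefore treat a successful nested-IRQ request as a failure. A sketch under that assumption, with illustrative names:

	#include <linux/interrupt.h>

	static int hook_gpio_irq(unsigned int irq, irq_handler_t handler,
				 void *data)
	{
		int ret;

		ret = request_any_context_irq(irq, handler,
					      IRQF_TRIGGER_RISING |
					      IRQF_TRIGGER_FALLING,
					      "example-jack-gpio", data);

		return ret < 0 ? ret : 0;  /* 0 or positive IRQC_* is success */
	}
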
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index b5759397afa3..2879c883eebc 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -290,6 +290,9 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
290 codec_dai->active--; 290 codec_dai->active--;
291 codec->active--; 291 codec->active--;
292 292
293 if (!cpu_dai->active && !codec_dai->active)
294 rtd->rate = 0;
295
293 /* Muting the DAC suppresses artifacts caused during digital 296 /* Muting the DAC suppresses artifacts caused during digital
294 * shutdown, for example from stopping clocks. 297 * shutdown, for example from stopping clocks.
295 */ 298 */
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index ff86e5e3db68..c7cfd96e991e 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -309,9 +309,14 @@ static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
309 309
310static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream) 310static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
311{ 311{
312 struct snd_pcm_substream *substream = pcm->streams[stream].substream; 312 struct snd_pcm_substream *substream;
313 struct snd_dma_buffer *buf = &substream->dma_buffer; 313 struct snd_dma_buffer *buf;
314
315 substream = pcm->streams[stream].substream;
316 if (!substream)
317 return;
314 318
319 buf = &substream->dma_buffer;
315 if (!buf->area) 320 if (!buf->area)
316 return; 321 return;
317 322
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index a42e9ac30f28..be27f1d229af 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -56,6 +56,7 @@
56#define GPIO_HP_MUTE BIT(1) 56#define GPIO_HP_MUTE BIT(1)
57#define GPIO_INT_MIC_EN BIT(2) 57#define GPIO_INT_MIC_EN BIT(2)
58#define GPIO_EXT_MIC_EN BIT(3) 58#define GPIO_EXT_MIC_EN BIT(3)
59#define GPIO_HP_DET BIT(4)
59 60
60struct tegra_wm8903 { 61struct tegra_wm8903 {
61 struct tegra_asoc_utils_data util_data; 62 struct tegra_asoc_utils_data util_data;
@@ -304,6 +305,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
304 snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack, 305 snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack,
305 1, 306 1,
306 &tegra_wm8903_hp_jack_gpio); 307 &tegra_wm8903_hp_jack_gpio);
308 machine->gpio_requested |= GPIO_HP_DET;
307 } 309 }
308 310
309 snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE, 311 snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
@@ -317,7 +319,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
317 snd_soc_dapm_force_enable_pin(dapm, "Mic Bias"); 319 snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
318 320
319 /* FIXME: Calculate automatically based on DAPM routes? */ 321 /* FIXME: Calculate automatically based on DAPM routes? */
320 if (!machine_is_harmony() && !machine_is_ventana()) 322 if (!machine_is_harmony())
321 snd_soc_dapm_nc_pin(dapm, "IN1L"); 323 snd_soc_dapm_nc_pin(dapm, "IN1L");
322 if (!machine_is_seaboard() && !machine_is_aebl()) 324 if (!machine_is_seaboard() && !machine_is_aebl())
323 snd_soc_dapm_nc_pin(dapm, "IN1R"); 325 snd_soc_dapm_nc_pin(dapm, "IN1R");
@@ -393,7 +395,7 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
393 platform_set_drvdata(pdev, card); 395 platform_set_drvdata(pdev, card);
394 snd_soc_card_set_drvdata(card, machine); 396 snd_soc_card_set_drvdata(card, machine);
395 397
396 if (machine_is_harmony() || machine_is_ventana()) { 398 if (machine_is_harmony()) {
397 card->dapm_routes = harmony_audio_map; 399 card->dapm_routes = harmony_audio_map;
398 card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map); 400 card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map);
399 } else if (machine_is_seaboard()) { 401 } else if (machine_is_seaboard()) {
@@ -429,10 +431,10 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
429 struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); 431 struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
430 struct tegra_wm8903_platform_data *pdata = machine->pdata; 432 struct tegra_wm8903_platform_data *pdata = machine->pdata;
431 433
432 snd_soc_unregister_card(card); 434 if (machine->gpio_requested & GPIO_HP_DET)
433 435 snd_soc_jack_free_gpios(&tegra_wm8903_hp_jack,
434 tegra_asoc_utils_fini(&machine->util_data); 436 1,
435 437 &tegra_wm8903_hp_jack_gpio);
436 if (machine->gpio_requested & GPIO_EXT_MIC_EN) 438 if (machine->gpio_requested & GPIO_EXT_MIC_EN)
437 gpio_free(pdata->gpio_ext_mic_en); 439 gpio_free(pdata->gpio_ext_mic_en);
438 if (machine->gpio_requested & GPIO_INT_MIC_EN) 440 if (machine->gpio_requested & GPIO_INT_MIC_EN)
@@ -441,6 +443,11 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
441 gpio_free(pdata->gpio_hp_mute); 443 gpio_free(pdata->gpio_hp_mute);
442 if (machine->gpio_requested & GPIO_SPKR_EN) 444 if (machine->gpio_requested & GPIO_SPKR_EN)
443 gpio_free(pdata->gpio_spkr_en); 445 gpio_free(pdata->gpio_spkr_en);
446 machine->gpio_requested = 0;
447
448 snd_soc_unregister_card(card);
449
450 tegra_asoc_utils_fini(&machine->util_data);
444 451
445 kfree(machine); 452 kfree(machine);
446 453
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index d0d493ca28ae..2cf87f5afed4 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -139,8 +139,12 @@ static void stream_stop(struct snd_usb_caiaqdev *dev)
139 139
140 for (i = 0; i < N_URBS; i++) { 140 for (i = 0; i < N_URBS; i++) {
141 usb_kill_urb(dev->data_urbs_in[i]); 141 usb_kill_urb(dev->data_urbs_in[i]);
142 usb_kill_urb(dev->data_urbs_out[i]); 142
143 if (test_bit(i, &dev->outurb_active_mask))
144 usb_kill_urb(dev->data_urbs_out[i]);
143 } 145 }
146
147 dev->outurb_active_mask = 0;
144} 148}
145 149
146static int snd_usb_caiaq_substream_open(struct snd_pcm_substream *substream) 150static int snd_usb_caiaq_substream_open(struct snd_pcm_substream *substream)
@@ -612,8 +616,9 @@ static void read_completed(struct urb *urb)
612{ 616{
613 struct snd_usb_caiaq_cb_info *info = urb->context; 617 struct snd_usb_caiaq_cb_info *info = urb->context;
614 struct snd_usb_caiaqdev *dev; 618 struct snd_usb_caiaqdev *dev;
615 struct urb *out; 619 struct urb *out = NULL;
616 int frame, len, send_it = 0, outframe = 0; 620 int i, frame, len, send_it = 0, outframe = 0;
621 size_t offset = 0;
617 622
618 if (urb->status || !info) 623 if (urb->status || !info)
619 return; 624 return;
@@ -623,7 +628,17 @@ static void read_completed(struct urb *urb)
623 if (!dev->streaming) 628 if (!dev->streaming)
624 return; 629 return;
625 630
626 out = dev->data_urbs_out[info->index]; 631 /* find an output urb that is not currently in use */
632 for (i = 0; i < N_URBS; i++)
633 if (test_and_set_bit(i, &dev->outurb_active_mask) == 0) {
634 out = dev->data_urbs_out[i];
635 break;
636 }
637
638 if (!out) {
639 log("Unable to find an output urb to use\n");
640 goto requeue;
641 }
627 642
628 /* read the recently received packet and send back one which has 643 /* read the recently received packet and send back one which has
629 * the same layout */ 644 * the same layout */
@@ -634,7 +649,8 @@ static void read_completed(struct urb *urb)
634 len = urb->iso_frame_desc[outframe].actual_length; 649 len = urb->iso_frame_desc[outframe].actual_length;
635 out->iso_frame_desc[outframe].length = len; 650 out->iso_frame_desc[outframe].length = len;
636 out->iso_frame_desc[outframe].actual_length = 0; 651 out->iso_frame_desc[outframe].actual_length = 0;
637 out->iso_frame_desc[outframe].offset = BYTES_PER_FRAME * frame; 652 out->iso_frame_desc[outframe].offset = offset;
653 offset += len;
638 654
639 if (len > 0) { 655 if (len > 0) {
640 spin_lock(&dev->spinlock); 656 spin_lock(&dev->spinlock);
@@ -650,11 +666,15 @@ static void read_completed(struct urb *urb)
650 } 666 }
651 667
652 if (send_it) { 668 if (send_it) {
653 out->number_of_packets = FRAMES_PER_URB; 669 out->number_of_packets = outframe;
654 out->transfer_flags = URB_ISO_ASAP; 670 out->transfer_flags = URB_ISO_ASAP;
655 usb_submit_urb(out, GFP_ATOMIC); 671 usb_submit_urb(out, GFP_ATOMIC);
672 } else {
673 struct snd_usb_caiaq_cb_info *oinfo = out->context;
674 clear_bit(oinfo->index, &dev->outurb_active_mask);
656 } 675 }
657 676
677requeue:
658 /* re-submit inbound urb */ 678 /* re-submit inbound urb */
659 for (frame = 0; frame < FRAMES_PER_URB; frame++) { 679 for (frame = 0; frame < FRAMES_PER_URB; frame++) {
660 urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame; 680 urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame;
@@ -676,6 +696,8 @@ static void write_completed(struct urb *urb)
676 dev->output_running = 1; 696 dev->output_running = 1;
677 wake_up(&dev->prepare_wait_queue); 697 wake_up(&dev->prepare_wait_queue);
678 } 698 }
699
700 clear_bit(info->index, &dev->outurb_active_mask);
679} 701}
680 702
681static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret) 703static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret)
@@ -827,6 +849,9 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
827 if (!dev->data_cb_info) 849 if (!dev->data_cb_info)
828 return -ENOMEM; 850 return -ENOMEM;
829 851
852 dev->outurb_active_mask = 0;
853 BUILD_BUG_ON(N_URBS > (sizeof(dev->outurb_active_mask) * 8));
854
830 for (i = 0; i < N_URBS; i++) { 855 for (i = 0; i < N_URBS; i++) {
831 dev->data_cb_info[i].dev = dev; 856 dev->data_cb_info[i].dev = dev;
832 dev->data_cb_info[i].index = i; 857 dev->data_cb_info[i].index = i;
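
outurb_active_mask turns the output URBs into a small pool: claiming one is an atomic test_and_set_bit(), and the write-completion handler (or the not-sent path) clears the bit again, so a URB is never resubmitted while still in flight. A hedged sketch of the claim/release pair with an illustrative pool size:

	#include <linux/bitops.h>
	#include <linux/usb.h>

	#define POOL_SIZE 8	/* illustrative; must fit in one unsigned long */

	static unsigned long pool_busy_mask;
	static struct urb *pool_urbs[POOL_SIZE];

	static struct urb *pool_claim(void)
	{
		int i;

		for (i = 0; i < POOL_SIZE; i++)
			if (!test_and_set_bit(i, &pool_busy_mask))
				return pool_urbs[i];	/* bit was clear, URB is ours */

		return NULL;			/* every URB still in flight */
	}

	static void pool_release(int index)	/* from the completion handler */
	{
		clear_bit(index, &pool_busy_mask);
	}
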
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
index b2b310194ffa..3f9c6339ae90 100644
--- a/sound/usb/caiaq/device.h
+++ b/sound/usb/caiaq/device.h
@@ -96,6 +96,7 @@ struct snd_usb_caiaqdev {
96 int input_panic, output_panic, warned; 96 int input_panic, output_panic, warned;
97 char *audio_in_buf, *audio_out_buf; 97 char *audio_in_buf, *audio_out_buf;
98 unsigned int samplerates, bpp; 98 unsigned int samplerates, bpp;
99 unsigned long outurb_active_mask;
99 100
100 struct snd_pcm_substream *sub_playback[MAX_STREAMS]; 101 struct snd_pcm_substream *sub_playback[MAX_STREAMS];
101 struct snd_pcm_substream *sub_capture[MAX_STREAMS]; 102 struct snd_pcm_substream *sub_capture[MAX_STREAMS];
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c04d7c71ac88..cdd19d7fe500 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -152,6 +152,7 @@ static inline void check_mapped_dB(const struct usbmix_name_map *p,
152 if (p && p->dB) { 152 if (p && p->dB) {
153 cval->dBmin = p->dB->min; 153 cval->dBmin = p->dB->min;
154 cval->dBmax = p->dB->max; 154 cval->dBmax = p->dB->max;
155 cval->initialized = 1;
155 } 156 }
156} 157}
157 158
@@ -1092,7 +1093,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1092 " Switch" : " Volume"); 1093 " Switch" : " Volume");
1093 if (control == UAC_FU_VOLUME) { 1094 if (control == UAC_FU_VOLUME) {
1094 check_mapped_dB(map, cval); 1095 check_mapped_dB(map, cval);
1095 if (cval->dBmin < cval->dBmax) { 1096 if (cval->dBmin < cval->dBmax || !cval->initialized) {
1096 kctl->tlv.c = mixer_vol_tlv; 1097 kctl->tlv.c = mixer_vol_tlv;
1097 kctl->vd[0].access |= 1098 kctl->vd[0].access |=
1098 SNDRV_CTL_ELEM_ACCESS_TLV_READ | 1099 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 4d4f86552a23..a42e3ef3832d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1707,6 +1707,40 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1707 } 1707 }
1708 } 1708 }
1709}, 1709},
1710{
1711 USB_DEVICE(0x0582, 0x0130),
1712 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1713 /* .vendor_name = "BOSS", */
1714 /* .product_name = "MICRO BR-80", */
1715 .ifnum = QUIRK_ANY_INTERFACE,
1716 .type = QUIRK_COMPOSITE,
1717 .data = (const struct snd_usb_audio_quirk[]) {
1718 {
1719 .ifnum = 0,
1720 .type = QUIRK_IGNORE_INTERFACE
1721 },
1722 {
1723 .ifnum = 1,
1724 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1725 },
1726 {
1727 .ifnum = 2,
1728 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1729 },
1730 {
1731 .ifnum = 3,
1732 .type = QUIRK_MIDI_FIXED_ENDPOINT,
1733 .data = & (const struct snd_usb_midi_endpoint_info) {
1734 .out_cables = 0x0001,
1735 .in_cables = 0x0001
1736 }
1737 },
1738 {
1739 .ifnum = -1
1740 }
1741 }
1742 }
1743},
1710 1744
1711/* Guillemot devices */ 1745/* Guillemot devices */
1712{ 1746{
diff --git a/tools/perf/arch/arm/util/dwarf-regs.c b/tools/perf/arch/arm/util/dwarf-regs.c
index fff6450c8c99..e8d5c551c69c 100644
--- a/tools/perf/arch/arm/util/dwarf-regs.c
+++ b/tools/perf/arch/arm/util/dwarf-regs.c
@@ -8,7 +8,10 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <stdlib.h>
12#ifndef __UCLIBC__
11#include <libio.h> 13#include <libio.h>
14#endif
12#include <dwarf-regs.h> 15#include <dwarf-regs.h>
13 16
14struct pt_regs_dwarfnum { 17struct pt_regs_dwarfnum {
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 5f2a5c7046df..710ae3d0a489 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -134,10 +134,18 @@ static int opt_show_lines(const struct option *opt __used,
134{ 134{
135 int ret = 0; 135 int ret = 0;
136 136
137 if (str) 137 if (!str)
138 ret = parse_line_range_desc(str, &params.line_range); 138 return 0;
139 INIT_LIST_HEAD(&params.line_range.line_list); 139
140 if (params.show_lines) {
141 pr_warning("Warning: more than one --line options are"
142 " detected. Only the first one is valid.\n");
143 return 0;
144 }
145
140 params.show_lines = true; 146 params.show_lines = true;
147 ret = parse_line_range_desc(str, &params.line_range);
148 INIT_LIST_HEAD(&params.line_range.line_list);
141 149
142 return ret; 150 return ret;
143} 151}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f6426b496f4a..6b0519f885e4 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -45,7 +45,7 @@ static int freq = 1000;
45static int output; 45static int output;
46static int pipe_output = 0; 46static int pipe_output = 0;
47static const char *output_name = NULL; 47static const char *output_name = NULL;
48static int group = 0; 48static bool group = false;
49static int realtime_prio = 0; 49static int realtime_prio = 0;
50static bool nodelay = false; 50static bool nodelay = false;
51static bool raw_samples = false; 51static bool raw_samples = false;
@@ -753,6 +753,8 @@ const struct option record_options[] = {
753 "child tasks do not inherit counters"), 753 "child tasks do not inherit counters"),
754 OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), 754 OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
755 OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), 755 OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
756 OPT_BOOLEAN(0, "group", &group,
757 "put the counters into a counter group"),
756 OPT_BOOLEAN('g', "call-graph", &call_graph, 758 OPT_BOOLEAN('g', "call-graph", &call_graph,
757 "do call-graph (stack chain/backtrace) recording"), 759 "do call-graph (stack chain/backtrace) recording"),
758 OPT_INCR('v', "verbose", &verbose, 760 OPT_INCR('v', "verbose", &verbose,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1ad04ce29c34..5deb17d9e795 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -193,6 +193,7 @@ static int big_num_opt = -1;
193static const char *cpu_list; 193static const char *cpu_list;
194static const char *csv_sep = NULL; 194static const char *csv_sep = NULL;
195static bool csv_output = false; 195static bool csv_output = false;
196static bool group = false;
196 197
197static volatile int done = 0; 198static volatile int done = 0;
198 199
@@ -280,14 +281,14 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
280 attr->inherit = !no_inherit; 281 attr->inherit = !no_inherit;
281 282
282 if (system_wide) 283 if (system_wide)
283 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false); 284 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group);
284 285
285 if (target_pid == -1 && target_tid == -1) { 286 if (target_pid == -1 && target_tid == -1) {
286 attr->disabled = 1; 287 attr->disabled = 1;
287 attr->enable_on_exec = 1; 288 attr->enable_on_exec = 1;
288 } 289 }
289 290
290 return perf_evsel__open_per_thread(evsel, evsel_list->threads, false); 291 return perf_evsel__open_per_thread(evsel, evsel_list->threads, group);
291} 292}
292 293
293/* 294/*
@@ -1043,6 +1044,8 @@ static const struct option options[] = {
1043 "stat events on existing thread id"), 1044 "stat events on existing thread id"),
1044 OPT_BOOLEAN('a', "all-cpus", &system_wide, 1045 OPT_BOOLEAN('a', "all-cpus", &system_wide,
1045 "system-wide collection from all CPUs"), 1046 "system-wide collection from all CPUs"),
1047 OPT_BOOLEAN('g', "group", &group,
1048 "put the counters into a counter group"),
1046 OPT_BOOLEAN('c', "scale", &scale, 1049 OPT_BOOLEAN('c', "scale", &scale,
1047 "scale/normalize counters"), 1050 "scale/normalize counters"),
1048 OPT_INCR('v', "verbose", &verbose, 1051 OPT_INCR('v', "verbose", &verbose,
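The stat change threads the new group flag into the evsel open helpers, while record gains the matching --group option. Underneath, grouping is a property of perf_event_open(2): the first counter is opened with group_fd = -1 and becomes the leader, and each further counter passes the leader's fd so the kernel schedules them on the PMU as one unit. A minimal sketch of that syscall-level view (glibc has no wrapper, so one is defined here; error handling kept short):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Open cycles + instructions as one counter group for @pid, return the leader fd */
static int open_cycle_instr_group(pid_t pid)
{
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* group_fd == -1: create a new group with this event as the leader */
	leader = sys_perf_event_open(&attr, pid, -1, -1, 0);
	if (leader < 0)
		return -1;

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	/* passing the leader's fd puts this counter into the same group */
	member = sys_perf_event_open(&attr, pid, -1, leader, 0);
	if (member < 0) {
		close(leader);
		return -1;
	}

	return leader;
}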
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index fddf40f30d3e..ee51e9b4dc09 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -96,6 +96,39 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
96 return *lineno ?: -ENOENT; 96 return *lineno ?: -ENOENT;
97} 97}
98 98
99static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
100
101/**
102 * cu_walk_functions_at - Walk on function DIEs at given address
103 * @cu_die: A CU DIE
104 * @addr: An address
 105 * @callback: A callback which is called with found DIEs
106 * @data: A user data
107 *
 108 * Walk on function DIEs at the given @addr in @cu_die. The DIEs passed to
 109 * @callback are subprograms or inlined subroutines.
110 */
111int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
112 int (*callback)(Dwarf_Die *, void *), void *data)
113{
114 Dwarf_Die die_mem;
115 Dwarf_Die *sc_die;
116 int ret = -ENOENT;
117
 118 /* An inlined function could be recursive. Trace it until the search fails */
119 for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
120 sc_die != NULL;
121 sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
122 &die_mem)) {
123 ret = callback(sc_die, data);
124 if (ret)
125 break;
126 }
127
128 return ret;
129
130}
131
99/** 132/**
100 * die_compare_name - Compare diename and tname 133 * die_compare_name - Compare diename and tname
101 * @dw_die: a DIE 134 * @dw_die: a DIE
@@ -198,6 +231,19 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
198 return 0; 231 return 0;
199} 232}
200 233
234/* Get an attribute and translate it as sdata (signed data) */
235static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
236 Dwarf_Sword *result)
237{
238 Dwarf_Attribute attr;
239
240 if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
241 dwarf_formsdata(&attr, result) != 0)
242 return -ENOENT;
243
244 return 0;
245}
246
201/** 247/**
202 * die_is_signed_type - Check whether a type DIE is signed or not 248 * die_is_signed_type - Check whether a type DIE is signed or not
203 * @tp_die: a DIE of a type 249 * @tp_die: a DIE of a type
@@ -250,6 +296,50 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
250 return 0; 296 return 0;
251} 297}
252 298
299/* Get the call file index number in CU DIE */
300static int die_get_call_fileno(Dwarf_Die *in_die)
301{
302 Dwarf_Sword idx;
303
304 if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
305 return (int)idx;
306 else
307 return -ENOENT;
308}
309
310/* Get the declared file index number in CU DIE */
311static int die_get_decl_fileno(Dwarf_Die *pdie)
312{
313 Dwarf_Sword idx;
314
315 if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
316 return (int)idx;
317 else
318 return -ENOENT;
319}
320
321/**
322 * die_get_call_file - Get callsite file name of inlined function instance
323 * @in_die: a DIE of an inlined function instance
324 *
 325 * Get the call-site file name of @in_die, i.e. the file from which the inline
 326 * function is called.
327 */
328const char *die_get_call_file(Dwarf_Die *in_die)
329{
330 Dwarf_Die cu_die;
331 Dwarf_Files *files;
332 int idx;
333
334 idx = die_get_call_fileno(in_die);
335 if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) ||
336 dwarf_getsrcfiles(&cu_die, &files, NULL) != 0)
337 return NULL;
338
339 return dwarf_filesrc(files, idx, NULL, NULL);
340}
341
342
253/** 343/**
254 * die_find_child - Generic DIE search function in DIE tree 344 * die_find_child - Generic DIE search function in DIE tree
255 * @rt_die: a root DIE 345 * @rt_die: a root DIE
@@ -374,9 +464,78 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
374 return die_mem; 464 return die_mem;
375} 465}
376 466
467struct __instance_walk_param {
468 void *addr;
469 int (*callback)(Dwarf_Die *, void *);
470 void *data;
471 int retval;
472};
473
474static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
475{
476 struct __instance_walk_param *iwp = data;
477 Dwarf_Attribute attr_mem;
478 Dwarf_Die origin_mem;
479 Dwarf_Attribute *attr;
480 Dwarf_Die *origin;
481 int tmp;
482
483 attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
484 if (attr == NULL)
485 return DIE_FIND_CB_CONTINUE;
486
487 origin = dwarf_formref_die(attr, &origin_mem);
488 if (origin == NULL || origin->addr != iwp->addr)
489 return DIE_FIND_CB_CONTINUE;
490
491 /* Ignore redundant instances */
492 if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) {
493 dwarf_decl_line(origin, &tmp);
494 if (die_get_call_lineno(inst) == tmp) {
495 tmp = die_get_decl_fileno(origin);
496 if (die_get_call_fileno(inst) == tmp)
497 return DIE_FIND_CB_CONTINUE;
498 }
499 }
500
501 iwp->retval = iwp->callback(inst, iwp->data);
502
503 return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE;
504}
505
506/**
507 * die_walk_instances - Walk on instances of given DIE
508 * @or_die: an abstract original DIE
509 * @callback: a callback function which is called with instance DIE
510 * @data: user data
511 *
 512 * Walk on the instances of the given @or_die. @or_die must be an inlined
 513 * function declaration. This returns the return value of @callback if it
 514 * returns a non-zero value, or -ENOENT if there is no instance.
515 */
516int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
517 void *data)
518{
519 Dwarf_Die cu_die;
520 Dwarf_Die die_mem;
521 struct __instance_walk_param iwp = {
522 .addr = or_die->addr,
523 .callback = callback,
524 .data = data,
525 .retval = -ENOENT,
526 };
527
528 if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL)
529 return -ENOENT;
530
531 die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem);
532
533 return iwp.retval;
534}
535
377/* Line walker internal parameters */ 536/* Line walker internal parameters */
378struct __line_walk_param { 537struct __line_walk_param {
379 const char *fname; 538 bool recursive;
380 line_walk_callback_t callback; 539 line_walk_callback_t callback;
381 void *data; 540 void *data;
382 int retval; 541 int retval;
@@ -385,39 +544,56 @@ struct __line_walk_param {
385static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) 544static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
386{ 545{
387 struct __line_walk_param *lw = data; 546 struct __line_walk_param *lw = data;
388 Dwarf_Addr addr; 547 Dwarf_Addr addr = 0;
548 const char *fname;
389 int lineno; 549 int lineno;
390 550
391 if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { 551 if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
552 fname = die_get_call_file(in_die);
392 lineno = die_get_call_lineno(in_die); 553 lineno = die_get_call_lineno(in_die);
393 if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { 554 if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
394 lw->retval = lw->callback(lw->fname, lineno, addr, 555 lw->retval = lw->callback(fname, lineno, addr, lw->data);
395 lw->data);
396 if (lw->retval != 0) 556 if (lw->retval != 0)
397 return DIE_FIND_CB_END; 557 return DIE_FIND_CB_END;
398 } 558 }
399 } 559 }
400 return DIE_FIND_CB_SIBLING; 560 if (!lw->recursive)
561 /* Don't need to search recursively */
562 return DIE_FIND_CB_SIBLING;
563
564 if (addr) {
565 fname = dwarf_decl_file(in_die);
566 if (fname && dwarf_decl_line(in_die, &lineno) == 0) {
567 lw->retval = lw->callback(fname, lineno, addr, lw->data);
568 if (lw->retval != 0)
569 return DIE_FIND_CB_END;
570 }
571 }
572
573 /* Continue to search nested inlined function call-sites */
574 return DIE_FIND_CB_CONTINUE;
401} 575}
402 576
403/* Walk on lines of blocks included in given DIE */ 577/* Walk on lines of blocks included in given DIE */
404static int __die_walk_funclines(Dwarf_Die *sp_die, 578static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
405 line_walk_callback_t callback, void *data) 579 line_walk_callback_t callback, void *data)
406{ 580{
407 struct __line_walk_param lw = { 581 struct __line_walk_param lw = {
582 .recursive = recursive,
408 .callback = callback, 583 .callback = callback,
409 .data = data, 584 .data = data,
410 .retval = 0, 585 .retval = 0,
411 }; 586 };
412 Dwarf_Die die_mem; 587 Dwarf_Die die_mem;
413 Dwarf_Addr addr; 588 Dwarf_Addr addr;
589 const char *fname;
414 int lineno; 590 int lineno;
415 591
416 /* Handle function declaration line */ 592 /* Handle function declaration line */
417 lw.fname = dwarf_decl_file(sp_die); 593 fname = dwarf_decl_file(sp_die);
418 if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && 594 if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
419 dwarf_entrypc(sp_die, &addr) == 0) { 595 dwarf_entrypc(sp_die, &addr) == 0) {
420 lw.retval = callback(lw.fname, lineno, addr, data); 596 lw.retval = callback(fname, lineno, addr, data);
421 if (lw.retval != 0) 597 if (lw.retval != 0)
422 goto done; 598 goto done;
423 } 599 }
@@ -430,7 +606,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
430{ 606{
431 struct __line_walk_param *lw = data; 607 struct __line_walk_param *lw = data;
432 608
433 lw->retval = __die_walk_funclines(sp_die, lw->callback, lw->data); 609 lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
434 if (lw->retval != 0) 610 if (lw->retval != 0)
435 return DWARF_CB_ABORT; 611 return DWARF_CB_ABORT;
436 612
@@ -439,7 +615,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
439 615
440/** 616/**
441 * die_walk_lines - Walk on lines inside given DIE 617 * die_walk_lines - Walk on lines inside given DIE
442 * @rt_die: a root DIE (CU or subprogram) 618 * @rt_die: a root DIE (CU, subprogram or inlined_subroutine)
443 * @callback: callback routine 619 * @callback: callback routine
444 * @data: user data 620 * @data: user data
445 * 621 *
@@ -460,12 +636,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
460 size_t nlines, i; 636 size_t nlines, i;
461 637
462 /* Get the CU die */ 638 /* Get the CU die */
463 if (dwarf_tag(rt_die) == DW_TAG_subprogram) 639 if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
464 cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); 640 cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
465 else 641 else
466 cu_die = rt_die; 642 cu_die = rt_die;
467 if (!cu_die) { 643 if (!cu_die) {
468 pr_debug2("Failed to get CU from subprogram\n"); 644 pr_debug2("Failed to get CU from given DIE.\n");
469 return -EINVAL; 645 return -EINVAL;
470 } 646 }
471 647
@@ -509,7 +685,11 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
509 * subroutines. We have to check functions list or given function. 685 * subroutines. We have to check functions list or given function.
510 */ 686 */
511 if (rt_die != cu_die) 687 if (rt_die != cu_die)
512 ret = __die_walk_funclines(rt_die, callback, data); 688 /*
 689 * No need to walk functions recursively, because nested
690 * inlined functions don't have lines of the specified DIE.
691 */
692 ret = __die_walk_funclines(rt_die, false, callback, data);
513 else { 693 else {
514 struct __line_walk_param param = { 694 struct __line_walk_param param = {
515 .callback = callback, 695 .callback = callback,
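A possible caller of the new die_walk_instances() helper, just to illustrate the intended shape: a callback receives every concrete subprogram/inlined-subroutine instance of an abstract origin DIE and returns 0 to keep walking. This sketch assumes the prototypes added above plus libdw's dwarf_entrypc()/dwarf_diename(); it is not code from the patch:

#include <errno.h>
#include <stdio.h>
#include <elfutils/libdw.h>
#include "dwarf-aux.h"

/* Called once per instance of the origin DIE */
static int print_instance(Dwarf_Die *inst, void *data)
{
	int *count = data;
	Dwarf_Addr entry = 0;

	if (dwarf_entrypc(inst, &entry) == 0)
		printf("instance of %s at %#llx\n",
		       dwarf_diename(inst) ?: "?",
		       (unsigned long long)entry);
	(*count)++;
	return 0;	/* non-zero would stop the walk and be returned */
}

static int count_instances(Dwarf_Die *origin)
{
	int count = 0;

	if (die_walk_instances(origin, print_instance, &count) == -ENOENT)
		return 0;	/* no instance found */
	return count;
}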
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index bc3b21167e70..6ce1717784b7 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -34,12 +34,19 @@ extern const char *cu_get_comp_dir(Dwarf_Die *cu_die);
34extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, 34extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
35 const char **fname, int *lineno); 35 const char **fname, int *lineno);
36 36
37/* Walk on functions at the given address */
38extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
39 int (*callback)(Dwarf_Die *, void *), void *data);
40
37/* Compare diename and tname */ 41/* Compare diename and tname */
38extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); 42extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
39 43
40/* Get callsite line number of inline-function instance */ 44/* Get callsite line number of inline-function instance */
41extern int die_get_call_lineno(Dwarf_Die *in_die); 45extern int die_get_call_lineno(Dwarf_Die *in_die);
42 46
47/* Get callsite file name of inlined function instance */
48extern const char *die_get_call_file(Dwarf_Die *in_die);
49
43/* Get type die */ 50/* Get type die */
44extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); 51extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
45 52
@@ -73,6 +80,10 @@ extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
73extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 80extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
74 Dwarf_Die *die_mem); 81 Dwarf_Die *die_mem);
75 82
83/* Walk on the instances of given DIE */
84extern int die_walk_instances(Dwarf_Die *in_die,
85 int (*callback)(Dwarf_Die *, void *), void *data);
86
76/* Walker on lines (Note: line number will not be sorted) */ 87/* Walker on lines (Note: line number will not be sorted) */
77typedef int (* line_walk_callback_t) (const char *fname, int lineno, 88typedef int (* line_walk_callback_t) (const char *fname, int lineno,
78 Dwarf_Addr addr, void *data); 89 Dwarf_Addr addr, void *data);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e03e7bc8205e..c12bd476c6f7 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -85,10 +85,19 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
85 struct perf_evsel *evsel = perf_evsel__new(&attr, 0); 85 struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
86 86
87 if (evsel == NULL) 87 if (evsel == NULL)
88 return -ENOMEM; 88 goto error;
89
90 /* use strdup() because free(evsel) assumes name is allocated */
91 evsel->name = strdup("cycles");
92 if (!evsel->name)
93 goto error_free;
89 94
90 perf_evlist__add(evlist, evsel); 95 perf_evlist__add(evlist, evsel);
91 return 0; 96 return 0;
97error_free:
98 perf_evsel__delete(evsel);
99error:
100 return -ENOMEM;
92} 101}
93 102
94void perf_evlist__disable(struct perf_evlist *evlist) 103void perf_evlist__disable(struct perf_evlist *evlist)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d4f3101773db..b6c1ad123ca9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -726,7 +726,16 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
726 return -1; 726 return -1;
727 727
728 bev.header = old_bev.header; 728 bev.header = old_bev.header;
729 bev.pid = 0; 729
730 /*
731 * As the pid is the missing value, we need to fill
 732 * it in properly. The header.misc value gives us a nice hint.
733 */
734 bev.pid = HOST_KERNEL_ID;
735 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
736 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
737 bev.pid = DEFAULT_GUEST_KERNEL_ID;
738
730 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 739 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
731 __event_process_build_id(&bev, filename, session); 740 __event_process_build_id(&bev, filename, session);
732 741
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index 791f9dd27ebf..547628e97f3d 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -5,7 +5,9 @@
5#define __always_inline inline 5#define __always_inline inline
6#endif 6#endif
7#define __user 7#define __user
8#ifndef __attribute_const__
8#define __attribute_const__ 9#define __attribute_const__
10#endif
9 11
10#define __used __attribute__((__unused__)) 12#define __used __attribute__((__unused__))
11 13
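Guarding the fallback with #ifndef simply lets an existing definition (e.g. one provided by the system headers) take precedence and avoids a macro-redefinition warning. A tiny self-contained illustration of the pattern, not tied to perf's headers:

/* Use the toolchain/libc definition when present, otherwise a no-op */
#ifndef __attribute_const__
#define __attribute_const__
#endif

static int __attribute_const__ answer(void)
{
	return 42;
}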
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4ea7e19f5251..928918b796b2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -697,7 +697,11 @@ parse_raw_event(const char **strp, struct perf_event_attr *attr)
697 return EVT_FAILED; 697 return EVT_FAILED;
698 n = hex2u64(str + 1, &config); 698 n = hex2u64(str + 1, &config);
699 if (n > 0) { 699 if (n > 0) {
700 *strp = str + n + 1; 700 const char *end = str + n + 1;
701 if (*end != '\0' && *end != ',' && *end != ':')
702 return EVT_FAILED;
703
704 *strp = end;
701 attr->type = PERF_TYPE_RAW; 705 attr->type = PERF_TYPE_RAW;
702 attr->config = config; 706 attr->config = config;
703 return EVT_HANDLED; 707 return EVT_HANDLED;
@@ -1097,6 +1101,4 @@ void print_events(const char *event_glob)
1097 printf("\n"); 1101 printf("\n");
1098 1102
1099 print_tracepoint_events(NULL, NULL); 1103 print_tracepoint_events(NULL, NULL);
1100
1101 exit(129);
1102} 1104}
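The added check tightens what counts as a raw event: after the hex config value only end of string, a ',' (next event) or a ':' (modifier) may follow, so trailing junk after the hex digits now makes the whole event fail instead of being left for the caller. The same validation as a standalone sketch (hex2u64() is perf's helper; strtoull() stands in for it here):

#include <stdbool.h>
#include <stdlib.h>

/* Accept "r<hex>" optionally followed by ',' or ':' */
static bool is_valid_raw_event(const char *str)
{
	char *end;

	if (str[0] != 'r')
		return false;

	strtoull(str + 1, &end, 16);
	if (end == str + 1)			/* no hex digits at all */
		return false;

	/* only a terminator or a separator may follow the config value */
	return *end == '\0' || *end == ',' || *end == ':';
}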
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 3e44a3e36519..555fc3864b90 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -612,12 +612,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
612 return ret; 612 return ret;
613} 613}
614 614
615/* Find a variable in a subprogram die */ 615/* Find a variable in a scope DIE */
616static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) 616static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
617{ 617{
618 Dwarf_Die vr_die, *scopes; 618 Dwarf_Die vr_die;
619 char buf[32], *ptr; 619 char buf[32], *ptr;
620 int ret, nscopes; 620 int ret = 0;
621 621
622 if (!is_c_varname(pf->pvar->var)) { 622 if (!is_c_varname(pf->pvar->var)) {
623 /* Copy raw parameters */ 623 /* Copy raw parameters */
@@ -652,30 +652,16 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
652 if (pf->tvar->name == NULL) 652 if (pf->tvar->name == NULL)
653 return -ENOMEM; 653 return -ENOMEM;
654 654
655 pr_debug("Searching '%s' variable in context.\n", 655 pr_debug("Searching '%s' variable in context.\n", pf->pvar->var);
656 pf->pvar->var);
657 /* Search child die for local variables and parameters. */ 656 /* Search child die for local variables and parameters. */
658 if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) 657 if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
659 ret = convert_variable(&vr_die, pf); 658 /* Search again in global variables */
660 else { 659 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
661 /* Search upper class */ 660 ret = -ENOENT;
662 nscopes = dwarf_getscopes_die(sp_die, &scopes);
663 while (nscopes-- > 1) {
664 pr_debug("Searching variables in %s\n",
665 dwarf_diename(&scopes[nscopes]));
666 /* We should check this scope, so give dummy address */
667 if (die_find_variable_at(&scopes[nscopes],
668 pf->pvar->var, 0,
669 &vr_die)) {
670 ret = convert_variable(&vr_die, pf);
671 goto found;
672 }
673 }
674 if (scopes)
675 free(scopes);
676 ret = -ENOENT;
677 } 661 }
678found: 662 if (ret == 0)
663 ret = convert_variable(&vr_die, pf);
664
679 if (ret < 0) 665 if (ret < 0)
680 pr_warning("Failed to find '%s' in this function.\n", 666 pr_warning("Failed to find '%s' in this function.\n",
681 pf->pvar->var); 667 pf->pvar->var);
@@ -718,26 +704,30 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
718 return 0; 704 return 0;
719} 705}
720 706
721/* Call probe_finder callback with real subprogram DIE */ 707/* Call probe_finder callback with scope DIE */
722static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) 708static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
723{ 709{
724 Dwarf_Die die_mem;
725 Dwarf_Attribute fb_attr; 710 Dwarf_Attribute fb_attr;
726 size_t nops; 711 size_t nops;
727 int ret; 712 int ret;
728 713
729 /* If no real subprogram, find a real one */ 714 if (!sc_die) {
730 if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { 715 pr_err("Caller must pass a scope DIE. Program error.\n");
731 sp_die = die_find_realfunc(&pf->cu_die, pf->addr, &die_mem); 716 return -EINVAL;
732 if (!sp_die) { 717 }
718
719 /* If not a real subprogram, find a real one */
720 if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
721 if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
733 pr_warning("Failed to find probe point in any " 722 pr_warning("Failed to find probe point in any "
734 "functions.\n"); 723 "functions.\n");
735 return -ENOENT; 724 return -ENOENT;
736 } 725 }
737 } 726 } else
727 memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die));
738 728
739 /* Get the frame base attribute/ops */ 729 /* Get the frame base attribute/ops from subprogram */
740 dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); 730 dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr);
741 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); 731 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
742 if (ret <= 0 || nops == 0) { 732 if (ret <= 0 || nops == 0) {
743 pf->fb_ops = NULL; 733 pf->fb_ops = NULL;
@@ -755,7 +745,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
755 } 745 }
756 746
757 /* Call finder's callback handler */ 747 /* Call finder's callback handler */
758 ret = pf->callback(sp_die, pf); 748 ret = pf->callback(sc_die, pf);
759 749
760 /* *pf->fb_ops will be cached in libdw. Don't free it. */ 750 /* *pf->fb_ops will be cached in libdw. Don't free it. */
761 pf->fb_ops = NULL; 751 pf->fb_ops = NULL;
@@ -763,17 +753,82 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
763 return ret; 753 return ret;
764} 754}
765 755
756struct find_scope_param {
757 const char *function;
758 const char *file;
759 int line;
760 int diff;
761 Dwarf_Die *die_mem;
762 bool found;
763};
764
765static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
766{
767 struct find_scope_param *fsp = data;
768 const char *file;
769 int lno;
770
771 /* Skip if declared file name does not match */
772 if (fsp->file) {
773 file = dwarf_decl_file(fn_die);
774 if (!file || strcmp(fsp->file, file) != 0)
775 return 0;
776 }
 777 /* If the function name is given, that's what the user expects */
778 if (fsp->function) {
779 if (die_compare_name(fn_die, fsp->function)) {
780 memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
781 fsp->found = true;
782 return 1;
783 }
784 } else {
785 /* With the line number, find the nearest declared DIE */
786 dwarf_decl_line(fn_die, &lno);
787 if (lno < fsp->line && fsp->diff > fsp->line - lno) {
788 /* Keep a candidate and continue */
789 fsp->diff = fsp->line - lno;
790 memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
791 fsp->found = true;
792 }
793 }
794 return 0;
795}
796
797/* Find an appropriate scope that fits the given conditions */
798static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
799{
800 struct find_scope_param fsp = {
801 .function = pf->pev->point.function,
802 .file = pf->fname,
803 .line = pf->lno,
804 .diff = INT_MAX,
805 .die_mem = die_mem,
806 .found = false,
807 };
808
809 cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
810
811 return fsp.found ? die_mem : NULL;
812}
813
766static int probe_point_line_walker(const char *fname, int lineno, 814static int probe_point_line_walker(const char *fname, int lineno,
767 Dwarf_Addr addr, void *data) 815 Dwarf_Addr addr, void *data)
768{ 816{
769 struct probe_finder *pf = data; 817 struct probe_finder *pf = data;
818 Dwarf_Die *sc_die, die_mem;
770 int ret; 819 int ret;
771 820
772 if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) 821 if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
773 return 0; 822 return 0;
774 823
775 pf->addr = addr; 824 pf->addr = addr;
776 ret = call_probe_finder(NULL, pf); 825 sc_die = find_best_scope(pf, &die_mem);
826 if (!sc_die) {
827 pr_warning("Failed to find scope of probe point.\n");
828 return -ENOENT;
829 }
830
831 ret = call_probe_finder(sc_die, pf);
777 832
778 /* Continue if no error, because the line will be in inline function */ 833 /* Continue if no error, because the line will be in inline function */
779 return ret < 0 ? ret : 0; 834 return ret < 0 ? ret : 0;
@@ -827,6 +882,7 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
827 Dwarf_Addr addr, void *data) 882 Dwarf_Addr addr, void *data)
828{ 883{
829 struct probe_finder *pf = data; 884 struct probe_finder *pf = data;
885 Dwarf_Die *sc_die, die_mem;
830 int ret; 886 int ret;
831 887
832 if (!line_list__has_line(&pf->lcache, lineno) || 888 if (!line_list__has_line(&pf->lcache, lineno) ||
@@ -836,7 +892,14 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
836 pr_debug("Probe line found: line:%d addr:0x%llx\n", 892 pr_debug("Probe line found: line:%d addr:0x%llx\n",
837 lineno, (unsigned long long)addr); 893 lineno, (unsigned long long)addr);
838 pf->addr = addr; 894 pf->addr = addr;
839 ret = call_probe_finder(NULL, pf); 895 pf->lno = lineno;
896 sc_die = find_best_scope(pf, &die_mem);
897 if (!sc_die) {
898 pr_warning("Failed to find scope of probe point.\n");
899 return -ENOENT;
900 }
901
902 ret = call_probe_finder(sc_die, pf);
840 903
841 /* 904 /*
842 * Continue if no error, because the lazy pattern will match 905 * Continue if no error, because the lazy pattern will match
@@ -861,42 +924,39 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
861 return die_walk_lines(sp_die, probe_point_lazy_walker, pf); 924 return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
862} 925}
863 926
864/* Callback parameter with return value */
865struct dwarf_callback_param {
866 void *data;
867 int retval;
868};
869
870static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) 927static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
871{ 928{
872 struct dwarf_callback_param *param = data; 929 struct probe_finder *pf = data;
873 struct probe_finder *pf = param->data;
874 struct perf_probe_point *pp = &pf->pev->point; 930 struct perf_probe_point *pp = &pf->pev->point;
875 Dwarf_Addr addr; 931 Dwarf_Addr addr;
932 int ret;
876 933
877 if (pp->lazy_line) 934 if (pp->lazy_line)
878 param->retval = find_probe_point_lazy(in_die, pf); 935 ret = find_probe_point_lazy(in_die, pf);
879 else { 936 else {
880 /* Get probe address */ 937 /* Get probe address */
881 if (dwarf_entrypc(in_die, &addr) != 0) { 938 if (dwarf_entrypc(in_die, &addr) != 0) {
882 pr_warning("Failed to get entry address of %s.\n", 939 pr_warning("Failed to get entry address of %s.\n",
883 dwarf_diename(in_die)); 940 dwarf_diename(in_die));
884 param->retval = -ENOENT; 941 return -ENOENT;
885 return DWARF_CB_ABORT;
886 } 942 }
887 pf->addr = addr; 943 pf->addr = addr;
888 pf->addr += pp->offset; 944 pf->addr += pp->offset;
889 pr_debug("found inline addr: 0x%jx\n", 945 pr_debug("found inline addr: 0x%jx\n",
890 (uintmax_t)pf->addr); 946 (uintmax_t)pf->addr);
891 947
892 param->retval = call_probe_finder(in_die, pf); 948 ret = call_probe_finder(in_die, pf);
893 if (param->retval < 0)
894 return DWARF_CB_ABORT;
895 } 949 }
896 950
897 return DWARF_CB_OK; 951 return ret;
898} 952}
899 953
954/* Callback parameter with return value for libdw */
955struct dwarf_callback_param {
956 void *data;
957 int retval;
958};
959
900/* Search function from function name */ 960/* Search function from function name */
901static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) 961static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
902{ 962{
@@ -933,14 +993,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
933 /* TODO: Check the address in this function */ 993 /* TODO: Check the address in this function */
934 param->retval = call_probe_finder(sp_die, pf); 994 param->retval = call_probe_finder(sp_die, pf);
935 } 995 }
936 } else { 996 } else
937 struct dwarf_callback_param _param = {.data = (void *)pf,
938 .retval = 0};
939 /* Inlined function: search instances */ 997 /* Inlined function: search instances */
940 dwarf_func_inline_instances(sp_die, probe_point_inline_cb, 998 param->retval = die_walk_instances(sp_die,
941 &_param); 999 probe_point_inline_cb, (void *)pf);
942 param->retval = _param.retval;
943 }
944 1000
945 return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ 1001 return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
946} 1002}
@@ -1060,7 +1116,7 @@ found:
1060} 1116}
1061 1117
1062/* Add a found probe point into trace event list */ 1118/* Add a found probe point into trace event list */
1063static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) 1119static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1064{ 1120{
1065 struct trace_event_finder *tf = 1121 struct trace_event_finder *tf =
1066 container_of(pf, struct trace_event_finder, pf); 1122 container_of(pf, struct trace_event_finder, pf);
@@ -1075,8 +1131,9 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
1075 } 1131 }
1076 tev = &tf->tevs[tf->ntevs++]; 1132 tev = &tf->tevs[tf->ntevs++];
1077 1133
1078 ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, 1134 /* Trace point should be converted from subprogram DIE */
1079 &tev->point); 1135 ret = convert_to_trace_point(&pf->sp_die, pf->addr,
1136 pf->pev->point.retprobe, &tev->point);
1080 if (ret < 0) 1137 if (ret < 0)
1081 return ret; 1138 return ret;
1082 1139
@@ -1091,7 +1148,8 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
1091 for (i = 0; i < pf->pev->nargs; i++) { 1148 for (i = 0; i < pf->pev->nargs; i++) {
1092 pf->pvar = &pf->pev->args[i]; 1149 pf->pvar = &pf->pev->args[i];
1093 pf->tvar = &tev->args[i]; 1150 pf->tvar = &tev->args[i];
1094 ret = find_variable(sp_die, pf); 1151 /* Variable should be found from scope DIE */
1152 ret = find_variable(sc_die, pf);
1095 if (ret != 0) 1153 if (ret != 0)
1096 return ret; 1154 return ret;
1097 } 1155 }
@@ -1159,13 +1217,13 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
1159} 1217}
1160 1218
1161/* Add found variables into the available variables list */ 1219/* Add found variables into the available variables list */
1162static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) 1220static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
1163{ 1221{
1164 struct available_var_finder *af = 1222 struct available_var_finder *af =
1165 container_of(pf, struct available_var_finder, pf); 1223 container_of(pf, struct available_var_finder, pf);
1166 struct variable_list *vl; 1224 struct variable_list *vl;
1167 Dwarf_Die die_mem, *scopes = NULL; 1225 Dwarf_Die die_mem;
1168 int ret, nscopes; 1226 int ret;
1169 1227
1170 /* Check number of tevs */ 1228 /* Check number of tevs */
1171 if (af->nvls == af->max_vls) { 1229 if (af->nvls == af->max_vls) {
@@ -1174,8 +1232,9 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
1174 } 1232 }
1175 vl = &af->vls[af->nvls++]; 1233 vl = &af->vls[af->nvls++];
1176 1234
1177 ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, 1235 /* Trace point should be converted from subprogram DIE */
1178 &vl->point); 1236 ret = convert_to_trace_point(&pf->sp_die, pf->addr,
1237 pf->pev->point.retprobe, &vl->point);
1179 if (ret < 0) 1238 if (ret < 0)
1180 return ret; 1239 return ret;
1181 1240
@@ -1187,19 +1246,14 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
1187 if (vl->vars == NULL) 1246 if (vl->vars == NULL)
1188 return -ENOMEM; 1247 return -ENOMEM;
1189 af->child = true; 1248 af->child = true;
1190 die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); 1249 die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
1191 1250
1192 /* Find external variables */ 1251 /* Find external variables */
1193 if (!af->externs) 1252 if (!af->externs)
1194 goto out; 1253 goto out;
1195 /* Don't need to search child DIE for externs. */ 1254 /* Don't need to search child DIE for externs. */
1196 af->child = false; 1255 af->child = false;
1197 nscopes = dwarf_getscopes_die(sp_die, &scopes); 1256 die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
1198 while (nscopes-- > 1)
1199 die_find_child(&scopes[nscopes], collect_variables_cb,
1200 (void *)af, &die_mem);
1201 if (scopes)
1202 free(scopes);
1203 1257
1204out: 1258out:
1205 if (strlist__empty(vl->vars)) { 1259 if (strlist__empty(vl->vars)) {
@@ -1391,10 +1445,14 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
1391 1445
1392static int line_range_inline_cb(Dwarf_Die *in_die, void *data) 1446static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
1393{ 1447{
1394 struct dwarf_callback_param *param = data; 1448 find_line_range_by_line(in_die, data);
1395 1449
1396 param->retval = find_line_range_by_line(in_die, param->data); 1450 /*
 1397 return DWARF_CB_ABORT; /* No need to find other instances */ 1451 * We have to check all instances of the inlined function, because
 1452 * some execution paths can be optimized out depending on the
 1453 * function arguments of the instances.
1454 */
1455 return 0;
1398} 1456}
1399 1457
1400/* Search function from function name */ 1458/* Search function from function name */
@@ -1422,15 +1480,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
1422 pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); 1480 pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
1423 lr->start = lf->lno_s; 1481 lr->start = lf->lno_s;
1424 lr->end = lf->lno_e; 1482 lr->end = lf->lno_e;
1425 if (dwarf_func_inline(sp_die)) { 1483 if (dwarf_func_inline(sp_die))
1426 struct dwarf_callback_param _param; 1484 param->retval = die_walk_instances(sp_die,
1427 _param.data = (void *)lf; 1485 line_range_inline_cb, lf);
1428 _param.retval = 0; 1486 else
1429 dwarf_func_inline_instances(sp_die,
1430 line_range_inline_cb,
1431 &_param);
1432 param->retval = _param.retval;
1433 } else
1434 param->retval = find_line_range_by_line(sp_die, lf); 1487 param->retval = find_line_range_by_line(sp_die, lf);
1435 return DWARF_CB_ABORT; 1488 return DWARF_CB_ABORT;
1436 } 1489 }
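Taken together, the find_variable() rework is a two-step lookup: first the scope DIE that find_best_scope() picked for the probe address, then, as a fallback, the CU DIE for globals (0 is used as a dummy address, as in the code above). The shape of that lookup in isolation, assuming the die_find_variable_at() prototype from dwarf-aux.h:

#include <elfutils/libdw.h>
#include "dwarf-aux.h"

/* Sketch: resolve a variable name against a local scope, then CU globals */
static Dwarf_Die *lookup_variable(Dwarf_Die *sc_die, Dwarf_Die *cu_die,
				  const char *name, Dwarf_Addr addr,
				  Dwarf_Die *die_mem)
{
	/* local variables and parameters visible at @addr in this scope */
	if (die_find_variable_at(sc_die, name, addr, die_mem))
		return die_mem;

	/* fall back to variables declared directly under the CU */
	if (die_find_variable_at(cu_die, name, 0, die_mem))
		return die_mem;

	return NULL;
}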
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index c478b42a2473..1132c8f0ce89 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -57,7 +57,7 @@ struct probe_finder {
57 struct perf_probe_event *pev; /* Target probe event */ 57 struct perf_probe_event *pev; /* Target probe event */
58 58
59 /* Callback when a probe point is found */ 59 /* Callback when a probe point is found */
60 int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf); 60 int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf);
61 61
62 /* For function searching */ 62 /* For function searching */
63 int lno; /* Line number */ 63 int lno; /* Line number */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a8b53714542a..469c0264ed29 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1506,7 +1506,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1506 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { 1506 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1507 struct stat st; 1507 struct stat st;
1508 1508
1509 if (stat(dso->name, &st) < 0) 1509 if (lstat(dso->name, &st) < 0)
1510 return -1; 1510 return -1;
1511 1511
1512 if (st.st_uid && (st.st_uid != geteuid())) { 1512 if (st.st_uid && (st.st_uid != geteuid())) {
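Unlike stat(), lstat() does not follow symbolic links, so the ownership check is applied to the /tmp/perf-* entry itself rather than to whatever a symlink might point at. The distinction, reduced to a self-contained check (deliberately stricter than the perf code above, which only compares ownership):

#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

/* Trust the file only if it is not a symlink and is owned by root or by us */
static bool map_file_is_trusted(const char *path)
{
	struct stat st;

	if (lstat(path, &st) < 0)	/* reports on the link itself */
		return false;

	if (S_ISLNK(st.st_mode))
		return false;

	return st.st_uid == 0 || st.st_uid == geteuid();
}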
@@ -2181,27 +2181,22 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
2181 return ret; 2181 return ret;
2182} 2182}
2183 2183
2184struct dso *dso__new_kernel(const char *name) 2184static struct dso*
2185dso__kernel_findnew(struct machine *machine, const char *name,
2186 const char *short_name, int dso_type)
2185{ 2187{
2186 struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); 2188 /*
2187 2189 * The kernel dso could be created by build_id processing.
2188 if (dso != NULL) { 2190 */
2189 dso__set_short_name(dso, "[kernel]"); 2191 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name);
2190 dso->kernel = DSO_TYPE_KERNEL;
2191 }
2192
2193 return dso;
2194}
2195 2192
2196static struct dso *dso__new_guest_kernel(struct machine *machine, 2193 /*
2197 const char *name) 2194 * We need to run this in all cases, since during the build_id
2198{ 2195 * processing we had no idea this was the kernel dso.
2199 char bf[PATH_MAX]; 2196 */
2200 struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf,
2201 sizeof(bf)));
2202 if (dso != NULL) { 2197 if (dso != NULL) {
2203 dso__set_short_name(dso, "[guest.kernel]"); 2198 dso__set_short_name(dso, short_name);
2204 dso->kernel = DSO_TYPE_GUEST_KERNEL; 2199 dso->kernel = dso_type;
2205 } 2200 }
2206 2201
2207 return dso; 2202 return dso;
@@ -2219,24 +2214,36 @@ void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
2219 dso->has_build_id = true; 2214 dso->has_build_id = true;
2220} 2215}
2221 2216
2222static struct dso *machine__create_kernel(struct machine *machine) 2217static struct dso *machine__get_kernel(struct machine *machine)
2223{ 2218{
2224 const char *vmlinux_name = NULL; 2219 const char *vmlinux_name = NULL;
2225 struct dso *kernel; 2220 struct dso *kernel;
2226 2221
2227 if (machine__is_host(machine)) { 2222 if (machine__is_host(machine)) {
2228 vmlinux_name = symbol_conf.vmlinux_name; 2223 vmlinux_name = symbol_conf.vmlinux_name;
2229 kernel = dso__new_kernel(vmlinux_name); 2224 if (!vmlinux_name)
2225 vmlinux_name = "[kernel.kallsyms]";
2226
2227 kernel = dso__kernel_findnew(machine, vmlinux_name,
2228 "[kernel]",
2229 DSO_TYPE_KERNEL);
2230 } else { 2230 } else {
2231 char bf[PATH_MAX];
2232
2231 if (machine__is_default_guest(machine)) 2233 if (machine__is_default_guest(machine))
2232 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 2234 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
2233 kernel = dso__new_guest_kernel(machine, vmlinux_name); 2235 if (!vmlinux_name)
2236 vmlinux_name = machine__mmap_name(machine, bf,
2237 sizeof(bf));
2238
2239 kernel = dso__kernel_findnew(machine, vmlinux_name,
2240 "[guest.kernel]",
2241 DSO_TYPE_GUEST_KERNEL);
2234 } 2242 }
2235 2243
2236 if (kernel != NULL) { 2244 if (kernel != NULL && (!kernel->has_build_id))
2237 dso__read_running_kernel_build_id(kernel, machine); 2245 dso__read_running_kernel_build_id(kernel, machine);
2238 dsos__add(&machine->kernel_dsos, kernel); 2246
2239 }
2240 return kernel; 2247 return kernel;
2241} 2248}
2242 2249
@@ -2340,7 +2347,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
2340 2347
2341int machine__create_kernel_maps(struct machine *machine) 2348int machine__create_kernel_maps(struct machine *machine)
2342{ 2349{
2343 struct dso *kernel = machine__create_kernel(machine); 2350 struct dso *kernel = machine__get_kernel(machine);
2344 2351
2345 if (kernel == NULL || 2352 if (kernel == NULL ||
2346 __machine__create_kernel_maps(machine, kernel) < 0) 2353 __machine__create_kernel_maps(machine, kernel) < 0)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 325ee36a9d29..4f377d92e75a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -155,7 +155,6 @@ struct dso {
155}; 155};
156 156
157struct dso *dso__new(const char *name); 157struct dso *dso__new(const char *name);
158struct dso *dso__new_kernel(const char *name);
159void dso__delete(struct dso *dso); 158void dso__delete(struct dso *dso);
160 159
161int dso__name_len(const struct dso *dso); 160int dso__name_len(const struct dso *dso);
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c
index 5a06538532af..88403cf8396a 100644
--- a/tools/perf/util/ui/browsers/top.c
+++ b/tools/perf/util/ui/browsers/top.c
@@ -208,6 +208,5 @@ int perf_top__tui_browser(struct perf_top *top)
208 }, 208 },
209 }; 209 };
210 210
211 ui_helpline__push("Press <- or ESC to exit");
212 return perf_top_browser__run(&browser); 211 return perf_top_browser__run(&browser);
213} 212}
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 94c2cf0a98b8..e8a03aceceb1 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -24,7 +24,7 @@
24 24
25# Set the following to `true' to make an unstripped, unoptimized 25# Set the following to `true' to make an unstripped, unoptimized
26# binary. Leave this set to `false' for production use. 26# binary. Leave this set to `false' for production use.
27DEBUG ?= false 27DEBUG ?= true
28 28
29# make the build silent. Set this to something else to make it noisy again. 29# make the build silent. Set this to something else to make it noisy again.
30V ?= false 30V ?= false
@@ -35,7 +35,7 @@ NLS ?= true
35 35
36# Set the following to 'true' to build/install the 36# Set the following to 'true' to build/install the
37# cpufreq-bench benchmarking tool 37# cpufreq-bench benchmarking tool
38CPUFRQ_BENCH ?= true 38CPUFREQ_BENCH ?= true
39 39
40# Prefix to the directories we're installing to 40# Prefix to the directories we're installing to
41DESTDIR ?= 41DESTDIR ?=
@@ -137,9 +137,10 @@ CFLAGS += -pipe
137ifeq ($(strip $(NLS)),true) 137ifeq ($(strip $(NLS)),true)
138 INSTALL_NLS += install-gmo 138 INSTALL_NLS += install-gmo
139 COMPILE_NLS += create-gmo 139 COMPILE_NLS += create-gmo
140 CFLAGS += -DNLS
140endif 141endif
141 142
142ifeq ($(strip $(CPUFRQ_BENCH)),true) 143ifeq ($(strip $(CPUFREQ_BENCH)),true)
143 INSTALL_BENCH += install-bench 144 INSTALL_BENCH += install-bench
144 COMPILE_BENCH += compile-bench 145 COMPILE_BENCH += compile-bench
145endif 146endif
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index dbf13998462a..3326217dd311 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -1,10 +1,10 @@
1default: all 1default: all
2 2
3centrino-decode: centrino-decode.c 3centrino-decode: ../i386/centrino-decode.c
4 $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c 4 $(CC) $(CFLAGS) -o $@ $<
5 5
6powernow-k8-decode: powernow-k8-decode.c 6powernow-k8-decode: ../i386/powernow-k8-decode.c
7 $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c 7 $(CC) $(CFLAGS) -o $@ $<
8 8
9all: centrino-decode powernow-k8-decode 9all: centrino-decode powernow-k8-decode
10 10
diff --git a/tools/power/cpupower/debug/x86_64/centrino-decode.c b/tools/power/cpupower/debug/x86_64/centrino-decode.c
deleted file mode 120000
index 26fb3f1d8fc7..000000000000
--- a/tools/power/cpupower/debug/x86_64/centrino-decode.c
+++ /dev/null
@@ -1 +0,0 @@
1../i386/centrino-decode.c \ No newline at end of file
diff --git a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c b/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c
deleted file mode 120000
index eb30c79cf9df..000000000000
--- a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c
+++ /dev/null
@@ -1 +0,0 @@
1../i386/powernow-k8-decode.c \ No newline at end of file
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1
index 3194811d58f5..bb60a8d1e45a 100644
--- a/tools/power/cpupower/man/cpupower-frequency-info.1
+++ b/tools/power/cpupower/man/cpupower-frequency-info.1
@@ -1,10 +1,10 @@
1.TH "cpufreq-info" "1" "0.1" "Mattia Dongili" "" 1.TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" ""
2.SH "NAME" 2.SH "NAME"
3.LP 3.LP
4cpufreq\-info \- Utility to retrieve cpufreq kernel information 4cpupower frequency\-info \- Utility to retrieve cpufreq kernel information
5.SH "SYNTAX" 5.SH "SYNTAX"
6.LP 6.LP
7cpufreq\-info [\fIoptions\fP] 7cpupower [ \-c cpulist ] frequency\-info [\fIoptions\fP]
8.SH "DESCRIPTION" 8.SH "DESCRIPTION"
9.LP 9.LP
10A small tool which prints out cpufreq information helpful to developers and interested users. 10A small tool which prints out cpufreq information helpful to developers and interested users.
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1
index 26e3e13eee3b..685f469093ad 100644
--- a/tools/power/cpupower/man/cpupower-frequency-set.1
+++ b/tools/power/cpupower/man/cpupower-frequency-set.1
@@ -1,13 +1,13 @@
1.TH "cpufreq-set" "1" "0.1" "Mattia Dongili" "" 1.TH "cpupower-freqency-set" "1" "0.1" "Mattia Dongili" ""
2.SH "NAME" 2.SH "NAME"
3.LP 3.LP
4cpufreq\-set \- A small tool which allows to modify cpufreq settings. 4cpupower frequency\-set \- A small tool which allows you to modify cpufreq settings.
5.SH "SYNTAX" 5.SH "SYNTAX"
6.LP 6.LP
7cpufreq\-set [\fIoptions\fP] 7cpupower [ \-c cpu ] frequency\-set [\fIoptions\fP]
8.SH "DESCRIPTION" 8.SH "DESCRIPTION"
9.LP 9.LP
10cpufreq\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. 10cpupower frequency\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time.
11.SH "OPTIONS" 11.SH "OPTIONS"
12.LP 12.LP
13.TP 13.TP
diff --git a/tools/power/cpupower/man/cpupower.1 b/tools/power/cpupower/man/cpupower.1
index 78c20feab85c..baf741d06e82 100644
--- a/tools/power/cpupower/man/cpupower.1
+++ b/tools/power/cpupower/man/cpupower.1
@@ -3,7 +3,7 @@
3cpupower \- Shows and sets processor power related values 3cpupower \- Shows and sets processor power related values
4.SH SYNOPSIS 4.SH SYNOPSIS
5.ft B 5.ft B
6.B cpupower [ \-c cpulist ] subcommand [ARGS] 6.B cpupower [ \-c cpulist ] <command> [ARGS]
7 7
8.B cpupower \-v|\-\-version 8.B cpupower \-v|\-\-version
9 9
@@ -13,24 +13,24 @@ cpupower \- Shows and sets processor power related values
13\fBcpupower \fP is a collection of tools to examine and tune power saving 13\fBcpupower \fP is a collection of tools to examine and tune power saving
14related features of your processor. 14related features of your processor.
15 15
16The manpages of the subcommands (cpupower\-<subcommand>(1)) provide detailed 16The manpages of the commands (cpupower\-<command>(1)) provide detailed
17descriptions of supported features. Run \fBcpupower help\fP to get an overview 17descriptions of supported features. Run \fBcpupower help\fP to get an overview
18of supported subcommands. 18of supported commands.
19 19
20.SH Options 20.SH Options
21.PP 21.PP
22\-\-help, \-h 22\-\-help, \-h
23.RS 4 23.RS 4
24Shows supported subcommands and general usage. 24Shows supported commands and general usage.
25.RE 25.RE
26.PP 26.PP
27\-\-cpu cpulist, \-c cpulist 27\-\-cpu cpulist, \-c cpulist
28.RS 4 28.RS 4
29Only show or set values for specific cores. 29Only show or set values for specific cores.
30This option is not supported by all subcommands, details can be found in the 30This option is not supported by all commands, details can be found in the
31manpages of the subcommands. 31manpages of the commands.
32 32
33Some subcommands access all cores (typically the *\-set commands), some only 33Some commands access all cores (typically the *\-set commands), some only
34the first core (typically the *\-info commands) by default. 34the first core (typically the *\-info commands) by default.
35 35
36The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via 36The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via
diff --git a/tools/power/cpupower/utils/builtin.h b/tools/power/cpupower/utils/builtin.h
index c870ffba5219..c10496fbe3c6 100644
--- a/tools/power/cpupower/utils/builtin.h
+++ b/tools/power/cpupower/utils/builtin.h
@@ -8,11 +8,4 @@ extern int cmd_freq_info(int argc, const char **argv);
8extern int cmd_idle_info(int argc, const char **argv); 8extern int cmd_idle_info(int argc, const char **argv);
9extern int cmd_monitor(int argc, const char **argv); 9extern int cmd_monitor(int argc, const char **argv);
10 10
11extern void set_help(void);
12extern void info_help(void);
13extern void freq_set_help(void);
14extern void freq_info_help(void);
15extern void idle_info_help(void);
16extern void monitor_help(void);
17
18#endif 11#endif
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index 5a1d25f056b3..28953c9a7bd5 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -510,37 +510,6 @@ static int get_latency(unsigned int cpu, unsigned int human)
510 return 0; 510 return 0;
511} 511}
512 512
513void freq_info_help(void)
514{
515 printf(_("Usage: cpupower freqinfo [options]\n"));
516 printf(_("Options:\n"));
517 printf(_(" -e, --debug Prints out debug information [default]\n"));
518 printf(_(" -f, --freq Get frequency the CPU currently runs at, according\n"
519 " to the cpufreq core *\n"));
520 printf(_(" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
521 " it from hardware (only available to root) *\n"));
522 printf(_(" -l, --hwlimits Determine the minimum and maximum CPU frequency allowed *\n"));
523 printf(_(" -d, --driver Determines the used cpufreq kernel driver *\n"));
524 printf(_(" -p, --policy Gets the currently used cpufreq policy *\n"));
525 printf(_(" -g, --governors Determines available cpufreq governors *\n"));
526 printf(_(" -r, --related-cpus Determines which CPUs run at the same hardware frequency *\n"));
527 printf(_(" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
528 " coordinated by software *\n"));
529 printf(_(" -s, --stats Shows cpufreq statistics if available\n"));
530 printf(_(" -y, --latency Determines the maximum latency on CPU frequency changes *\n"));
531 printf(_(" -b, --boost Checks for turbo or boost modes *\n"));
532 printf(_(" -o, --proc Prints out information like provided by the /proc/cpufreq\n"
533 " interface in 2.4. and early 2.6. kernels\n"));
534 printf(_(" -m, --human human-readable output for the -f, -w, -s and -y parameters\n"));
535 printf(_(" -h, --help Prints out this screen\n"));
536
537 printf("\n");
538 printf(_("If no argument is given, full output about\n"
539 "cpufreq is printed which is useful e.g. for reporting bugs.\n\n"));
540 printf(_("By default info of CPU 0 is shown which can be overridden\n"
541 "with the cpupower --cpu main command option.\n"));
542}
543
544static struct option info_opts[] = { 513static struct option info_opts[] = {
545 { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, 514 { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'},
546 { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, 515 { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'},
@@ -556,7 +525,6 @@ static struct option info_opts[] = {
556 { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, 525 { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'},
557 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 526 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
558 { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, 527 { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'},
559 { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
560 { }, 528 { },
561}; 529};
562 530
@@ -570,16 +538,12 @@ int cmd_freq_info(int argc, char **argv)
570 int output_param = 0; 538 int output_param = 0;
571 539
572 do { 540 do {
573 ret = getopt_long(argc, argv, "hoefwldpgrasmyb", info_opts, NULL); 541 ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL);
574 switch (ret) { 542 switch (ret) {
575 case '?': 543 case '?':
576 output_param = '?'; 544 output_param = '?';
577 cont = 0; 545 cont = 0;
578 break; 546 break;
579 case 'h':
580 output_param = 'h';
581 cont = 0;
582 break;
583 case -1: 547 case -1:
584 cont = 0; 548 cont = 0;
585 break; 549 break;
@@ -642,11 +606,7 @@ int cmd_freq_info(int argc, char **argv)
642 return -EINVAL; 606 return -EINVAL;
643 case '?': 607 case '?':
644 printf(_("invalid or unknown argument\n")); 608 printf(_("invalid or unknown argument\n"));
645 freq_info_help();
646 return -EINVAL; 609 return -EINVAL;
647 case 'h':
648 freq_info_help();
649 return EXIT_SUCCESS;
650 case 'o': 610 case 'o':
651 proc_cpufreq_output(); 611 proc_cpufreq_output();
652 return EXIT_SUCCESS; 612 return EXIT_SUCCESS;
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 5f783622bf31..dd1539eb8c63 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -20,34 +20,11 @@
20 20
21#define NORM_FREQ_LEN 32 21#define NORM_FREQ_LEN 32
22 22
23void freq_set_help(void)
24{
25 printf(_("Usage: cpupower frequency-set [options]\n"));
26 printf(_("Options:\n"));
27 printf(_(" -d FREQ, --min FREQ new minimum CPU frequency the governor may select\n"));
28 printf(_(" -u FREQ, --max FREQ new maximum CPU frequency the governor may select\n"));
29 printf(_(" -g GOV, --governor GOV new cpufreq governor\n"));
30 printf(_(" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
31 " governor to be available and loaded\n"));
32 printf(_(" -r, --related Switches all hardware-related CPUs\n"));
33 printf(_(" -h, --help Prints out this screen\n"));
34 printf("\n");
35 printf(_("Notes:\n"
36 "1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"));
37 printf(_("2. The -f FREQ, --freq FREQ parameter cannot be combined with any other parameter\n"
38 " except the -c CPU, --cpu CPU parameter\n"
39 "3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
40 " by postfixing the value with the wanted unit name, without any space\n"
41 " (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"));
42
43}
44
45static struct option set_opts[] = { 23static struct option set_opts[] = {
46 { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, 24 { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'},
47 { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, 25 { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'},
48 { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, 26 { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'},
49 { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, 27 { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'},
50 { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
51 { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, 28 { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'},
52 { }, 29 { },
53}; 30};
@@ -80,7 +57,6 @@ const struct freq_units def_units[] = {
80static void print_unknown_arg(void) 57static void print_unknown_arg(void)
81{ 58{
82 printf(_("invalid or unknown argument\n")); 59 printf(_("invalid or unknown argument\n"));
83 freq_set_help();
84} 60}
85 61
86static unsigned long string_to_frequency(const char *str) 62static unsigned long string_to_frequency(const char *str)
@@ -231,14 +207,11 @@ int cmd_freq_set(int argc, char **argv)
231 207
232 /* parameter parsing */ 208 /* parameter parsing */
233 do { 209 do {
234 ret = getopt_long(argc, argv, "d:u:g:f:hr", set_opts, NULL); 210 ret = getopt_long(argc, argv, "d:u:g:f:r", set_opts, NULL);
235 switch (ret) { 211 switch (ret) {
236 case '?': 212 case '?':
237 print_unknown_arg(); 213 print_unknown_arg();
238 return -EINVAL; 214 return -EINVAL;
239 case 'h':
240 freq_set_help();
241 return 0;
242 case -1: 215 case -1:
243 cont = 0; 216 cont = 0;
244 break; 217 break;
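The two hunks above drop the -h/--help case and its option-table entry, leaving unknown options as the only path into the error message. A minimal, self-contained sketch of the resulting getopt_long pattern; the option names mirror set_opts, everything else here is illustrative rather than the tool's code:

#include <errno.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static struct option opts[] = {
	{ "min",      required_argument, NULL, 'd' },
	{ "max",      required_argument, NULL, 'u' },
	{ "governor", required_argument, NULL, 'g' },
	{ "freq",     required_argument, NULL, 'f' },
	{ "related",  no_argument,       NULL, 'r' },
	{ 0 },
};

int main(int argc, char **argv)
{
	int ret;

	/* no 'h' in the optstring anymore; help now comes from the man page */
	while ((ret = getopt_long(argc, argv, "d:u:g:f:r", opts, NULL)) != -1) {
		switch (ret) {
		case '?':
			printf("invalid or unknown argument\n");
			return -EINVAL;
		default:
			printf("option '%c' with arg '%s'\n", ret,
			       optarg ? optarg : "(none)");
		}
	}
	return EXIT_SUCCESS;
}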
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index 70da3574f1e9..b028267c1376 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -139,30 +139,14 @@ static void proc_cpuidle_cpu_output(unsigned int cpu)
139 } 139 }
140} 140}
141 141
142/* --freq / -f */
143
144void idle_info_help(void)
145{
146 printf(_ ("Usage: cpupower idleinfo [options]\n"));
147 printf(_ ("Options:\n"));
148 printf(_ (" -s, --silent Only show general C-state information\n"));
149 printf(_ (" -o, --proc Prints out information like provided by the /proc/acpi/processor/*/power\n"
150 " interface in older kernels\n"));
151 printf(_ (" -h, --help Prints out this screen\n"));
152
153 printf("\n");
154}
155
156static struct option info_opts[] = { 142static struct option info_opts[] = {
157 { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, 143 { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'},
158 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 144 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
159 { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
160 { }, 145 { },
161}; 146};
162 147
163static inline void cpuidle_exit(int fail) 148static inline void cpuidle_exit(int fail)
164{ 149{
165 idle_info_help();
166 exit(EXIT_FAILURE); 150 exit(EXIT_FAILURE);
167} 151}
168 152
@@ -174,7 +158,7 @@ int cmd_idle_info(int argc, char **argv)
174 unsigned int cpu = 0; 158 unsigned int cpu = 0;
175 159
176 do { 160 do {
177 ret = getopt_long(argc, argv, "hos", info_opts, NULL); 161 ret = getopt_long(argc, argv, "os", info_opts, NULL);
178 if (ret == -1) 162 if (ret == -1)
179 break; 163 break;
180 switch (ret) { 164 switch (ret) {
@@ -182,10 +166,6 @@ int cmd_idle_info(int argc, char **argv)
182 output_param = '?'; 166 output_param = '?';
183 cont = 0; 167 cont = 0;
184 break; 168 break;
185 case 'h':
186 output_param = 'h';
187 cont = 0;
188 break;
189 case 's': 169 case 's':
190 verbose = 0; 170 verbose = 0;
191 break; 171 break;
@@ -211,8 +191,6 @@ int cmd_idle_info(int argc, char **argv)
211 case '?': 191 case '?':
212 printf(_("invalid or unknown argument\n")); 192 printf(_("invalid or unknown argument\n"));
213 cpuidle_exit(EXIT_FAILURE); 193 cpuidle_exit(EXIT_FAILURE);
214 case 'h':
215 cpuidle_exit(EXIT_SUCCESS);
216 } 194 }
217 195
218 /* Default is: show output of CPU 0 only */ 196 /* Default is: show output of CPU 0 only */
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index 85253cb7600e..3f68632c28c7 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -16,31 +16,16 @@
16#include "helpers/helpers.h" 16#include "helpers/helpers.h"
17#include "helpers/sysfs.h" 17#include "helpers/sysfs.h"
18 18
19void info_help(void)
20{
21 printf(_("Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"));
22 printf(_("Options:\n"));
23 printf(_(" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
24 " Intel models [0-15], see manpage for details\n"));
25 printf(_(" -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"));
26 printf(_(" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"));
27 printf(_(" -h, --help Prints out this screen\n"));
28 printf(_("\nPassing no option will show all info, by default only on core 0\n"));
29 printf("\n");
30}
31
32static struct option set_opts[] = { 19static struct option set_opts[] = {
33 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 20 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
34 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 21 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
35 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 22 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
36 { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
37 { }, 23 { },
38}; 24};
39 25
40static void print_wrong_arg_exit(void) 26static void print_wrong_arg_exit(void)
41{ 27{
42 printf(_("invalid or unknown argument\n")); 28 printf(_("invalid or unknown argument\n"));
43 info_help();
44 exit(EXIT_FAILURE); 29 exit(EXIT_FAILURE);
45} 30}
46 31
@@ -64,11 +49,8 @@ int cmd_info(int argc, char **argv)
64 textdomain(PACKAGE); 49 textdomain(PACKAGE);
65 50
66 /* parameter parsing */ 51 /* parameter parsing */
67 while ((ret = getopt_long(argc, argv, "msbh", set_opts, NULL)) != -1) { 52 while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) {
68 switch (ret) { 53 switch (ret) {
69 case 'h':
70 info_help();
71 return 0;
72 case 'b': 54 case 'b':
73 if (params.perf_bias) 55 if (params.perf_bias)
74 print_wrong_arg_exit(); 56 print_wrong_arg_exit();
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index bc1b391e46f0..dc4de3762111 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -17,30 +17,16 @@
17#include "helpers/sysfs.h" 17#include "helpers/sysfs.h"
18#include "helpers/bitmask.h" 18#include "helpers/bitmask.h"
19 19
20void set_help(void)
21{
22 printf(_("Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"));
23 printf(_("Options:\n"));
24 printf(_(" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
25 " Intel models [0-15], see manpage for details\n"));
26 printf(_(" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"));
27 printf(_(" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler policy.\n"));
28 printf(_(" -h, --help Prints out this screen\n"));
29 printf("\n");
30}
31
32static struct option set_opts[] = { 20static struct option set_opts[] = {
33 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 21 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
34 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 22 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
35 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 23 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
36 { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
37 { }, 24 { },
38}; 25};
39 26
40static void print_wrong_arg_exit(void) 27static void print_wrong_arg_exit(void)
41{ 28{
42 printf(_("invalid or unknown argument\n")); 29 printf(_("invalid or unknown argument\n"));
43 set_help();
44 exit(EXIT_FAILURE); 30 exit(EXIT_FAILURE);
45} 31}
46 32
@@ -66,12 +52,9 @@ int cmd_set(int argc, char **argv)
66 52
67 params.params = 0; 53 params.params = 0;
68 /* parameter parsing */ 54 /* parameter parsing */
69 while ((ret = getopt_long(argc, argv, "m:s:b:h", 55 while ((ret = getopt_long(argc, argv, "m:s:b:",
70 set_opts, NULL)) != -1) { 56 set_opts, NULL)) != -1) {
71 switch (ret) { 57 switch (ret) {
72 case 'h':
73 set_help();
74 return 0;
75 case 'b': 58 case 'b':
76 if (params.perf_bias) 59 if (params.perf_bias)
77 print_wrong_arg_exit(); 60 print_wrong_arg_exit();
@@ -110,10 +93,8 @@ int cmd_set(int argc, char **argv)
110 } 93 }
111 }; 94 };
112 95
113 if (!params.params) { 96 if (!params.params)
114 set_help(); 97 print_wrong_arg_exit();
115 return -EINVAL;
116 }
117 98
118 if (params.sched_mc) { 99 if (params.sched_mc) {
119 ret = sysfs_set_sched("mc", sched_mc); 100 ret = sysfs_set_sched("mc", sched_mc);
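With the help fallback removed, running cpupower set without any recognized option now takes the same error path as an unknown option. A rough sketch of that "was anything set at all" check, assuming an aggregate flags field like params.params; the type layout is illustrative, not the tool's exact definition:

#include <stdio.h>
#include <stdlib.h>

union param_flags {
	struct {
		unsigned int sched_mc:1;
		unsigned int sched_smt:1;
		unsigned int perf_bias:1;
	};
	unsigned int params;		/* non-zero iff any flag above is set */
};

int main(void)
{
	union param_flags params = { .params = 0 };

	/* ...option parsing would set e.g. params.perf_bias = 1 here... */

	if (!params.params) {		/* nothing requested -> plain error, no help dump */
		printf("invalid or unknown argument\n");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}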
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 5844ae0f786f..52bee591c1c5 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -11,6 +11,7 @@
11#include <stdlib.h> 11#include <stdlib.h>
12#include <string.h> 12#include <string.h>
13#include <unistd.h> 13#include <unistd.h>
14#include <errno.h>
14 15
15#include "builtin.h" 16#include "builtin.h"
16#include "helpers/helpers.h" 17#include "helpers/helpers.h"
@@ -19,13 +20,12 @@
19struct cmd_struct { 20struct cmd_struct {
20 const char *cmd; 21 const char *cmd;
21 int (*main)(int, const char **); 22 int (*main)(int, const char **);
22 void (*usage)(void);
23 int needs_root; 23 int needs_root;
24}; 24};
25 25
26#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) 26#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
27 27
28int cmd_help(int argc, const char **argv); 28static int cmd_help(int argc, const char **argv);
29 29
30/* Global cpu_info object available for all binaries 30/* Global cpu_info object available for all binaries
31 * Info only retrieved from CPU 0 31 * Info only retrieved from CPU 0
@@ -44,55 +44,66 @@ int be_verbose;
44static void print_help(void); 44static void print_help(void);
45 45
46static struct cmd_struct commands[] = { 46static struct cmd_struct commands[] = {
47 { "frequency-info", cmd_freq_info, freq_info_help, 0 }, 47 { "frequency-info", cmd_freq_info, 0 },
48 { "frequency-set", cmd_freq_set, freq_set_help, 1 }, 48 { "frequency-set", cmd_freq_set, 1 },
49 { "idle-info", cmd_idle_info, idle_info_help, 0 }, 49 { "idle-info", cmd_idle_info, 0 },
50 { "set", cmd_set, set_help, 1 }, 50 { "set", cmd_set, 1 },
51 { "info", cmd_info, info_help, 0 }, 51 { "info", cmd_info, 0 },
52 { "monitor", cmd_monitor, monitor_help, 0 }, 52 { "monitor", cmd_monitor, 0 },
53 { "help", cmd_help, print_help, 0 }, 53 { "help", cmd_help, 0 },
54 /* { "bench", cmd_bench, NULL, 1 }, */ 54 /* { "bench", cmd_bench, 1 }, */
55}; 55};
56 56
57int cmd_help(int argc, const char **argv)
58{
59 unsigned int i;
60
61 if (argc > 1) {
62 for (i = 0; i < ARRAY_SIZE(commands); i++) {
63 struct cmd_struct *p = commands + i;
64 if (strcmp(p->cmd, argv[1]))
65 continue;
66 if (p->usage) {
67 p->usage();
68 return EXIT_SUCCESS;
69 }
70 }
71 }
72 print_help();
73 if (argc == 1)
74 return EXIT_SUCCESS; /* cpupower help */
75 return EXIT_FAILURE;
76}
77
78static void print_help(void) 57static void print_help(void)
79{ 58{
80 unsigned int i; 59 unsigned int i;
81 60
82#ifdef DEBUG 61#ifdef DEBUG
83 printf(_("cpupower [ -d ][ -c cpulist ] subcommand [ARGS]\n")); 62 printf(_("Usage:\tcpupower [-d|--debug] [-c|--cpu cpulist ] <command> [<args>]\n"));
84 printf(_(" -d, --debug May increase output (stderr) on some subcommands\n"));
85#else 63#else
86 printf(_("cpupower [ -c cpulist ] subcommand [ARGS]\n")); 64 printf(_("Usage:\tcpupower [-c|--cpu cpulist ] <command> [<args>]\n"));
87#endif 65#endif
88 printf(_("cpupower --version\n")); 66 printf(_("Supported commands are:\n"));
89 printf(_("Supported subcommands are:\n"));
90 for (i = 0; i < ARRAY_SIZE(commands); i++) 67 for (i = 0; i < ARRAY_SIZE(commands); i++)
91 printf("\t%s\n", commands[i].cmd); 68 printf("\t%s\n", commands[i].cmd);
92 printf(_("\nSome subcommands can make use of the -c cpulist option.\n")); 69 printf(_("\nNot all commands can make use of the -c cpulist option.\n"));
93 printf(_("Look at the general cpupower manpage how to use it\n")); 70 printf(_("\nUse 'cpupower help <command>' for getting help for above commands.\n"));
94 printf(_("and read up the subcommand's manpage whether it is supported.\n")); 71}
95 printf(_("\nUse cpupower help subcommand for getting help for above subcommands.\n")); 72
73static int print_man_page(const char *subpage)
74{
75 int len;
76 char *page;
77
78 len = 10; /* enough for "cpupower-" */
79 if (subpage != NULL)
80 len += strlen(subpage);
81
82 page = malloc(len);
83 if (!page)
84 return -ENOMEM;
85
86 sprintf(page, "cpupower");
87 if ((subpage != NULL) && strcmp(subpage, "help")) {
88 strcat(page, "-");
89 strcat(page, subpage);
90 }
91
92 execlp("man", "man", page, NULL);
93
94 /* should not be reached */
95 return -EINVAL;
96}
97
98static int cmd_help(int argc, const char **argv)
99{
100 if (argc > 1) {
101 print_man_page(argv[1]); /* exits within execlp() */
102 return EXIT_FAILURE;
103 }
104
105 print_help();
106 return EXIT_SUCCESS;
96} 107}
97 108
98static void print_version(void) 109static void print_version(void)
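cmd_help() now defers per-subcommand help to the installed man pages via execlp(). A standalone sketch of that dispatch, assuming pages are named cpupower-<subcommand> as print_man_page() builds them; buffer handling is simplified here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char page[64] = "cpupower";

	if (argc > 1 && strcmp(argv[1], "help")) {
		strcat(page, "-");
		strncat(page, argv[1], sizeof(page) - strlen(page) - 1);
	}
	execlp("man", "man", page, NULL);	/* only returns on failure */
	perror("execlp");
	return EXIT_FAILURE;
}

Invoked with "frequency-set" as the first argument this execs man cpupower-frequency-set, which is what cpupower help frequency-set now resolves to.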
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 592ee362b877..2747e738efb0 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -16,11 +16,20 @@
16#include "helpers/bitmask.h" 16#include "helpers/bitmask.h"
17 17
18/* Internationalization ****************************/ 18/* Internationalization ****************************/
19#ifdef NLS
20
19#define _(String) gettext(String) 21#define _(String) gettext(String)
20#ifndef gettext_noop 22#ifndef gettext_noop
21#define gettext_noop(String) String 23#define gettext_noop(String) String
22#endif 24#endif
23#define N_(String) gettext_noop(String) 25#define N_(String) gettext_noop(String)
26
27#else /* !NLS */
28
29#define _(String) String
30#define N_(String) String
31
32#endif
24/* Internationalization ****************************/ 33/* Internationalization ****************************/
25 34
26extern int run_as_root; 35extern int run_as_root;
@@ -96,6 +105,9 @@ struct cpupower_topology {
96 int pkg; 105 int pkg;
97 int core; 106 int core;
98 int cpu; 107 int cpu;
108
109 /* flags */
110 unsigned int is_online:1;
99 } *core_info; 111 } *core_info;
100}; 112};
101 113
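The new #ifdef lets the gettext markers compile away when NLS is not defined, so translatable strings build either way. A self-contained sketch of the same pattern; the "cpupower" text domain and the setlocale() call are assumptions for this example, not taken from helpers.h:

#include <stdio.h>

#ifdef NLS
#include <libintl.h>
#include <locale.h>
#define _(String) gettext(String)
#define N_(String) (String)
#else /* !NLS */
#define _(String) (String)
#define N_(String) (String)
#endif

int main(void)
{
#ifdef NLS
	setlocale(LC_ALL, "");
	textdomain("cpupower");	/* assumed domain name, for illustration only */
#endif
	printf(_("invalid or unknown argument\n"));
	return 0;
}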
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 55e2466674c6..c6343024a611 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -56,6 +56,56 @@ static unsigned int sysfs_write_file(const char *path,
56 return (unsigned int) numwrite; 56 return (unsigned int) numwrite;
57} 57}
58 58
59/*
60 * Detect whether a CPU is online
61 *
62 * Returns:
63 * 1 -> if CPU is online
64 * 0 -> if CPU is offline
65 * negative errno values in error case
66 */
67int sysfs_is_cpu_online(unsigned int cpu)
68{
69 char path[SYSFS_PATH_MAX];
70 int fd;
71 ssize_t numread;
72 unsigned long long value;
73 char linebuf[MAX_LINE_LEN];
74 char *endp;
75 struct stat statbuf;
76
77 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
78
79 if (stat(path, &statbuf) != 0)
80 return 0;
81
82 /*
83 * kernel without CONFIG_HOTPLUG_CPU
84 * -> cpuX directory exists, but not cpuX/online file
85 */
86 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
87 if (stat(path, &statbuf) != 0)
88 return 1;
89
90 fd = open(path, O_RDONLY);
91 if (fd == -1)
92 return -errno;
93
94 numread = read(fd, linebuf, MAX_LINE_LEN - 1);
95 if (numread < 1) {
96 close(fd);
97 return -EIO;
98 }
99 linebuf[numread] = '\0';
100 close(fd);
101
102 value = strtoull(linebuf, &endp, 0);
103 if (value > 1)
104 return -EINVAL;
105
106 return value;
107}
108
59/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ 109/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
60 110
61/* 111/*
diff --git a/tools/power/cpupower/utils/helpers/sysfs.h b/tools/power/cpupower/utils/helpers/sysfs.h
index f9373e090637..8cb797bbceb0 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.h
+++ b/tools/power/cpupower/utils/helpers/sysfs.h
@@ -7,6 +7,8 @@
7 7
8extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); 8extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
9 9
10extern int sysfs_is_cpu_online(unsigned int cpu);
11
10extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, 12extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
11 unsigned int idlestate); 13 unsigned int idlestate);
12extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu, 14extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index 385ee5c7570c..4eae2c47ba48 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -41,6 +41,8 @@ struct cpuid_core_info {
41 unsigned int pkg; 41 unsigned int pkg;
42 unsigned int thread; 42 unsigned int thread;
43 unsigned int cpu; 43 unsigned int cpu;
44 /* flags */
45 unsigned int is_online:1;
44}; 46};
45 47
46static int __compare(const void *t1, const void *t2) 48static int __compare(const void *t1, const void *t2)
@@ -78,6 +80,8 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
78 return -ENOMEM; 80 return -ENOMEM;
79 cpu_top->pkgs = cpu_top->cores = 0; 81 cpu_top->pkgs = cpu_top->cores = 0;
80 for (cpu = 0; cpu < cpus; cpu++) { 82 for (cpu = 0; cpu < cpus; cpu++) {
83 cpu_top->core_info[cpu].cpu = cpu;
84 cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
81 cpu_top->core_info[cpu].pkg = 85 cpu_top->core_info[cpu].pkg =
82 sysfs_topology_read_file(cpu, "physical_package_id"); 86 sysfs_topology_read_file(cpu, "physical_package_id");
83 if ((int)cpu_top->core_info[cpu].pkg != -1 && 87 if ((int)cpu_top->core_info[cpu].pkg != -1 &&
@@ -85,7 +89,6 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
85 cpu_top->pkgs = cpu_top->core_info[cpu].pkg; 89 cpu_top->pkgs = cpu_top->core_info[cpu].pkg;
86 cpu_top->core_info[cpu].core = 90 cpu_top->core_info[cpu].core =
87 sysfs_topology_read_file(cpu, "core_id"); 91 sysfs_topology_read_file(cpu, "core_id");
88 cpu_top->core_info[cpu].cpu = cpu;
89 } 92 }
90 cpu_top->pkgs++; 93 cpu_top->pkgs++;
91 94
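get_cpu_topology() now fills in the CPU number and the new is_online flag for every entry, even ones with invalid topology data. A small sketch of how a consumer can skip offline entries via that flag; the field names follow the diff, the sample values are made up:

#include <stdio.h>

struct core_info {
	int pkg;
	int core;
	int cpu;
	unsigned int is_online:1;	/* flag added by the hunk above */
};

int main(void)
{
	struct core_info cores[] = {
		{ .pkg = 0, .core = 0, .cpu = 0, .is_online = 1 },
		{ .pkg = 0, .core = 1, .cpu = 1, .is_online = 0 },	/* offline */
	};
	unsigned int i;

	for (i = 0; i < sizeof(cores) / sizeof(cores[0]); i++) {
		if (!cores[i].is_online) {
			printf("cpu%d: *is offline\n", cores[i].cpu);
			continue;
		}
		printf("cpu%d: pkg %d core %d\n",
		       cores[i].cpu, cores[i].pkg, cores[i].core);
	}
	return 0;
}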
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index d048b96a6155..bcd22a1a3970 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -134,7 +134,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
134 /* Assume idle state count is the same for all CPUs */ 134 /* Assume idle state count is the same for all CPUs */
135 cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0); 135 cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0);
136 136
137 if (cpuidle_sysfs_monitor.hw_states_num == 0) 137 if (cpuidle_sysfs_monitor.hw_states_num <= 0)
138 return NULL; 138 return NULL;
139 139
140 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { 140 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index ba4bf068380d..0d6571e418db 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -43,6 +43,12 @@ static struct cpupower_topology cpu_top;
43/* ToDo: Document this in the manpage */ 43/* ToDo: Document this in the manpage */
44static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; 44static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', };
45 45
46static void print_wrong_arg_exit(void)
47{
48 printf(_("invalid or unknown argument\n"));
49 exit(EXIT_FAILURE);
50}
51
46long long timespec_diff_us(struct timespec start, struct timespec end) 52long long timespec_diff_us(struct timespec start, struct timespec end)
47{ 53{
48 struct timespec temp; 54 struct timespec temp;
@@ -56,21 +62,6 @@ long long timespec_diff_us(struct timespec start, struct timespec end)
56 return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); 62 return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000);
57} 63}
58 64
59void monitor_help(void)
60{
61 printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] command\n"));
62 printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] [ -i interval_sec ]\n"));
63 printf(_("cpupower monitor: -l\n"));
64 printf(_("\t command: pass an arbitrary command to measure specific workload\n"));
65 printf(_("\t -i: time intervall to measure for in seconds (default 1)\n"));
66 printf(_("\t -l: list available CPU sleep monitors (for use with -m)\n"));
67 printf(_("\t -m: show specific CPU sleep monitors only (in same order)\n"));
68 printf(_("\t -h: print this help\n"));
69 printf("\n");
70 printf(_("only one of: -l, -m are allowed\nIf none of them is passed,"));
71 printf(_(" all supported monitors are shown\n"));
72}
73
74void print_n_spaces(int n) 65void print_n_spaces(int n)
75{ 66{
76 int x; 67 int x;
@@ -149,6 +140,10 @@ void print_results(int topology_depth, int cpu)
149 unsigned long long result; 140 unsigned long long result;
150 cstate_t s; 141 cstate_t s;
151 142
143 /* Be careful: CPUs may have been re-sorted by pkg value; do not just use cpu as the index */
144 if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu))
145 return;
146
152 if (topology_depth > 2) 147 if (topology_depth > 2)
153 printf("%4d|", cpu_top.core_info[cpu].pkg); 148 printf("%4d|", cpu_top.core_info[cpu].pkg);
154 if (topology_depth > 1) 149 if (topology_depth > 1)
@@ -190,9 +185,13 @@ void print_results(int topology_depth, int cpu)
190 } 185 }
191 } 186 }
192 } 187 }
193 /* cpu offline */ 188 /*
194 if (cpu_top.core_info[cpu].pkg == -1 || 189 * The monitor could still provide useful data, for example
195 cpu_top.core_info[cpu].core == -1) { 190 * AMD HW counters partly sit in PCI config space.
191 * It's up to the monitor plug-in to check .is_online, this one
192 * is just for additional info.
193 */
194 if (!cpu_top.core_info[cpu].is_online) {
196 printf(_(" *is offline\n")); 195 printf(_(" *is offline\n"));
197 return; 196 return;
198 } else 197 } else
@@ -238,7 +237,6 @@ static void parse_monitor_param(char *param)
238 if (hits == 0) { 237 if (hits == 0) {
239 printf(_("No matching monitor found in %s, " 238 printf(_("No matching monitor found in %s, "
240 "try -l option\n"), param); 239 "try -l option\n"), param);
241 monitor_help();
242 exit(EXIT_FAILURE); 240 exit(EXIT_FAILURE);
243 } 241 }
244 /* Override detected/registerd monitors array with requested one */ 242 /* Override detected/registerd monitors array with requested one */
@@ -335,37 +333,27 @@ static void cmdline(int argc, char *argv[])
335 int opt; 333 int opt;
336 progname = basename(argv[0]); 334 progname = basename(argv[0]);
337 335
338 while ((opt = getopt(argc, argv, "+hli:m:")) != -1) { 336 while ((opt = getopt(argc, argv, "+li:m:")) != -1) {
339 switch (opt) { 337 switch (opt) {
340 case 'h':
341 monitor_help();
342 exit(EXIT_SUCCESS);
343 case 'l': 338 case 'l':
344 if (mode) { 339 if (mode)
345 monitor_help(); 340 print_wrong_arg_exit();
346 exit(EXIT_FAILURE);
347 }
348 mode = list; 341 mode = list;
349 break; 342 break;
350 case 'i': 343 case 'i':
351 /* only allow -i with -m or no option */ 344 /* only allow -i with -m or no option */
352 if (mode && mode != show) { 345 if (mode && mode != show)
353 monitor_help(); 346 print_wrong_arg_exit();
354 exit(EXIT_FAILURE);
355 }
356 interval = atoi(optarg); 347 interval = atoi(optarg);
357 break; 348 break;
358 case 'm': 349 case 'm':
359 if (mode) { 350 if (mode)
360 monitor_help(); 351 print_wrong_arg_exit();
361 exit(EXIT_FAILURE);
362 }
363 mode = show; 352 mode = show;
364 show_monitors_param = optarg; 353 show_monitors_param = optarg;
365 break; 354 break;
366 default: 355 default:
367 monitor_help(); 356 print_wrong_arg_exit();
368 exit(EXIT_FAILURE);
369 } 357 }
370 } 358 }
371 if (!mode) 359 if (!mode)
@@ -385,6 +373,10 @@ int cmd_monitor(int argc, char **argv)
385 return EXIT_FAILURE; 373 return EXIT_FAILURE;
386 } 374 }
387 375
376 /* Default is: monitor all CPUs */
377 if (bitmask_isallclear(cpus_chosen))
378 bitmask_setall(cpus_chosen);
379
388 dprint("System has up to %d CPU cores\n", cpu_count); 380 dprint("System has up to %d CPU cores\n", cpu_count);
389 381
390 for (num = 0; all_monitors[num]; num++) { 382 for (num = 0; all_monitors[num]; num++) {
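cmd_monitor() now selects every CPU when the -c cpulist is empty, and print_results() filters each row with bitmask_isbitset(). A simplified stand-in that uses a plain uint64_t mask instead of the tool's bitmask helpers, just to show the select-all default and the per-CPU filter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cpu, cpu_count = 4;	/* arbitrary CPU count */
	uint64_t cpus_chosen = 0;		/* nothing selected on the command line */

	/* Default is: monitor all CPUs */
	if (cpus_chosen == 0)
		cpus_chosen = (UINT64_C(1) << cpu_count) - 1;

	for (cpu = 0; cpu < cpu_count; cpu++) {
		if (!(cpus_chosen & (UINT64_C(1) << cpu)))
			continue;		/* CPU not requested, skip its row */
		printf("printing results for cpu%u\n", cpu);
	}
	return 0;
}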
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 63ca87a05e5f..5650ab5a2c20 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -22,12 +22,15 @@
22 22
23#define MSR_TSC 0x10 23#define MSR_TSC 0x10
24 24
25#define MSR_AMD_HWCR 0xc0010015
26
25enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; 27enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT };
26 28
27static int mperf_get_count_percent(unsigned int self_id, double *percent, 29static int mperf_get_count_percent(unsigned int self_id, double *percent,
28 unsigned int cpu); 30 unsigned int cpu);
29static int mperf_get_count_freq(unsigned int id, unsigned long long *count, 31static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
30 unsigned int cpu); 32 unsigned int cpu);
33static struct timespec time_start, time_end;
31 34
32static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { 35static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
33 { 36 {
@@ -54,19 +57,33 @@ static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
54 }, 57 },
55}; 58};
56 59
60enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF };
61static int max_freq_mode;
62/*
 63 * The max frequency mperf ticks at (in C0), retrieved either:
 64 * 1) by calculating it after the measurement, if TSC is known to tick at mperf/P0 frequency
 65 * 2) from cpufreq /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time
 66 * Option 1) is preferred as it also works without the cpufreq subsystem (e.g. on Xen)
67 */
68static unsigned long max_frequency;
69
57static unsigned long long tsc_at_measure_start; 70static unsigned long long tsc_at_measure_start;
58static unsigned long long tsc_at_measure_end; 71static unsigned long long tsc_at_measure_end;
59static unsigned long max_frequency;
60static unsigned long long *mperf_previous_count; 72static unsigned long long *mperf_previous_count;
61static unsigned long long *aperf_previous_count; 73static unsigned long long *aperf_previous_count;
62static unsigned long long *mperf_current_count; 74static unsigned long long *mperf_current_count;
63static unsigned long long *aperf_current_count; 75static unsigned long long *aperf_current_count;
76
64/* valid flag for all CPUs. If a MSR read failed it will be zero */ 77/* valid flag for all CPUs. If a MSR read failed it will be zero */
65static int *is_valid; 78static int *is_valid;
66 79
67static int mperf_get_tsc(unsigned long long *tsc) 80static int mperf_get_tsc(unsigned long long *tsc)
68{ 81{
69 return read_msr(0, MSR_TSC, tsc); 82 int ret;
83 ret = read_msr(0, MSR_TSC, tsc);
84 if (ret)
85 dprint("Reading TSC MSR failed, returning %llu\n", *tsc);
86 return ret;
70} 87}
71 88
72static int mperf_init_stats(unsigned int cpu) 89static int mperf_init_stats(unsigned int cpu)
@@ -97,36 +114,11 @@ static int mperf_measure_stats(unsigned int cpu)
97 return 0; 114 return 0;
98} 115}
99 116
100/*
101 * get_average_perf()
102 *
103 * Returns the average performance (also considers boosted frequencies)
104 *
105 * Input:
106 * aperf_diff: Difference of the aperf register over a time period
107 * mperf_diff: Difference of the mperf register over the same time period
108 * max_freq: Maximum frequency (P0)
109 *
110 * Returns:
111 * Average performance over the time period
112 */
113static unsigned long get_average_perf(unsigned long long aperf_diff,
114 unsigned long long mperf_diff)
115{
116 unsigned int perf_percent = 0;
117 if (((unsigned long)(-1) / 100) < aperf_diff) {
118 int shift_count = 7;
119 aperf_diff >>= shift_count;
120 mperf_diff >>= shift_count;
121 }
122 perf_percent = (aperf_diff * 100) / mperf_diff;
123 return (max_frequency * perf_percent) / 100;
124}
125
126static int mperf_get_count_percent(unsigned int id, double *percent, 117static int mperf_get_count_percent(unsigned int id, double *percent,
127 unsigned int cpu) 118 unsigned int cpu)
128{ 119{
129 unsigned long long aperf_diff, mperf_diff, tsc_diff; 120 unsigned long long aperf_diff, mperf_diff, tsc_diff;
121 unsigned long long timediff;
130 122
131 if (!is_valid[cpu]) 123 if (!is_valid[cpu])
132 return -1; 124 return -1;
@@ -136,11 +128,19 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
136 128
137 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; 129 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
138 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; 130 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
139 tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
140 131
141 *percent = 100.0 * mperf_diff / tsc_diff; 132 if (max_freq_mode == MAX_FREQ_TSC_REF) {
142 dprint("%s: mperf_diff: %llu, tsc_diff: %llu\n", 133 tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
143 mperf_cstates[id].name, mperf_diff, tsc_diff); 134 *percent = 100.0 * mperf_diff / tsc_diff;
135 dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
136 mperf_cstates[id].name, mperf_diff, tsc_diff);
137 } else if (max_freq_mode == MAX_FREQ_SYSFS) {
138 timediff = timespec_diff_us(time_start, time_end);
139 *percent = 100.0 * mperf_diff / timediff;
140 dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n",
141 mperf_cstates[id].name, mperf_diff, timediff);
142 } else
143 return -1;
144 144
145 if (id == Cx) 145 if (id == Cx)
146 *percent = 100.0 - *percent; 146 *percent = 100.0 - *percent;
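For reference, a worked example of the C0/Cx arithmetic above with made-up counter values: in MAX_FREQ_TSC_REF mode the C0 share is mperf ticks over TSC ticks and Cx is the remainder, while the MAX_FREQ_SYSFS branch divides by the elapsed time in microseconds instead:

#include <stdio.h>

int main(void)
{
	/* made-up counters for a roughly one-second measurement window */
	unsigned long long mperf_diff = 600000000ULL;	/* mperf only advances in C0 */
	unsigned long long tsc_diff   = 2400000000ULL;	/* TSC ticks over the same window */

	/* MAX_FREQ_TSC_REF: C0 percent = 100 * mperf_diff / tsc_diff */
	double c0 = 100.0 * mperf_diff / tsc_diff;	/* 25.0 with these numbers */

	printf("C0: %.1f%%  Cx: %.1f%%\n", c0, 100.0 - c0);
	return 0;
}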
@@ -154,7 +154,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
154static int mperf_get_count_freq(unsigned int id, unsigned long long *count, 154static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
155 unsigned int cpu) 155 unsigned int cpu)
156{ 156{
157 unsigned long long aperf_diff, mperf_diff; 157 unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff;
158 158
159 if (id != AVG_FREQ) 159 if (id != AVG_FREQ)
160 return 1; 160 return 1;
@@ -165,11 +165,21 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
165 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; 165 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
166 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; 166 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
167 167
168 /* Return MHz for now, might want to return KHz if column width is more 168 if (max_freq_mode == MAX_FREQ_TSC_REF) {
169 generic */ 169 /* Calculate max_freq from TSC count */
170 *count = get_average_perf(aperf_diff, mperf_diff) / 1000; 170 tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
171 dprint("%s: %llu\n", mperf_cstates[id].name, *count); 171 time_diff = timespec_diff_us(time_start, time_end);
172 max_frequency = tsc_diff / time_diff;
173 }
172 174
175 *count = max_frequency * ((double)aperf_diff / mperf_diff);
176 dprint("%s: Average freq based on %s maximum frequency:\n",
177 mperf_cstates[id].name,
178 (max_freq_mode == MAX_FREQ_TSC_REF) ? "TSC calculated" : "sysfs read");
179 dprint("%max_frequency: %lu", max_frequency);
180 dprint("aperf_diff: %llu\n", aperf_diff);
181 dprint("mperf_diff: %llu\n", mperf_diff);
182 dprint("avg freq: %llu\n", *count);
173 return 0; 183 return 0;
174} 184}
175 185
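And a worked example of the average-frequency path with made-up numbers: in TSC-ref mode max_frequency is derived as TSC ticks per elapsed microsecond (roughly MHz), then scaled by the aperf/mperf ratio; in MAX_FREQ_SYSFS mode max_frequency would come from cpuinfo_max_freq instead:

#include <stdio.h>

int main(void)
{
	unsigned long long tsc_diff   = 2600000000ULL;	/* TSC ticks over the interval */
	unsigned long long time_diff  = 1000000ULL;	/* interval length in microseconds */
	unsigned long long aperf_diff = 1800000000ULL;	/* actual-performance ticks */
	unsigned long long mperf_diff = 1500000000ULL;	/* reference-performance ticks */

	unsigned long max_frequency = tsc_diff / time_diff;	/* 2600 ticks/us, ~MHz */
	unsigned long long avg = max_frequency * ((double)aperf_diff / mperf_diff);

	printf("max_frequency: %lu, average: %llu (same unit as max_frequency)\n",
	       max_frequency, avg);	/* 2600 -> 3120 with these numbers */
	return 0;
}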
@@ -178,6 +188,7 @@ static int mperf_start(void)
178 int cpu; 188 int cpu;
179 unsigned long long dbg; 189 unsigned long long dbg;
180 190
191 clock_gettime(CLOCK_REALTIME, &time_start);
181 mperf_get_tsc(&tsc_at_measure_start); 192 mperf_get_tsc(&tsc_at_measure_start);
182 193
183 for (cpu = 0; cpu < cpu_count; cpu++) 194 for (cpu = 0; cpu < cpu_count; cpu++)
@@ -193,32 +204,104 @@ static int mperf_stop(void)
193 unsigned long long dbg; 204 unsigned long long dbg;
194 int cpu; 205 int cpu;
195 206
196 mperf_get_tsc(&tsc_at_measure_end);
197
198 for (cpu = 0; cpu < cpu_count; cpu++) 207 for (cpu = 0; cpu < cpu_count; cpu++)
199 mperf_measure_stats(cpu); 208 mperf_measure_stats(cpu);
200 209
210 mperf_get_tsc(&tsc_at_measure_end);
211 clock_gettime(CLOCK_REALTIME, &time_end);
212
201 mperf_get_tsc(&dbg); 213 mperf_get_tsc(&dbg);
202 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); 214 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
203 215
204 return 0; 216 return 0;
205} 217}
206 218
207struct cpuidle_monitor mperf_monitor; 219/*
208 220 * Mperf register is defined to tick at P0 (maximum) frequency
209struct cpuidle_monitor *mperf_register(void) 221 *
222 * Instead of reading out P0, which can be tricky to retrieve from HW,
223 * we use the TSC counter if it reliably ticks at P0/mperf frequency.
224 *
225 * Still try to fall back to:
226 * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq
227 * on older Intel HW without invariant TSC feature.
228 * Or on AMD machines where TSC does not tick at P0 (do not exist yet, but
229 * it's still double checked (MSR_AMD_HWCR)).
230 *
231 * On these machines the user would still get useful mperf
232 * stats when acpi-cpufreq driver is loaded.
233 */
234static int init_maxfreq_mode(void)
210{ 235{
236 int ret;
237 unsigned long long hwcr;
211 unsigned long min; 238 unsigned long min;
212 239
213 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) 240 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
214 return NULL; 241 goto use_sysfs;
215 242
216 /* Assume min/max all the same on all cores */ 243 if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
244 /* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
245 * freq.
246 * A test whether hwcr is accessible/available would be:
247 * (cpupower_cpu_info.family > 0x10 ||
248 * (cpupower_cpu_info.family == 0x10 &&
249 * cpupower_cpu_info.model >= 0x2))
250 * This should be the case for all aperf/mperf
251 * capable AMD machines and is therefore safe to test here.
252 * Compare with Linus kernel git commit: acf01734b1747b1ec4
253 */
254 ret = read_msr(0, MSR_AMD_HWCR, &hwcr);
255 /*
256 * If the MSR read failed, assume a Xen system that did
257 * not explicitly provide access to it and assume TSC works
258 */
259 if (ret != 0) {
260 dprint("TSC read 0x%x failed - assume TSC working\n",
261 MSR_AMD_HWCR);
262 return 0;
263 } else if (1 & (hwcr >> 24)) {
264 max_freq_mode = MAX_FREQ_TSC_REF;
265 return 0;
266 } else { /* Use sysfs max frequency if available */ }
267 } else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) {
268 /*
269 * On Intel we assume mperf (in C0) is ticking at same
270 * rate than TSC
271 */
272 max_freq_mode = MAX_FREQ_TSC_REF;
273 return 0;
274 }
275use_sysfs:
217 if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { 276 if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) {
218 dprint("Cannot retrieve max freq from cpufreq kernel " 277 dprint("Cannot retrieve max freq from cpufreq kernel "
219 "subsystem\n"); 278 "subsystem\n");
220 return NULL; 279 return -1;
221 } 280 }
281 max_freq_mode = MAX_FREQ_SYSFS;
282 return 0;
283}
284
285/*
286 * This monitor provides:
287 *
288 * 1) Average frequency a CPU resided in
289 * This always works if the CPU has aperf/mperf capabilities
290 *
291 * 2) C0 and Cx (any sleep state) time a CPU resided in
292 * Works if the mperf timer stops ticking in sleep states, which
293 * seems to be the case on all current HW.
294 * Both are retrieved directly from HW registers and are independent
295 * of kernel statistics.
296 */
297struct cpuidle_monitor mperf_monitor;
298struct cpuidle_monitor *mperf_register(void)
299{
300 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
301 return NULL;
302
303 if (init_maxfreq_mode())
304 return NULL;
222 305
223 /* Free this at program termination */ 306 /* Free this at program termination */
224 is_valid = calloc(cpu_count, sizeof(int)); 307 is_valid = calloc(cpu_count, sizeof(int));