-rw-r--r--CREDITS8
-rw-r--r--Documentation/DocBook/device-drivers.tmpl1
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl6
-rw-r--r--Documentation/block/cfq-iosched.txt45
-rw-r--r--Documentation/cgroups/blkio-controller.txt28
-rw-r--r--Documentation/gpio.txt22
-rw-r--r--Documentation/hwmon/sysfs-interface7
-rw-r--r--Documentation/kernel-doc-nano-HOWTO.txt5
-rw-r--r--Documentation/mutex-design.txt3
-rw-r--r--Documentation/power/regulator/overview.txt2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/workqueue.txt380
-rw-r--r--MAINTAINERS62
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/cacheflush.h2
-rw-r--r--arch/alpha/include/asm/unistd.h6
-rw-r--r--arch/alpha/kernel/entry.S81
-rw-r--r--arch/alpha/kernel/err_ev6.c12
-rw-r--r--arch/alpha/kernel/err_marvel.c33
-rw-r--r--arch/alpha/kernel/err_titan.c35
-rw-r--r--arch/alpha/kernel/osf_sys.c9
-rw-r--r--arch/alpha/kernel/pci-sysfs.c2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/signal.c97
-rw-r--r--arch/alpha/kernel/srm_env.c2
-rw-r--r--arch/alpha/kernel/systbls.S5
-rw-r--r--arch/alpha/kernel/time.c10
-rw-r--r--arch/alpha/kernel/traps.c3
-rw-r--r--arch/arm/Kconfig118
-rw-r--r--arch/arm/boot/Makefile8
-rw-r--r--arch/arm/boot/compressed/Makefile6
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/common/it8152.c16
-rw-r--r--arch/arm/include/asm/dma-mapping.h8
-rw-r--r--arch/arm/include/asm/perf_event.h2
-rw-r--r--arch/arm/include/asm/pgtable.h4
-rw-r--r--arch/arm/include/asm/unistd.h3
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/perf_event.c12
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c15
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c10
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c31
-rw-r--r--arch/arm/mach-at91/clock.c3
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/dm646x.c3
-rw-r--r--arch/arm/mach-dove/include/mach/io.h6
-rw-r--r--arch/arm/mach-ep93xx/clock.c2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c8
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-kirkwood/include/mach/kirkwood.h2
-rw-r--r--arch/arm/mach-kirkwood/pcie.c4
-rw-r--r--arch/arm/mach-mmp/include/mach/system.h7
-rw-r--r--arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c2
-rw-r--r--arch/arm/mach-mx25/mach-cpuimx25.c4
-rw-r--r--arch/arm/mach-mx3/clock-imx35.c77
-rw-r--r--arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c2
-rw-r--r--arch/arm/mach-mx3/mach-cpuimx35.c4
-rw-r--r--arch/arm/mach-mx5/clock-mx51.c2
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa2xx.c5
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h14
-rw-r--r--arch/arm/mach-pxa/include/mach/io.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/mfp-pxa300.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c6
-rw-r--r--arch/arm/mach-pxa/vpac270.c1
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c3
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c104
-rw-r--r--arch/arm/mach-s5pv210/clock.c20
-rw-r--r--arch/arm/mach-s5pv210/cpu.c2
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c56
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c9
-rw-r--r--arch/arm/mach-shmobile/clock.c4
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c169
-rw-r--r--arch/arm/mach-u300/include/mach/gpio.h3
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c8
-rw-r--r--arch/arm/mm/Kconfig2
-rw-r--r--arch/arm/mm/alignment.c19
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/mmu.c31
-rw-r--r--arch/arm/mm/proc-v7.S62
-rw-r--r--arch/arm/oprofile/common.c7
-rw-r--r--arch/arm/plat-mxc/Kconfig1
-rw-r--r--arch/arm/plat-mxc/include/mach/eukrea-baseboards.h4
-rw-r--r--arch/arm/plat-mxc/tzic.c5
-rw-r--r--arch/arm/plat-nomadik/timer.c33
-rw-r--r--arch/arm/plat-omap/Kconfig2
-rw-r--r--arch/arm/plat-omap/mcbsp.c2
-rw-r--r--arch/arm/plat-omap/sram.c25
-rw-r--r--arch/arm/plat-pxa/pwm.c2
-rw-r--r--arch/arm/plat-s5p/dev-fimc0.c9
-rw-r--r--arch/arm/plat-s5p/dev-fimc1.c9
-rw-r--r--arch/arm/plat-s5p/dev-fimc2.c9
-rw-r--r--arch/arm/plat-samsung/gpio-config.c7
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h10
-rw-r--r--arch/arm/tools/mach-types98
-rw-r--r--arch/avr32/kernel/module.c3
-rw-r--r--arch/frv/kernel/signal.c51
-rw-r--r--arch/h8300/kernel/module.c3
-rw-r--r--arch/ia64/include/asm/compat.h2
-rw-r--r--arch/ia64/kernel/fsys.S30
-rw-r--r--arch/m32r/include/asm/signal.h1
-rw-r--r--arch/m32r/include/asm/unistd.h1
-rw-r--r--arch/m32r/kernel/entry.S5
-rw-r--r--arch/m32r/kernel/ptrace.c7
-rw-r--r--arch/m32r/kernel/signal.c105
-rw-r--r--arch/m68k/include/asm/unistd.h5
-rw-r--r--arch/m68k/kernel/entry.S3
-rw-r--r--arch/m68k/mac/macboing.c6
-rw-r--r--arch/m68knommu/kernel/syscalltable.S3
-rw-r--r--arch/mips/Kconfig21
-rw-r--r--arch/mips/alchemy/common/prom.c5
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/cavium-octeon/Kconfig4
-rw-r--r--arch/mips/cavium-octeon/cpu.c2
-rw-r--r--arch/mips/cavium-octeon/executive/Makefile2
-rw-r--r--arch/mips/include/asm/atomic.h4
-rw-r--r--arch/mips/include/asm/compat.h2
-rw-r--r--arch/mips/include/asm/cop2.h2
-rw-r--r--arch/mips/include/asm/gic.h1
-rw-r--r--arch/mips/include/asm/mach-tx49xx/kmalloc.h2
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h3
-rw-r--r--arch/mips/include/asm/page.h14
-rw-r--r--arch/mips/include/asm/thread_info.h3
-rw-r--r--arch/mips/include/asm/unistd.h21
-rw-r--r--arch/mips/kernel/irq-gic.c5
-rw-r--r--arch/mips/kernel/kgdb.c2
-rw-r--r--arch/mips/kernel/kspd.c2
-rw-r--r--arch/mips/kernel/linux32.c7
-rw-r--r--arch/mips/kernel/scall32-o32.S5
-rw-r--r--arch/mips/kernel/scall64-64.S7
-rw-r--r--arch/mips/kernel/scall64-n32.S5
-rw-r--r--arch/mips/kernel/scall64-o32.S5
-rw-r--r--arch/mips/mm/dma-default.c28
-rw-r--r--arch/mips/mm/sc-rm7k.c2
-rw-r--r--arch/mips/mti-malta/malta-int.c3
-rw-r--r--arch/mips/pci/pci-rc32434.c2
-rw-r--r--arch/mips/pnx8550/common/reset.c20
-rw-r--r--arch/mips/pnx8550/common/setup.c3
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/mn10300/Kconfig.debug2
-rw-r--r--arch/mn10300/include/asm/bitops.h4
-rw-r--r--arch/mn10300/include/asm/signal.h2
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c22
-rw-r--r--arch/mn10300/kernel/module.c3
-rw-r--r--arch/mn10300/kernel/signal.c35
-rw-r--r--arch/mn10300/mm/Makefile14
-rw-r--r--arch/mn10300/mm/cache-disabled.c21
-rw-r--r--arch/mn10300/mm/cache.c20
-rw-r--r--arch/parisc/include/asm/compat.h2
-rw-r--r--arch/parisc/kernel/module.c3
-rw-r--r--arch/powerpc/include/asm/compat.h2
-rw-r--r--arch/powerpc/include/asm/fsldma.h1
-rw-r--r--arch/powerpc/kernel/module.c6
-rw-r--r--arch/powerpc/kernel/signal.c2
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/platforms/512x/clock.c2
-rw-r--r--arch/powerpc/platforms/52xx/efika.c9
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c8
-rw-r--r--arch/s390/include/asm/compat.h2
-rw-r--r--arch/s390/kernel/module.c3
-rw-r--r--arch/sh/kernel/module.c2
-rw-r--r--arch/sparc/include/asm/compat.h2
-rw-r--r--arch/sparc/kernel/perf_event.c14
-rw-r--r--arch/sparc/kernel/signal32.c161
-rw-r--r--arch/sparc/kernel/signal_32.c55
-rw-r--r--arch/sparc/kernel/signal_64.c45
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c2
-rw-r--r--arch/sparc/kernel/unaligned_32.c3
-rw-r--r--arch/sparc/kernel/windows.c2
-rw-r--r--arch/tile/include/arch/chip_tile64.h3
-rw-r--r--arch/tile/include/arch/chip_tilepro.h3
-rw-r--r--arch/tile/include/asm/compat.h7
-rw-r--r--arch/tile/include/asm/io.h8
-rw-r--r--arch/tile/include/asm/processor.h12
-rw-r--r--arch/tile/include/asm/ptrace.h15
-rw-r--r--arch/tile/include/asm/sigcontext.h18
-rw-r--r--arch/tile/include/asm/signal.h1
-rw-r--r--arch/tile/include/asm/syscalls.h21
-rw-r--r--arch/tile/kernel/intvec_32.S7
-rw-r--r--arch/tile/kernel/process.c30
-rw-r--r--arch/tile/kernel/signal.c27
-rw-r--r--arch/tile/kernel/stack.c2
-rw-r--r--arch/um/kernel/exec.c6
-rw-r--r--arch/um/kernel/internal.h2
-rw-r--r--arch/um/kernel/syscall.c4
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/boot/early_serial_console.c14
-rw-r--r--arch/x86/ia32/ia32entry.S22
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h6
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h12
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/include/asm/compat.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h5
-rw-r--r--arch/x86/include/asm/hpet.h1
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h2
-rw-r--r--arch/x86/include/asm/iomap.h4
-rw-r--r--arch/x86/include/asm/kvm_emulate.h7
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/amd_iommu.c4
-rw-r--r--arch/x86/kernel/amd_iommu_init.c67
-rw-r--r--arch/x86/kernel/apic/io_apic.c11
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c6
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c18
-rw-r--r--arch/x86/kernel/cpu/intel.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event.c71
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c15
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c8
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/early-quirks.c18
-rw-r--r--arch/x86/kernel/hpet.c33
-rw-r--r--arch/x86/kernel/hw_breakpoint.c40
-rw-r--r--arch/x86/kernel/module.c3
-rw-r--r--arch/x86/kernel/trampoline.c3
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kvm/emulate.c9
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/irq.h2
-rw-r--r--arch/x86/lguest/boot.c13
-rw-r--r--arch/x86/mm/iomap_32.c6
-rw-r--r--arch/x86/oprofile/nmi_int.c27
-rw-r--r--arch/x86/xen/time.c5
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-merge.c12
-rw-r--r--block/blk-sysfs.c1
-rw-r--r--block/blk.h8
-rw-r--r--block/cfq-iosched.c119
-rw-r--r--block/elevator.c44
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c34
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/apei-base.c21
-rw-r--r--drivers/acpi/apei/einj.c4
-rw-r--r--drivers/acpi/apei/erst-dbg.c18
-rw-r--r--drivers/acpi/apei/erst.c29
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/apei/hest.c11
-rw-r--r--drivers/acpi/atomicio.c2
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/blacklist.c18
-rw-r--r--drivers/acpi/bus.c18
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/sleep.c22
-rw-r--r--drivers/acpi/sysfs.c20
-rw-r--r--drivers/acpi/video_detect.c4
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/ahci.h12
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c18
-rw-r--r--drivers/ata/libata-core.c14
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/libata-sff.c41
-rw-r--r--drivers/ata/pata_artop.c3
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/cciss.c13
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mg_disk.c3
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.h2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/mem.c3
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/char/vt_ioctl.c16
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/shdma.c3
-rw-r--r--drivers/edac/edac_mc.c3
-rw-r--r--drivers/edac/i7core_edac.c1
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/gpio/sx150x.c26
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c39
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c28
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c89
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c44
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c93
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c21
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c9
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c20
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c27
-rw-r--r--drivers/gpu/drm/radeon/r100.c24
-rw-r--r--drivers/gpu/drm/radeon/r600.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-mosart.c1
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adm1031.c43
-rw-r--r--drivers/hwmon/coretemp.c57
-rw-r--r--drivers/hwmon/emc1403.c1
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c6
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/lis3lv02d.c4
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c4
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c4
-rw-r--r--drivers/hwmon/lm95241.c21
-rw-r--r--drivers/hwmon/pkgtemp.c23
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c6
-rw-r--r--drivers/i2c/busses/i2c-octeon.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--[-rwxr-xr-x]drivers/idle/intel_idle.c20
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c18
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/mouse/bcm5974.c12
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/leds/leds-ns2.c9
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/mfd/max8925-core.c13
-rw-r--r--drivers/mfd/wm831x-irq.c9
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/vmw_balloon.c (renamed from drivers/misc/vmware_balloon.c)0
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c9
-rw-r--r--drivers/mtd/nand/mxc_nand.c47
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/onenand/samsung.c16
-rw-r--r--drivers/net/ll_temac_main.c1
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c139
-rw-r--r--drivers/oprofile/buffer_sync.c27
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/pci/intel-iommu.c117
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pcmcia/pcmcia_resource.c57
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/power/apm_power.c1
-rw-r--r--drivers/power/intel_mid_battery.c6
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c9
-rw-r--r--drivers/regulator/ad5398.c12
-rw-r--r--drivers/regulator/core.c6
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/max1586.c12
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8998.c8
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps6586x-regulator.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-bfin.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-s3c.c13
-rw-r--r--drivers/s390/char/tape_block.c3
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c5
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/constants.c6
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c10
-rw-r--r--drivers/serial/amba-pl010.c9
-rw-r--r--drivers/serial/mfd.c18
-rw-r--r--drivers/serial/mpc52xx_uart.c1
-rw-r--r--drivers/serial/mrst_max3110.c1
-rw-r--r--drivers/serial/serial_cs.c62
-rw-r--r--drivers/spi/amba-pl022.c16
-rw-r--r--drivers/spi/dw_spi.c24
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spi_gpio.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c10
-rw-r--r--drivers/spi/spi_s3c64xx.c37
-rw-r--r--drivers/staging/batman-adv/hard-interface.c13
-rw-r--r--drivers/staging/batman-adv/send.c8
-rw-r--r--drivers/staging/ti-st/st.h1
-rw-r--r--drivers/staging/ti-st/st_core.c9
-rw-r--r--drivers/staging/ti-st/st_core.h2
-rw-r--r--drivers/staging/ti-st/st_kim.c22
-rw-r--r--drivers/staging/vt6655/wpactl.c11
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/file.c35
-rw-r--r--drivers/usb/core/message.c1
-rw-r--r--drivers/usb/host/ehci-pci.c5
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c9
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/otg/twl4030-usb.c78
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/efifb.c103
-rw-r--r--drivers/video/pxa168fb.c10
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/sb_wdog.c12
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--fs/9p/vfs_dir.c6
-rw-r--r--fs/9p/vfs_inode.c9
-rw-r--r--fs/9p/vfs_super.c20
-rw-r--r--fs/aio.c13
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/bio-integrity.c4
-rw-r--r--fs/ceph/Kconfig1
-rw-r--r--fs/ceph/addr.c7
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c10
-rw-r--r--fs/ceph/inode.c11
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/pagelist.c12
-rw-r--r--fs/ceph/snap.c92
-rw-r--r--fs/ceph/super.h5
-rw-r--r--fs/char_dev.c4
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/asn1.c6
-rw-r--r--fs/cifs/cifsencrypt.c418
-rw-r--r--fs/cifs/cifsglob.h25
-rw-r--r--fs/cifs/cifspdu.h7
-rw-r--r--fs/cifs/cifsproto.h13
-rw-r--r--fs/cifs/cifssmb.c62
-rw-r--r--fs/cifs/connect.c77
-rw-r--r--fs/cifs/inode.c32
-rw-r--r--fs/cifs/netmisc.c22
-rw-r--r--fs/cifs/ntlmssp.h13
-rw-r--r--fs/cifs/sess.c132
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/coda/psdev.c4
-rw-r--r--fs/compat.c2
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/exec.c14
-rw-r--r--fs/fcntl.c10
-rw-r--r--fs/fs-writeback.c14
-rw-r--r--fs/fuse/dev.c44
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/gfs2/log.c2
-rw-r--r--fs/minix/namei.c2
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/super.c8
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/ocfs2/acl.c3
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/blockcheck.c4
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dir.c24
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h1
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c9
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c1
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c40
-rw-r--r--fs/ocfs2/dlmglue.h1
-rw-r--r--fs/ocfs2/file.c15
-rw-r--r--fs/ocfs2/inode.c6
-rw-r--r--fs/ocfs2/mmap.c8
-rw-r--r--fs/ocfs2/namei.c302
-rw-r--r--fs/ocfs2/ocfs2_fs.h37
-rw-r--r--fs/ocfs2/ocfs2_ioctl.h8
-rw-r--r--fs/ocfs2/refcounttree.c10
-rw-r--r--fs/ocfs2/reservations.c22
-rw-r--r--fs/ocfs2/suballoc.c223
-rw-r--r--fs/ocfs2/suballoc.h21
-rw-r--r--fs/ocfs2/symlink.c2
-rw-r--r--fs/ocfs2/xattr.c4
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/page.c2
-rw-r--r--fs/proc/task_mmu.c7
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/reiserfs/ioctl.c7
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_log_cil.c12
-rw-r--r--fs/xfs/xfs_log_priv.h37
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/asm-generic/gpio.h14
-rw-r--r--include/drm/drmP.h29
-rw-r--r--include/drm/drm_crtc.h10
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/gpio.h1
-rw-r--r--include/linux/i2c/sx150x.h4
-rw-r--r--include/linux/io-mapping.h24
-rw-r--r--include/linux/kfifo.h58
-rw-r--r--include/linux/ksm.h20
-rw-r--r--include/linux/lglock.h4
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/mmzone.h13
-rw-r--r--include/linux/module.h5
-rw-r--r--include/linux/mutex.h8
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/quotaops.h10
-rw-r--r--include/linux/rcupdate.h2
-rw-r--r--include/linux/semaphore.h3
-rw-r--r--include/linux/spi/dw_spi.h2
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/vmstat.h22
-rw-r--r--include/linux/wait.h1
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/cgroup.c6
-rw-r--r--kernel/compat.c21
-rw-r--r--kernel/debug/kdb/kdb_bp.c2
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/gcov/fs.c244
-rw-r--r--kernel/groups.c5
-rw-r--r--kernel/hrtimer.c3
-rw-r--r--kernel/hw_breakpoint.c3
-rw-r--r--kernel/kfifo.c2
-rw-r--r--kernel/module.c4
-rw-r--r--kernel/mutex.c23
-rw-r--r--kernel/perf_event.c32
-rw-r--r--kernel/pm_qos_params.c4
-rw-r--r--kernel/power/hibernate.c1
-rw-r--r--kernel/power/snapshot.c86
-rw-r--r--kernel/power/swap.c6
-rw-r--r--kernel/sched.c14
-rw-r--r--kernel/sched_fair.c13
-rw-r--r--kernel/smp.c17
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl.c5
-rw-r--r--kernel/trace/ftrace.c19
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace_event_perf.c3
-rw-r--r--kernel/trace/trace_kprobe.c43
-rw-r--r--kernel/watchdog.c17
-rw-r--r--kernel/workqueue.c27
-rw-r--r--lib/bug.c6
-rw-r--r--lib/list_sort.c2
-rw-r--r--lib/scatterlist.c14
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/backing-dev.c9
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/fremap.c7
-rw-r--r--mm/hugetlb.c24
-rw-r--r--mm/ksm.c9
-rw-r--r--mm/memory.c41
-rw-r--r--mm/memory_hotplug.c16
-rw-r--r--mm/mlock.c6
-rw-r--r--mm/mmap.c1
-rw-r--r--mm/mmzone.c21
-rw-r--r--mm/oom_kill.c49
-rw-r--r--mm/page_alloc.c33
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/rmap.c23
-rw-r--r--mm/swapfile.c129
-rw-r--r--mm/vmscan.c43
-rw-r--r--mm/vmstat.c16
-rw-r--r--net/9p/client.c7
-rw-r--r--net/9p/trans_rdma.c29
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c10
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c5
-rw-r--r--net/sunrpc/clnt.c116
-rw-r--r--net/sunrpc/rpc_pipe.c20
-rw-r--r--samples/kfifo/dma-example.c17
-rw-r--r--scripts/basic/docproc.c129
-rwxr-xr-xscripts/kernel-doc54
-rw-r--r--security/apparmor/include/resource.h4
-rw-r--r--security/apparmor/lib.c2
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/apparmor/path.c38
-rw-r--r--security/apparmor/policy.c6
-rw-r--r--security/apparmor/resource.c20
-rw-r--r--security/integrity/ima/ima.h1
-rw-r--r--security/integrity/ima/ima_iint.c4
-rw-r--r--security/integrity/ima/ima_main.c8
-rw-r--r--security/keys/keyctl.c6
-rw-r--r--security/tomoyo/common.c6
-rw-r--r--security/tomoyo/common.h3
-rw-r--r--sound/core/control.c5
-rw-r--r--sound/core/pcm.c33
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_init.c9
-rw-r--r--sound/i2c/other/ak4xxx-adda.c2
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c8
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_cirrus.c50
-rw-r--r--sound/pci/hda/patch_conexant.c59
-rw-r--r--sound/pci/hda/patch_nvhdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c33
-rw-r--r--sound/pci/oxygen/oxygen.c4
-rw-r--r--sound/pci/oxygen/oxygen.h1
-rw-r--r--sound/pci/oxygen/oxygen_lib.c21
-rw-r--r--sound/pci/oxygen/virtuoso.c1
-rw-r--r--sound/pci/oxygen/xonar_wm87x6.c22
-rw-r--r--sound/pci/rme9652/hdsp.c1
-rw-r--r--sound/pci/rme9652/hdspm.c1
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/soc/s3c24xx/s3c-dma.c3
-rw-r--r--sound/soc/sh/migor.c15
-rw-r--r--sound/soc/soc-cache.c5
-rw-r--r--sound/usb/card.c19
-rw-r--r--sound/usb/clock.c3
-rw-r--r--sound/usb/endpoint.c11
-rw-r--r--sound/usb/format.c22
-rw-r--r--sound/usb/mixer.c10
-rw-r--r--sound/usb/pcm.c3
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/probe-finder.c42
-rw-r--r--tools/perf/util/symbol.c7
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/trace-event-scripting.c4
-rw-r--r--tools/perf/util/ui/browsers/hists.c2
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c9
723 files changed, 7368 insertions, 3979 deletions
diff --git a/CREDITS b/CREDITS
index 72b487869788..41d8e63d5165 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index ecd35e9d4410..feca0758391e 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -46,7 +46,6 @@
 
 <sect1><title>Atomic and pointer manipulation</title>
 !Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
 </sect1>
 
 <sect1><title>Delaying, scheduling, and timer routines</title>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a20c6f6fffc3..6899f471fb15 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -57,7 +57,6 @@
 </para>
 
 <sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
 !Elib/vsprintf.c
 </sect1>
 <sect1><title>String Manipulation</title>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f97f285..a0d479d1e1dd 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,6 +1961,12 @@ machines due to caching.
 </sect1>
 </chapter>
 
+<chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+</chapter>
+
 <chapter id="references">
 <title>Further reading</title>
 
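
The new chapter above pulls the mutex kerneldoc (include/linux/mutex.h and
kernel/mutex.c) into the DocBook output. As a quick orientation for readers of
that API reference, a minimal sketch of the basic lock/unlock pattern it
documents might look like this (the lock and counter names are made up for the
example):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);	/* statically initialized sleeping lock */
	static int example_count;

	static void example_increment(void)
	{
		mutex_lock(&example_lock);	/* may sleep, so process context only */
		example_count++;
		mutex_unlock(&example_lock);	/* must be released by the same task */
	}
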
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644
index 000000000000..e578feed6d81
--- /dev/null
+++ b/Documentation/block/cfq-iosched.txt
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for next request on certain cfq queues
+(for sequential workloads) and service trees (for random workloads) before
+queue is expired and CFQ selects next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single spindle SATA/SAS disks where we can cut down on overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all the idling on queues/service tree
+level and one should see an overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in hardware RAID configuration. The down
+side is that isolation provided from WRITES also goes down and notion of
+IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
+keeping slice_idle enabled should be useful. For any configurations where
+there are multiple spindles behind single LUN (Host based hardware RAID
+controller or for storage arrays), setting slice_idle=0 might end up in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+Basic CFQ design is to provide priority based time slices. Higher priority
+process gets bigger time slice and lower priority process gets smaller time
+slice. Measuring time becomes harder if storage is fast and supports NCQ and
+it would be better to dispatch multiple requests from multiple cfq queues in
+request queue at a time. In such scenario, it is not possible to measure time
+consumed by single queue accurately.
+
+What is possible though is to measure number of requests dispatched from a
+single queue and also allow dispatch from multiple cfq queue at the same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 48e0b21b0059..6919d62591d9 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On a faster hardware CFQ can be slow, especially with sequential workload.
+This happens because CFQ idles on a single queue and single queue might not
+drive deeper request queue depths to keep the storage busy. In such scenarios
+one can try setting slice_idle=0 and that would switch CFQ to IOPS
+(IO operations per second) mode on NCQ supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence be
+able to driver higher queue depth and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop if you have created multiple
+groups and put applications in that group which are not driving enough
+IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are support. All the buffered writes are
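
Both tunables described above are ordinary sysfs attributes, so no special
tooling is needed to experiment with them. A minimal userspace sketch, assuming
the disk being tuned is sda (substitute the real device name):

	#include <stdio.h>

	static int write_tunable(const char *path, const char *value)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return -1;
		}
		fprintf(f, "%s\n", value);	/* sysfs attributes take plain integers */
		return fclose(f);
	}

	int main(void)
	{
		/* slice_idle=0 switches CFQ to IOPS mode on NCQ-capable storage */
		write_tunable("/sys/block/sda/queue/iosched/slice_idle", "0");
		/* group_idle=0 additionally stops per-group idling (see above) */
		write_tunable("/sys/block/sda/queue/iosched/group_idle", "0");
		return 0;
	}
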
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index d96a6dba5748..9633da01ff46 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if a number could reference a GPIO, you may use this predicate:
+test if such number from such a structure could reference a GPIO, you
+may use this predicate:
 
 	int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
+example, a number might be valid but temporarily unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime. Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space. (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
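
As a small illustration of the gpio_is_valid() convention documented in the
first hunk above, a driver that treats a board-supplied GPIO as optional might
do something like the following sketch (the function name, label and the idea
that the number comes from platform data are illustrative, not part of the
patch):

	#include <linux/gpio.h>
	#include <linux/errno.h>

	static int example_setup_reset_gpio(int gpio)	/* e.g. taken from platform data */
	{
		int err;

		if (!gpio_is_valid(gpio))
			return -ENODEV;	/* board wired no reset line; caller may ignore */

		err = gpio_request(gpio, "example-reset");
		if (err)
			return err;

		return gpio_direction_output(gpio, 1);	/* drive the line to its idle level */
	}
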
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index ff45d1f837c8..48ceabedf55d 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -91,12 +91,11 @@ name The chip name.
 		I2C devices get this attribute created automatically.
 		RO
 
-update_rate	The rate at which the chip will update readings.
+update_interval	The interval at which the chip will update readings.
 		Unit: millisecond
 		RW
-		Some devices have a variable update rate. This attribute
-		can be used to change the update rate to the desired
-		frequency.
+		Some devices have a variable update rate or interval.
+		This attribute can be used to change it to the desired value.
 
 
 ************
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 27a52b35d55b..3d8a97747f77 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed.
 section titled <section title> from <filename>.
 Spaces are allowed in <section title>; do not quote the <section title>.
 
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This makes sense to use when you use !F/!P only and want to verify
+that all documentation is included.
+
 Tim.
 */ <twaugh@redhat.com>
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index c91ccc0720fa..38c10fd7f411 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
   'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
   A smaller structure size means less RAM footprint, and better
   CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int mutex_lock_interruptible_nested(struct mutex *lock,
                                      unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 9363e056188a..8ed17587a74b 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
 current limit is controllable).
 
 (C) 2008 Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
 
 
 Nomenclature
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index ce46fa1e643e..37c6aad5e590 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop	Basic Laptop config (default)
+  hp-laptop	HP laptops, e g G60
   dell-laptop	Dell laptops
   dell-vostro	Dell Vostro
   olpc-xo-1_5	OLPC XO 1.5
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644
index 000000000000..e4498a2872c3
--- /dev/null
+++ b/Documentation/workqueue.txt
@@ -0,0 +1,380 @@
1
2Concurrency Managed Workqueue (cmwq)
3
4September, 2010 Tejun Heo <tj@kernel.org>
5 Florian Mickler <florian@mickler.org>
6
7CONTENTS
8
91. Introduction
102. Why cmwq?
113. The Design
124. Application Programming Interface (API)
135. Example Execution Scenarios
146. Guidelines
15
16
171. Introduction
18
19There are many cases where an asynchronous process execution context
20is needed and the workqueue (wq) API is the most commonly used
21mechanism for such cases.
22
23When such an asynchronous execution context is needed, a work item
24describing which function to execute is put on a queue. An
25independent thread serves as the asynchronous execution context. The
26queue is called workqueue and the thread is called worker.
27
28While there are work items on the workqueue the worker executes the
29functions associated with the work items one after the other. When
30there is no work item left on the workqueue the worker becomes idle.
31When a new work item gets queued, the worker begins executing again.
32
33
342. Why cmwq?
35
36In the original wq implementation, a multi threaded (MT) wq had one
37worker thread per CPU and a single threaded (ST) wq had one worker
38thread system-wide. A single MT wq needed to keep around the same
39number of workers as the number of CPUs. The kernel grew a lot of MT
40wq users over the years and with the number of CPU cores continuously
41rising, some systems saturated the default 32k PID space just booting
42up.
43
44Although MT wq wasted a lot of resource, the level of concurrency
45provided was unsatisfactory. The limitation was common to both ST and
46MT wq albeit less severe on MT. Each wq maintained its own separate
47worker pool. A MT wq could provide only one execution context per CPU
48while a ST wq one for the whole system. Work items had to compete for
49those very limited execution contexts leading to various problems
50including proneness to deadlocks around the single execution context.
51
52The tension between the provided level of concurrency and resource
53usage also forced its users to make unnecessary tradeoffs like libata
54choosing to use ST wq for polling PIOs and accepting an unnecessary
55limitation that no two polling PIOs can progress at the same time. As
56MT wq don't provide much better concurrency, users which require
57higher level of concurrency, like async or fscache, had to implement
58their own thread pool.
59
60Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
61focus on the following goals.
62
63* Maintain compatibility with the original workqueue API.
64
65* Use per-CPU unified worker pools shared by all wq to provide
66 flexible level of concurrency on demand without wasting a lot of
67 resource.
68
69* Automatically regulate worker pool and level of concurrency so that
70 the API users don't need to worry about such details.
71
72
733. The Design
74
75In order to ease the asynchronous execution of functions a new
76abstraction, the work item, is introduced.
77
78A work item is a simple struct that holds a pointer to the function
79that is to be executed asynchronously. Whenever a driver or subsystem
80wants a function to be executed asynchronously it has to set up a work
81item pointing to that function and queue that work item on a
82workqueue.
83
84Special purpose threads, called worker threads, execute the functions
85off of the queue, one after the other. If no work is queued, the
86worker threads become idle. These worker threads are managed in so
87called thread-pools.
88
89The cmwq design differentiates between the user-facing workqueues that
90subsystems and drivers queue work items on and the backend mechanism
91which manages thread-pool and processes the queued work items.
92
93The backend is called gcwq. There is one gcwq for each possible CPU
94and one gcwq to serve work items queued on unbound workqueues.
95
96Subsystems and drivers can create and queue work items through special
97workqueue API functions as they see fit. They can influence some
98aspects of the way the work items are executed by setting flags on the
99workqueue they are putting the work item on. These flags include
100things like CPU locality, reentrancy, concurrency limits and more. To
101get a detailed overview refer to the API description of
102alloc_workqueue() below.
103
104When a work item is queued to a workqueue, the target gcwq is
105determined according to the queue parameters and workqueue attributes
106and appended on the shared worklist of the gcwq. For example, unless
107specifically overridden, a work item of a bound workqueue will be
108queued on the worklist of exactly that gcwq that is associated to the
109CPU the issuer is running on.
110
111For any worker pool implementation, managing the concurrency level
112(how many execution contexts are active) is an important issue. cmwq
113tries to keep the concurrency at a minimal but sufficient level.
114Minimal to save resources and sufficient in that the system is used at
115its full capacity.
116
117Each gcwq bound to an actual CPU implements concurrency management by
118hooking into the scheduler. The gcwq is notified whenever an active
119worker wakes up or sleeps and keeps track of the number of the
120currently runnable workers. Generally, work items are not expected to
121hog a CPU and consume many cycles. That means maintaining just enough
122concurrency to prevent work processing from stalling should be
123optimal. As long as there are one or more runnable workers on the
124CPU, the gcwq doesn't start execution of a new work, but, when the
125last running worker goes to sleep, it immediately schedules a new
126worker so that the CPU doesn't sit idle while there are pending work
127items. This allows using a minimal number of workers without losing
128execution bandwidth.
129
130Keeping idle workers around doesn't cost other than the memory space
131for kthreads, so cmwq holds onto idle ones for a while before killing
132them.
133
134For an unbound wq, the above concurrency management doesn't apply and
135the gcwq for the pseudo unbound CPU tries to start executing all work
136items as soon as possible. The responsibility of regulating
137concurrency level is on the users. There is also a flag to mark a
138bound wq to ignore the concurrency management. Please refer to the
139API section for details.
140
141Forward progress guarantee relies on that workers can be created when
142more execution contexts are necessary, which in turn is guaranteed
143through the use of rescue workers. All work items which might be used
144on code paths that handle memory reclaim are required to be queued on
145wq's that have a rescue-worker reserved for execution under memory
146pressure. Else it is possible that the thread-pool deadlocks waiting
147for execution contexts to free up.
148
149
1504. Application Programming Interface (API)
151
152alloc_workqueue() allocates a wq. The original create_*workqueue()
153functions are deprecated and scheduled for removal. alloc_workqueue()
154takes three arguments - @name, @flags and @max_active. @name is the
155name of the wq and also used as the name of the rescuer thread if
156there is one.
157
158A wq no longer manages execution resources but serves as a domain for
159forward progress guarantee, flush and work item attributes. @flags
160and @max_active control how work items are assigned execution
161resources, scheduled and executed.
162
163@flags:
164
165 WQ_NON_REENTRANT
166
167 By default, a wq guarantees non-reentrance only on the same
168 CPU. A work item may not be executed concurrently on the same
169 CPU by multiple workers but is allowed to be executed
170 concurrently on multiple CPUs. This flag makes sure
171 non-reentrance is enforced across all CPUs. Work items queued
172 to a non-reentrant wq are guaranteed to be executed by at most
173 one worker system-wide at any given time.
174
175 WQ_UNBOUND
176
177 Work items queued to an unbound wq are served by a special
178 gcwq which hosts workers which are not bound to any specific
179 CPU. This makes the wq behave as a simple execution context
180 provider without concurrency management. The unbound gcwq
181 tries to start execution of work items as soon as possible.
182 Unbound wq sacrifices locality but is useful for the following
183 cases.
184
185 * Wide fluctuation in the concurrency level requirement is
186 expected and using bound wq may end up creating large number
187 of mostly unused workers across different CPUs as the issuer
188 hops through different CPUs.
189
190 * Long running CPU intensive workloads which can be better
191 managed by the system scheduler.
192
193 WQ_FREEZEABLE
194
195 A freezeable wq participates in the freeze phase of the system
196 suspend operations. Work items on the wq are drained and no
197 new work item starts execution until thawed.
198
199 WQ_RESCUER
200
201 All wq which might be used in the memory reclaim paths _MUST_
202 have this flag set. This reserves one worker exclusively for
203 the execution of this wq under memory pressure.
204
205 WQ_HIGHPRI
206
207 Work items of a highpri wq are queued at the head of the
208 worklist of the target gcwq and start execution regardless of
209 the current concurrency level. In other words, highpri work
210 items will always start execution as soon as execution
211 resource is available.
212
213 Ordering among highpri work items is preserved - a highpri
214 work item queued after another highpri work item will start
215 execution after the earlier highpri work item starts.
216
217 Although highpri work items are not held back by other
218 runnable work items, they still contribute to the concurrency
219 level. Highpri work items in runnable state will prevent
220 non-highpri work items from starting execution.
221
222 This flag is meaningless for unbound wq.
223
224 WQ_CPU_INTENSIVE
225
226 Work items of a CPU intensive wq do not contribute to the
227 concurrency level. In other words, runnable CPU intensive
228 work items will not prevent other work items from starting
229 execution. This is useful for bound work items which are
230 expected to hog CPU cycles so that their execution is
231 regulated by the system scheduler.
232
233 Although CPU intensive work items don't contribute to the
234 concurrency level, start of their executions is still
235 regulated by the concurrency management and runnable
236 non-CPU-intensive work items can delay execution of CPU
237 intensive work items.
238
239 This flag is meaningless for unbound wq.
240
241 WQ_HIGHPRI | WQ_CPU_INTENSIVE
242
243 This combination makes the wq avoid interaction with
244 concurrency management completely and behave as a simple
245 per-CPU execution context provider. Work items queued on a
246 highpri CPU-intensive wq start execution as soon as resources
247 are available and don't affect execution of other work items.
248
249@max_active:
250
251@max_active determines the maximum number of execution contexts per
252CPU which can be assigned to the work items of a wq. For example,
253with @max_active of 16, at most 16 work items of the wq can be
254executing at the same time per CPU.
255
256Currently, for a bound wq, the maximum limit for @max_active is 512
257and the default value used when 0 is specified is 256. For an unbound
258wq, the limit is higher of 512 and 4 * num_possible_cpus(). These
259values are chosen sufficiently high such that they are not the
260limiting factor while providing protection in runaway cases.
261
262The number of active work items of a wq is usually regulated by the
263users of the wq, more specifically, by how many work items the users
264may queue at the same time. Unless there is a specific need for
265throttling the number of active work items, specifying '0' is
266recommended.
267
268Some users depend on the strict execution ordering of ST wq. The
269combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
270behavior. Work items on such wq are always queued to the unbound gcwq
271and only one work item can be active at any given time thus achieving
272the same ordering property as ST wq.
273
274
2755. Example Execution Scenarios
276
277The following example execution scenarios try to illustrate how cmwq
278behave under different configurations.
279
280 Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
281 w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
282 again before finishing. w1 and w2 burn CPU for 5ms then sleep for
283 10ms.
284
285Ignoring all other tasks, works and processing overhead, and assuming
286simple FIFO scheduling, the following is one highly simplified version
287of possible sequences of events with the original wq.
288
289 TIME IN MSECS EVENT
290 0 w0 starts and burns CPU
291 5 w0 sleeps
292 15 w0 wakes up and burns CPU
293 20 w0 finishes
294 20 w1 starts and burns CPU
295 25 w1 sleeps
296 35 w1 wakes up and finishes
297 35 w2 starts and burns CPU
298 40 w2 sleeps
299 50 w2 wakes up and finishes
300
301And with cmwq with @max_active >= 3,
302
303 TIME IN MSECS EVENT
304 0 w0 starts and burns CPU
305 5 w0 sleeps
306 5 w1 starts and burns CPU
307 10 w1 sleeps
308 10 w2 starts and burns CPU
309 15 w2 sleeps
310 15 w0 wakes up and burns CPU
311 20 w0 finishes
312 20 w1 wakes up and finishes
313 25 w2 wakes up and finishes
314
315If @max_active == 2,
316
317 TIME IN MSECS EVENT
318 0 w0 starts and burns CPU
319 5 w0 sleeps
320 5 w1 starts and burns CPU
321 10 w1 sleeps
322 15 w0 wakes up and burns CPU
323 20 w0 finishes
324 20 w1 wakes up and finishes
325 20 w2 starts and burns CPU
326 25 w2 sleeps
327 35 w2 wakes up and finishes
328
329Now, let's assume w1 and w2 are queued to a different wq q1 which has
330WQ_HIGHPRI set,
331
332 TIME IN MSECS EVENT
333 0 w1 and w2 start and burn CPU
334 5 w1 sleeps
335 10 w2 sleeps
336 10 w0 starts and burns CPU
337 15 w0 sleeps
338 15 w1 wakes up and finishes
339 20 w2 wakes up and finishes
340 25 w0 wakes up and burns CPU
341 30 w0 finishes
342
343If q1 has WQ_CPU_INTENSIVE set,
344
345 TIME IN MSECS EVENT
346 0 w0 starts and burns CPU
347 5 w0 sleeps
348 5 w1 and w2 start and burn CPU
349 10 w1 sleeps
350 15 w2 sleeps
351 15 w0 wakes up and burns CPU
352 20 w0 finishes
353 20 w1 wakes up and finishes
354 25 w2 wakes up and finishes
355
356
3576. Guidelines
358
359* Do not forget to use WQ_RESCUER if a wq may process work items which
360 are used during memory reclaim. Each wq with WQ_RESCUER set has one
361  rescuer thread reserved for it. If there is a dependency among
362  multiple work items used during memory reclaim, they should be
363  queued to separate wqs, each with WQ_RESCUER (see the sketch below).
364
365* Unless strict ordering is required, there is no need to use ST wq.
366
367* Unless there is a specific need, using 0 for @max_active is
368  recommended. In most use cases, the concurrency level stays
369 well under the default limit.
370
371* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
372 flush and work item attributes. Work items which are not involved
373 in memory reclaim and don't need to be flushed as a part of a group
374 of work items, and don't require any special attribute, can use one
375  of the system wqs. There is no difference in execution
376 characteristics between using a dedicated wq and a system wq.
377
378* Unless work items are expected to consume a huge amount of CPU
379 cycles, using a bound wq is usually beneficial due to the increased
380 level of locality in wq operations and work item execution.
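
As a sketch of the first guideline above (the "xyz" names are invented
for illustration), a wq whose work items sit in the memory reclaim path
could be set up as:

        /* one rescuer thread is reserved for this wq, so its work
         * items can make forward progress even when new worker
         * threads can't be created under memory pressure */
        xyz_reclaim_wq = alloc_workqueue("xyz_reclaim", WQ_RESCUER, 0);
        if (!xyz_reclaim_wq)
                return -ENOMEM;

        queue_work(xyz_reclaim_wq, &xyz_reclaim_work);
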
diff --git a/MAINTAINERS b/MAINTAINERS
index 20a03b93bda5..ae95fd4efbd4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
962S: Maintained 962S: Maintained
963F: arch/arm/mach-s3c6410/ 963F: arch/arm/mach-s3c6410/
964 964
965ARM/S5P ARM ARCHITECTURES
966M: Kukjin Kim <kgene.kim@samsung.com>
967L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
968L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
969S: Maintained
970F: arch/arm/mach-s5p*/
971
965ARM/SHMOBILE ARM ARCHITECTURE 972ARM/SHMOBILE ARM ARCHITECTURE
966M: Paul Mundt <lethal@linux-sh.org> 973M: Paul Mundt <lethal@linux-sh.org>
967M: Magnus Damm <magnus.damm@gmail.com> 974M: Magnus Damm <magnus.damm@gmail.com>
@@ -1220,7 +1227,7 @@ F: drivers/auxdisplay/
1220F: include/linux/cfag12864b.h 1227F: include/linux/cfag12864b.h
1221 1228
1222AVR32 ARCHITECTURE 1229AVR32 ARCHITECTURE
1223M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1230M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
1224W: http://www.atmel.com/products/AVR32/ 1231W: http://www.atmel.com/products/AVR32/
1225W: http://avr32linux.org/ 1232W: http://avr32linux.org/
1226W: http://avrfreaks.net/ 1233W: http://avrfreaks.net/
@@ -1228,7 +1235,7 @@ S: Supported
1228F: arch/avr32/ 1235F: arch/avr32/
1229 1236
1230AVR32/AT32AP MACHINE SUPPORT 1237AVR32/AT32AP MACHINE SUPPORT
1231M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1238M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
1232S: Supported 1239S: Supported
1233F: arch/avr32/mach-at32ap/ 1240F: arch/avr32/mach-at32ap/
1234 1241
@@ -2199,6 +2206,12 @@ W: http://acpi4asus.sf.net
2199S: Maintained 2206S: Maintained
2200F: drivers/platform/x86/eeepc-laptop.c 2207F: drivers/platform/x86/eeepc-laptop.c
2201 2208
2209EFIFB FRAMEBUFFER DRIVER
2210L: linux-fbdev@vger.kernel.org
2211M: Peter Jones <pjones@redhat.com>
2212S: Maintained
2213F: drivers/video/efifb.c
2214
2202EFS FILESYSTEM 2215EFS FILESYSTEM
2203W: http://aeschi.ch.eu.org/efs/ 2216W: http://aeschi.ch.eu.org/efs/
2204S: Orphan 2217S: Orphan
@@ -2657,9 +2670,14 @@ S: Maintained
2657F: drivers/media/video/gspca/ 2670F: drivers/media/video/gspca/
2658 2671
2659HARDWARE MONITORING 2672HARDWARE MONITORING
2673M: Jean Delvare <khali@linux-fr.org>
2674M: Guenter Roeck <guenter.roeck@ericsson.com>
2660L: lm-sensors@lm-sensors.org 2675L: lm-sensors@lm-sensors.org
2661W: http://www.lm-sensors.org/ 2676W: http://www.lm-sensors.org/
2662S: Orphan 2677T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
2678T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
2679T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
2680S: Maintained
2663F: Documentation/hwmon/ 2681F: Documentation/hwmon/
2664F: drivers/hwmon/ 2682F: drivers/hwmon/
2665F: include/linux/hwmon*.h 2683F: include/linux/hwmon*.h
@@ -2797,11 +2815,6 @@ S: Maintained
2797F: arch/x86/kernel/hpet.c 2815F: arch/x86/kernel/hpet.c
2798F: arch/x86/include/asm/hpet.h 2816F: arch/x86/include/asm/hpet.h
2799 2817
2800HPET: ACPI
2801M: Bob Picco <bob.picco@hp.com>
2802S: Maintained
2803F: drivers/char/hpet.c
2804
2805HPFS FILESYSTEM 2818HPFS FILESYSTEM
2806M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> 2819M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
2807W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi 2820W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3426,7 +3439,7 @@ F: drivers/s390/kvm/
3426 3439
3427KEXEC 3440KEXEC
3428M: Eric Biederman <ebiederm@xmission.com> 3441M: Eric Biederman <ebiederm@xmission.com>
3429W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ 3442W: http://kernel.org/pub/linux/utils/kernel/kexec/
3430L: kexec@lists.infradead.org 3443L: kexec@lists.infradead.org
3431S: Maintained 3444S: Maintained
3432F: include/linux/kexec.h 3445F: include/linux/kexec.h
@@ -3787,9 +3800,8 @@ W: http://www.syskonnect.com
3787S: Supported 3800S: Supported
3788 3801
3789MATROX FRAMEBUFFER DRIVER 3802MATROX FRAMEBUFFER DRIVER
3790M: Petr Vandrovec <vandrove@vc.cvut.cz>
3791L: linux-fbdev@vger.kernel.org 3803L: linux-fbdev@vger.kernel.org
3792S: Maintained 3804S: Orphan
3793F: drivers/video/matrox/matroxfb_* 3805F: drivers/video/matrox/matroxfb_*
3794F: include/linux/matroxfb.h 3806F: include/linux/matroxfb.h
3795 3807
@@ -3913,10 +3925,8 @@ F: Documentation/serial/moxa-smartio
3913F: drivers/char/mxser.* 3925F: drivers/char/mxser.*
3914 3926
3915MSI LAPTOP SUPPORT 3927MSI LAPTOP SUPPORT
3916M: Lennart Poettering <mzxreary@0pointer.de> 3928M: Lee, Chun-Yi <jlee@novell.com>
3917L: platform-driver-x86@vger.kernel.org 3929L: platform-driver-x86@vger.kernel.org
3918W: https://tango.0pointer.de/mailman/listinfo/s270-linux
3919W: http://0pointer.de/lennart/tchibo.html
3920S: Maintained 3930S: Maintained
3921F: drivers/platform/x86/msi-laptop.c 3931F: drivers/platform/x86/msi-laptop.c
3922 3932
@@ -3933,8 +3943,10 @@ S: Supported
3933F: drivers/mfd/ 3943F: drivers/mfd/
3934 3944
3935MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3945MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
3936S: Orphan 3946M: Chris Ball <cjb@laptop.org>
3937L: linux-mmc@vger.kernel.org 3947L: linux-mmc@vger.kernel.org
3948T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
3949S: Maintained
3938F: drivers/mmc/ 3950F: drivers/mmc/
3939F: include/linux/mmc/ 3951F: include/linux/mmc/
3940 3952
@@ -3956,7 +3968,7 @@ F: drivers/char/isicom.c
3956F: include/linux/isicom.h 3968F: include/linux/isicom.h
3957 3969
3958MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER 3970MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
3959M: Felipe Balbi <felipe.balbi@nokia.com> 3971M: Felipe Balbi <balbi@ti.com>
3960L: linux-usb@vger.kernel.org 3972L: linux-usb@vger.kernel.org
3961T: git git://gitorious.org/usb/usb.git 3973T: git git://gitorious.org/usb/usb.git
3962S: Maintained 3974S: Maintained
@@ -3976,8 +3988,8 @@ S: Maintained
3976F: drivers/net/natsemi.c 3988F: drivers/net/natsemi.c
3977 3989
3978NCP FILESYSTEM 3990NCP FILESYSTEM
3979M: Petr Vandrovec <vandrove@vc.cvut.cz> 3991M: Petr Vandrovec <petr@vandrovec.name>
3980S: Maintained 3992S: Odd Fixes
3981F: fs/ncpfs/ 3993F: fs/ncpfs/
3982 3994
3983NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) 3995NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -4254,7 +4266,7 @@ S: Maintained
4254F: drivers/char/hw_random/omap-rng.c 4266F: drivers/char/hw_random/omap-rng.c
4255 4267
4256OMAP USB SUPPORT 4268OMAP USB SUPPORT
4257M: Felipe Balbi <felipe.balbi@nokia.com> 4269M: Felipe Balbi <balbi@ti.com>
4258M: David Brownell <dbrownell@users.sourceforge.net> 4270M: David Brownell <dbrownell@users.sourceforge.net>
4259L: linux-usb@vger.kernel.org 4271L: linux-usb@vger.kernel.org
4260L: linux-omap@vger.kernel.org 4272L: linux-omap@vger.kernel.org
@@ -4832,6 +4844,7 @@ RCUTORTURE MODULE
4832M: Josh Triplett <josh@freedesktop.org> 4844M: Josh Triplett <josh@freedesktop.org>
4833M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4845M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4834S: Supported 4846S: Supported
4847T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4835F: Documentation/RCU/torture.txt 4848F: Documentation/RCU/torture.txt
4836F: kernel/rcutorture.c 4849F: kernel/rcutorture.c
4837 4850
@@ -4856,6 +4869,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
4856M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4869M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4857W: http://www.rdrop.com/users/paulmck/rclock/ 4870W: http://www.rdrop.com/users/paulmck/rclock/
4858S: Supported 4871S: Supported
4872T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4859F: Documentation/RCU/ 4873F: Documentation/RCU/
4860F: include/linux/rcu* 4874F: include/linux/rcu*
4861F: include/linux/srcu* 4875F: include/linux/srcu*
@@ -4863,12 +4877,10 @@ F: kernel/rcu*
4863F: kernel/srcu* 4877F: kernel/srcu*
4864X: kernel/rcutorture.c 4878X: kernel/rcutorture.c
4865 4879
4866REAL TIME CLOCK DRIVER 4880REAL TIME CLOCK DRIVER (LEGACY)
4867M: Paul Gortmaker <p_gortmaker@yahoo.com> 4881M: Paul Gortmaker <p_gortmaker@yahoo.com>
4868S: Maintained 4882S: Maintained
4869F: Documentation/rtc.txt 4883F: drivers/char/rtc.c
4870F: drivers/rtc/
4871F: include/linux/rtc.h
4872 4884
4873REAL TIME CLOCK (RTC) SUBSYSTEM 4885REAL TIME CLOCK (RTC) SUBSYSTEM
4874M: Alessandro Zummo <a.zummo@towertech.it> 4886M: Alessandro Zummo <a.zummo@towertech.it>
@@ -5105,8 +5117,10 @@ S: Maintained
5105F: drivers/mmc/host/sdricoh_cs.c 5117F: drivers/mmc/host/sdricoh_cs.c
5106 5118
5107SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5119SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
5108S: Orphan 5120M: Chris Ball <cjb@laptop.org>
5109L: linux-mmc@vger.kernel.org 5121L: linux-mmc@vger.kernel.org
5122T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
5123S: Maintained
5110F: drivers/mmc/host/sdhci.* 5124F: drivers/mmc/host/sdhci.*
5111 5125
5112SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) 5126SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
diff --git a/Makefile b/Makefile
index 4df9873f83b2..77b5c6ed0ce5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 36 3SUBLEVEL = 36
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc7
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 4877a8c8ee16..fe48fc7a3eba 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -32,8 +32,9 @@ config HAVE_OPROFILE
32 32
33config KPROBES 33config KPROBES
34 bool "Kprobes" 34 bool "Kprobes"
35 depends on KALLSYMS && MODULES 35 depends on MODULES
36 depends on HAVE_KPROBES 36 depends on HAVE_KPROBES
37 select KALLSYMS
37 help 38 help
38 Kprobes allows you to trap at almost any kernel address and 39 Kprobes allows you to trap at almost any kernel address and
39 execute a callback function. register_kprobe() establishes 40 execute a callback function. register_kprobe() establishes
@@ -45,7 +46,6 @@ config OPTPROBES
45 def_bool y 46 def_bool y
46 depends on KPROBES && HAVE_OPTPROBES 47 depends on KPROBES && HAVE_OPTPROBES
47 depends on !PREEMPT 48 depends on !PREEMPT
48 select KALLSYMS_ALL
49 49
50config HAVE_EFFICIENT_UNALIGNED_ACCESS 50config HAVE_EFFICIENT_UNALIGNED_ACCESS
51 bool 51 bool
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 01d71e1c8a9e..012f1243b1c1 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -43,6 +43,8 @@ extern void smp_imb(void);
43/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ 43/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
44 44
45#ifndef CONFIG_SMP 45#ifndef CONFIG_SMP
46#include <linux/sched.h>
47
46extern void __load_new_mm_context(struct mm_struct *); 48extern void __load_new_mm_context(struct mm_struct *);
47static inline void 49static inline void
48flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 50flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 804e5311c841..058937bf5a77 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -449,10 +449,13 @@
449#define __NR_pwritev 491 449#define __NR_pwritev 491
450#define __NR_rt_tgsigqueueinfo 492 450#define __NR_rt_tgsigqueueinfo 492
451#define __NR_perf_event_open 493 451#define __NR_perf_event_open 493
452#define __NR_fanotify_init 494
453#define __NR_fanotify_mark 495
454#define __NR_prlimit64 496
452 455
453#ifdef __KERNEL__ 456#ifdef __KERNEL__
454 457
455#define NR_SYSCALLS 494 458#define NR_SYSCALLS 497
456 459
457#define __ARCH_WANT_IPC_PARSE_VERSION 460#define __ARCH_WANT_IPC_PARSE_VERSION
458#define __ARCH_WANT_OLD_READDIR 461#define __ARCH_WANT_OLD_READDIR
@@ -463,6 +466,7 @@
463#define __ARCH_WANT_SYS_OLD_GETRLIMIT 466#define __ARCH_WANT_SYS_OLD_GETRLIMIT
464#define __ARCH_WANT_SYS_OLDUMOUNT 467#define __ARCH_WANT_SYS_OLDUMOUNT
465#define __ARCH_WANT_SYS_SIGPENDING 468#define __ARCH_WANT_SYS_SIGPENDING
469#define __ARCH_WANT_SYS_RT_SIGSUSPEND
466 470
467/* "Conditional" syscalls. What we want is 471/* "Conditional" syscalls. What we want is
468 472
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index b45d913a51c3..6d159cee5f2f 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
73 ldq $20, HAE_REG($19); \ 73 ldq $20, HAE_REG($19); \
74 stq $21, HAE_CACHE($19); \ 74 stq $21, HAE_CACHE($19); \
75 stq $21, 0($20); \ 75 stq $21, 0($20); \
76 ldq $0, 0($sp); \
77 ldq $1, 8($sp); \
7899:; \ 7699:; \
79 ldq $19, 72($sp); \ 77 ldq $19, 72($sp); \
80 ldq $20, 80($sp); \ 78 ldq $20, 80($sp); \
@@ -316,19 +314,24 @@ ret_from_sys_call:
316 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ 314 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
317 ldq $0, SP_OFF($sp) 315 ldq $0, SP_OFF($sp)
318 and $0, 8, $0 316 and $0, 8, $0
319 beq $0, restore_all 317 beq $0, ret_to_kernel
320ret_from_reschedule: 318ret_to_user:
321 /* Make sure need_resched and sigpending don't change between 319 /* Make sure need_resched and sigpending don't change between
322 sampling and the rti. */ 320 sampling and the rti. */
323 lda $16, 7 321 lda $16, 7
324 call_pal PAL_swpipl 322 call_pal PAL_swpipl
325 ldl $5, TI_FLAGS($8) 323 ldl $5, TI_FLAGS($8)
326 and $5, _TIF_WORK_MASK, $2 324 and $5, _TIF_WORK_MASK, $2
327 bne $5, work_pending 325 bne $2, work_pending
328restore_all: 326restore_all:
329 RESTORE_ALL 327 RESTORE_ALL
330 call_pal PAL_rti 328 call_pal PAL_rti
331 329
330ret_to_kernel:
331 lda $16, 7
332 call_pal PAL_swpipl
333 br restore_all
334
332 .align 3 335 .align 3
333$syscall_error: 336$syscall_error:
334 /* 337 /*
@@ -363,7 +366,7 @@ $ret_success:
363 * $8: current. 366 * $8: current.
364 * $19: The old syscall number, or zero if this is not a return 367 * $19: The old syscall number, or zero if this is not a return
365 * from a syscall that errored and is possibly restartable. 368 * from a syscall that errored and is possibly restartable.
366 * $20: Error indication. 369 * $20: The old a3 value
367 */ 370 */
368 371
369 .align 4 372 .align 4
@@ -392,12 +395,18 @@ $work_resched:
392 395
393$work_notifysig: 396$work_notifysig:
394 mov $sp, $16 397 mov $sp, $16
395 br $1, do_switch_stack 398 bsr $1, do_switch_stack
396 mov $sp, $17 399 mov $sp, $17
397 mov $5, $18 400 mov $5, $18
401 mov $19, $9 /* save old syscall number */
402 mov $20, $10 /* save old a3 */
403 and $5, _TIF_SIGPENDING, $2
404 cmovne $2, 0, $9 /* we don't want double syscall restarts */
398 jsr $26, do_notify_resume 405 jsr $26, do_notify_resume
406 mov $9, $19
407 mov $10, $20
399 bsr $1, undo_switch_stack 408 bsr $1, undo_switch_stack
400 br restore_all 409 br ret_to_user
401.end work_pending 410.end work_pending
402 411
403/* 412/*
@@ -430,6 +439,7 @@ strace:
430 beq $1, 1f 439 beq $1, 1f
431 ldq $27, 0($2) 440 ldq $27, 0($2)
4321: jsr $26, ($27), sys_gettimeofday 4411: jsr $26, ($27), sys_gettimeofday
442ret_from_straced:
433 ldgp $gp, 0($26) 443 ldgp $gp, 0($26)
434 444
435 /* check return.. */ 445 /* check return.. */
@@ -650,7 +660,7 @@ kernel_thread:
650 /* We don't actually care for a3 success widgetry in the kernel. 660 /* We don't actually care for a3 success widgetry in the kernel.
651 Not for positive errno values. */ 661 Not for positive errno values. */
652 stq $0, 0($sp) /* $0 */ 662 stq $0, 0($sp) /* $0 */
653 br restore_all 663 br ret_to_kernel
654.end kernel_thread 664.end kernel_thread
655 665
656/* 666/*
@@ -757,11 +767,15 @@ sys_vfork:
757 .ent sys_sigreturn 767 .ent sys_sigreturn
758sys_sigreturn: 768sys_sigreturn:
759 .prologue 0 769 .prologue 0
770 lda $9, ret_from_straced
771 cmpult $26, $9, $9
760 mov $sp, $17 772 mov $sp, $17
761 lda $18, -SWITCH_STACK_SIZE($sp) 773 lda $18, -SWITCH_STACK_SIZE($sp)
762 lda $sp, -SWITCH_STACK_SIZE($sp) 774 lda $sp, -SWITCH_STACK_SIZE($sp)
763 jsr $26, do_sigreturn 775 jsr $26, do_sigreturn
764 br $1, undo_switch_stack 776 bne $9, 1f
777 jsr $26, syscall_trace
7781: br $1, undo_switch_stack
765 br ret_from_sys_call 779 br ret_from_sys_call
766.end sys_sigreturn 780.end sys_sigreturn
767 781
@@ -770,47 +784,19 @@ sys_sigreturn:
770 .ent sys_rt_sigreturn 784 .ent sys_rt_sigreturn
771sys_rt_sigreturn: 785sys_rt_sigreturn:
772 .prologue 0 786 .prologue 0
787 lda $9, ret_from_straced
788 cmpult $26, $9, $9
773 mov $sp, $17 789 mov $sp, $17
774 lda $18, -SWITCH_STACK_SIZE($sp) 790 lda $18, -SWITCH_STACK_SIZE($sp)
775 lda $sp, -SWITCH_STACK_SIZE($sp) 791 lda $sp, -SWITCH_STACK_SIZE($sp)
776 jsr $26, do_rt_sigreturn 792 jsr $26, do_rt_sigreturn
777 br $1, undo_switch_stack 793 bne $9, 1f
794 jsr $26, syscall_trace
7951: br $1, undo_switch_stack
778 br ret_from_sys_call 796 br ret_from_sys_call
779.end sys_rt_sigreturn 797.end sys_rt_sigreturn
780 798
781 .align 4 799 .align 4
782 .globl sys_sigsuspend
783 .ent sys_sigsuspend
784sys_sigsuspend:
785 .prologue 0
786 mov $sp, $17
787 br $1, do_switch_stack
788 mov $sp, $18
789 subq $sp, 16, $sp
790 stq $26, 0($sp)
791 jsr $26, do_sigsuspend
792 ldq $26, 0($sp)
793 lda $sp, SWITCH_STACK_SIZE+16($sp)
794 ret
795.end sys_sigsuspend
796
797 .align 4
798 .globl sys_rt_sigsuspend
799 .ent sys_rt_sigsuspend
800sys_rt_sigsuspend:
801 .prologue 0
802 mov $sp, $18
803 br $1, do_switch_stack
804 mov $sp, $19
805 subq $sp, 16, $sp
806 stq $26, 0($sp)
807 jsr $26, do_rt_sigsuspend
808 ldq $26, 0($sp)
809 lda $sp, SWITCH_STACK_SIZE+16($sp)
810 ret
811.end sys_rt_sigsuspend
812
813 .align 4
814 .globl sys_sethae 800 .globl sys_sethae
815 .ent sys_sethae 801 .ent sys_sethae
816sys_sethae: 802sys_sethae:
@@ -929,15 +915,6 @@ sys_execve:
929.end sys_execve 915.end sys_execve
930 916
931 .align 4 917 .align 4
932 .globl osf_sigprocmask
933 .ent osf_sigprocmask
934osf_sigprocmask:
935 .prologue 0
936 mov $sp, $18
937 jmp $31, sys_osf_sigprocmask
938.end osf_sigprocmask
939
940 .align 4
941 .globl alpha_ni_syscall 918 .globl alpha_ni_syscall
942 .ent alpha_ni_syscall 919 .ent alpha_ni_syscall
943alpha_ni_syscall: 920alpha_ni_syscall:
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 8ca6345bf131..253cf1a87481 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -90,11 +90,13 @@ static int
90ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, 90ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
91 u64 c_stat, u64 c_sts, int print) 91 u64 c_stat, u64 c_sts, int print)
92{ 92{
93 char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN", 93 static const char * const sourcename[] = {
94 "MEMORY", "BCACHE", "DCACHE", 94 "UNKNOWN", "UNKNOWN", "UNKNOWN",
95 "BCACHE PROBE", "BCACHE PROBE" }; 95 "MEMORY", "BCACHE", "DCACHE",
96 char *streamname[] = { "D", "I" }; 96 "BCACHE PROBE", "BCACHE PROBE"
97 char *bitsname[] = { "SINGLE", "DOUBLE" }; 97 };
98 static const char * const streamname[] = { "D", "I" };
99 static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
98 int status = MCHK_DISPOSITION_REPORT; 100 int status = MCHK_DISPOSITION_REPORT;
99 int source = -1, stream = -1, bits = -1; 101 int source = -1, stream = -1, bits = -1;
100 102
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 5c905aaaeccd..648ae88aeb8a 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
589static void 589static void
590marvel_print_pox_trans_sum(u64 trans_sum) 590marvel_print_pox_trans_sum(u64 trans_sum)
591{ 591{
592 char *pcix_cmd[] = { "Interrupt Acknowledge", 592 static const char * const pcix_cmd[] = {
593 "Special Cycle", 593 "Interrupt Acknowledge",
594 "I/O Read", 594 "Special Cycle",
595 "I/O Write", 595 "I/O Read",
596 "Reserved", 596 "I/O Write",
597 "Reserved / Device ID Message", 597 "Reserved",
598 "Memory Read", 598 "Reserved / Device ID Message",
599 "Memory Write", 599 "Memory Read",
600 "Reserved / Alias to Memory Read Block", 600 "Memory Write",
601 "Reserved / Alias to Memory Write Block", 601 "Reserved / Alias to Memory Read Block",
602 "Configuration Read", 602 "Reserved / Alias to Memory Write Block",
603 "Configuration Write", 603 "Configuration Read",
604 "Memory Read Multiple / Split Completion", 604 "Configuration Write",
605 "Dual Address Cycle", 605 "Memory Read Multiple / Split Completion",
606 "Memory Read Line / Memory Read Block", 606 "Dual Address Cycle",
607 "Memory Write and Invalidate / Memory Write Block" 607 "Memory Read Line / Memory Read Block",
608 "Memory Write and Invalidate / Memory Write Block"
608 }; 609 };
609 610
610#define IO7__POX_TRANSUM__PCI_ADDR__S (0) 611#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index f7ed97ce0dfd..c3b3781a03de 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
75 int status = MCHK_DISPOSITION_REPORT; 75 int status = MCHK_DISPOSITION_REPORT;
76 76
77#ifdef CONFIG_VERBOSE_MCHECK 77#ifdef CONFIG_VERBOSE_MCHECK
78 char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"}; 78 static const char * const serror_src[] = {
79 char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"}; 79 "GPCI", "APCI", "AGP HP", "AGP LP"
80 };
81 static const char * const serror_cmd[] = {
82 "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
83 };
80#endif /* CONFIG_VERBOSE_MCHECK */ 84#endif /* CONFIG_VERBOSE_MCHECK */
81 85
82#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) 86#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
140 int status = MCHK_DISPOSITION_REPORT; 144 int status = MCHK_DISPOSITION_REPORT;
141 145
142#ifdef CONFIG_VERBOSE_MCHECK 146#ifdef CONFIG_VERBOSE_MCHECK
143 char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", 147 static const char * const perror_cmd[] = {
144 "I/O Read", "I/O Write", 148 "Interrupt Acknowledge", "Special Cycle",
145 "Reserved", "Reserved", 149 "I/O Read", "I/O Write",
146 "Memory Read", "Memory Write", 150 "Reserved", "Reserved",
147 "Reserved", "Reserved", 151 "Memory Read", "Memory Write",
148 "Configuration Read", "Configuration Write", 152 "Reserved", "Reserved",
149 "Memory Read Multiple", "Dual Address Cycle", 153 "Configuration Read", "Configuration Write",
150 "Memory Read Line","Memory Write and Invalidate" 154 "Memory Read Multiple", "Dual Address Cycle",
155 "Memory Read Line", "Memory Write and Invalidate"
151 }; 156 };
152#endif /* CONFIG_VERBOSE_MCHECK */ 157#endif /* CONFIG_VERBOSE_MCHECK */
153 158
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
273 int cmd, len; 278 int cmd, len;
274 unsigned long addr; 279 unsigned long addr;
275 280
276 char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", 281 static const char * const agperror_cmd[] = {
277 "Write (low-priority)", 282 "Read (low-priority)", "Read (high-priority)",
278 "Write (high-priority)", 283 "Write (low-priority)", "Write (high-priority)",
279 "Reserved", "Reserved", 284 "Reserved", "Reserved",
280 "Flush", "Fence" 285 "Flush", "Fence"
281 }; 286 };
282#endif /* CONFIG_VERBOSE_MCHECK */ 287#endif /* CONFIG_VERBOSE_MCHECK */
283 288
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 5d1e6d6ce684..547e8b84b2f7 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -15,7 +15,6 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19#include <linux/stddef.h> 18#include <linux/stddef.h>
20#include <linux/syscalls.h> 19#include <linux/syscalls.h>
21#include <linux/unistd.h> 20#include <linux/unistd.h>
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
69{ 68{
70 struct mm_struct *mm; 69 struct mm_struct *mm;
71 70
72 lock_kernel();
73 mm = current->mm; 71 mm = current->mm;
74 mm->end_code = bss_start + bss_len; 72 mm->end_code = bss_start + bss_len;
75 mm->start_brk = bss_start + bss_len; 73 mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
78 printk("set_program_attributes(%lx %lx %lx %lx)\n", 76 printk("set_program_attributes(%lx %lx %lx %lx)\n",
79 text_start, text_len, bss_start, bss_len); 77 text_start, text_len, bss_start, bss_len);
80#endif 78#endif
81 unlock_kernel();
82 return 0; 79 return 0;
83} 80}
84 81
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
517 long error; 514 long error;
518 int __user *min_buf_size_ptr; 515 int __user *min_buf_size_ptr;
519 516
520 lock_kernel();
521 switch (code) { 517 switch (code) {
522 case PL_SET: 518 case PL_SET:
523 if (get_user(error, &args->set.nbytes)) 519 if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
547 error = -EOPNOTSUPP; 543 error = -EOPNOTSUPP;
548 break; 544 break;
549 }; 545 };
550 unlock_kernel();
551 return error; 546 return error;
552} 547}
553 548
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
594 589
595SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) 590SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
596{ 591{
597 char *sysinfo_table[] = { 592 const char *sysinfo_table[] = {
598 utsname()->sysname, 593 utsname()->sysname,
599 utsname()->nodename, 594 utsname()->nodename,
600 utsname()->release, 595 utsname()->release,
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
606 "dummy", /* secure RPC domain */ 601 "dummy", /* secure RPC domain */
607 }; 602 };
608 unsigned long offset; 603 unsigned long offset;
609 char *res; 604 const char *res;
610 long len, err = -EINVAL; 605 long len, err = -EINVAL;
611 606
612 offset = command-1; 607 offset = command-1;
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 738fc824e2ea..b899e95f79fd 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
66{ 66{
67 struct pci_dev *pdev = to_pci_dev(container_of(kobj, 67 struct pci_dev *pdev = to_pci_dev(container_of(kobj,
68 struct device, kobj)); 68 struct device, kobj));
69 struct resource *res = (struct resource *)attr->private; 69 struct resource *res = attr->private;
70 enum pci_mmap_state mmap_type; 70 enum pci_mmap_state mmap_type;
71 struct pci_bus_region bar; 71 struct pci_bus_region bar;
72 int i; 72 int i;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308eab..3ec35066f1dc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
356 dest[27] = pt->r27; 356 dest[27] = pt->r27;
357 dest[28] = pt->r28; 357 dest[28] = pt->r28;
358 dest[29] = pt->gp; 358 dest[29] = pt->gp;
359 dest[30] = rdusp(); 359 dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
360 dest[31] = pt->pc; 360 dest[31] = pt->pc;
361 361
362 /* Once upon a time this was the PS value. Which is stupid 362 /* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0932dbb1ef8e..6f7feb5db271 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
41/* 41/*
42 * The OSF/1 sigprocmask calling sequence is different from the 42 * The OSF/1 sigprocmask calling sequence is different from the
43 * C sigprocmask() sequence.. 43 * C sigprocmask() sequence..
44 *
45 * how:
46 * 1 - SIG_BLOCK
47 * 2 - SIG_UNBLOCK
48 * 3 - SIG_SETMASK
49 *
50 * We change the range to -1 .. 1 in order to let gcc easily
51 * use the conditional move instructions.
52 *
53 * Note that we don't need to acquire the kernel lock for SMP
54 * operation, as all of this is local to this thread.
55 */ 44 */
56SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, 45SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
57 struct pt_regs *, regs)
58{ 46{
59 unsigned long oldmask = -EINVAL; 47 sigset_t oldmask;
60 48 sigset_t mask;
61 if ((unsigned long)how-1 <= 2) { 49 unsigned long res;
62 long sign = how-2; /* -1 .. 1 */ 50
63 unsigned long block, unblock; 51 siginitset(&mask, newmask & _BLOCKABLE);
64 52 res = sigprocmask(how, &mask, &oldmask);
65 newmask &= _BLOCKABLE; 53 if (!res) {
66 spin_lock_irq(&current->sighand->siglock); 54 force_successful_syscall_return();
67 oldmask = current->blocked.sig[0]; 55 res = oldmask.sig[0];
68
69 unblock = oldmask & ~newmask;
70 block = oldmask | newmask;
71 if (!sign)
72 block = unblock;
73 if (sign <= 0)
74 newmask = block;
75 if (_NSIG_WORDS > 1 && sign > 0)
76 sigemptyset(&current->blocked);
77 current->blocked.sig[0] = newmask;
78 recalc_sigpending();
79 spin_unlock_irq(&current->sighand->siglock);
80
81 regs->r0 = 0; /* special no error return */
82 } 56 }
83 return oldmask; 57 return res;
84} 58}
85 59
86SYSCALL_DEFINE3(osf_sigaction, int, sig, 60SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
94 old_sigset_t mask; 68 old_sigset_t mask;
95 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 69 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
96 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 70 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
97 __get_user(new_ka.sa.sa_flags, &act->sa_flags)) 71 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
72 __get_user(mask, &act->sa_mask))
98 return -EFAULT; 73 return -EFAULT;
99 __get_user(mask, &act->sa_mask);
100 siginitset(&new_ka.sa.sa_mask, mask); 74 siginitset(&new_ka.sa.sa_mask, mask);
101 new_ka.ka_restorer = NULL; 75 new_ka.ka_restorer = NULL;
102 } 76 }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
106 if (!ret && oact) { 80 if (!ret && oact) {
107 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 81 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
108 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 82 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
109 __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) 83 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
84 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
110 return -EFAULT; 85 return -EFAULT;
111 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
112 } 86 }
113 87
114 return ret; 88 return ret;
@@ -144,8 +118,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
144/* 118/*
145 * Atomically swap in the new signal mask, and wait for a signal. 119 * Atomically swap in the new signal mask, and wait for a signal.
146 */ 120 */
147asmlinkage int 121SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
148do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
149{ 122{
150 mask &= _BLOCKABLE; 123 mask &= _BLOCKABLE;
151 spin_lock_irq(&current->sighand->siglock); 124 spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
154 recalc_sigpending(); 127 recalc_sigpending();
155 spin_unlock_irq(&current->sighand->siglock); 128 spin_unlock_irq(&current->sighand->siglock);
156 129
157 /* Indicate EINTR on return from any possible signal handler,
158 which will not come back through here, but via sigreturn. */
159 regs->r0 = EINTR;
160 regs->r19 = 1;
161
162 current->state = TASK_INTERRUPTIBLE;
163 schedule();
164 set_thread_flag(TIF_RESTORE_SIGMASK);
165 return -ERESTARTNOHAND;
166}
167
168asmlinkage int
169do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
170 struct pt_regs *regs, struct switch_stack *sw)
171{
172 sigset_t set;
173
174 /* XXX: Don't preclude handling different sized sigset_t's. */
175 if (sigsetsize != sizeof(sigset_t))
176 return -EINVAL;
177 if (copy_from_user(&set, uset, sizeof(set)))
178 return -EFAULT;
179
180 sigdelsetmask(&set, ~_BLOCKABLE);
181 spin_lock_irq(&current->sighand->siglock);
182 current->saved_sigmask = current->blocked;
183 current->blocked = set;
184 recalc_sigpending();
185 spin_unlock_irq(&current->sighand->siglock);
186
187 /* Indicate EINTR on return from any possible signal handler,
188 which will not come back through here, but via sigreturn. */
189 regs->r0 = EINTR;
190 regs->r19 = 1;
191
192 current->state = TASK_INTERRUPTIBLE; 130 current->state = TASK_INTERRUPTIBLE;
193 schedule(); 131 schedule();
194 set_thread_flag(TIF_RESTORE_SIGMASK); 132 set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
239 unsigned long usp; 177 unsigned long usp;
240 long i, err = __get_user(regs->pc, &sc->sc_pc); 178 long i, err = __get_user(regs->pc, &sc->sc_pc);
241 179
180 current_thread_info()->restart_block.fn = do_no_restart_syscall;
181
242 sw->r26 = (unsigned long) ret_from_sys_call; 182 sw->r26 = (unsigned long) ret_from_sys_call;
243 183
244 err |= __get_user(regs->r0, sc->sc_regs+0); 184 err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
591 regs->pc -= 4; 531 regs->pc -= 4;
592 break; 532 break;
593 case ERESTART_RESTARTBLOCK: 533 case ERESTART_RESTARTBLOCK:
594 current_thread_info()->restart_block.fn = do_no_restart_syscall;
595 regs->r0 = EINTR; 534 regs->r0 = EINTR;
596 break; 535 break;
597 } 536 }
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 4afc1a1e2e5a..f0df3fbd8402 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
87 srm_env_t *entry; 87 srm_env_t *entry;
88 char *page; 88 char *page;
89 89
90 entry = (srm_env_t *)m->private; 90 entry = m->private;
91 page = (char *)__get_free_page(GFP_USER); 91 page = (char *)__get_free_page(GFP_USER);
92 if (!page) 92 if (!page)
93 return -ENOMEM; 93 return -ENOMEM;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 09acb786e72b..a6a1de9db16f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
58 .quad sys_open /* 45 */ 58 .quad sys_open /* 45 */
59 .quad alpha_ni_syscall 59 .quad alpha_ni_syscall
60 .quad sys_getxgid 60 .quad sys_getxgid
61 .quad osf_sigprocmask 61 .quad sys_osf_sigprocmask
62 .quad alpha_ni_syscall 62 .quad alpha_ni_syscall
63 .quad alpha_ni_syscall /* 50 */ 63 .quad alpha_ni_syscall /* 50 */
64 .quad sys_acct 64 .quad sys_acct
@@ -512,6 +512,9 @@ sys_call_table:
512 .quad sys_pwritev 512 .quad sys_pwritev
513 .quad sys_rt_tgsigqueueinfo 513 .quad sys_rt_tgsigqueueinfo
514 .quad sys_perf_event_open 514 .quad sys_perf_event_open
515 .quad sys_fanotify_init
516 .quad sys_fanotify_mark /* 495 */
517 .quad sys_prlimit64
515 518
516 .size sys_call_table, . - sys_call_table 519 .size sys_call_table, . - sys_call_table
517 .type sys_call_table, @object 520 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index eacceb26d9c8..396af1799ea4 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
191 191
192 write_sequnlock(&xtime_lock); 192 write_sequnlock(&xtime_lock);
193 193
194#ifndef CONFIG_SMP
195 while (nticks--)
196 update_process_times(user_mode(get_irq_regs()));
197#endif
198
199 if (test_perf_event_pending()) { 194 if (test_perf_event_pending()) {
200 clear_perf_event_pending(); 195 clear_perf_event_pending();
201 perf_event_do_pending(); 196 perf_event_do_pending();
202 } 197 }
203 198
199#ifndef CONFIG_SMP
200 while (nticks--)
201 update_process_times(user_mode(get_irq_regs()));
202#endif
203
204 return IRQ_HANDLED; 204 return IRQ_HANDLED;
205} 205}
206 206
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index b14f015008ad..0414e021a91c 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -13,7 +13,6 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/tty.h> 14#include <linux/tty.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/smp_lock.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
623 return; 622 return;
624 } 623 }
625 624
626 lock_kernel();
627 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", 625 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
628 pc, va, opcode, reg); 626 pc, va, opcode, reg);
629 do_exit(SIGSEGV); 627 do_exit(SIGSEGV);
@@ -646,7 +644,6 @@ got_exception:
646 * Yikes! No one to forward the exception to. 644 * Yikes! No one to forward the exception to.
647 * Since the registers are in a weird format, dump them ourselves. 645 * Since the registers are in a weird format, dump them ourselves.
648 */ 646 */
649 lock_kernel();
650 647
651 printk("%s(%d): unhandled unaligned exception\n", 648 printk("%s(%d): unhandled unaligned exception\n",
652 current->comm, task_pid_nr(current)); 649 current->comm, task_pid_nr(current));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 16bc8eb4901c..88c97bc7a6f5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -271,7 +271,6 @@ config ARCH_AT91
271 bool "Atmel AT91" 271 bool "Atmel AT91"
272 select ARCH_REQUIRE_GPIOLIB 272 select ARCH_REQUIRE_GPIOLIB
273 select HAVE_CLK 273 select HAVE_CLK
274 select ARCH_USES_GETTIMEOFFSET
275 help 274 help
276 This enables support for systems based on the Atmel AT91RM9200, 275 This enables support for systems based on the Atmel AT91RM9200,
277 AT91SAM9 and AT91CAP9 processors. 276 AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
1051 ACTLR register. Note that setting specific bits in the ACTLR register 1050 ACTLR register. Note that setting specific bits in the ACTLR register
1052 may not be available in non-secure mode. 1051 may not be available in non-secure mode.
1053 1052
1053config ARM_ERRATA_742230
1054 bool "ARM errata: DMB operation may be faulty"
1055 depends on CPU_V7 && SMP
1056 help
1057 This option enables the workaround for the 742230 Cortex-A9
1058 (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
1059 between two write operations may not ensure the correct visibility
1060 ordering of the two writes. This workaround sets a specific bit in
1061 the diagnostic register of the Cortex-A9 which causes the DMB
1062 instruction to behave as a DSB, ensuring the correct behaviour of
1063 the two writes.
1064
1065config ARM_ERRATA_742231
1066 bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
1067 depends on CPU_V7 && SMP
1068 help
1069 This option enables the workaround for the 742231 Cortex-A9
1070 (r2p0..r2p2) erratum. Under certain conditions, specific to the
1071 Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
1072 accessing some data located in the same cache line, may get corrupted
1073 data due to bad handling of the address hazard when the line gets
1074 replaced from one of the CPUs at the same time as another CPU is
1075 accessing it. This workaround sets specific bits in the diagnostic
1076 register of the Cortex-A9 which reduces the linefill issuing
1077 capabilities of the processor.
1078
1054config PL310_ERRATA_588369 1079config PL310_ERRATA_588369
1055 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" 1080 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
1056 depends on CACHE_L2X0 && ARCH_OMAP4 1081 depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1576,97 +1601,6 @@ config AUTO_ZRELADDR
1576 0xf8000000. This assumes the zImage being placed in the first 128MB 1601 0xf8000000. This assumes the zImage being placed in the first 128MB
1577 from start of memory. 1602 from start of memory.
1578 1603
1579config ZRELADDR
1580 hex "Physical address of the decompressed kernel image"
1581 depends on !AUTO_ZRELADDR
1582 default 0x00008000 if ARCH_BCMRING ||\
1583 ARCH_CNS3XXX ||\
1584 ARCH_DOVE ||\
1585 ARCH_EBSA110 ||\
1586 ARCH_FOOTBRIDGE ||\
1587 ARCH_INTEGRATOR ||\
1588 ARCH_IOP13XX ||\
1589 ARCH_IOP33X ||\
1590 ARCH_IXP2000 ||\
1591 ARCH_IXP23XX ||\
1592 ARCH_IXP4XX ||\
1593 ARCH_KIRKWOOD ||\
1594 ARCH_KS8695 ||\
1595 ARCH_LOKI ||\
1596 ARCH_MMP ||\
1597 ARCH_MV78XX0 ||\
1598 ARCH_NOMADIK ||\
1599 ARCH_NUC93X ||\
1600 ARCH_NS9XXX ||\
1601 ARCH_ORION5X ||\
1602 ARCH_SPEAR3XX ||\
1603 ARCH_SPEAR6XX ||\
1604 ARCH_TEGRA ||\
1605 ARCH_U8500 ||\
1606 ARCH_VERSATILE ||\
1607 ARCH_W90X900
1608 default 0x08008000 if ARCH_MX1 ||\
1609 ARCH_SHARK
1610 default 0x10008000 if ARCH_MSM ||\
1611 ARCH_OMAP1 ||\
1612 ARCH_RPC
1613 default 0x20008000 if ARCH_S5P6440 ||\
1614 ARCH_S5P6442 ||\
1615 ARCH_S5PC100 ||\
1616 ARCH_S5PV210
1617 default 0x30008000 if ARCH_S3C2410 ||\
1618 ARCH_S3C2400 ||\
1619 ARCH_S3C2412 ||\
1620 ARCH_S3C2416 ||\
1621 ARCH_S3C2440 ||\
1622 ARCH_S3C2443
1623 default 0x40008000 if ARCH_STMP378X ||\
1624 ARCH_STMP37XX ||\
1625 ARCH_SH7372 ||\
1626 ARCH_SH7377 ||\
1627 ARCH_S5PV310
1628 default 0x50008000 if ARCH_S3C64XX ||\
1629 ARCH_SH7367
1630 default 0x60008000 if ARCH_VEXPRESS
1631 default 0x80008000 if ARCH_MX25 ||\
1632 ARCH_MX3 ||\
1633 ARCH_NETX ||\
1634 ARCH_OMAP2PLUS ||\
1635 ARCH_PNX4008
1636 default 0x90008000 if ARCH_MX5 ||\
1637 ARCH_MX91231
1638 default 0xa0008000 if ARCH_IOP32X ||\
1639 ARCH_PXA ||\
1640 MACH_MX27
1641 default 0xc0008000 if ARCH_LH7A40X ||\
1642 MACH_MX21
1643 default 0xf0008000 if ARCH_AAEC2000 ||\
1644 ARCH_L7200
1645 default 0xc0028000 if ARCH_CLPS711X
1646 default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
1647 default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
1648 default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
1649 default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
1650 default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
1651 default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
1652 default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
1653 default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
1654 default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
1655 default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
1656 default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
1657 default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
1658 default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
1659 default 0xc0208000 if ARCH_SA1100 && SA1111
1660 default 0xc0008000 if ARCH_SA1100 && !SA1111
1661 default 0x30108000 if ARCH_S3C2410 && PM_H1940
1662 default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
1663 default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
1664 help
1665 ZRELADDR is the physical address where the decompressed kernel
1666 image will be placed. ZRELADDR has to be specified when the
1667 assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
1668 selected.
1669
1670endmenu 1604endmenu
1671 1605
1672menu "CPU Power Management" 1606menu "CPU Power Management"
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index f705213caa88..4a590f4113e2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,16 +14,18 @@
14MKIMAGE := $(srctree)/scripts/mkuboot.sh 14MKIMAGE := $(srctree)/scripts/mkuboot.sh
15 15
16ifneq ($(MACHINE),) 16ifneq ($(MACHINE),)
17-include $(srctree)/$(MACHINE)/Makefile.boot 17include $(srctree)/$(MACHINE)/Makefile.boot
18endif 18endif
19 19
20# Note: the following conditions must always be true: 20# Note: the following conditions must always be true:
21# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
21# PARAMS_PHYS must be within 4MB of ZRELADDR 22# PARAMS_PHYS must be within 4MB of ZRELADDR
22# INITRD_PHYS must be in RAM 23# INITRD_PHYS must be in RAM
24ZRELADDR := $(zreladdr-y)
23PARAMS_PHYS := $(params_phys-y) 25PARAMS_PHYS := $(params_phys-y)
24INITRD_PHYS := $(initrd_phys-y) 26INITRD_PHYS := $(initrd_phys-y)
25 27
26export INITRD_PHYS PARAMS_PHYS 28export ZRELADDR INITRD_PHYS PARAMS_PHYS
27 29
28targets := Image zImage xipImage bootpImage uImage 30targets := Image zImage xipImage bootpImage uImage
29 31
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@
65ifeq ($(CONFIG_ZBOOT_ROM),y) 67ifeq ($(CONFIG_ZBOOT_ROM),y)
66$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) 68$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
67else 69else
68$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR) 70$(obj)/uImage: LOADADDR=$(ZRELADDR)
69endif 71endif
70 72
71ifeq ($(CONFIG_THUMB2_KERNEL),y) 73ifeq ($(CONFIG_THUMB2_KERNEL),y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68775e33476c..65a7c1c588a9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -79,6 +79,10 @@ endif
79EXTRA_CFLAGS := -fpic -fno-builtin 79EXTRA_CFLAGS := -fpic -fno-builtin
80EXTRA_AFLAGS := -Wa,-march=all 80EXTRA_AFLAGS := -Wa,-march=all
81 81
82# Supply ZRELADDR to the decompressor via a linker symbol.
83ifneq ($(CONFIG_AUTO_ZRELADDR),y)
84LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
85endif
82ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) 86ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
83LDFLAGS_vmlinux += --be8 87LDFLAGS_vmlinux += --be8
84endif 88endif
@@ -112,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
112$(obj)/font.c: $(FONTC) 116$(obj)/font.c: $(FONTC)
113 $(call cmd,shipped) 117 $(call cmd,shipped)
114 118
115$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 119$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
116 @sed "$(SEDFLAGS)" < $< > $@ 120 @sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6af9907c3b5c..6825c34646d4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -177,7 +177,7 @@ not_angel:
177 and r4, pc, #0xf8000000 177 and r4, pc, #0xf8000000
178 add r4, r4, #TEXT_OFFSET 178 add r4, r4, #TEXT_OFFSET
179#else 179#else
180 ldr r4, =CONFIG_ZRELADDR 180 ldr r4, =zreladdr
181#endif 181#endif
182 subs r0, r0, r1 @ calculate the delta offset 182 subs r0, r0, r1 @ calculate the delta offset
183 183
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 6c0913562455..1bec96e85196 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -263,6 +263,22 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
263 return 0; 263 return 0;
264} 264}
265 265
266int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
267{
268 dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
269 __func__, dma_addr, size);
270 return (dev->bus == &pci_bus_type) &&
271 ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
272}
273
274int dma_set_coherent_mask(struct device *dev, u64 mask)
275{
276 if (mask >= PHYS_OFFSET + SZ_64M - 1)
277 return 0;
278
279 return -EIO;
280}
281
266int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) 282int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
267{ 283{
268 it8152_io.start = IT8152_IO_BASE + 0x12000; 284 it8152_io.start = IT8152_IO_BASE + 0x12000;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c226fe10553e..c568da7dcae4 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
288 * DMA access and 1 if the buffer needs to be bounced. 288 * DMA access and 1 if the buffer needs to be bounced.
289 * 289 *
290 */ 290 */
291#ifdef CONFIG_SA1111
292extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); 291extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
293#else
294static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
295 size_t size)
296{
297 return 0;
298}
299#endif
300 292
301/* 293/*
302 * The DMA API, implemented by dmabounce.c. See below for descriptions. 294 * The DMA API, implemented by dmabounce.c. See below for descriptions.
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 48837e6d8887..b5799a3b7117 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@
17 * counter interrupts are regular interrupts and not an NMI. This 17 * counter interrupts are regular interrupts and not an NMI. This
18 * means that when we receive the interrupt we can call 18 * means that when we receive the interrupt we can call
19 * perf_event_do_pending() that handles all of the work with 19 * perf_event_do_pending() that handles all of the work with
20 * interrupts enabled. 20 * interrupts disabled.
21 */ 21 */
22static inline void 22static inline void
23set_perf_event_pending(void) 23set_perf_event_pending(void)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..e90b167ea848 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
317#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 317#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
318#define pgprot_dmacoherent(prot) \ 318#define pgprot_dmacoherent(prot) \
319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
320#define __HAVE_PHYS_MEM_ACCESS_PROT
321struct file;
322extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
323 unsigned long size, pgprot_t vma_prot);
320#else 324#else
321#define pgprot_dmacoherent(prot) \ 325#define pgprot_dmacoherent(prot) \
322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) 326 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d02cfb683487..c891eb76c0e3 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -393,6 +393,9 @@
393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) 393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
394#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) 394#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
395#define __NR_accept4 (__NR_SYSCALL_BASE+366) 395#define __NR_accept4 (__NR_SYSCALL_BASE+366)
396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
396 399
397/* 400/*
398 * The following SWIs are ARM private. 401 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71fa72cb..5c26eccef998 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
376 CALL(sys_perf_event_open) 376 CALL(sys_perf_event_open)
377/* 365 */ CALL(sys_recvmmsg) 377/* 365 */ CALL(sys_recvmmsg)
378 CALL(sys_accept4) 378 CALL(sys_accept4)
379 CALL(sys_fanotify_init)
380 CALL(sys_fanotify_mark)
381 CALL(sys_prlimit64)
379#ifndef syscalls_counted 382#ifndef syscalls_counted
380.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 383.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
381#define syscalls_counted 384#define syscalls_counted
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..7885722bdf4e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
48 beq no_work_pending 48 beq no_work_pending
49 mov r0, sp @ 'regs' 49 mov r0, sp @ 'regs'
50 mov r2, why @ 'syscall' 50 mov r2, why @ 'syscall'
51 tst r1, #_TIF_SIGPENDING @ delivering a signal?
52 movne why, #0 @ prevent further restarts
51 bl do_notify_resume 53 bl do_notify_resume
52 b ret_slow_syscall @ Check work again 54 b ret_slow_syscall @ Check work again
53 55
@@ -418,11 +420,13 @@ ENDPROC(sys_clone_wrapper)
418 420
419sys_sigreturn_wrapper: 421sys_sigreturn_wrapper:
420 add r0, sp, #S_OFF 422 add r0, sp, #S_OFF
423 mov why, #0 @ prevent syscall restart handling
421 b sys_sigreturn 424 b sys_sigreturn
422ENDPROC(sys_sigreturn_wrapper) 425ENDPROC(sys_sigreturn_wrapper)
423 426
424sys_rt_sigreturn_wrapper: 427sys_rt_sigreturn_wrapper:
425 add r0, sp, #S_OFF 428 add r0, sp, #S_OFF
429 mov why, #0 @ prevent syscall restart handling
426 b sys_rt_sigreturn 430 b sys_rt_sigreturn
427ENDPROC(sys_rt_sigreturn_wrapper) 431ENDPROC(sys_rt_sigreturn_wrapper)
428 432
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..ecbb0288e5dd 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
319{ 319{
320 struct hw_perf_event fake_event = event->hw; 320 struct hw_perf_event fake_event = event->hw;
321 321
322 if (event->pmu && event->pmu != &pmu) 322 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
323 return 0; 323 return 1;
324 324
325 return armpmu->get_event_idx(cpuc, &fake_event) >= 0; 325 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
326} 326}
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
1041 /* 1041 /*
1042 * Handle the pending perf events. 1042 * Handle the pending perf events.
1043 * 1043 *
1044 * Note: this call *must* be run with interrupts enabled. For 1044 * Note: this call *must* be run with interrupts disabled. For
1045 * platforms that can have the PMU interrupts raised as a PMI, this 1045 * platforms that can have the PMU interrupts raised as an NMI, this
1046 * will not work. 1046 * will not work.
1047 */ 1047 */
1048 perf_event_do_pending(); 1048 perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
2017 /* 2017 /*
2018 * Handle the pending perf events. 2018 * Handle the pending perf events.
2019 * 2019 *
2020 * Note: this call *must* be run with interrupts enabled. For 2020 * Note: this call *must* be run with interrupts disabled. For
2021 * platforms that can have the PMU interrupts raised as a PMI, this 2021 * platforms that can have the PMU interrupts raised as an NMI, this
2022 * will not work. 2022 * will not work.
2023 */ 2023 */
2024 perf_event_do_pending(); 2024 perf_event_do_pending();
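
The relaxed validate_event() check above means a group is no longer rejected just because it contains members owned by another PMU (typically software events) or members that are currently off. A user-space sketch of the kind of mixed group this allows, using the raw perf_event_open() syscall (generic event types, error handling trimmed):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
        return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
        struct perf_event_attr hw, sw;
        int leader, member;

        memset(&hw, 0, sizeof(hw));
        hw.size = sizeof(hw);
        hw.type = PERF_TYPE_HARDWARE;
        hw.config = PERF_COUNT_HW_CPU_CYCLES;

        memset(&sw, 0, sizeof(sw));
        sw.size = sizeof(sw);
        sw.type = PERF_TYPE_SOFTWARE;           /* different PMU than the leader */
        sw.config = PERF_COUNT_SW_PAGE_FAULTS;

        leader = perf_open(&hw, -1);            /* hardware event leads the group */
        member = perf_open(&sw, leader);        /* software event joins the group */
        printf("leader fd %d, member fd %d\n", leader, member);
        return 0;
}
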
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 753c0d31a3d3..c67b47f1c0fd 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
121 .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, 121 .pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
122 .type = CLK_TYPE_PERIPHERAL, 122 .type = CLK_TYPE_PERIPHERAL,
123}; 123};
124static struct clk tcb_clk = { 124static struct clk tcb0_clk = {
125 .name = "tcb_clk", 125 .name = "tcb0_clk",
126 .pmc_mask = 1 << AT91SAM9G45_ID_TCB, 126 .pmc_mask = 1 << AT91SAM9G45_ID_TCB,
127 .type = CLK_TYPE_PERIPHERAL, 127 .type = CLK_TYPE_PERIPHERAL,
128}; 128};
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
192 .parent = &uhphs_clk, 192 .parent = &uhphs_clk,
193}; 193};
194 194
195/* One additional fake clock for second TC block */
196static struct clk tcb1_clk = {
197 .name = "tcb1_clk",
198 .pmc_mask = 0,
199 .type = CLK_TYPE_PERIPHERAL,
200 .parent = &tcb0_clk,
201};
202
195static struct clk *periph_clocks[] __initdata = { 203static struct clk *periph_clocks[] __initdata = {
196 &pioA_clk, 204 &pioA_clk,
197 &pioB_clk, 205 &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
208 &spi1_clk, 216 &spi1_clk,
209 &ssc0_clk, 217 &ssc0_clk,
210 &ssc1_clk, 218 &ssc1_clk,
211 &tcb_clk, 219 &tcb0_clk,
212 &pwm_clk, 220 &pwm_clk,
213 &tsc_clk, 221 &tsc_clk,
214 &dma_clk, 222 &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
221 &mmc1_clk, 229 &mmc1_clk,
222 // irq0 230 // irq0
223 &ohci_clk, 231 &ohci_clk,
232 &tcb1_clk,
224}; 233};
225 234
226/* 235/*
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 809114d5a5a6..1276babf84d5 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
46 .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, 46 .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
47 .flags = IORESOURCE_MEM, 47 .flags = IORESOURCE_MEM,
48 }, 48 },
49 [2] = { 49 [1] = {
50 .start = AT91SAM9G45_ID_DMA, 50 .start = AT91SAM9G45_ID_DMA,
51 .end = AT91SAM9G45_ID_DMA, 51 .end = AT91SAM9G45_ID_DMA,
52 .flags = IORESOURCE_IRQ, 52 .flags = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
426 .sda_is_open_drain = 1, 426 .sda_is_open_drain = 1,
427 .scl_pin = AT91_PIN_PA21, 427 .scl_pin = AT91_PIN_PA21,
428 .scl_is_open_drain = 1, 428 .scl_is_open_drain = 1,
429 .udelay = 2, /* ~100 kHz */ 429 .udelay = 5, /* ~100 kHz */
430}; 430};
431 431
432static struct platform_device at91sam9g45_twi0_device = { 432static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
440 .sda_is_open_drain = 1, 440 .sda_is_open_drain = 1,
441 .scl_pin = AT91_PIN_PB11, 441 .scl_pin = AT91_PIN_PB11,
442 .scl_is_open_drain = 1, 442 .scl_is_open_drain = 1,
443 .udelay = 2, /* ~100 kHz */ 443 .udelay = 5, /* ~100 kHz */
444}; 444};
445 445
446static struct platform_device at91sam9g45_twi1_device = { 446static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
835static void __init at91_add_device_tc(void) 835static void __init at91_add_device_tc(void)
836{ 836{
837 /* this chip has one clock and irq for all six TC channels */ 837 /* this chip has one clock and irq for all six TC channels */
838 at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); 838 at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
839 platform_device_register(&at91sam9g45_tcb0_device); 839 platform_device_register(&at91sam9g45_tcb0_device);
840 at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); 840 at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
841 platform_device_register(&at91sam9g45_tcb1_device); 841 platform_device_register(&at91sam9g45_tcb1_device);
842} 842}
843#else 843#else
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c4c8865d52d7..65eb0943194f 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
93 .start = AT91_PIN_PC11, 93 .start = AT91_PIN_PC11,
94 .end = AT91_PIN_PC11, 94 .end = AT91_PIN_PC11,
95 .flags = IORESOURCE_IRQ 95 .flags = IORESOURCE_IRQ
96 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
96 } 97 }
97}; 98};
98 99
99static struct dm9000_plat_data dm9000_platdata = { 100static struct dm9000_plat_data dm9000_platdata = {
100 .flags = DM9000_PLATF_16BITONLY, 101 .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
101}; 102};
102 103
103static struct platform_device dm9000_device = { 104static struct platform_device dm9000_device = {
@@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
168 169
169 170
170/* 171/*
171 * MCI (SD/MMC)
172 */
173static struct at91_mmc_data __initdata ek_mmc_data = {
174 .wire4 = 1,
175// .det_pin = ... not connected
176// .wp_pin = ... not connected
177// .vcc_pin = ... not connected
178};
179
180
181/*
182 * NAND flash 172 * NAND flash
183 */ 173 */
184static struct mtd_partition __initdata ek_nand_partition[] = { 174static struct mtd_partition __initdata ek_nand_partition[] = {
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
246 at91_add_device_nand(&ek_nand_data); 236 at91_add_device_nand(&ek_nand_data);
247} 237}
248 238
239/*
240 * SPI related devices
241 */
242#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
249 243
250/* 244/*
251 * ADS7846 Touchscreen 245 * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
356#endif 350#endif
357}; 351};
358 352
353#else /* CONFIG_SPI_ATMEL_* */
354/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
355
356/*
357 * MCI (SD/MMC)
358 * det_pin, wp_pin and vcc_pin are not connected
359 */
360static struct at91_mmc_data __initdata ek_mmc_data = {
361 .wire4 = 1,
362};
363
364#endif /* CONFIG_SPI_ATMEL_* */
365
359 366
360/* 367/*
361 * LCD Controller 368 * LCD Controller
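
Because spi0 and the MCI block share the same PIO pins on this board, the file above only keeps one of the two configurations compiled in. A sketch of how the init path then registers one or the other (kernel context assumed; the helper name is made up and the rest of the board init is trimmed, but at91_add_device_spi()/at91_add_device_mmc() are the mach-at91 helpers):

static void __init ek_board_configure_storage(void)
{
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
        /* SPI gets the shared PIO lines: dataflash + ADS7846 touchscreen */
        at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
#else
        /* otherwise the same pins are free for the MMC/SD slot */
        at91_add_device_mmc(0, &ek_mmc_data);
#endif
}
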
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 7f7da439341f..7525cee3983f 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
501int __init clk_register(struct clk *clk) 501int __init clk_register(struct clk *clk)
502{ 502{
503 if (clk_is_peripheral(clk)) { 503 if (clk_is_peripheral(clk)) {
504 clk->parent = &mck; 504 if (!clk->parent)
505 clk->parent = &mck;
505 clk->mode = pmc_periph_mode; 506 clk->mode = pmc_periph_mode;
506 list_add_tail(&clk->node, &clocks); 507 list_add_tail(&clk->node, &clocks);
507 } 508 }
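
clk_register() now only falls back to mck when no parent was set, which is what lets the fake tcb1_clk defined earlier chain to tcb0_clk, the block that owns the real PMC bit. A minimal user-space model of that chaining, outside the kernel clock framework and with an illustrative peripheral-ID bit:

#include <stdio.h>

struct clk {
        const char *name;
        unsigned int pmc_mask;          /* 0 for the fake tcb1_clk */
        struct clk *parent;
        int users;
};

static void clk_enable(struct clk *clk)
{
        if (clk->parent)
                clk_enable(clk->parent);
        if (clk->users++ == 0 && clk->pmc_mask)
                printf("PMC_PCER <- 0x%08x (%s)\n", clk->pmc_mask, clk->name);
}

int main(void)
{
        struct clk tcb0_clk = { "tcb0_clk", 1u << 20, NULL, 0 };  /* bit value is illustrative */
        struct clk tcb1_clk = { "tcb1_clk", 0, &tcb0_clk, 0 };

        clk_enable(&tcb1_clk);          /* only the parent's peripheral bit gets written */
        return 0;
}

Enabling tcb1_clk propagates to its parent, so the second TC block works without a PMC bit of its own.
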
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b659ff4..9be261beae7d 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
769 .virtual = SRAM_VIRT, 769 .virtual = SRAM_VIRT,
770 .pfn = __phys_to_pfn(0x00010000), 770 .pfn = __phys_to_pfn(0x00010000),
771 .length = SZ_32K, 771 .length = SZ_32K,
772 /* MT_MEMORY_NONCACHED requires supersection alignment */ 772 .type = MT_MEMORY_NONCACHED,
773 .type = MT_DEVICE,
774 }, 773 },
775}; 774};
776 775
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c643709..7781e35daec3 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
969 .virtual = SRAM_VIRT, 969 .virtual = SRAM_VIRT,
970 .pfn = __phys_to_pfn(0x00010000), 970 .pfn = __phys_to_pfn(0x00010000),
971 .length = SZ_32K, 971 .length = SZ_32K,
972 /* MT_MEMORY_NONCACHED requires supersection alignment */ 972 .type = MT_MEMORY_NONCACHED,
973 .type = MT_DEVICE,
974 }, 973 },
975}; 974};
976 975
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec315c99a..5e5b0a7831fb 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
653 .virtual = SRAM_VIRT, 653 .virtual = SRAM_VIRT,
654 .pfn = __phys_to_pfn(0x00008000), 654 .pfn = __phys_to_pfn(0x00008000),
655 .length = SZ_16K, 655 .length = SZ_16K,
656 /* MT_MEMORY_NONCACHED requires supersection alignment */ 656 .type = MT_MEMORY_NONCACHED,
657 .type = MT_DEVICE,
658 }, 657 },
659}; 658};
660 659
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1872ac..26e8a9c7f50b 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
737 .virtual = SRAM_VIRT, 737 .virtual = SRAM_VIRT,
738 .pfn = __phys_to_pfn(0x00010000), 738 .pfn = __phys_to_pfn(0x00010000),
739 .length = SZ_32K, 739 .length = SZ_32K,
740 /* MT_MEMORY_NONCACHED requires supersection alignment */ 740 .type = MT_MEMORY_NONCACHED,
741 .type = MT_DEVICE,
742 }, 741 },
743}; 742};
744 743
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e4721ce2e..eb4936ff90ad 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
13 13
14#define IO_SPACE_LIMIT 0xffffffff 14#define IO_SPACE_LIMIT 0xffffffff
15 15
16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
17 DOVE_PCIE0_IO_VIRT_BASE)) 17 DOVE_PCIE0_IO_VIRT_BASE))
18#define __mem_pci(a) (a) 18#define __mem_pci(a) (a)
19 19
20#endif 20#endif
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 8bf3cec98cfa..4566bd1c8660 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
560 clkdev_add_table(clocks, ARRAY_SIZE(clocks)); 560 clkdev_add_table(clocks, ARRAY_SIZE(clocks));
561 return 0; 561 return 0;
562} 562}
563arch_initcall(ep93xx_clock_init); 563postcore_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d64b985..24498a932ba6 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
503 return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); 503 return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
504} 504}
505 505
506int dma_set_coherent_mask(struct device *dev, u64 mask)
507{
508 if (mask >= SZ_64M - 1)
509 return 0;
510
511 return -EIO;
512}
513
506EXPORT_SYMBOL(ixp4xx_pci_read); 514EXPORT_SYMBOL(ixp4xx_pci_read);
507EXPORT_SYMBOL(ixp4xx_pci_write); 515EXPORT_SYMBOL(ixp4xx_pci_write);
508 516
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d4fbe8..8138371c406e 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
26#define PCIBIOS_MAX_MEM 0x4BFFFFFF 26#define PCIBIOS_MAX_MEM 0x4BFFFFFF
27#endif 27#endif
28 28
29#define ARCH_HAS_DMA_SET_COHERENT_MASK
30
29#define pcibios_assign_all_busses() 1 31#define pcibios_assign_all_busses() 1
30 32
31/* Register locations and bits */ 33/* Register locations and bits */
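
ARCH_HAS_DMA_SET_COHERENT_MASK lets the platform supply its own dma_set_coherent_mask(); the ixp4xx version above accepts any mask that covers at least the first 64MB and returns -EIO for anything smaller. A sketch of the call a driver would make against it (hypothetical driver, kernel context assumed):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* resolves to the ixp4xx hook above; a full 32-bit mask passes */
        int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

        if (ret)
                dev_err(&pdev->dev, "no usable coherent DMA mask\n");
        return ret;
}
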
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec95e76..6e924b398919 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
38 38
39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
43 43
44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00836b7..513ad3102d7c 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
117 * IORESOURCE_IO 117 * IORESOURCE_IO
118 */ 118 */
119 pp->res[0].name = "PCIe 0 I/O Space"; 119 pp->res[0].name = "PCIe 0 I/O Space";
120 pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 120 pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
122 pp->res[0].flags = IORESOURCE_IO; 122 pp->res[0].flags = IORESOURCE_IO;
123 123
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
139 * IORESOURCE_IO 139 * IORESOURCE_IO
140 */ 140 */
141 pp->res[0].name = "PCIe 1 I/O Space"; 141 pp->res[0].name = "PCIe 1 I/O Space";
142 pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 142 pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
144 pp->res[0].flags = IORESOURCE_IO; 144 pp->res[0].flags = IORESOURCE_IO;
145 145
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0ce6cf..1a8a25edb1b4 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
9#ifndef __ASM_MACH_SYSTEM_H 9#ifndef __ASM_MACH_SYSTEM_H
10#define __ASM_MACH_SYSTEM_H 10#define __ASM_MACH_SYSTEM_H
11 11
12#include <mach/cputype.h>
13
12static inline void arch_idle(void) 14static inline void arch_idle(void)
13{ 15{
14 cpu_do_idle(); 16 cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
16 18
17static inline void arch_reset(char mode, const char *cmd) 19static inline void arch_reset(char mode, const char *cmd)
18{ 20{
19 cpu_reset(0); 21 if (cpu_is_pxa168())
22 cpu_reset(0xffff0000);
23 else
24 cpu_reset(0);
20} 25}
21#endif /* __ASM_MACH_SYSTEM_H */ 26#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index 91931dcb0689..4aaadc753d3e 100644
--- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
215 * Add platform devices present on this baseboard and init 215 * Add platform devices present on this baseboard and init
216 * them from CPU side as far as required to use them later on 216 * them from CPU side as far as required to use them later on
217 */ 217 */
218void __init eukrea_mbimxsd_baseboard_init(void) 218void __init eukrea_mbimxsd25_baseboard_init(void)
219{ 219{
220 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, 220 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
221 ARRAY_SIZE(eukrea_mbimxsd_pads))) 221 ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c
index a5f0174290b4..e064bb3d6919 100644
--- a/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
147 if (!otg_mode_host) 147 if (!otg_mode_host)
148 mxc_register_device(&otg_udc_device, &otg_device_pdata); 148 mxc_register_device(&otg_udc_device, &otg_device_pdata);
149 149
150#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD 150#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
151 eukrea_mbimxsd_baseboard_init(); 151 eukrea_mbimxsd25_baseboard_init();
152#endif 152#endif
153} 153}
154 154
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index d3af0fdf8475..7a62e744a8b0 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
155 155
156 aad = &clk_consumer[(pdr0 >> 16) & 0xf]; 156 aad = &clk_consumer[(pdr0 >> 16) & 0xf];
157 if (aad->sel) 157 if (aad->sel)
158 fref = fref * 2 / 3; 158 fref = fref * 3 / 4;
159 159
160 return fref / aad->arm; 160 return fref / aad->arm;
161} 161}
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
164{ 164{
165 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); 165 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
166 struct arm_ahb_div *aad; 166 struct arm_ahb_div *aad;
167 unsigned long fref = get_rate_mpll(); 167 unsigned long fref = get_rate_arm();
168 168
169 aad = &clk_consumer[(pdr0 >> 16) & 0xf]; 169 aad = &clk_consumer[(pdr0 >> 16) & 0xf];
170 170
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
176 return get_rate_ahb(NULL) >> 1; 176 return get_rate_ahb(NULL) >> 1;
177} 177}
178 178
179static unsigned long get_3_3_div(unsigned long in)
180{
181 return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
182}
183
184static unsigned long get_rate_uart(struct clk *clk) 179static unsigned long get_rate_uart(struct clk *clk)
185{ 180{
186 unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); 181 unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
187 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); 182 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
188 unsigned long div = get_3_3_div(pdr4 >> 10); 183 unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
189 184
190 if (pdr3 & (1 << 14)) 185 if (pdr3 & (1 << 14))
191 return get_rate_arm() / div; 186 return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
216 break; 211 break;
217 } 212 }
218 213
219 return rate / get_3_3_div(div); 214 return rate / (div + 1);
220} 215}
221 216
222static unsigned long get_rate_mshc(struct clk *clk) 217static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
270 else 265 else
271 rate = get_rate_ppll(); 266 rate = get_rate_ppll();
272 267
273 return rate / get_3_3_div((pdr2 >> 16) & 0x3f); 268 return rate / (((pdr2 >> 16) & 0x3f) + 1);
274} 269}
275 270
276static unsigned long get_rate_otg(struct clk *clk) 271static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
283 else 278 else
284 rate = get_rate_ppll(); 279 rate = get_rate_ppll();
285 280
286 return rate / get_3_3_div((pdr4 >> 22) & 0x3f); 281 return rate / (((pdr4 >> 22) & 0x3f) + 1);
287} 282}
288 283
289static unsigned long get_rate_ipg_per(struct clk *clk) 284static unsigned long get_rate_ipg_per(struct clk *clk)
290{ 285{
291 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); 286 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
292 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); 287 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
293 unsigned long div1, div2; 288 unsigned long div;
294 289
295 if (pdr0 & (1 << 26)) { 290 if (pdr0 & (1 << 26)) {
296 div1 = (pdr4 >> 19) & 0x7; 291 div = (pdr4 >> 16) & 0x3f;
297 div2 = (pdr4 >> 16) & 0x7; 292 return get_rate_arm() / (div + 1);
298 return get_rate_arm() / ((div1 + 1) * (div2 + 1));
299 } else { 293 } else {
300 div1 = (pdr0 >> 12) & 0x7; 294 div = (pdr0 >> 12) & 0x7;
301 return get_rate_ahb(NULL) / div1; 295 return get_rate_ahb(NULL) / (div + 1);
302 } 296 }
303} 297}
304 298
299static unsigned long get_rate_hsp(struct clk *clk)
300{
301 unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
302 unsigned long fref = get_rate_mpll();
303
304 if (fref > 400 * 1000 * 1000) {
305 switch (hsp_podf) {
306 case 0:
307 return fref >> 2;
308 case 1:
309 return fref >> 3;
310 case 2:
311 return fref / 3;
312 }
313 } else {
314 switch (hsp_podf) {
315 case 0:
316 case 2:
317 return fref / 3;
318 case 1:
319 return fref / 6;
320 }
321 }
322
323 return 0;
324}
325
305static int clk_cgr_enable(struct clk *clk) 326static int clk_cgr_enable(struct clk *clk)
306{ 327{
307 u32 reg; 328 u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
359DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); 380DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
360DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); 381DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
361DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); 382DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
362DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL); 383DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
363DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); 384DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
364DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); 385DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
365DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); 386DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
485 506
486int __init mx35_clocks_init() 507int __init mx35_clocks_init()
487{ 508{
488 unsigned int ll = 0; 509 unsigned int cgr2 = 3 << 26, cgr3 = 0;
489 510
490#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) 511#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
491 ll = (3 << 16); 512 cgr2 |= 3 << 16;
492#endif 513#endif
493 514
494 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 515 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
499 __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); 520 __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
500 __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), 521 __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
501 CCM_BASE + CCM_CGR1); 522 CCM_BASE + CCM_CGR1);
502 __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2); 523
503 __raw_writel(0, CCM_BASE + CCM_CGR3); 524 /*
525 * Check if we came up in internal boot mode. If yes, we need some
526 * extra clocks turned on, otherwise the MX35 boot ROM code will
527 * hang after a watchdog reset.
528 */
529 if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
530 /* Additionally turn on UART1, SCC, and IIM clocks */
531 cgr2 |= 3 << 16 | 3 << 4;
532 cgr3 |= 3 << 2;
533 }
534
535 __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
536 __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
504 537
505 mxc_timer_init(&gpt_clk, 538 mxc_timer_init(&gpt_clk,
506 MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); 539 MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
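
The new get_rate_hsp() picks the IPU (HSP) divider from the two-bit hsp_podf field, using a different table depending on whether the MPLL runs above 400 MHz. A standalone arithmetic check of that table (the 532 MHz MPLL value is only an example frequency, not read from hardware):

#include <stdio.h>

/* same selection as get_rate_hsp(), with fref passed in directly */
static unsigned long hsp_rate(unsigned long fref, unsigned int hsp_podf)
{
        if (fref > 400 * 1000 * 1000) {
                switch (hsp_podf) {
                case 0: return fref >> 2;
                case 1: return fref >> 3;
                case 2: return fref / 3;
                }
        } else {
                switch (hsp_podf) {
                case 0:
                case 2: return fref / 3;
                case 1: return fref / 6;
                }
        }
        return 0;
}

int main(void)
{
        unsigned long mpll = 532000000;         /* example MPLL frequency */

        /* 532 MHz: podf 0 -> 133 MHz, podf 2 -> ~177.3 MHz */
        printf("podf 0: %lu Hz\n", hsp_rate(mpll, 0));
        printf("podf 2: %lu Hz\n", hsp_rate(mpll, 2));
        return 0;
}
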
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 1dc5004df866..f8f15e3ac7a0 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
216 * Add platform devices present on this baseboard and init 216 * Add platform devices present on this baseboard and init
217 * them from CPU side as far as required to use them later on 217 * them from CPU side as far as required to use them later on
218 */ 218 */
219void __init eukrea_mbimxsd_baseboard_init(void) 219void __init eukrea_mbimxsd35_baseboard_init(void)
220{ 220{
221 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, 221 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
222 ARRAY_SIZE(eukrea_mbimxsd_pads))) 222 ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 9770a6a973be..2a4f8b781ba4 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
201 if (!otg_mode_host) 201 if (!otg_mode_host)
202 mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); 202 mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
203 203
204#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD 204#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
205 eukrea_mbimxsd_baseboard_init(); 205 eukrea_mbimxsd35_baseboard_init();
206#endif 206#endif
207} 207}
208 208
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 6af69def357f..57c10a9926cc 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
56{ 56{
57 u32 reg; 57 u32 reg;
58 reg = __raw_readl(clk->enable_reg); 58 reg = __raw_readl(clk->enable_reg);
59 reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift); 59 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
60 __raw_writel(reg, clk->enable_reg); 60 __raw_writel(reg, clk->enable_reg);
61 61
62} 62}
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 268a9bc6be8a..58093d9e07be 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
312 freqs.cpu = policy->cpu; 312 freqs.cpu = policy->cpu;
313 313
314 if (freq_debug) 314 if (freq_debug)
315 pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 315 pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
316 "(SDRAM %d Mhz)\n",
317 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
318 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 317 (new_freq_mem / 2000) : (new_freq_mem / 1000));
319 318
@@ -398,7 +397,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
398 return 0; 397 return 0;
399} 398}
400 399
401static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) 400static int pxa_cpufreq_init(struct cpufreq_policy *policy)
402{ 401{
403 int i; 402 int i;
404 unsigned int freq; 403 unsigned int freq;
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 27fa329d9a8b..0a0d0fe99220 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
204 return 0; 204 return 0;
205} 205}
206 206
207static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) 207static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
208{ 208{
209 int ret = -EINVAL; 209 int ret = -EINVAL;
210 210
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24cd564..814f1458a06a 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
265 * == 0x3 for pxa300/pxa310/pxa320 265 * == 0x3 for pxa300/pxa310/pxa320
266 */ 266 */
267#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
267#define __cpu_is_pxa2xx(id) \ 268#define __cpu_is_pxa2xx(id) \
268 ({ \ 269 ({ \
269 unsigned int _id = (id) >> 13 & 0x7; \ 270 unsigned int _id = (id) >> 13 & 0x7; \
270 _id <= 0x2; \ 271 _id <= 0x2; \
271 }) 272 })
273#else
274#define __cpu_is_pxa2xx(id) (0)
275#endif
272 276
277#ifdef CONFIG_PXA3xx
273#define __cpu_is_pxa3xx(id) \ 278#define __cpu_is_pxa3xx(id) \
274 ({ \ 279 ({ \
275 unsigned int _id = (id) >> 13 & 0x7; \ 280 unsigned int _id = (id) >> 13 & 0x7; \
276 _id == 0x3; \ 281 _id == 0x3; \
277 }) 282 })
283#else
284#define __cpu_is_pxa3xx(id) (0)
285#endif
278 286
287#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
279#define __cpu_is_pxa93x(id) \ 288#define __cpu_is_pxa93x(id) \
280 ({ \ 289 ({ \
281 unsigned int _id = (id) >> 4 & 0xfff; \ 290 unsigned int _id = (id) >> 4 & 0xfff; \
282 _id == 0x683 || _id == 0x693; \ 291 _id == 0x683 || _id == 0x693; \
283 }) 292 })
293#else
294#define __cpu_is_pxa93x(id) (0)
295#endif
284 296
285#define cpu_is_pxa2xx() \ 297#define cpu_is_pxa2xx() \
286 ({ \ 298 ({ \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
309#define PCIBIOS_MIN_IO 0 321#define PCIBIOS_MIN_IO 0
310#define PCIBIOS_MIN_MEM 0 322#define PCIBIOS_MIN_MEM 0
311#define pcibios_assign_all_busses() 1 323#define pcibios_assign_all_busses() 1
324#define ARCH_HAS_DMA_SET_COHERENT_MASK
312#endif 325#endif
313 326
314
315#endif /* _ASM_ARCH_HARDWARE_H */ 327#endif /* _ASM_ARCH_HARDWARE_H */
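
Wrapping the __cpu_is_* helpers in the matching CONFIG_* guards turns them into the constant 0 when that CPU family is not built in, so the compiler drops the dependent branches entirely. The shape of the pattern, reduced to a standalone example (CONFIG_EXAMPLE_PXA3xx and the ID value are made up for illustration):

#include <stdio.h>

/* #define CONFIG_EXAMPLE_PXA3xx */

#ifdef CONFIG_EXAMPLE_PXA3xx
#define cpu_is_example_pxa3xx(id)  ((((id) >> 13) & 0x7) == 0x3)
#else
#define cpu_is_example_pxa3xx(id)  (0)          /* constant: the branch is dead code */
#endif

static void init_board(unsigned int cpuid)
{
        if (cpu_is_example_pxa3xx(cpuid))
                printf("pxa3xx-only setup\n");  /* eliminated when the config is off */
        else
                printf("generic setup\n");
}

int main(void)
{
        init_board(0x3 << 13);
        return 0;
}
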
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691fb97d8..fdca3be47d9b 100644
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
6#ifndef __ASM_ARM_ARCH_IO_H 6#ifndef __ASM_ARM_ARCH_IO_H
7#define __ASM_ARM_ARCH_IO_H 7#define __ASM_ARM_ARCH_IO_H
8 8
9#include <mach/hardware.h>
10
9#define IO_SPACE_LIMIT 0xffffffff 11#define IO_SPACE_LIMIT 0xffffffff
10 12
11/* 13/*
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
index 7139e0dc26d1..4e1287070d21 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
@@ -71,10 +71,10 @@
71#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) 71#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
72#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) 72#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
73#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) 73#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
74#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
75#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
76#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) 74#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
77#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) 75#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
76#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
77#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
78 78
79/* KEYPAD */ 79/* KEYPAD */
80#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) 80#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d34ab5b..405b92a29793 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
469 }, 469 },
470}; 470};
471 471
472static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
473 .use_pio = 1,
474};
475
472void __init palm27x_pmic_init(void) 476void __init palm27x_pmic_init(void)
473{ 477{
474 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
475 pxa27x_set_i2c_power_info(NULL); 479 pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
476} 480}
477#endif 481#endif
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index c9b747cedea8..37d6173bbb66 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
241static struct pxamci_platform_data vpac270_mci_platform_data = { 241static struct pxamci_platform_data vpac270_mci_platform_data = {
242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
243 .gpio_power = -1,
243 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
244 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
245 .detect_delay_ms = 200, 246 .detect_delay_ms = 200,
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index a492b982aa06..405e62128917 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -18,10 +18,11 @@
18#include <mach/map.h> 18#include <mach/map.h>
19#include <mach/gpio-bank-c.h> 19#include <mach/gpio-bank-c.h>
20#include <mach/spi-clocks.h> 20#include <mach/spi-clocks.h>
21#include <mach/irqs.h>
21 22
22#include <plat/s3c64xx-spi.h> 23#include <plat/s3c64xx-spi.h>
23#include <plat/gpio-cfg.h> 24#include <plat/gpio-cfg.h>
24#include <plat/irqs.h> 25#include <plat/devs.h>
25 26
26static char *spi_src_clks[] = { 27static char *spi_src_clks[] = {
27 [S3C64XX_SPI_SRCCLK_PCLK] = "pclk", 28 [S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c07d013b23d..e130379ba0e8 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,73 +30,73 @@
30#include <plat/devs.h> 30#include <plat/devs.h>
31#include <plat/regs-serial.h> 31#include <plat/regs-serial.h>
32 32
33#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK 33#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
34#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB 34#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
35#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE 35#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
36 36
37static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = { 37static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
38 [0] = { 38 [0] = {
39 .hwport = 0, 39 .hwport = 0,
40 .flags = 0, 40 .flags = 0,
41 .ucon = UCON, 41 .ucon = UCON,
42 .ulcon = ULCON, 42 .ulcon = ULCON,
43 .ufcon = UFCON, 43 .ufcon = UFCON,
44 }, 44 },
45 [1] = { 45 [1] = {
46 .hwport = 1, 46 .hwport = 1,
47 .flags = 0, 47 .flags = 0,
48 .ucon = UCON, 48 .ucon = UCON,
49 .ulcon = ULCON, 49 .ulcon = ULCON,
50 .ufcon = UFCON, 50 .ufcon = UFCON,
51 }, 51 },
52 [2] = { 52 [2] = {
53 .hwport = 2, 53 .hwport = 2,
54 .flags = 0, 54 .flags = 0,
55 .ucon = UCON, 55 .ucon = UCON,
56 .ulcon = ULCON, 56 .ulcon = ULCON,
57 .ufcon = UFCON, 57 .ufcon = UFCON,
58 }, 58 },
59 [3] = { 59 [3] = {
60 .hwport = 3, 60 .hwport = 3,
61 .flags = 0, 61 .flags = 0,
62 .ucon = UCON, 62 .ucon = UCON,
63 .ulcon = ULCON, 63 .ulcon = ULCON,
64 .ufcon = UFCON, 64 .ufcon = UFCON,
65 }, 65 },
66}; 66};
67 67
68/* DM9000AEP 10/100 ethernet controller */ 68/* DM9000AEP 10/100 ethernet controller */
69 69
70static struct resource real6410_dm9k_resource[] = { 70static struct resource real6410_dm9k_resource[] = {
71 [0] = { 71 [0] = {
72 .start = S3C64XX_PA_XM0CSN1, 72 .start = S3C64XX_PA_XM0CSN1,
73 .end = S3C64XX_PA_XM0CSN1 + 1, 73 .end = S3C64XX_PA_XM0CSN1 + 1,
74 .flags = IORESOURCE_MEM 74 .flags = IORESOURCE_MEM
75 }, 75 },
76 [1] = { 76 [1] = {
77 .start = S3C64XX_PA_XM0CSN1 + 4, 77 .start = S3C64XX_PA_XM0CSN1 + 4,
78 .end = S3C64XX_PA_XM0CSN1 + 5, 78 .end = S3C64XX_PA_XM0CSN1 + 5,
79 .flags = IORESOURCE_MEM 79 .flags = IORESOURCE_MEM
80 }, 80 },
81 [2] = { 81 [2] = {
82 .start = S3C_EINT(7), 82 .start = S3C_EINT(7),
83 .end = S3C_EINT(7), 83 .end = S3C_EINT(7),
84 .flags = IORESOURCE_IRQ, 84 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
85 } 85 }
86}; 86};
87 87
88static struct dm9000_plat_data real6410_dm9k_pdata = { 88static struct dm9000_plat_data real6410_dm9k_pdata = {
89 .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), 89 .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
90}; 90};
91 91
92static struct platform_device real6410_device_eth = { 92static struct platform_device real6410_device_eth = {
93 .name = "dm9000", 93 .name = "dm9000",
94 .id = -1, 94 .id = -1,
95 .num_resources = ARRAY_SIZE(real6410_dm9k_resource), 95 .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
96 .resource = real6410_dm9k_resource, 96 .resource = real6410_dm9k_resource,
97 .dev = { 97 .dev = {
98 .platform_data = &real6410_dm9k_pdata, 98 .platform_data = &real6410_dm9k_pdata,
99 }, 99 },
100}; 100};
101 101
102static struct platform_device *real6410_devices[] __initdata = { 102static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
129 /* set timing for nCS1 suitable for ethernet chip */ 129 /* set timing for nCS1 suitable for ethernet chip */
130 130
131 __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | 131 __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
132 (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | 132 (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
133 (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | 133 (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
134 (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | 134 (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
135 (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | 135 (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
136 (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | 136 (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
137 (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); 137 (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
138 138
139 platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices)); 139 platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
140} 140}
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index af91fefef2c6..cfecd70657cb 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -281,6 +281,24 @@ static struct clk init_clocks_disable[] = {
281 .enable = s5pv210_clk_ip0_ctrl, 281 .enable = s5pv210_clk_ip0_ctrl,
282 .ctrlbit = (1<<29), 282 .ctrlbit = (1<<29),
283 }, { 283 }, {
284 .name = "fimc",
285 .id = 0,
286 .parent = &clk_hclk_dsys.clk,
287 .enable = s5pv210_clk_ip0_ctrl,
288 .ctrlbit = (1 << 24),
289 }, {
290 .name = "fimc",
291 .id = 1,
292 .parent = &clk_hclk_dsys.clk,
293 .enable = s5pv210_clk_ip0_ctrl,
294 .ctrlbit = (1 << 25),
295 }, {
296 .name = "fimc",
297 .id = 2,
298 .parent = &clk_hclk_dsys.clk,
299 .enable = s5pv210_clk_ip0_ctrl,
300 .ctrlbit = (1 << 26),
301 }, {
284 .name = "otg", 302 .name = "otg",
285 .id = -1, 303 .id = -1,
286 .parent = &clk_hclk_psys.clk, 304 .parent = &clk_hclk_psys.clk,
@@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = {
357 .id = 1, 375 .id = 1,
358 .parent = &clk_pclk_psys.clk, 376 .parent = &clk_pclk_psys.clk,
359 .enable = s5pv210_clk_ip3_ctrl, 377 .enable = s5pv210_clk_ip3_ctrl,
360 .ctrlbit = (1<<8), 378 .ctrlbit = (1 << 10),
361 }, { 379 }, {
362 .name = "i2c", 380 .name = "i2c",
363 .id = 2, 381 .id = 2,
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index b9f4d677cf55..77f456c91ad3 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
47 { 47 {
48 .virtual = (unsigned long)S5P_VA_SYSTIMER, 48 .virtual = (unsigned long)S5P_VA_SYSTIMER,
49 .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER), 49 .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
50 .length = SZ_1M, 50 .length = SZ_4K,
51 .type = MT_DEVICE, 51 .type = MT_DEVICE,
52 }, { 52 }, {
53 .virtual = (unsigned long)VA_VIC2, 53 .virtual = (unsigned long)VA_VIC2,
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 5e16b4c69222..ae416fe7daf2 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common objects 5# Common objects
6obj-y := timer.o console.o clock.o 6obj-y := timer.o console.o clock.o pm_runtime.o
7 7
8# CPU objects 8# CPU objects
9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o 9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 23d472f9525e..95935c83c306 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -25,6 +25,7 @@
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/mfd/sh_mobile_sdhi.h> 27#include <linux/mfd/sh_mobile_sdhi.h>
28#include <linux/mfd/tmio.h>
28#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
29#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
39#include <linux/sh_clk.h> 40#include <linux/sh_clk.h>
40#include <linux/gpio.h> 41#include <linux/gpio.h>
41#include <linux/input.h> 42#include <linux/input.h>
43#include <linux/leds.h>
42#include <linux/input/sh_keysc.h> 44#include <linux/input/sh_keysc.h>
43#include <linux/usb/r8a66597.h> 45#include <linux/usb/r8a66597.h>
44 46
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
307 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, 309 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
308 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, 310 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
309 .tmio_ocr_mask = MMC_VDD_165_195, 311 .tmio_ocr_mask = MMC_VDD_165_195,
312 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
310}; 313};
311 314
312static struct resource sdhi1_resources[] = { 315static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
558 561
559static struct platform_device fsi_device = { 562static struct platform_device fsi_device = {
560 .name = "sh_fsi2", 563 .name = "sh_fsi2",
561 .id = 0, 564 .id = -1,
562 .num_resources = ARRAY_SIZE(fsi_resources), 565 .num_resources = ARRAY_SIZE(fsi_resources),
563 .resource = fsi_resources, 566 .resource = fsi_resources,
564 .dev = { 567 .dev = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
650 }, 653 },
651}; 654};
652 655
656static struct gpio_led ap4evb_leds[] = {
657 {
658 .name = "led4",
659 .gpio = GPIO_PORT185,
660 .default_state = LEDS_GPIO_DEFSTATE_ON,
661 },
662 {
663 .name = "led2",
664 .gpio = GPIO_PORT186,
665 .default_state = LEDS_GPIO_DEFSTATE_ON,
666 },
667 {
668 .name = "led3",
669 .gpio = GPIO_PORT187,
670 .default_state = LEDS_GPIO_DEFSTATE_ON,
671 },
672 {
673 .name = "led1",
674 .gpio = GPIO_PORT188,
675 .default_state = LEDS_GPIO_DEFSTATE_ON,
676 }
677};
678
679static struct gpio_led_platform_data ap4evb_leds_pdata = {
680 .num_leds = ARRAY_SIZE(ap4evb_leds),
681 .leds = ap4evb_leds,
682};
683
684static struct platform_device leds_device = {
685 .name = "leds-gpio",
686 .id = 0,
687 .dev = {
688 .platform_data = &ap4evb_leds_pdata,
689 },
690};
691
653static struct platform_device *ap4evb_devices[] __initdata = { 692static struct platform_device *ap4evb_devices[] __initdata = {
693 &leds_device,
654 &nor_flash_device, 694 &nor_flash_device,
655 &smc911x_device, 695 &smc911x_device,
656 &sdhi0_device, 696 &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
840 gpio_request(GPIO_FN_CS5A, NULL); 880 gpio_request(GPIO_FN_CS5A, NULL);
841 gpio_request(GPIO_FN_IRQ6_39, NULL); 881 gpio_request(GPIO_FN_IRQ6_39, NULL);
842 882
843 /* enable LED 1 - 4 */
844 gpio_request(GPIO_PORT185, NULL);
845 gpio_request(GPIO_PORT186, NULL);
846 gpio_request(GPIO_PORT187, NULL);
847 gpio_request(GPIO_PORT188, NULL);
848 gpio_direction_output(GPIO_PORT185, 1);
849 gpio_direction_output(GPIO_PORT186, 1);
850 gpio_direction_output(GPIO_PORT187, 1);
851 gpio_direction_output(GPIO_PORT188, 1);
852 gpio_export(GPIO_PORT185, 0);
853 gpio_export(GPIO_PORT186, 0);
854 gpio_export(GPIO_PORT187, 0);
855 gpio_export(GPIO_PORT188, 0);
856
857 /* enable Debug switch (S6) */ 883 /* enable Debug switch (S6) */
858 gpio_request(GPIO_PORT32, NULL); 884 gpio_request(GPIO_PORT32, NULL);
859 gpio_request(GPIO_PORT33, NULL); 885 gpio_request(GPIO_PORT33, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index fb4e9b1d788e..759468992ad2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
286 286
287struct clk pllc2_clk = { 287struct clk pllc2_clk = {
288 .ops = &pllc2_clk_ops, 288 .ops = &pllc2_clk_ops,
289 .flags = CLK_ENABLE_ON_INIT,
290 .parent = &extal1_div2_clk, 289 .parent = &extal1_div2_clk,
291 .freq_table = pllc2_freq_table, 290 .freq_table = pllc2_freq_table,
292 .parent_table = pllc2_parent, 291 .parent_table = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
395 394
396enum { MSTP001, 395enum { MSTP001,
397 MSTP131, MSTP130, 396 MSTP131, MSTP130,
398 MSTP129, MSTP128, 397 MSTP129, MSTP128, MSTP127, MSTP126,
399 MSTP118, MSTP117, MSTP116, 398 MSTP118, MSTP117, MSTP116,
400 MSTP106, MSTP101, MSTP100, 399 MSTP106, MSTP101, MSTP100,
401 MSTP223, 400 MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
413 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ 412 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
414 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ 413 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
415 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ 414 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
415 [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
416 [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
416 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ 417 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
417 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ 418 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
418 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ 419 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
428 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ 429 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
429 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ 430 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
430 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ 431 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
431 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */ 432 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
432 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 433 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
433 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ 434 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
434 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ 435 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
498 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ 499 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
499 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ 500 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
500 CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ 501 CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
502 CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
503 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
501 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 504 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
502 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ 505 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
503 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ 506 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c
index b7c705a213a2..6b7c7c42bc8f 100644
--- a/arch/arm/mach-shmobile/clock.c
+++ b/arch/arm/mach-shmobile/clock.c
@@ -1,8 +1,10 @@
1/* 1/*
2 * SH-Mobile Timer 2 * SH-Mobile Clock Framework
3 * 3 *
4 * Copyright (C) 2010 Magnus Damm 4 * Copyright (C) 2010 Magnus Damm
5 * 5 *
6 * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License. 10 * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644
index 000000000000..94912d3944d3
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -0,0 +1,169 @@
1/*
2 * arch/arm/mach-shmobile/pm_runtime.c
3 *
4 * Runtime PM support code for SuperH Mobile ARM
5 *
6 * Copyright (C) 2009-2010 Magnus Damm
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/io.h>
16#include <linux/pm_runtime.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/sh_clk.h>
20#include <linux/bitmap.h>
21
22#ifdef CONFIG_PM_RUNTIME
23#define BIT_ONCE 0
24#define BIT_ACTIVE 1
25#define BIT_CLK_ENABLED 2
26
27struct pm_runtime_data {
28 unsigned long flags;
29 struct clk *clk;
30};
31
32static void __devres_release(struct device *dev, void *res)
33{
34 struct pm_runtime_data *prd = res;
35
36 dev_dbg(dev, "__devres_release()\n");
37
38 if (test_bit(BIT_CLK_ENABLED, &prd->flags))
39 clk_disable(prd->clk);
40
41 if (test_bit(BIT_ACTIVE, &prd->flags))
42 clk_put(prd->clk);
43}
44
45static struct pm_runtime_data *__to_prd(struct device *dev)
46{
47 return devres_find(dev, __devres_release, NULL, NULL);
48}
49
50static void platform_pm_runtime_init(struct device *dev,
51 struct pm_runtime_data *prd)
52{
53 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
54 prd->clk = clk_get(dev, NULL);
55 if (!IS_ERR(prd->clk)) {
56 set_bit(BIT_ACTIVE, &prd->flags);
57 dev_info(dev, "clocks managed by runtime pm\n");
58 }
59 }
60}
61
62static void platform_pm_runtime_bug(struct device *dev,
63 struct pm_runtime_data *prd)
64{
65 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
66 dev_err(dev, "runtime pm suspend before resume\n");
67}
68
69int platform_pm_runtime_suspend(struct device *dev)
70{
71 struct pm_runtime_data *prd = __to_prd(dev);
72
73 dev_dbg(dev, "platform_pm_runtime_suspend()\n");
74
75 platform_pm_runtime_bug(dev, prd);
76
77 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
78 clk_disable(prd->clk);
79 clear_bit(BIT_CLK_ENABLED, &prd->flags);
80 }
81
82 return 0;
83}
84
85int platform_pm_runtime_resume(struct device *dev)
86{
87 struct pm_runtime_data *prd = __to_prd(dev);
88
89 dev_dbg(dev, "platform_pm_runtime_resume()\n");
90
91 platform_pm_runtime_init(dev, prd);
92
93 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
94 clk_enable(prd->clk);
95 set_bit(BIT_CLK_ENABLED, &prd->flags);
96 }
97
98 return 0;
99}
100
101int platform_pm_runtime_idle(struct device *dev)
102{
103 /* suspend synchronously to disable clocks immediately */
104 return pm_runtime_suspend(dev);
105}
106
107static int platform_bus_notify(struct notifier_block *nb,
108 unsigned long action, void *data)
109{
110 struct device *dev = data;
111 struct pm_runtime_data *prd;
112
113 dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
114
115 if (action == BUS_NOTIFY_BIND_DRIVER) {
116 prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
117 if (prd)
118 devres_add(dev, prd);
119 else
120 dev_err(dev, "unable to alloc memory for runtime pm\n");
121 }
122
123 return 0;
124}
125
126#else /* CONFIG_PM_RUNTIME */
127
128static int platform_bus_notify(struct notifier_block *nb,
129 unsigned long action, void *data)
130{
131 struct device *dev = data;
132 struct clk *clk;
133
134 dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
135
136 switch (action) {
137 case BUS_NOTIFY_BIND_DRIVER:
138 clk = clk_get(dev, NULL);
139 if (!IS_ERR(clk)) {
140 clk_enable(clk);
141 clk_put(clk);
142 dev_info(dev, "runtime pm disabled, clock forced on\n");
143 }
144 break;
145 case BUS_NOTIFY_UNBOUND_DRIVER:
146 clk = clk_get(dev, NULL);
147 if (!IS_ERR(clk)) {
148 clk_disable(clk);
149 clk_put(clk);
150 dev_info(dev, "runtime pm disabled, clock forced off\n");
151 }
152 break;
153 }
154
155 return 0;
156}
157
158#endif /* CONFIG_PM_RUNTIME */
159
160static struct notifier_block platform_bus_notifier = {
161 .notifier_call = platform_bus_notify
162};
163
164static int __init sh_pm_runtime_init(void)
165{
166 bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
167 return 0;
168}
169core_initcall(sh_pm_runtime_init);
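
With the notifier above attaching a pm_runtime_data record to every platform device at BUS_NOTIFY_BIND_DRIVER, a driver only has to opt in to runtime PM and its clock is gated by the suspend/resume callbacks automatically. A sketch of that driver side (hypothetical driver, kernel context assumed, error handling trimmed):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);

        pm_runtime_get_sync(&pdev->dev);        /* resume: clk_enable() via the hooks above */
        /* ... touch the hardware ... */
        pm_runtime_put_sync(&pdev->dev);        /* idle -> suspend: clk_disable() */

        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        pm_runtime_disable(&pdev->dev);
        return 0;
}

When CONFIG_PM_RUNTIME is off, the notifier's fallback path simply forces the clock on at bind time and off at unbind, so such a driver keeps working unchanged.
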
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index 7b1fc984abb6..d5a71abcbaea 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
273extern int gpio_get_value(unsigned gpio); 273extern int gpio_get_value(unsigned gpio);
274extern void gpio_set_value(unsigned gpio, int value); 274extern void gpio_set_value(unsigned gpio, int value);
275 275
276#define gpio_get_value_cansleep gpio_get_value
277#define gpio_set_value_cansleep gpio_set_value
278
276/* wrappers to sleep-enable the previous two functions */ 279/* wrappers to sleep-enable the previous two functions */
277static inline unsigned gpio_to_irq(unsigned gpio) 280static inline unsigned gpio_to_irq(unsigned gpio)
278{ 281{
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 577df6cccb08..efb127022d42 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
227 int i; 227 int i;
228 228
229#ifdef CONFIG_CACHE_L2X0 229#ifdef CONFIG_CACHE_L2X0
230 l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 230 void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
231
232 /* set RAM latencies to 1 cycle for this core tile. */
233 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
234 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
235
236 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
231#endif 237#endif
232 238
233 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33c3f570aaa0..a0a2928ae4dd 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
398# ARMv6k 398# ARMv6k
399config CPU_32v6K 399config CPU_32v6K
400 bool "Support ARM V6K processor extensions" if !SMP 400 bool "Support ARM V6K processor extensions" if !SMP
401 depends on CPU_V6 401 depends on CPU_V6 || CPU_V7
402 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) 402 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
403 help 403 help
404 Say Y here if your ARMv6 processor supports the 'K' extension. 404 Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64ae87e..724ba3bce72c 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
885 885
886 if (ai_usermode & UM_SIGNAL) 886 if (ai_usermode & UM_SIGNAL)
887 force_sig(SIGBUS, current); 887 force_sig(SIGBUS, current);
888 else 888 else {
889 set_cr(cr_no_alignment); 889 /*
890 * We're about to disable the alignment trap and return to
891 * user space. But if an interrupt occurs before actually
892 * reaching user space, then the IRQ vector entry code will
893 * notice that we were still in kernel space and therefore
894 * the alignment trap won't be re-enabled in that case as it
895 * is presumed to be always on from kernel space.
896 * Let's prevent that race by disabling interrupts here (they
897 * are disabled on the way back to user space anyway in
898 * entry-common.S) and disable the alignment trap only if
899 * there is no work pending for this thread.
900 */
901 raw_local_irq_disable();
902 if (!(current_thread_info()->flags & _TIF_WORK_MASK))
903 set_cr(cr_no_alignment);
904 }
890 905
891 return 0; 906 return 0;
892} 907}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed63c5d..4bc43e535d3b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
229 } 229 }
230 } while (size -= PAGE_SIZE); 230 } while (size -= PAGE_SIZE);
231 231
232 dsb();
233
232 return (void *)c->vm_start; 234 return (void *)c->vm_start;
233 } 235 }
234 return NULL; 236 return NULL;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6a2b3f..6a3a2d0cd6db 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
15#include <linux/nodemask.h> 15#include <linux/nodemask.h>
16#include <linux/memblock.h> 16#include <linux/memblock.h>
17#include <linux/sort.h> 17#include <linux/sort.h>
18#include <linux/fs.h>
18 19
19#include <asm/cputype.h> 20#include <asm/cputype.h>
20#include <asm/sections.h> 21#include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
246 .domain = DOMAIN_USER, 247 .domain = DOMAIN_USER,
247 }, 248 },
248 [MT_MEMORY] = { 249 [MT_MEMORY] = {
250 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
251 L_PTE_USER | L_PTE_EXEC,
252 .prot_l1 = PMD_TYPE_TABLE,
249 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
250 .domain = DOMAIN_KERNEL, 254 .domain = DOMAIN_KERNEL,
251 }, 255 },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
254 .domain = DOMAIN_KERNEL, 258 .domain = DOMAIN_KERNEL,
255 }, 259 },
256 [MT_MEMORY_NONCACHED] = { 260 [MT_MEMORY_NONCACHED] = {
261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
262 L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
263 .prot_l1 = PMD_TYPE_TABLE,
257 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
258 .domain = DOMAIN_KERNEL, 265 .domain = DOMAIN_KERNEL,
259 }, 266 },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
411 * Enable CPU-specific coherency if supported. 418 * Enable CPU-specific coherency if supported.
412 * (Only available on XSC3 at the moment.) 419 * (Only available on XSC3 at the moment.)
413 */ 420 */
414 if (arch_is_coherent() && cpu_is_xsc3()) 421 if (arch_is_coherent() && cpu_is_xsc3()) {
415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 422 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
416 423 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
424 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
425 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
426 }
417 /* 427 /*
418 * ARMv6 and above have extended page tables. 428 * ARMv6 and above have extended page tables.
419 */ 429 */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 448 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 449 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 450 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
451 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
441 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 452 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
453 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
442#endif 454#endif
443 } 455 }
444 456
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 487 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 488 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 489 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
490 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
491 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
478 mem_types[MT_ROM].prot_sect |= cp->pmd; 492 mem_types[MT_ROM].prot_sect |= cp->pmd;
479 493
480 switch (cp->pmd) { 494 switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
498 } 512 }
499} 513}
500 514
515#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
516pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
517 unsigned long size, pgprot_t vma_prot)
518{
519 if (!pfn_valid(pfn))
520 return pgprot_noncached(vma_prot);
521 else if (file->f_flags & O_SYNC)
522 return pgprot_writecombine(vma_prot);
523 return vma_prot;
524}
525EXPORT_SYMBOL(phys_mem_access_prot);
526#endif
527
501#define vectors_base() (vectors_high() ? 0xffff0000 : 0) 528#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
502 529
503static void __init *early_alloc(unsigned long sz) 530static void __init *early_alloc(unsigned long sz)
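
The phys_mem_access_prot() hunk above picks a mapping attribute for /dev/mem-style mappings when CONFIG_ARM_DMA_MEM_BUFFERABLE is set. A standalone model of the same decision, with enum names invented for illustration:

    #include <stdbool.h>

    enum map_attr { MAP_DEFAULT_CACHED, MAP_WRITECOMBINE, MAP_UNCACHED };

    /* Model of the policy in phys_mem_access_prot() above: addresses that are
       not RAM fall back to an uncached mapping, RAM opened with O_SYNC gets
       write-combining, and ordinary RAM keeps the default protection. */
    static enum map_attr phys_mem_attr(bool pfn_is_ram, bool opened_o_sync)
    {
        if (!pfn_is_ram)
            return MAP_UNCACHED;
        if (opened_o_sync)
            return MAP_WRITECOMBINE;
        return MAP_DEFAULT_CACHED;
    }
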
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d99ee9..7563ff0141bd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -186,13 +186,14 @@ cpu_v7_name:
186 * It is assumed that: 186 * It is assumed that:
187 * - cache type register is implemented 187 * - cache type register is implemented
188 */ 188 */
189__v7_setup: 189__v7_ca9mp_setup:
190#ifdef CONFIG_SMP 190#ifdef CONFIG_SMP
191 mrc p15, 0, r0, c1, c0, 1 191 mrc p15, 0, r0, c1, c0, 1
192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
195#endif 195#endif
196__v7_setup:
196 adr r12, __v7_setup_stack @ the local stack 197 adr r12, __v7_setup_stack @ the local stack
197 stmia r12, {r0-r5, r7, r9, r11, lr} 198 stmia r12, {r0-r5, r7, r9, r11, lr}
198 bl v7_flush_dcache_all 199 bl v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 202 mrc p15, 0, r0, c0, c0, 0 @ read main ID register
202 and r10, r0, #0xff000000 @ ARM? 203 and r10, r0, #0xff000000 @ ARM?
203 teq r10, #0x41000000 204 teq r10, #0x41000000
204 bne 2f 205 bne 3f
205 and r5, r0, #0x00f00000 @ variant 206 and r5, r0, #0x00f00000 @ variant
206 and r6, r0, #0x0000000f @ revision 207 and r6, r0, #0x0000000f @ revision
207 orr r0, r6, r5, lsr #20-4 @ combine variant and revision 208 orr r6, r6, r5, lsr #20-4 @ combine variant and revision
209 ubfx r0, r0, #4, #12 @ primary part number
208 210
211 /* Cortex-A8 Errata */
212 ldr r10, =0x00000c08 @ Cortex-A8 primary part number
213 teq r0, r10
214 bne 2f
209#ifdef CONFIG_ARM_ERRATA_430973 215#ifdef CONFIG_ARM_ERRATA_430973
210 teq r5, #0x00100000 @ only present in r1p* 216 teq r5, #0x00100000 @ only present in r1p*
211 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 219 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
214#endif 220#endif
215#ifdef CONFIG_ARM_ERRATA_458693 221#ifdef CONFIG_ARM_ERRATA_458693
216 teq r0, #0x20 @ only present in r2p0 222 teq r6, #0x20 @ only present in r2p0
217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 223 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 224 orreq r10, r10, #(1 << 5) @ set L1NEON to 1
219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 225 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 226 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
221#endif 227#endif
222#ifdef CONFIG_ARM_ERRATA_460075 228#ifdef CONFIG_ARM_ERRATA_460075
223 teq r0, #0x20 @ only present in r2p0 229 teq r6, #0x20 @ only present in r2p0
224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 230 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
225 tsteq r10, #1 << 22 231 tsteq r10, #1 << 22
226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 232 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 233 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
228#endif 234#endif
235 b 3f
236
237 /* Cortex-A9 Errata */
2382: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
239 teq r0, r10
240 bne 3f
241#ifdef CONFIG_ARM_ERRATA_742230
242 cmp r6, #0x22 @ only present up to r2p2
243 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
244 orrle r10, r10, #1 << 4 @ set bit #4
245 mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
246#endif
247#ifdef CONFIG_ARM_ERRATA_742231
248 teq r6, #0x20 @ present in r2p0
249 teqne r6, #0x21 @ present in r2p1
250 teqne r6, #0x22 @ present in r2p2
251 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
252 orreq r10, r10, #1 << 12 @ set bit #12
253 orreq r10, r10, #1 << 22 @ set bit #22
254 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
255#endif
229 256
2302: mov r10, #0 2573: mov r10, #0
231#ifdef HARVARD_CACHE 258#ifdef HARVARD_CACHE
232 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 259 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
233#endif 260#endif
@@ -323,6 +350,29 @@ cpu_elf_name:
323 350
324 .section ".proc.info.init", #alloc, #execinstr 351 .section ".proc.info.init", #alloc, #execinstr
325 352
353 .type __v7_ca9mp_proc_info, #object
354__v7_ca9mp_proc_info:
355 .long 0x410fc090 @ Required ID value
356 .long 0xff0ffff0 @ Mask for ID
357 .long PMD_TYPE_SECT | \
358 PMD_SECT_AP_WRITE | \
359 PMD_SECT_AP_READ | \
360 PMD_FLAGS
361 .long PMD_TYPE_SECT | \
362 PMD_SECT_XN | \
363 PMD_SECT_AP_WRITE | \
364 PMD_SECT_AP_READ
365 b __v7_ca9mp_setup
366 .long cpu_arch_name
367 .long cpu_elf_name
368 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
369 .long cpu_v7_name
370 .long v7_processor_functions
371 .long v7wbi_tlb_fns
372 .long v6_user_fns
373 .long v7_cache_fns
374 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
375
326 /* 376 /*
327 * Match any ARMv7 processor core. 377 * Match any ARMv7 processor core.
328 */ 378 */
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 0691176899ff..72e09eb642dd 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event)
102 if (IS_ERR(pevent)) { 102 if (IS_ERR(pevent)) {
103 ret = PTR_ERR(pevent); 103 ret = PTR_ERR(pevent);
104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { 104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
105 perf_event_release_kernel(pevent);
105 pr_warning("oprofile: failed to enable event %d " 106 pr_warning("oprofile: failed to enable event %d "
106 "on CPU %d\n", event, cpu); 107 "on CPU %d\n", event, cpu);
107 ret = -EBUSY; 108 ret = -EBUSY;
@@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
365 ret = init_driverfs(); 366 ret = init_driverfs();
366 if (ret) { 367 if (ret) {
367 kfree(counter_config); 368 kfree(counter_config);
369 counter_config = NULL;
368 return ret; 370 return ret;
369 } 371 }
370 372
@@ -402,7 +404,6 @@ void oprofile_arch_exit(void)
402 struct perf_event *event; 404 struct perf_event *event;
403 405
404 if (*perf_events) { 406 if (*perf_events) {
405 exit_driverfs();
406 for_each_possible_cpu(cpu) { 407 for_each_possible_cpu(cpu) {
407 for (id = 0; id < perf_num_counters; ++id) { 408 for (id = 0; id < perf_num_counters; ++id) {
408 event = perf_events[cpu][id]; 409 event = perf_events[cpu][id];
@@ -413,8 +414,10 @@ void oprofile_arch_exit(void)
413 } 414 }
414 } 415 }
415 416
416 if (counter_config) 417 if (counter_config) {
417 kfree(counter_config); 418 kfree(counter_config);
419 exit_driverfs();
420 }
418} 421}
419#else 422#else
420int __init oprofile_arch_init(struct oprofile_operations *ops) 423int __init oprofile_arch_init(struct oprofile_operations *ops)
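
The three changes above tighten error and exit paths: release the perf event when it fails to go active, clear counter_config after a failed init so the exit path can key off it, and only tear down the driverfs entries when init actually created them. A generic sketch of that init/rollback pairing, with setup_a()/setup_b() as hypothetical stand-ins (not the oprofile functions):

    #include <stdlib.h>

    static int *state_a;
    static int  state_b_up;

    static int  setup_a(void)    { state_a = calloc(4, sizeof(*state_a)); return state_a ? 0 : -1; }
    static void teardown_a(void) { free(state_a); state_a = NULL; }
    static int  setup_b(void)    { state_b_up = 1; return 0; }
    static void teardown_b(void) { state_b_up = 0; }

    static int sketch_init(void)
    {
        if (setup_a())
            return -1;
        if (setup_b()) {
            teardown_a();        /* roll back in reverse order on failure ... */
            return -1;           /* ... and leave no stale pointer behind */
        }
        return 0;
    }

    static void sketch_exit(void)
    {
        if (state_a) {           /* tear down only what init actually set up */
            teardown_a();
            teardown_b();
        }
    }
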
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index 0527e65318f4..6785db4179b8 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -43,6 +43,7 @@ config ARCH_MXC91231
43config ARCH_MX5 43config ARCH_MX5
44 bool "MX5-based" 44 bool "MX5-based"
45 select CPU_V7 45 select CPU_V7
46 select ARM_L1_CACHE_SHIFT_6
46 help 47 help
47 This enables support for systems based on the Freescale i.MX51 family 48 This enables support for systems based on the Freescale i.MX51 family
48 49
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
index 634e3f4c454d..656acb45d434 100644
--- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
+++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
@@ -37,9 +37,9 @@
37 * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 37 * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
38 */ 38 */
39 39
40extern void eukrea_mbimx25_baseboard_init(void); 40extern void eukrea_mbimxsd25_baseboard_init(void);
41extern void eukrea_mbimx27_baseboard_init(void); 41extern void eukrea_mbimx27_baseboard_init(void);
42extern void eukrea_mbimx35_baseboard_init(void); 42extern void eukrea_mbimxsd35_baseboard_init(void);
43extern void eukrea_mbimx51_baseboard_init(void); 43extern void eukrea_mbimx51_baseboard_init(void);
44 44
45#endif 45#endif
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index b3da9aad4295..3703ab28257f 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
164 return -EAGAIN; 164 return -EAGAIN;
165 165
166 for (i = 0; i < 4; i++) { 166 for (i = 0; i < 4; i++) {
167 v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i]; 167 v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
168 __raw_writel(v, TZIC_WAKEUP0(i)); 168 wakeup_intr[i];
169 __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
169 } 170 }
170 171
171 return 0; 172 return 0;
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ea3ca86c5283..aedf9c1d645e 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/mach-nomadik/timer.c 2 * linux/arch/arm/plat-nomadik/timer.c
3 * 3 *
4 * Copyright (C) 2008 STMicroelectronics 4 * Copyright (C) 2008 STMicroelectronics
5 * Copyright (C) 2010 Alessandro Rubini 5 * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
75 cr = readl(mtu_base + MTU_CR(1)); 75 cr = readl(mtu_base + MTU_CR(1));
76 writel(0, mtu_base + MTU_LR(1)); 76 writel(0, mtu_base + MTU_LR(1));
77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
78 writel(0x2, mtu_base + MTU_IMSC); 78 writel(1 << 1, mtu_base + MTU_IMSC);
79 break; 79 break;
80 case CLOCK_EVT_MODE_SHUTDOWN: 80 case CLOCK_EVT_MODE_SHUTDOWN:
81 case CLOCK_EVT_MODE_UNUSED: 81 case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
131{ 131{
132 unsigned long rate; 132 unsigned long rate;
133 struct clk *clk0; 133 struct clk *clk0;
134 struct clk *clk1; 134 u32 cr = MTU_CRn_32BITS;
135 u32 cr;
136 135
137 clk0 = clk_get_sys("mtu0", NULL); 136 clk0 = clk_get_sys("mtu0", NULL);
138 BUG_ON(IS_ERR(clk0)); 137 BUG_ON(IS_ERR(clk0));
139 138
140 clk1 = clk_get_sys("mtu1", NULL);
141 BUG_ON(IS_ERR(clk1));
142
143 clk_enable(clk0); 139 clk_enable(clk0);
144 clk_enable(clk1);
145 140
146 /* 141 /*
147 * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 142 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
148 * use a divide-by-16 counter if it's more than 16MHz 143 * for ux500.
144 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
145 * At 32 MHz, the timer (with 32 bit counter) can be programmed
146 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
147 * with 16 gives too low timer resolution.
149 */ 148 */
150 cr = MTU_CRn_32BITS;;
151 rate = clk_get_rate(clk0); 149 rate = clk_get_rate(clk0);
152 if (rate > 16 << 20) { 150 if (rate > 32000000) {
153 rate /= 16; 151 rate /= 16;
154 cr |= MTU_CRn_PRESCALE_16; 152 cr |= MTU_CRn_PRESCALE_16;
155 } else { 153 } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
170 pr_err("timer: failed to initialize clock source %s\n", 168 pr_err("timer: failed to initialize clock source %s\n",
171 nmdk_clksrc.name); 169 nmdk_clksrc.name);
172 170
173 /* Timer 1 is used for events, fix according to rate */ 171 /* Timer 1 is used for events */
174 cr = MTU_CRn_32BITS; 172
175 rate = clk_get_rate(clk1);
176 if (rate > 16 << 20) {
177 rate /= 16;
178 cr |= MTU_CRn_PRESCALE_16;
179 } else {
180 cr |= MTU_CRn_PRESCALE_1;
181 }
182 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 173 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
183 174
184 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ 175 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
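
The rewritten init drives both the clocksource and the clockevent from the single mtu0 clock, and the new comment explains the prescaler choice. The same arithmetic as a standalone check (the values below are just the examples from that comment):

    #include <stdio.h>

    /* Divide the MTU input clock by 16 only when it runs faster than 32 MHz,
       so a slow 2.4 MHz clock keeps its full resolution while a fast ux500
       clock still fits a usable wake-up range. */
    static unsigned long pick_timer_rate(unsigned long clk_rate, int *prescale_16)
    {
        if (clk_rate > 32000000) {
            *prescale_16 = 1;
            return clk_rate / 16;
        }
        *prescale_16 = 0;
        return clk_rate;
    }

    int main(void)
    {
        int div16;
        printf("%lu\n", pick_timer_rate(2400000, &div16));    /* Nomadik: 2.4 MHz, no prescale */
        printf("%lu\n", pick_timer_rate(133000000, &div16));  /* ux500: 133 MHz -> ~8.3 MHz */
        return 0;
    }
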
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index e39a417a368d..a92cb499313f 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
33config OMAP_DEBUG_LEDS 33config OMAP_DEBUG_LEDS
34 bool 34 bool
35 depends on OMAP_DEBUG_DEVICES 35 depends on OMAP_DEBUG_DEVICES
36 default y if LEDS 36 default y if LEDS_CLASS
37 37
38config OMAP_RESET_CLOCKS 38config OMAP_RESET_CLOCKS
39 bool "Reset unused clocks during boot" 39 bool "Reset unused clocks during boot"
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index e31496e35b0f..0c8612fd8312 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
156 /* Writing zero to RSYNC_ERR clears the IRQ */ 156 /* Writing zero to RSYNC_ERR clears the IRQ */
157 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); 157 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
158 } else { 158 } else {
159 complete(&mcbsp_rx->tx_irq_completion); 159 complete(&mcbsp_rx->rx_irq_completion);
160 } 160 }
161 161
162 return IRQ_HANDLED; 162 return IRQ_HANDLED;
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 226b2e858d6c..10b3b4c63372 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
220 if (omap_sram_size == 0) 220 if (omap_sram_size == 0)
221 return; 221 return;
222 222
223 if (cpu_is_omap24xx()) {
224 omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
225
226 base = OMAP2_SRAM_PA;
227 base = ROUND_DOWN(base, PAGE_SIZE);
228 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
229 }
230
231 if (cpu_is_omap34xx()) { 223 if (cpu_is_omap34xx()) {
232 omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
233 base = OMAP3_SRAM_PA;
234 base = ROUND_DOWN(base, PAGE_SIZE);
235 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
236
237 /* 224 /*
238 * SRAM must be marked as non-cached on OMAP3 since the 225 * SRAM must be marked as non-cached on OMAP3 since the
239 * CORE DPLL M2 divider change code (in SRAM) runs with the 226 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 231 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
245 } 232 }
246 233
247 if (cpu_is_omap44xx()) { 234 omap_sram_io_desc[0].virtual = omap_sram_base;
248 omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 235 base = omap_sram_start;
249 base = OMAP4_SRAM_PA; 236 base = ROUND_DOWN(base, PAGE_SIZE);
250 base = ROUND_DOWN(base, PAGE_SIZE); 237 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
251 omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 238 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
252 }
253 omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
254 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 239 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
255 240
256 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 241 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index 0732c6c8d511..ef32686feef9 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
176 176
177static int __devinit pwm_probe(struct platform_device *pdev) 177static int __devinit pwm_probe(struct platform_device *pdev)
178{ 178{
179 struct platform_device_id *id = platform_get_device_id(pdev); 179 const struct platform_device_id *id = platform_get_device_id(pdev);
180 struct pwm_device *pwm, *secondary = NULL; 180 struct pwm_device *pwm, *secondary = NULL;
181 struct resource *r; 181 struct resource *r;
182 int ret = 0; 182 int ret = 0;
diff --git a/arch/arm/plat-s5p/dev-fimc0.c b/arch/arm/plat-s5p/dev-fimc0.c
index d3f1a9b5d2b5..608770fc1531 100644
--- a/arch/arm/plat-s5p/dev-fimc0.c
+++ b/arch/arm/plat-s5p/dev-fimc0.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc0_resource[] = { 19static struct resource s5p_fimc0_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC0, 21 .start = S5P_PA_FIMC0,
21 .end = S5P_PA_FIMC0 + SZ_1M - 1, 22 .end = S5P_PA_FIMC0 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc0 = { 34struct platform_device s5p_device_fimc0 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 0, 36 .id = 0,
34 .num_resources = ARRAY_SIZE(s5p_fimc0_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc0_resource),
35 .resource = s5p_fimc0_resource, 38 .resource = s5p_fimc0_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc0_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
diff --git a/arch/arm/plat-s5p/dev-fimc1.c b/arch/arm/plat-s5p/dev-fimc1.c
index 41bd6986d0ad..76e3a97a87d3 100644
--- a/arch/arm/plat-s5p/dev-fimc1.c
+++ b/arch/arm/plat-s5p/dev-fimc1.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc1_resource[] = { 19static struct resource s5p_fimc1_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC1, 21 .start = S5P_PA_FIMC1,
21 .end = S5P_PA_FIMC1 + SZ_1M - 1, 22 .end = S5P_PA_FIMC1 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc1 = { 34struct platform_device s5p_device_fimc1 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 1, 36 .id = 1,
34 .num_resources = ARRAY_SIZE(s5p_fimc1_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc1_resource),
35 .resource = s5p_fimc1_resource, 38 .resource = s5p_fimc1_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc1_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
diff --git a/arch/arm/plat-s5p/dev-fimc2.c b/arch/arm/plat-s5p/dev-fimc2.c
index dfddeda6d4a3..24d29816fa2c 100644
--- a/arch/arm/plat-s5p/dev-fimc2.c
+++ b/arch/arm/plat-s5p/dev-fimc2.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc2_resource[] = { 19static struct resource s5p_fimc2_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC2, 21 .start = S5P_PA_FIMC2,
21 .end = S5P_PA_FIMC2 + SZ_1M - 1, 22 .end = S5P_PA_FIMC2 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc2 = { 34struct platform_device s5p_device_fimc2 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 2, 36 .id = 2,
34 .num_resources = ARRAY_SIZE(s5p_fimc2_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc2_resource),
35 .resource = s5p_fimc2_resource, 38 .resource = s5p_fimc2_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc2_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
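
All three FIMC devices now advertise a 32-bit DMA mask so streaming and coherent allocations succeed against them. DMA_BIT_MASK() expands to roughly the following; shown here as a standalone snippet, while the real macro lives in linux/dma-mapping.h:

    #include <stdio.h>

    /* Roughly what the kernel's DMA_BIT_MASK(n) expands to: the low n bits set,
       with a special case so n == 64 does not shift out of range. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        /* the mask the FIMC devices above advertise: anything below 4 GiB */
        printf("%#llx\n", (unsigned long long)DMA_BIT_MASK(32));
        return 0;
    }
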
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 57b68a50f45e..e3d41eaed1ff 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
273 if (!chip) 273 if (!chip)
274 return -EINVAL; 274 return -EINVAL;
275 275
276 off = chip->chip.base - pin; 276 off = pin - chip->chip.base;
277 shift = off * 2; 277 shift = off * 2;
278 reg = chip->base + 0x0C; 278 reg = chip->base + 0x0C;
279 279
280 drvstr = __raw_readl(reg); 280 drvstr = __raw_readl(reg);
281 drvstr = 0xffff & (0x3 << shift);
282 drvstr = drvstr >> shift; 281 drvstr = drvstr >> shift;
282 drvstr &= 0x3;
283 283
284 return (__force s5p_gpio_drvstr_t)drvstr; 284 return (__force s5p_gpio_drvstr_t)drvstr;
285} 285}
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
296 if (!chip) 296 if (!chip)
297 return -EINVAL; 297 return -EINVAL;
298 298
299 off = chip->chip.base - pin; 299 off = pin - chip->chip.base;
300 shift = off * 2; 300 shift = off * 2;
301 reg = chip->base + 0x0C; 301 reg = chip->base + 0x0C;
302 302
303 tmp = __raw_readl(reg); 303 tmp = __raw_readl(reg);
304 tmp &= ~(0x3 << shift);
304 tmp |= drvstr << shift; 305 tmp |= drvstr << shift;
305 306
306 __raw_writel(tmp, reg); 307 __raw_writel(tmp, reg);
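
Both fixes above are about addressing one 2-bit drive-strength field per pin: the offset is the pin minus the bank base, and the set path must clear the old two bits before OR-ing in the new level. The same bit manipulation as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t drvstr_get(uint32_t reg, unsigned off)
    {
        return (reg >> (off * 2)) & 0x3;        /* isolate the pin's 2-bit field */
    }

    static uint32_t drvstr_set(uint32_t reg, unsigned off, uint32_t drvstr)
    {
        unsigned shift = off * 2;

        reg &= ~(0x3u << shift);                /* clear the old level first */
        reg |= (drvstr & 0x3) << shift;         /* then write the new one */
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0xffffffff;

        reg = drvstr_set(reg, 3, 0x2);
        printf("%#x -> field 3 = %u\n", reg, drvstr_get(reg, 3));
        return 0;
    }
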
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index db4112c6f2be..1c6b92947c5d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
143/* Define values for the drvstr available for each gpio pin. 143/* Define values for the drvstr available for each gpio pin.
144 * 144 *
145 * These values control the value of the output signal driver strength, 145 * These values control the value of the output signal driver strength,
146 * configurable on most pins on the S5C series. 146 * configurable on most pins on the S5P series.
147 */ 147 */
148#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00) 148#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0)
149#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01) 149#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2)
150#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10) 150#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1)
151#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11) 151#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3)
152 152
153/** 153/**
154 * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin 154 * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 48cbdcb6bbd4..55590a4d87c9 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Mon Jul 12 21:10:14 2010 15# Last update: Thu Sep 9 22:43:01 2010
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634
2622gw2388 MACH_GW2388 GW2388 2635 2622gw2388 MACH_GW2388 GW2388 2635
2623jadecpu MACH_JADECPU JADECPU 2636 2623jadecpu MACH_JADECPU JADECPU 2636
2624carlisle MACH_CARLISLE CARLISLE 2637 2624carlisle MACH_CARLISLE CARLISLE 2637
2625lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 2625lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
2626nemid_tb MACH_NEMID_TB NEMID_TB 2639 2626nemid_tb MACH_NEMID_TB NEMID_TB 2639
2627terrier MACH_TERRIER TERRIER 2640 2627terrier MACH_TERRIER TERRIER 2640
2628turbot MACH_TURBOT TURBOT 2641 2628turbot MACH_TURBOT TURBOT 2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
2950netviz MACH_NETVIZ NETVIZ 2964 2950netviz MACH_NETVIZ NETVIZ 2964
2951flexibity MACH_FLEXIBITY FLEXIBITY 2965 2951flexibity MACH_FLEXIBITY FLEXIBITY 2965
2952wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 2952wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
2953lpc24xx MACH_LPC24XX LPC24XX 2967
2954spica MACH_SPICA SPICA 2968
2955gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
2956bipnet MACH_BIPNET BIPNET 2970
2957overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
2958davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
2959pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
2960ptx7545 MACH_PTX7545 PTX7545 2974
2961tm_efdc MACH_TM_EFDC TM_EFDC 2975
2962omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
2963flyer MACH_FLYER FLYER 2978
2964tornado3240 MACH_TORNADO3240 TORNADO3240 2979
2965soli_01 MACH_SOLI_01 SOLI_01 2980
2966omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
2967helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
2968netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
2969ssc MACH_SSC SSC 2984
2970premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
2971wasabi MACH_WASABI WASABI 2986
2972vivow MACH_VIVOW VIVOW 2987
2973mx50_rdp MACH_MX50_RDP MX50_RDP 2988
2974universal MACH_UNIVERSAL UNIVERSAL 2989
2975real6410 MACH_REAL6410 REAL6410 2990
2976spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
2977ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
2978omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
2979thebe MACH_THEBE THEBE 2994
2980rv082 MACH_RV082 RV082 2995
2981armlguest MACH_ARMLGUEST ARMLGUEST 2996
2982tjinc1000 MACH_TJINC1000 TJINC1000 2997
2983dockstar MACH_DOCKSTAR DOCKSTAR 2998
2984ax8008 MACH_AX8008 AX8008 2999
2985gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
2986pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
2987ea20 MACH_EA20 EA20 3002
2988awm2 MACH_AWM2 AWM2 3003
2989ti8148evm MACH_TI8148EVM TI8148EVM 3004
2990tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
2991linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
2992tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
2993rubys MACH_RUBYS RUBYS 3008
2994aquarius MACH_AQUARIUS AQUARIUS 3009
2995mx53_ard MACH_MX53_ARD MX53_ARD 3010
2996mx53_smd MACH_MX53_SMD MX53_SMD 3011
2997lswxl MACH_LSWXL LSWXL 3012
2998dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
2999sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
3000jocpu550 MACH_JOCPU550 JOCPU550 3015
3001msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
3002msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
3003yanomami MACH_YANOMAMI YANOMAMI 3018
3004gta04 MACH_GTA04 GTA04 3019
3005cm_a510 MACH_CM_A510 CM_A510 3020
3006omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
3007kx33xx MACH_KX33XX KX33XX 3022
3008ptx7510 MACH_PTX7510 PTX7510 3023
3009top9000 MACH_TOP9000 TOP9000 3024
3010teenote MACH_TEENOTE TEENOTE 3025
3011ts3 MACH_TS3 TS3 3026
3012a0 MACH_A0 A0 3027
3013fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
3014fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
3015frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
3016remus MACH_REMUS REMUS 3031
3017at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
3018at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
3019kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
3020oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
3021armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
3022spdm MACH_SPDM SPDM 3037
3023gtib MACH_GTIB GTIB 3038
3024dgm3240 MACH_DGM3240 DGM3240 3039
3025atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
3026htcmega MACH_HTCMEGA HTCMEGA 3041
3027tricorder MACH_TRICORDER TRICORDER 3042
3028tx28 MACH_TX28 TX28 3043
3029bstbrd MACH_BSTBRD BSTBRD 3044
3030pwb3090 MACH_PWB3090 PWB3090 3045
3031idea6410 MACH_IDEA6410 IDEA6410 3046
3032qbc9263 MACH_QBC9263 QBC9263 3047
3033borabora MACH_BORABORA BORABORA 3048
3034valdez MACH_VALDEZ VALDEZ 3049
3035ls9g20 MACH_LS9G20 LS9G20 3050
3036mios_v1 MACH_MIOS_V1 MIOS_V1 3051
3037s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
3038controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
3039tin307 MACH_TIN307 TIN307 3054
3040tin510 MACH_TIN510 TIN510 3055
3041bluecheese MACH_BLUECHEESE BLUECHEESE 3057
3042tem3x30 MACH_TEM3X30 TEM3X30 3058
3043harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
3044msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
3045spear900 MACH_SPEAR900 SPEAR900 3061
3046pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 98f94d041d9c..a727f54d64d6 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
314 vfree(module->arch.syminfo); 314 vfree(module->arch.syminfo);
315 module->arch.syminfo = NULL; 315 module->arch.syminfo = NULL;
316 316
317 return module_bug_finalize(hdr, sechdrs, module); 317 return 0;
318} 318}
319 319
320void module_arch_cleanup(struct module *module) 320void module_arch_cleanup(struct module *module)
321{ 321{
322 module_bug_cleanup(module);
323} 322}
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 0974c0ecc594..bab01298b58e 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
121 struct user_context *user = current->thread.user; 121 struct user_context *user = current->thread.user;
122 unsigned long tbr, psr; 122 unsigned long tbr, psr;
123 123
124 /* Always make any pending restarted system calls return -EINTR */
125 current_thread_info()->restart_block.fn = do_no_restart_syscall;
126
124 tbr = user->i.tbr; 127 tbr = user->i.tbr;
125 psr = user->i.psr; 128 psr = user->i.psr;
126 if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context))) 129 if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
250 struct sigframe __user *frame; 253 struct sigframe __user *frame;
251 int rsig; 254 int rsig;
252 255
256 set_fs(USER_DS);
257
253 frame = get_sigframe(ka, sizeof(*frame)); 258 frame = get_sigframe(ka, sizeof(*frame));
254 259
255 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 260 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
293 (unsigned long) (frame->retcode + 2)); 298 (unsigned long) (frame->retcode + 2));
294 } 299 }
295 300
296 /* set up registers for signal handler */ 301 /* Set up registers for the signal handler */
297 __frame->sp = (unsigned long) frame;
298 __frame->lr = (unsigned long) &frame->retcode;
299 __frame->gr8 = sig;
300
301 if (current->personality & FDPIC_FUNCPTRS) { 302 if (current->personality & FDPIC_FUNCPTRS) {
302 struct fdpic_func_descriptor __user *funcptr = 303 struct fdpic_func_descriptor __user *funcptr =
303 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; 304 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
304 __get_user(__frame->pc, &funcptr->text); 305 struct fdpic_func_descriptor desc;
305 __get_user(__frame->gr15, &funcptr->GOT); 306 if (copy_from_user(&desc, funcptr, sizeof(desc)))
307 goto give_sigsegv;
308 __frame->pc = desc.text;
309 __frame->gr15 = desc.GOT;
306 } else { 310 } else {
307 __frame->pc = (unsigned long) ka->sa.sa_handler; 311 __frame->pc = (unsigned long) ka->sa.sa_handler;
308 __frame->gr15 = 0; 312 __frame->gr15 = 0;
309 } 313 }
310 314
311 set_fs(USER_DS); 315 __frame->sp = (unsigned long) frame;
316 __frame->lr = (unsigned long) &frame->retcode;
317 __frame->gr8 = sig;
312 318
313 /* the tracer may want to single-step inside the handler */ 319 /* the tracer may want to single-step inside the handler */
314 if (test_thread_flag(TIF_SINGLESTEP)) 320 if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
323 return 0; 329 return 0;
324 330
325give_sigsegv: 331give_sigsegv:
326 force_sig(SIGSEGV, current); 332 force_sigsegv(sig, current);
327 return -EFAULT; 333 return -EFAULT;
328 334
329} /* end setup_frame() */ 335} /* end setup_frame() */
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
338 struct rt_sigframe __user *frame; 344 struct rt_sigframe __user *frame;
339 int rsig; 345 int rsig;
340 346
347 set_fs(USER_DS);
348
341 frame = get_sigframe(ka, sizeof(*frame)); 349 frame = get_sigframe(ka, sizeof(*frame));
342 350
343 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 351 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
392 } 400 }
393 401
394 /* Set up registers for signal handler */ 402 /* Set up registers for signal handler */
395 __frame->sp = (unsigned long) frame;
396 __frame->lr = (unsigned long) &frame->retcode;
397 __frame->gr8 = sig;
398 __frame->gr9 = (unsigned long) &frame->info;
399
400 if (current->personality & FDPIC_FUNCPTRS) { 403 if (current->personality & FDPIC_FUNCPTRS) {
401 struct fdpic_func_descriptor __user *funcptr = 404 struct fdpic_func_descriptor __user *funcptr =
402 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; 405 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
403 __get_user(__frame->pc, &funcptr->text); 406 struct fdpic_func_descriptor desc;
404 __get_user(__frame->gr15, &funcptr->GOT); 407 if (copy_from_user(&desc, funcptr, sizeof(desc)))
408 goto give_sigsegv;
409 __frame->pc = desc.text;
410 __frame->gr15 = desc.GOT;
405 } else { 411 } else {
406 __frame->pc = (unsigned long) ka->sa.sa_handler; 412 __frame->pc = (unsigned long) ka->sa.sa_handler;
407 __frame->gr15 = 0; 413 __frame->gr15 = 0;
408 } 414 }
409 415
410 set_fs(USER_DS); 416 __frame->sp = (unsigned long) frame;
417 __frame->lr = (unsigned long) &frame->retcode;
418 __frame->gr8 = sig;
419 __frame->gr9 = (unsigned long) &frame->info;
411 420
412 /* the tracer may want to single-step inside the handler */ 421 /* the tracer may want to single-step inside the handler */
413 if (test_thread_flag(TIF_SINGLESTEP)) 422 if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
422 return 0; 431 return 0;
423 432
424give_sigsegv: 433give_sigsegv:
425 force_sig(SIGSEGV, current); 434 force_sigsegv(sig, current);
426 return -EFAULT; 435 return -EFAULT;
427 436
428} /* end setup_rt_frame() */ 437} /* end setup_rt_frame() */
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
437 int ret; 446 int ret;
438 447
439 /* Are we from a system call? */ 448 /* Are we from a system call? */
440 if (in_syscall(__frame)) { 449 if (__frame->syscallno != -1) {
441 /* If so, check system call restarting.. */ 450 /* If so, check system call restarting.. */
442 switch (__frame->gr8) { 451 switch (__frame->gr8) {
443 case -ERESTART_RESTARTBLOCK: 452 case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
456 __frame->gr8 = __frame->orig_gr8; 465 __frame->gr8 = __frame->orig_gr8;
457 __frame->pc -= 4; 466 __frame->pc -= 4;
458 } 467 }
468 __frame->syscallno = -1;
459 } 469 }
460 470
461 /* Set up the stack frame */ 471 /* Set up the stack frame */
@@ -538,10 +548,11 @@ no_signal:
538 break; 548 break;
539 549
540 case -ERESTART_RESTARTBLOCK: 550 case -ERESTART_RESTARTBLOCK:
541 __frame->gr8 = __NR_restart_syscall; 551 __frame->gr7 = __NR_restart_syscall;
542 __frame->pc -= 4; 552 __frame->pc -= 4;
543 break; 553 break;
544 } 554 }
555 __frame->syscallno = -1;
545 } 556 }
546 557
547 /* if there's no signal to deliver, we just put the saved sigmask 558 /* if there's no signal to deliver, we just put the saved sigmask
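
The signal-frame changes read the whole FDPIC function descriptor with one checked copy_from_user() and only touch the frame registers after it succeeds, instead of two unchecked __get_user() calls. A userspace sketch of that shape; copy_from_user_stub() is a stand-in so the flow compiles outside the kernel:

    #include <errno.h>
    #include <string.h>

    /* Shape of the descriptor used above. */
    struct fdpic_func_descriptor {
        unsigned long text;
        unsigned long GOT;
    };

    /* Stand-in for copy_from_user(): memcpy() and report success (0). */
    static unsigned long copy_from_user_stub(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* Resolve a handler the way the fixed setup_frame() does: one checked copy
       of the whole descriptor, failing cleanly rather than leaving the
       pc/gr15 pair half-initialised. */
    static int resolve_fdpic_handler(const struct fdpic_func_descriptor *uptr,
                                     unsigned long *pc, unsigned long *got)
    {
        struct fdpic_func_descriptor desc;

        if (copy_from_user_stub(&desc, uptr, sizeof(desc)))
            return -EFAULT;        /* caller would take the give_sigsegv path */
        *pc  = desc.text;
        *got = desc.GOT;
        return 0;
    }
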
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
index 0865e291c20d..db4953dc4e1b 100644
--- a/arch/h8300/kernel/module.c
+++ b/arch/h8300/kernel/module.c
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
112 const Elf_Shdr *sechdrs, 112 const Elf_Shdr *sechdrs,
113 struct module *me) 113 struct module *me)
114{ 114{
115 return module_bug_finalize(hdr, sechdrs, me); 115 return 0;
116} 116}
117 117
118void module_arch_cleanup(struct module *mod) 118void module_arch_cleanup(struct module *mod)
119{ 119{
120 module_bug_cleanup(mod);
121} 120}
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index f90edc85b509..9301a2821615 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
199} 199}
200 200
201static __inline__ void __user * 201static __inline__ void __user *
202compat_alloc_user_space (long len) 202arch_compat_alloc_user_space (long len)
203{ 203{
204 struct pt_regs *regs = task_pt_regs(current); 204 struct pt_regs *regs = task_pt_regs(current);
205 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); 205 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3567d54f8cee..331d42bda77a 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
420 ;; 420 ;;
421 421
422 RSM_PSR_I(p0, r18, r19) // mask interrupt delivery 422 RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
423 mov ar.ccv=0
424 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP 423 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
424 mov r8=EINVAL // default to EINVAL
425 425
426#ifdef CONFIG_SMP 426#ifdef CONFIG_SMP
427 mov r17=1 427 // __ticket_spin_trylock(r31)
428 ld4 r17=[r31]
428 ;; 429 ;;
429 cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock 430 mov.m ar.ccv=r17
430 mov r8=EINVAL // default to EINVAL 431 extr.u r9=r17,17,15
432 adds r19=1,r17
433 extr.u r18=r17,0,15
434 ;;
435 cmp.eq p6,p7=r9,r18
431 ;; 436 ;;
437(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
438(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
439(p7) br.cond.spnt.many .lock_contention
440 ;;
441 cmp4.eq p0,p7=r9,r17
442 adds r31=2,r31
443(p7) br.cond.spnt.many .lock_contention
432 ld8 r3=[r2] // re-read current->blocked now that we hold the lock 444 ld8 r3=[r2] // re-read current->blocked now that we hold the lock
433 cmp4.ne p6,p0=r18,r0
434(p6) br.cond.spnt.many .lock_contention
435 ;; 445 ;;
436#else 446#else
437 ld8 r3=[r2] // re-read current->blocked now that we hold the lock 447 ld8 r3=[r2] // re-read current->blocked now that we hold the lock
438 mov r8=EINVAL // default to EINVAL
439#endif 448#endif
440 add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 449 add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
441 add r19=IA64_TASK_SIGNAL_OFFSET,r16 450 add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
490(p6) br.cond.spnt.few 1b // yes -> retry 499(p6) br.cond.spnt.few 1b // yes -> retry
491 500
492#ifdef CONFIG_SMP 501#ifdef CONFIG_SMP
493 st4.rel [r31]=r0 // release the lock 502 // __ticket_spin_unlock(r31)
503 st2.rel [r31]=r20
504 mov r20=0 // i must not leak kernel bits...
494#endif 505#endif
495 SSM_PSR_I(p0, p9, r31) 506 SSM_PSR_I(p0, p9, r31)
496 ;; 507 ;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
512 523
513.sig_pending: 524.sig_pending:
514#ifdef CONFIG_SMP 525#ifdef CONFIG_SMP
515 st4.rel [r31]=r0 // release the lock 526 // __ticket_spin_unlock(r31)
527 st2.rel [r31]=r20 // release the lock
516#endif 528#endif
517 SSM_PSR_I(p0, p9, r17) 529 SSM_PSR_I(p0, p9, r17)
518 ;; 530 ;;
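
The fsys sigprocmask path now open-codes __ticket_spin_trylock()/unlock() rather than a plain 0/1 cmpxchg. Reading the field positions off the extr.u/dep.z operands, the lock word keeps the ticket dispenser in bits 0..14 and the now-serving number in bits 17..31; unlock (st2.rel) rewrites only the upper halfword. A C-level sketch of the trylock half, offered as a reading of that assembly rather than as the official ia64 lock layout:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ticket_lock { _Atomic uint32_t word; };

    /* Grab a ticket only if nobody is waiting: dispenser == now-serving.
       The whole-word CAS mirrors the cmpxchg4.acq that stores old+1 above. */
    static bool ticket_trylock(struct ticket_lock *l)
    {
        uint32_t old = atomic_load(&l->word);
        uint32_t dispenser = old & 0x7fff;          /* next free ticket */
        uint32_t serving   = (old >> 17) & 0x7fff;  /* ticket being served */

        if (dispenser != serving)
            return false;                           /* contended: caller takes the slow path */
        return atomic_compare_exchange_strong(&l->word, &old, old + 1);
    }
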
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index 9c1acb2b1a92..b2eeb0de1c8d 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
157#undef __HAVE_ARCH_SIG_BITOPS 157#undef __HAVE_ARCH_SIG_BITOPS
158 158
159struct pt_regs; 159struct pt_regs;
160extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
161 160
162#define ptrace_signal_deliver(regs, cookie) do { } while (0) 161#define ptrace_signal_deliver(regs, cookie) do { } while (0)
163 162
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 76125777483c..c70545689da8 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -351,6 +351,7 @@
351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ 351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
352#define __ARCH_WANT_SYS_OLDUMOUNT 352#define __ARCH_WANT_SYS_OLDUMOUNT
353#define __ARCH_WANT_SYS_RT_SIGACTION 353#define __ARCH_WANT_SYS_RT_SIGACTION
354#define __ARCH_WANT_SYS_RT_SIGSUSPEND
354 355
355#define __IGNORE_lchown 356#define __IGNORE_lchown
356#define __IGNORE_setuid 357#define __IGNORE_setuid
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 403869833b98..225412bc227e 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@ work_resched:
235work_notifysig: ; deal with pending signals and 235work_notifysig: ; deal with pending signals and
236 ; notify-resume requests 236 ; notify-resume requests
237 mv r0, sp ; arg1 : struct pt_regs *regs 237 mv r0, sp ; arg1 : struct pt_regs *regs
238 ldi r1, #0 ; arg2 : sigset_t *oldset 238 mv r1, r9 ; arg2 : __u32 thread_info_flags
239 mv r2, r9 ; arg3 : __u32 thread_info_flags
240 bl do_notify_resume 239 bl do_notify_resume
241 bra restore_all 240 bra resume_userspace
242 241
243 ; perform syscall exit tracing 242 ; perform syscall exit tracing
244 ALIGN 243 ALIGN
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index e555091eb97c..0021ade4cba8 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
592 592
593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) 593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
594 != sizeof(insn)) 594 != sizeof(insn))
595 break; 595 return -EIO;
596 596
597 compute_next_pc(insn, pc, &next_pc, child); 597 compute_next_pc(insn, pc, &next_pc, child);
598 if (next_pc & 0x80000000) 598 if (next_pc & 0x80000000)
599 break; 599 return -EIO;
600 600
601 if (embed_debug_trap(child, next_pc)) 601 if (embed_debug_trap(child, next_pc))
602 break; 602 return -EIO;
603 603
604 invalidate_cache(); 604 invalidate_cache();
605 return 0;
605} 606}
606 607
607void user_disable_single_step(struct task_struct *child) 608void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f124fc7..7bbe38645ed5 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,37 +28,6 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33int do_signal(struct pt_regs *, sigset_t *);
34
35asmlinkage int
36sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
37 unsigned long r2, unsigned long r3, unsigned long r4,
38 unsigned long r5, unsigned long r6, struct pt_regs *regs)
39{
40 sigset_t newset;
41
42 /* XXX: Don't preclude handling different sized sigset_t's. */
43 if (sigsetsize != sizeof(sigset_t))
44 return -EINVAL;
45
46 if (copy_from_user(&newset, unewset, sizeof(newset)))
47 return -EFAULT;
48 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
49
50 spin_lock_irq(&current->sighand->siglock);
51 current->saved_sigmask = current->blocked;
52 current->blocked = newset;
53 recalc_sigpending();
54 spin_unlock_irq(&current->sighand->siglock);
55
56 current->state = TASK_INTERRUPTIBLE;
57 schedule();
58 set_thread_flag(TIF_RESTORE_SIGMASK);
59 return -ERESTARTNOHAND;
60}
61
62asmlinkage int 31asmlinkage int
63sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 32sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
64 unsigned long r2, unsigned long r3, unsigned long r4, 33 unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
218 return (void __user *)((sp - frame_size) & -8ul); 187 return (void __user *)((sp - frame_size) & -8ul);
219} 188}
220 189
221static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 190static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
222 sigset_t *set, struct pt_regs *regs) 191 sigset_t *set, struct pt_regs *regs)
223{ 192{
224 struct rt_sigframe __user *frame; 193 struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
275 current->comm, current->pid, frame, regs->pc); 244 current->comm, current->pid, frame, regs->pc);
276#endif 245#endif
277 246
278 return; 247 return 0;
279 248
280give_sigsegv: 249give_sigsegv:
281 force_sigsegv(sig, current); 250 force_sigsegv(sig, current);
251 return -EFAULT;
252}
253
254static int prev_insn(struct pt_regs *regs)
255{
256 u16 inst;
257 if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
258 return -EFAULT;
259 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
260 regs->bpc -= 2;
261 else
262 regs->bpc -= 4;
263 regs->syscall_nr = -1;
264 return 0;
282} 265}
283 266
284/* 267/*
285 * OK, we're invoking a handler 268 * OK, we're invoking a handler
286 */ 269 */
287 270
288static void 271static int
289handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 272handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
290 sigset_t *oldset, struct pt_regs *regs) 273 sigset_t *oldset, struct pt_regs *regs)
291{ 274{
292 unsigned short inst;
293
294 /* Are we from a system call? */ 275 /* Are we from a system call? */
295 if (regs->syscall_nr >= 0) { 276 if (regs->syscall_nr >= 0) {
296 /* If so, check system call restarting.. */ 277 /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
308 /* fallthrough */ 289 /* fallthrough */
309 case -ERESTARTNOINTR: 290 case -ERESTARTNOINTR:
310 regs->r0 = regs->orig_r0; 291 regs->r0 = regs->orig_r0;
311 inst = *(unsigned short *)(regs->bpc - 2); 292 if (prev_insn(regs) < 0)
312 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 293 return -EFAULT;
313 regs->bpc -= 2;
314 else
315 regs->bpc -= 4;
316 } 294 }
317 } 295 }
318 296
319 /* Set up the stack frame */ 297 /* Set up the stack frame */
320 setup_rt_frame(sig, ka, info, oldset, regs); 298 if (setup_rt_frame(sig, ka, info, oldset, regs))
299 return -EFAULT;
321 300
322 spin_lock_irq(&current->sighand->siglock); 301 spin_lock_irq(&current->sighand->siglock);
323 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 302 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
325 sigaddset(&current->blocked,sig); 304 sigaddset(&current->blocked,sig);
326 recalc_sigpending(); 305 recalc_sigpending();
327 spin_unlock_irq(&current->sighand->siglock); 306 spin_unlock_irq(&current->sighand->siglock);
307 return 0;
328} 308}
329 309
330/* 310/*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
332 * want to handle. Thus you cannot kill init even with a SIGKILL even by 312 * want to handle. Thus you cannot kill init even with a SIGKILL even by
333 * mistake. 313 * mistake.
334 */ 314 */
335int do_signal(struct pt_regs *regs, sigset_t *oldset) 315static void do_signal(struct pt_regs *regs)
336{ 316{
337 siginfo_t info; 317 siginfo_t info;
338 int signr; 318 int signr;
339 struct k_sigaction ka; 319 struct k_sigaction ka;
340 unsigned short inst; 320 sigset_t *oldset;
341 321
342 /* 322 /*
343 * We want the common case to go fast, which 323 * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
346 * if so. 326 * if so.
347 */ 327 */
348 if (!user_mode(regs)) 328 if (!user_mode(regs))
349 return 1; 329 return;
350 330
351 if (try_to_freeze()) 331 if (try_to_freeze())
352 goto no_signal; 332 goto no_signal;
353 333
354 if (!oldset) 334 if (test_thread_flag(TIF_RESTORE_SIGMASK))
335 oldset = &current->saved_sigmask;
336 else
355 oldset = &current->blocked; 337 oldset = &current->blocked;
356 338
357 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 339 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
363 */ 345 */
364 346
365 /* Whee! Actually deliver the signal. */ 347 /* Whee! Actually deliver the signal. */
366 handle_signal(signr, &ka, &info, oldset, regs); 348 if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
367 return 1; 349 clear_thread_flag(TIF_RESTORE_SIGMASK);
350
351 return;
368 } 352 }
369 353
370 no_signal: 354 no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
375 regs->r0 == -ERESTARTSYS || 359 regs->r0 == -ERESTARTSYS ||
376 regs->r0 == -ERESTARTNOINTR) { 360 regs->r0 == -ERESTARTNOINTR) {
377 regs->r0 = regs->orig_r0; 361 regs->r0 = regs->orig_r0;
378 inst = *(unsigned short *)(regs->bpc - 2); 362 prev_insn(regs);
379 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 363 } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
380 regs->bpc -= 2;
381 else
382 regs->bpc -= 4;
383 }
384 if (regs->r0 == -ERESTART_RESTARTBLOCK){
385 regs->r0 = regs->orig_r0; 364 regs->r0 = regs->orig_r0;
386 regs->r7 = __NR_restart_syscall; 365 regs->r7 = __NR_restart_syscall;
387 inst = *(unsigned short *)(regs->bpc - 2); 366 prev_insn(regs);
388 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
389 regs->bpc -= 2;
390 else
391 regs->bpc -= 4;
392 } 367 }
393 } 368 }
394 return 0; 369 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
370 clear_thread_flag(TIF_RESTORE_SIGMASK);
371 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
372 }
395} 373}
396 374
397/* 375/*
398 * notification of userspace execution resumption 376 * notification of userspace execution resumption
399 * - triggered by current->work.notify_resume 377 * - triggered by current->work.notify_resume
400 */ 378 */
401void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, 379void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
402 __u32 thread_info_flags)
403{ 380{
404 /* Pending single-step? */ 381 /* Pending single-step? */
405 if (thread_info_flags & _TIF_SINGLESTEP) 382 if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
407 384
408 /* deal with pending signal delivery */ 385 /* deal with pending signal delivery */
409 if (thread_info_flags & _TIF_SIGPENDING) 386 if (thread_info_flags & _TIF_SIGPENDING)
410 do_signal(regs,oldset); 387 do_signal(regs);
411 388
412 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 389 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
413 clear_thread_flag(TIF_NOTIFY_RESUME); 390 clear_thread_flag(TIF_NOTIFY_RESUME);
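
prev_insn() centralises the restart rewind that used to be copied in two places: read the 16-bit word just before bpc, step back 2 bytes for the short trap encoding (0x10fX) or 4 bytes otherwise, and invalidate syscall_nr. The rewind arithmetic on its own, with the halfword passed in rather than fetched with get_user():

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long rewind_syscall_pc(unsigned long bpc, uint16_t prev_halfword)
    {
        if ((prev_halfword & 0xfff0) == 0x10f0)   /* short "trap" form */
            return bpc - 2;
        return bpc - 4;                           /* long form */
    }

    int main(void)
    {
        printf("%#lx\n", rewind_syscall_pc(0x1000, 0x10f2)); /* -> 0xffe */
        printf("%#lx\n", rewind_syscall_pc(0x1000, 0x1234)); /* -> 0xffc */
        return 0;
    }
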
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 60b15d0aa072..b43b36beafe3 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -340,10 +340,13 @@
340#define __NR_set_thread_area 334 340#define __NR_set_thread_area 334
341#define __NR_atomic_cmpxchg_32 335 341#define __NR_atomic_cmpxchg_32 335
342#define __NR_atomic_barrier 336 342#define __NR_atomic_barrier 336
343#define __NR_fanotify_init 337
344#define __NR_fanotify_mark 338
345#define __NR_prlimit64 339
343 346
344#ifdef __KERNEL__ 347#ifdef __KERNEL__
345 348
346#define NR_syscalls 337 349#define NR_syscalls 340
347 350
348#define __ARCH_WANT_IPC_PARSE_VERSION 351#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR 352#define __ARCH_WANT_OLD_READDIR
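
With __NR_prlimit64 wired up as syscall 339 on m68k, userspace can exercise the new call directly before the C library grows a wrapper. A hedged example follows; the RLIMIT_NOFILE value and the struct layout mirror the generic kernel definitions and are assumptions here, and the syscall number must be adjusted on other architectures.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_prlimit64
#define __NR_prlimit64 339		/* m68k number added above */
#endif
#define EXAMPLE_RLIMIT_NOFILE 7		/* assumption: generic value */

struct example_rlimit64 {		/* mirrors the kernel's struct rlimit64 */
	uint64_t rlim_cur;
	uint64_t rlim_max;
};

int main(void)
{
	struct example_rlimit64 old;

	/* pid 0 = calling process, new limit NULL = query only */
	if (syscall(__NR_prlimit64, 0, EXAMPLE_RLIMIT_NOFILE, NULL, &old))
		return 1;

	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}
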
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 2391bdff0996..6360c437dcf5 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -765,4 +765,7 @@ sys_call_table:
765 .long sys_set_thread_area 765 .long sys_set_thread_area
766 .long sys_atomic_cmpxchg_32 /* 335 */ 766 .long sys_atomic_cmpxchg_32 /* 335 */
767 .long sys_atomic_barrier 767 .long sys_atomic_barrier
768 .long sys_fanotify_init
769 .long sys_fanotify_mark
770 .long sys_prlimit64
768 771
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index 8f0640847ad2..05285d08e547 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
162void mac_mksound( unsigned int freq, unsigned int length ) 162void mac_mksound( unsigned int freq, unsigned int length )
163{ 163{
164 __u32 cfreq = ( freq << 5 ) / 468; 164 __u32 cfreq = ( freq << 5 ) / 468;
165 __u32 flags; 165 unsigned long flags;
166 int i; 166 int i;
167 167
168 if ( mac_special_bell == NULL ) 168 if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
224 */ 224 */
225static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) 225static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
226{ 226{
227 __u32 flags; 227 unsigned long flags;
228 228
229 /* if the bell is already ringing, ring longer */ 229 /* if the bell is already ringing, ring longer */
230 if ( mac_bell_duration > 0 ) 230 if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
271static void mac_quadra_ring_bell( unsigned long ignored ) 271static void mac_quadra_ring_bell( unsigned long ignored )
272{ 272{
273 int i, count = mac_asc_samplespersec / HZ; 273 int i, count = mac_asc_samplespersec / HZ;
274 __u32 flags; 274 unsigned long flags;
275 275
276 /* 276 /*
277 * we neither want a sound buffer overflow nor underflow, so we need to match 277 * we neither want a sound buffer overflow nor underflow, so we need to match
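
The macboing.c change is purely a type fix: the flags word saved by local_irq_save() is specified to be unsigned long, and the generic irqflags macros typecheck it, so a __u32 local draws warnings even where the value happens to fit. The corrected pattern, sketched out of context:

#include <linux/irqflags.h>

static void example_critical_section(void)
{
	unsigned long flags;		/* must be unsigned long, not __u32 */

	local_irq_save(flags);
	/* ... poke the ASC sound-chip registers here ... */
	local_irq_restore(flags);
}
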
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index b30b3eb197a5..79b1ed198c07 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -355,6 +355,9 @@ ENTRY(sys_call_table)
355 .long sys_set_thread_area 355 .long sys_set_thread_area
356 .long sys_atomic_cmpxchg_32 /* 335 */ 356 .long sys_atomic_cmpxchg_32 /* 335 */
357 .long sys_atomic_barrier 357 .long sys_atomic_barrier
358 .long sys_fanotify_init
359 .long sys_fanotify_mark
360 .long sys_prlimit64
358 361
359 .rept NR_syscalls-(.-sys_call_table)/4 362 .rept NR_syscalls-(.-sys_call_table)/4
360 .long sys_ni_syscall 363 .long sys_ni_syscall
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3ad59dde4852..5526faabfc21 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,6 +13,7 @@ config MIPS
13 select HAVE_KPROBES 13 select HAVE_KPROBES
14 select HAVE_KRETPROBES 14 select HAVE_KRETPROBES
15 select RTC_LIB if !MACH_LOONGSON 15 select RTC_LIB if !MACH_LOONGSON
16 select GENERIC_ATOMIC64 if !64BIT
16 17
17mainmenu "Linux/MIPS Kernel Configuration" 18mainmenu "Linux/MIPS Kernel Configuration"
18 19
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
1646 select SYS_SUPPORTS_SMP 1647 select SYS_SUPPORTS_SMP
1647 select SMP_UP 1648 select SMP_UP
1648 help 1649 help
1649 This is a kernel model which is also known a VSMP or lately 1650 This is a kernel model which is known a VSMP but lately has been
1650 has been marketesed into SMVP. 1651 marketesed into SMVP.
1652 Virtual SMP uses the processor's VPEs to implement virtual
1653 processors. In currently available configuration of the 34K processor
1654 this allows for a dual processor. Both processors will share the same
1655 primary caches; each will obtain the half of the TLB for it's own
1656 exclusive use. For a layman this model can be described as similar to
1657 what Intel calls Hyperthreading.
1658
1659 For further information see http://www.linux-mips.org/wiki/34K#VSMP
1651 1660
1652config MIPS_MT_SMTC 1661config MIPS_MT_SMTC
1653 bool "SMTC: Use all TCs on all VPEs for SMP" 1662 bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
1664 help 1673 help
1665 This is a kernel model which is known a SMTC or lately has been 1674 This is a kernel model which is known a SMTC or lately has been
1666 marketesed into SMVP. 1675 marketesed into SMVP.
1676 is presenting the available TC's of the core as processors to Linux.
1677 On currently available 34K processors this means a Linux system will
1678 see up to 5 processors. The implementation of the SMTC kernel differs
1679 significantly from VSMP and cannot efficiently coexist in the same
1680 kernel binary so the choice between VSMP and SMTC is a compile time
1681 decision.
1682
1683 For further information see http://www.linux-mips.org/wiki/34K#SMTC
1667 1684
1668endchoice 1685endchoice
1669 1686
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
index c29511b11d44..534021059629 100644
--- a/arch/mips/alchemy/common/prom.c
+++ b/arch/mips/alchemy/common/prom.c
@@ -43,7 +43,7 @@ int prom_argc;
43char **prom_argv; 43char **prom_argv;
44char **prom_envp; 44char **prom_envp;
45 45
46void prom_init_cmdline(void) 46void __init prom_init_cmdline(void)
47{ 47{
48 int i; 48 int i;
49 49
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
104 } 104 }
105} 105}
106 106
107int prom_get_ethernet_addr(char *ethernet_addr) 107int __init prom_get_ethernet_addr(char *ethernet_addr)
108{ 108{
109 char *ethaddr_str; 109 char *ethaddr_str;
110 110
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
123 123
124 return 0; 124 return 0;
125} 125}
126EXPORT_SYMBOL(prom_get_ethernet_addr);
127 126
128void __init prom_free_prom_memory(void) 127void __init prom_free_prom_memory(void)
129{ 128{
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index ed9bb709c9a3..5fd7f7a58b7e 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
59hostprogs-y := calc_vmlinuz_load_addr 59hostprogs-y := calc_vmlinuz_load_addr
60 60
61VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ 61VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
62 $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) 62 $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
63 63
64vmlinuzobjs-y += $(obj)/piggy.o 64vmlinuzobjs-y += $(obj)/piggy.o
65 65
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 094c17e38e16..47323ca452dc 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
83 def_bool y 83 def_bool y
84 select SPARSEMEM_STATIC 84 select SPARSEMEM_STATIC
85 depends on CPU_CAVIUM_OCTEON 85 depends on CPU_CAVIUM_OCTEON
86
87config CAVIUM_OCTEON_HELPER
88 def_bool y
89 depends on OCTEON_ETHERNET || PCI
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index c664c8cc2b42..a5b427909b5c 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
41 return NOTIFY_OK; /* Let default notifier send signals */ 41 return NOTIFY_OK; /* Let default notifier send signals */
42} 42}
43 43
44static int cnmips_cu2_setup(void) 44static int __init cnmips_cu2_setup(void)
45{ 45{
46 return cu2_notifier(cnmips_cu2_call, 0); 46 return cu2_notifier(cnmips_cu2_call, 0);
47} 47}
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
index 2fd66db6939e..7f41c5be2190 100644
--- a/arch/mips/cavium-octeon/executive/Makefile
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -11,4 +11,4 @@
11 11
12obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o 12obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
13 13
14obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o 14obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index c63c56bfd184..47d87da379f9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
782 */ 782 */
783#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) 783#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
784 784
785#else /* !CONFIG_64BIT */
786
787#include <asm-generic/atomic64.h>
788
785#endif /* CONFIG_64BIT */ 789#endif /* CONFIG_64BIT */
786 790
787/* 791/*
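
Together with the GENERIC_ATOMIC64 select added to arch/mips/Kconfig earlier in this series, pulling in asm-generic/atomic64.h gives 32-bit MIPS kernels a working atomic64_t backed by the hashed-spinlock implementation in lib/atomic64.c. Callers keep using the normal API; a minimal sketch:

#include <linux/types.h>
#include <asm/atomic.h>

/* 64-bit counter that now builds on 32-bit MIPS as well; the generic
 * implementation serialises access with a spinlock behind the scenes. */
static atomic64_t example_bytes_rx = ATOMIC64_INIT(0);

static void example_account(unsigned int len)
{
	atomic64_add(len, &example_bytes_rx);
}

static u64 example_total(void)
{
	return atomic64_read(&example_bytes_rx);
}
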
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 613f6912dfc1..dbc51065df5b 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
145 return (u32)(unsigned long)uptr; 145 return (u32)(unsigned long)uptr;
146} 146}
147 147
148static inline void __user *compat_alloc_user_space(long len) 148static inline void __user *arch_compat_alloc_user_space(long len)
149{ 149{
150 struct pt_regs *regs = (struct pt_regs *) 150 struct pt_regs *regs = (struct pt_regs *)
151 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; 151 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
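
This rename (repeated below for parisc, powerpc, s390 and sparc) suggests that compat_alloc_user_space() itself becomes a generic wrapper around the per-arch hook, presumably so common code can validate the returned range once. A hedged sketch of what such a wrapper could look like; it is not taken from this diff:

#include <linux/uaccess.h>

/* Hypothetical shape of the generic wrapper the rename implies; the
 * arch hook above only computes a stack address, so a common caller
 * can sanity-check it once. */
static inline void __user *example_compat_alloc_user_space(unsigned long len)
{
	void __user *ptr = arch_compat_alloc_user_space(len);

	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}
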
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index 2cb2f0c2c4f8..3532e2c5f098 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
24 24
25#define cu2_notifier(fn, pri) \ 25#define cu2_notifier(fn, pri) \
26({ \ 26({ \
27 static struct notifier_block fn##_nb __cpuinitdata = { \ 27 static struct notifier_block fn##_nb = { \
28 .notifier_call = fn, \ 28 .notifier_call = fn, \
29 .priority = pri \ 29 .priority = pri \
30 }; \ 30 }; \
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 9b9436a4d816..86548da650e7 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
321 */ 321 */
322struct gic_intr_map { 322struct gic_intr_map {
323 unsigned int cpunum; /* Directed to this CPU */ 323 unsigned int cpunum; /* Directed to this CPU */
324#define GIC_UNUSED 0xdead /* Dummy data */
324 unsigned int pin; /* Directed to this Pin */ 325 unsigned int pin; /* Directed to this Pin */
325 unsigned int polarity; /* Polarity : +/- */ 326 unsigned int polarity; /* Polarity : +/- */
326 unsigned int trigtype; /* Trigger : Edge/Levl */ 327 unsigned int trigtype; /* Trigger : Edge/Levl */
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
index b74caf65482b..ff9a8b86cb93 100644
--- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h
+++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
@@ -1,6 +1,6 @@
1#ifndef __ASM_MACH_TX49XX_KMALLOC_H 1#ifndef __ASM_MACH_TX49XX_KMALLOC_H
2#define __ASM_MACH_TX49XX_KMALLOC_H 2#define __ASM_MACH_TX49XX_KMALLOC_H
3 3
4#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES 4#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5 5
6#endif /* __ASM_MACH_TX49XX_KMALLOC_H */ 6#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index cea872fc6f5c..d11aa02a956a 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -88,9 +88,6 @@
88 88
89#define GIC_EXT_INTR(x) x 89#define GIC_EXT_INTR(x) x
90 90
91/* Dummy data */
92#define X 0xdead
93
94/* External Interrupts used for IPI */ 91/* External Interrupts used for IPI */
95#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 92#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
96#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 93#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index a16beafcea91..e59cd1ac09c2 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
151#endif 151#endif
152#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 152#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
153
154/*
155 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
156 * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
157 * discussion can be found in lkml posting
158 * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
159 * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
160 *
161 * It is unclear if the misscompilations mentioned in
162 * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
163 * until GCC 3.x has been retired before we can apply
164 * https://patchwork.linux-mips.org/patch/1541/
165 */
166
153#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 167#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
154 168
155#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 169#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 2376f2e06e47..70df9c0d3c5b 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
146#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 146#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
147 147
148/* work to do on interrupt/exception return */ 148/* work to do on interrupt/exception return */
149#define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) 149#define _TIF_WORK_MASK (0x0000ffef & \
150 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
150/* work to do on any return to u-space */ 151/* work to do on any return to u-space */
151#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) 152#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
152 153
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index baa318a59c97..550725b881d5 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -356,16 +356,19 @@
356#define __NR_perf_event_open (__NR_Linux + 333) 356#define __NR_perf_event_open (__NR_Linux + 333)
357#define __NR_accept4 (__NR_Linux + 334) 357#define __NR_accept4 (__NR_Linux + 334)
358#define __NR_recvmmsg (__NR_Linux + 335) 358#define __NR_recvmmsg (__NR_Linux + 335)
359#define __NR_fanotify_init (__NR_Linux + 336)
360#define __NR_fanotify_mark (__NR_Linux + 337)
361#define __NR_prlimit64 (__NR_Linux + 338)
359 362
360/* 363/*
361 * Offset of the last Linux o32 flavoured syscall 364 * Offset of the last Linux o32 flavoured syscall
362 */ 365 */
363#define __NR_Linux_syscalls 335 366#define __NR_Linux_syscalls 338
364 367
365#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 368#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
366 369
367#define __NR_O32_Linux 4000 370#define __NR_O32_Linux 4000
368#define __NR_O32_Linux_syscalls 335 371#define __NR_O32_Linux_syscalls 338
369 372
370#if _MIPS_SIM == _MIPS_SIM_ABI64 373#if _MIPS_SIM == _MIPS_SIM_ABI64
371 374
@@ -668,16 +671,19 @@
668#define __NR_perf_event_open (__NR_Linux + 292) 671#define __NR_perf_event_open (__NR_Linux + 292)
669#define __NR_accept4 (__NR_Linux + 293) 672#define __NR_accept4 (__NR_Linux + 293)
670#define __NR_recvmmsg (__NR_Linux + 294) 673#define __NR_recvmmsg (__NR_Linux + 294)
674#define __NR_fanotify_init (__NR_Linux + 295)
675#define __NR_fanotify_mark (__NR_Linux + 296)
676#define __NR_prlimit64 (__NR_Linux + 297)
671 677
672/* 678/*
673 * Offset of the last Linux 64-bit flavoured syscall 679 * Offset of the last Linux 64-bit flavoured syscall
674 */ 680 */
675#define __NR_Linux_syscalls 294 681#define __NR_Linux_syscalls 297
676 682
677#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 683#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
678 684
679#define __NR_64_Linux 5000 685#define __NR_64_Linux 5000
680#define __NR_64_Linux_syscalls 294 686#define __NR_64_Linux_syscalls 297
681 687
682#if _MIPS_SIM == _MIPS_SIM_NABI32 688#if _MIPS_SIM == _MIPS_SIM_NABI32
683 689
@@ -985,16 +991,19 @@
985#define __NR_accept4 (__NR_Linux + 297) 991#define __NR_accept4 (__NR_Linux + 297)
986#define __NR_recvmmsg (__NR_Linux + 298) 992#define __NR_recvmmsg (__NR_Linux + 298)
987#define __NR_getdents64 (__NR_Linux + 299) 993#define __NR_getdents64 (__NR_Linux + 299)
994#define __NR_fanotify_init (__NR_Linux + 300)
995#define __NR_fanotify_mark (__NR_Linux + 301)
996#define __NR_prlimit64 (__NR_Linux + 302)
988 997
989/* 998/*
990 * Offset of the last N32 flavoured syscall 999 * Offset of the last N32 flavoured syscall
991 */ 1000 */
992#define __NR_Linux_syscalls 299 1001#define __NR_Linux_syscalls 302
993 1002
994#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1003#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
995 1004
996#define __NR_N32_Linux 6000 1005#define __NR_N32_Linux 6000
997#define __NR_N32_Linux_syscalls 299 1006#define __NR_N32_Linux_syscalls 302
998 1007
999#ifdef __KERNEL__ 1008#ifdef __KERNEL__
1000 1009
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index b181f2f0ea8e..82ba9f62f49e 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -7,7 +7,6 @@
7#include <asm/io.h> 7#include <asm/io.h>
8#include <asm/gic.h> 8#include <asm/gic.h>
9#include <asm/gcmpregs.h> 9#include <asm/gcmpregs.h>
10#include <asm/mips-boards/maltaint.h>
11#include <asm/irq.h> 10#include <asm/irq.h>
12#include <linux/hardirq.h> 11#include <linux/hardirq.h>
13#include <asm-generic/bitops/find.h> 12#include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
131 int i; 130 int i;
132 131
133 irq -= _irqbase; 132 irq -= _irqbase;
134 pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); 133 pr_debug("%s(%d) called\n", __func__, irq);
135 cpumask_and(&tmp, cpumask, cpu_online_mask); 134 cpumask_and(&tmp, cpumask, cpu_online_mask);
136 if (cpus_empty(tmp)) 135 if (cpus_empty(tmp))
137 return -1; 136 return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
222 /* Setup specifics */ 221 /* Setup specifics */
223 for (i = 0; i < mapsize; i++) { 222 for (i = 0; i < mapsize; i++) {
224 cpu = intrmap[i].cpunum; 223 cpu = intrmap[i].cpunum;
225 if (cpu == X) 224 if (cpu == GIC_UNUSED)
226 continue; 225 continue;
227 if (cpu == 0 && i != 0 && intrmap[i].flags == 0) 226 if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
228 continue; 227 continue;
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 1f4e2fa64140..f4546e97c60d 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
283 struct pt_regs *regs = args->regs; 283 struct pt_regs *regs = args->regs;
284 int trap = (regs->cp0_cause & 0x7c) >> 2; 284 int trap = (regs->cp0_cause & 0x7c) >> 2;
285 285
286 /* Userpace events, ignore. */ 286 /* Userspace events, ignore. */
287 if (user_mode(regs)) 287 if (user_mode(regs))
288 return NOTIFY_DONE; 288 return NOTIFY_DONE;
289 289
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 80e2ba694bab..29811f043399 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
251 memset(&tz, 0, sizeof(tz)); 251 memset(&tz, 0, sizeof(tz));
252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, 252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
253 (int)&tz, 0, 0)) == 0) 253 (int)&tz, 0, 0)) == 0)
254 ret.retval = tv.tv_sec; 254 ret.retval = tv.tv_sec;
255 break; 255 break;
256 256
257 case MTSP_SYSCALL_EXIT: 257 case MTSP_SYSCALL_EXIT:
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index c2dab140dc98..6343b4a5b835 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
341{ 341{
342 return sys_lookup_dcookie(merge_64(a0, a1), buf, len); 342 return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
343} 343}
344
345SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
346 u64, a3, u64, a4, int, dfd, const char __user *, pathname)
347{
348 return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
349 dfd, pathname);
350}
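
The new compat wrapper exists because the o32 ABI splits a 64-bit argument across two 32-bit argument slots, so fanotify_mark's mask arrives in a3/a4 and has to be reassembled before calling the native sys_fanotify_mark(). merge_64() is defined earlier in linux32.c and is not shown in this hunk; a hedged sketch of what it plausibly does:

/* Illustrative only; the real macro lives elsewhere in linux32.c. */
static inline u64 example_merge_64(u32 first_reg, u32 second_reg)
{
#ifdef __MIPSEB__
	/* big endian: the first register carries the high word */
	return ((u64)first_reg << 32) | second_reg;
#else
	/* little endian: the first register carries the low word */
	return ((u64)second_reg << 32) | first_reg;
#endif
}
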
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 17202bbe843f..584415eef8c9 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -583,7 +583,10 @@ einval: li v0, -ENOSYS
583 sys sys_rt_tgsigqueueinfo 4 583 sys sys_rt_tgsigqueueinfo 4
584 sys sys_perf_event_open 5 584 sys sys_perf_event_open 5
585 sys sys_accept4 4 585 sys sys_accept4 4
586 sys sys_recvmmsg 5 586 sys sys_recvmmsg 5 /* 4335 */
587 sys sys_fanotify_init 2
588 sys sys_fanotify_mark 6
589 sys sys_prlimit64 4
587 .endm 590 .endm
588 591
589 /* We pre-compute the number of _instruction_ bytes needed to 592 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a8a6c596eb04..5573f8e4e326 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -416,9 +416,12 @@ sys_call_table:
416 PTR sys_pipe2 416 PTR sys_pipe2
417 PTR sys_inotify_init1 417 PTR sys_inotify_init1
418 PTR sys_preadv 418 PTR sys_preadv
419 PTR sys_pwritev /* 5390 */ 419 PTR sys_pwritev /* 5290 */
420 PTR sys_rt_tgsigqueueinfo 420 PTR sys_rt_tgsigqueueinfo
421 PTR sys_perf_event_open 421 PTR sys_perf_event_open
422 PTR sys_accept4 422 PTR sys_accept4
423 PTR sys_recvmmsg 423 PTR sys_recvmmsg
424 PTR sys_fanotify_init /* 5295 */
425 PTR sys_fanotify_mark
426 PTR sys_prlimit64
424 .size sys_call_table,.-sys_call_table 427 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a3d66137731a..1e38ec97672e 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
419 PTR sys_perf_event_open 419 PTR sys_perf_event_open
420 PTR sys_accept4 420 PTR sys_accept4
421 PTR compat_sys_recvmmsg 421 PTR compat_sys_recvmmsg
422 PTR sys_getdents 422 PTR sys_getdents64
423 PTR sys_fanotify_init /* 6300 */
424 PTR sys_fanotify_mark
425 PTR sys_prlimit64
423 .size sysn32_call_table,.-sysn32_call_table 426 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 813689ef2384..171979fc98e5 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -538,5 +538,8 @@ sys_call_table:
538 PTR compat_sys_rt_tgsigqueueinfo 538 PTR compat_sys_rt_tgsigqueueinfo
539 PTR sys_perf_event_open 539 PTR sys_perf_event_open
540 PTR sys_accept4 540 PTR sys_accept4
541 PTR compat_sys_recvmmsg 541 PTR compat_sys_recvmmsg /* 4335 */
542 PTR sys_fanotify_init
543 PTR sys_32_fanotify_mark
544 PTR sys_prlimit64
542 .size sys_call_table,.-sys_call_table 545 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 7ba890860d98..469d4019f795 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
44 44
45static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) 45static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
46{ 46{
47 gfp_t dma_flag;
48
47 /* ignore region specifiers */ 49 /* ignore region specifiers */
48 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 50 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
49 51
50#ifdef CONFIG_ZONE_DMA 52#ifdef CONFIG_ISA
51 if (dev == NULL) 53 if (dev == NULL)
52 gfp |= __GFP_DMA; 54 dma_flag = __GFP_DMA;
53 else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
54 gfp |= __GFP_DMA;
55 else 55 else
56#endif 56#endif
57#ifdef CONFIG_ZONE_DMA32 57#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
58 if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) 58 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
59 gfp |= __GFP_DMA32; 59 dma_flag = __GFP_DMA;
60 else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
61 dma_flag = __GFP_DMA32;
62 else
63#endif
64#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
65 if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
66 dma_flag = __GFP_DMA32;
67 else
68#endif
69#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
70 if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
71 dma_flag = __GFP_DMA;
60 else 72 else
61#endif 73#endif
62 ; 74 dma_flag = 0;
63 75
64 /* Don't invoke OOM killer */ 76 /* Don't invoke OOM killer */
65 gfp |= __GFP_NORETRY; 77 gfp |= __GFP_NORETRY;
66 78
67 return gfp; 79 return gfp | dma_flag;
68} 80}
69 81
70void *dma_alloc_noncoherent(struct device *dev, size_t size, 82void *dma_alloc_noncoherent(struct device *dev, size_t size,
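
The reworked massage_gfp_flags() now picks the GFP zone flag from the device's coherent_dma_mask, with every combination of CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 handled explicitly, where the old code could fall through without selecting a zone. From a driver's point of view nothing changes except that a small mask is honoured reliably; a sketch of the calling side, with a hypothetical 30-bit-capable device:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_alloc_ring(struct device *dev)
{
	dma_addr_t handle;
	void *ring;

	/* hypothetical device that can only address 30 bits */
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(30)))
		return -EIO;

	/* massage_gfp_flags() maps this mask to __GFP_DMA/__GFP_DMA32 */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	dma_free_coherent(dev, PAGE_SIZE, ring, handle);
	return 0;
}
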
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 1ef75cd80a0d..274af3be1442 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -30,7 +30,7 @@
30#define tc_lsize 32 30#define tc_lsize 32
31 31
32extern unsigned long icache_way_size, dcache_way_size; 32extern unsigned long icache_way_size, dcache_way_size;
33unsigned long tcache_size; 33static unsigned long tcache_size;
34 34
35#include <asm/r4kcache.h> 35#include <asm/r4kcache.h>
36 36
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 15949b0be811..b79b24afe3a2 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
385 */ 385 */
386 386
387#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK 387#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
388#define X GIC_UNUSED
389
388static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { 390static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
389 { X, X, X, X, 0 }, 391 { X, X, X, X, 0 },
390 { X, X, X, X, 0 }, 392 { X, X, X, X, 0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
404 { X, X, X, X, 0 }, 406 { X, X, X, X, 0 },
405 /* The remainder of this table is initialised by fill_ipi_map */ 407 /* The remainder of this table is initialised by fill_ipi_map */
406}; 408};
409#undef X
407 410
408/* 411/*
409 * GCMP needs to be detected before any SMP initialisation 412 * GCMP needs to be detected before any SMP initialisation
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 71f7d27b0d4c..f31218e17d3c 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
118 if (!((pcicvalue == PCIM_H_EA) || 118 if (!((pcicvalue == PCIM_H_EA) ||
119 (pcicvalue == PCIM_H_IA_FIX) || 119 (pcicvalue == PCIM_H_IA_FIX) ||
120 (pcicvalue == PCIM_H_IA_RR))) { 120 (pcicvalue == PCIM_H_IA_RR))) {
121 pr_err(KERN_ERR "PCI init error!!!\n"); 121 pr_err("PCI init error!!!\n");
122 /* Not in Host Mode, return ERROR */ 122 /* Not in Host Mode, return ERROR */
123 return -1; 123 return -1;
124 } 124 }
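
This is the same cleanup as the pr_debug() fix in irq-gic.c above: the pr_*() helpers already supply the log level (pr_err() expands to printk(KERN_ERR pr_fmt(...))), so passing KERN_ERR again only embeds the literal level marker in the message text. Either spelling below is fine on its own; mixing them is the bug:

#include <linux/kernel.h>

static void example_report_pci_failure(void)
{
	pr_err("PCI init error!!!\n");			/* preferred */
	printk(KERN_ERR "PCI init error!!!\n");		/* equivalent spelling */
}
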
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c
index fadd8744a6bc..e7a12ff304b9 100644
--- a/arch/mips/pnx8550/common/reset.c
+++ b/arch/mips/pnx8550/common/reset.c
@@ -22,29 +22,19 @@
22 */ 22 */
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24 24
25#include <asm/processor.h>
25#include <asm/reboot.h> 26#include <asm/reboot.h>
26#include <glb.h> 27#include <glb.h>
27 28
28void pnx8550_machine_restart(char *command) 29void pnx8550_machine_restart(char *command)
29{ 30{
30 char head[] = "************* Machine restart *************";
31 char foot[] = "*******************************************";
32
33 printk("\n\n");
34 printk("%s\n", head);
35 if (command != NULL)
36 printk("* %s\n", command);
37 printk("%s\n", foot);
38
39 PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; 31 PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
40} 32}
41 33
42void pnx8550_machine_halt(void) 34void pnx8550_machine_halt(void)
43{ 35{
44 printk("*** Machine halt. (Not implemented) ***\n"); 36 while (1) {
45} 37 if (cpu_wait)
46 38 cpu_wait();
47void pnx8550_machine_power_off(void) 39 }
48{
49 printk("*** Machine power off. (Not implemented) ***\n");
50} 40}
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c
index 64246c9c875c..43cb3945fdbf 100644
--- a/arch/mips/pnx8550/common/setup.c
+++ b/arch/mips/pnx8550/common/setup.c
@@ -44,7 +44,6 @@
44extern void __init board_setup(void); 44extern void __init board_setup(void);
45extern void pnx8550_machine_restart(char *); 45extern void pnx8550_machine_restart(char *);
46extern void pnx8550_machine_halt(void); 46extern void pnx8550_machine_halt(void);
47extern void pnx8550_machine_power_off(void);
48extern struct resource ioport_resource; 47extern struct resource ioport_resource;
49extern struct resource iomem_resource; 48extern struct resource iomem_resource;
50extern char *prom_getcmdline(void); 49extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
100 99
101 _machine_restart = pnx8550_machine_restart; 100 _machine_restart = pnx8550_machine_restart;
102 _machine_halt = pnx8550_machine_halt; 101 _machine_halt = pnx8550_machine_halt;
103 pm_power_off = pnx8550_machine_power_off; 102 pm_power_off = pnx8550_machine_halt;
104 103
105 /* Clear the Global 2 Register, PCI Inta Output Enable Registers 104 /* Clear the Global 2 Register, PCI Inta Output Enable Registers
106 Bit 1:Enable DAC Powerdown 105 Bit 1:Enable DAC Powerdown
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 444b9f918fdf..7c2a2f7f8dc1 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
8config MN10300 8config MN10300
9 def_bool y 9 def_bool y
10 select HAVE_OPROFILE 10 select HAVE_OPROFILE
11 select HAVE_ARCH_TRACEHOOK
12 11
13config AM33 12config AM33
14 def_bool y 13 def_bool y
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index ff80e86b9bd2..ce83c74b3fd7 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
101 101
102choice 102choice
103 prompt "GDB stub port" 103 prompt "GDB stub port"
104 default GDBSTUB_TTYSM0 104 default GDBSTUB_ON_TTYSM0
105 depends on GDBSTUB 105 depends on GDBSTUB
106 help 106 help
107 Select the serial port used for GDB-stub. 107 Select the serial port used for GDB-stub.
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49e09ad..3f50e9661076 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@ int ffs(int x)
229#include <asm-generic/bitops/hweight.h> 229#include <asm-generic/bitops/hweight.h>
230 230
231#define ext2_set_bit_atomic(lock, nr, addr) \ 231#define ext2_set_bit_atomic(lock, nr, addr) \
232 test_and_set_bit((nr) ^ 0x18, (addr)) 232 test_and_set_bit((nr), (addr))
233#define ext2_clear_bit_atomic(lock, nr, addr) \ 233#define ext2_clear_bit_atomic(lock, nr, addr) \
234 test_and_clear_bit((nr) ^ 0x18, (addr)) 234 test_and_clear_bit((nr), (addr))
235 235
236#include <asm-generic/bitops/ext2-non-atomic.h> 236#include <asm-generic/bitops/ext2-non-atomic.h>
237#include <asm-generic/bitops/minix-le.h> 237#include <asm-generic/bitops/minix-le.h>
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fce2370..1865d72a86ff 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
78 78
79/* These should not be considered constants from userland. */ 79/* These should not be considered constants from userland. */
80#define SIGRTMIN 32 80#define SIGRTMIN 32
81#define SIGRTMAX (_NSIG-1) 81#define SIGRTMAX _NSIG
82 82
83/* 83/*
84 * SA_FLAGS values: 84 * SA_FLAGS values:
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 9d49073e827a..db509dd80565 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
156 ._intr = &SC0ICR, 156 ._intr = &SC0ICR,
157 ._rxb = &SC0RXB, 157 ._rxb = &SC0RXB,
158 ._txb = &SC0TXB, 158 ._txb = &SC0TXB,
159 .rx_name = "ttySM0/Rx", 159 .rx_name = "ttySM0:Rx",
160 .tx_name = "ttySM0/Tx", 160 .tx_name = "ttySM0:Tx",
161#ifdef CONFIG_MN10300_TTYSM0_TIMER8 161#ifdef CONFIG_MN10300_TTYSM0_TIMER8
162 .tm_name = "ttySM0/Timer8", 162 .tm_name = "ttySM0:Timer8",
163 ._tmxmd = &TM8MD, 163 ._tmxmd = &TM8MD,
164 ._tmxbr = &TM8BR, 164 ._tmxbr = &TM8BR,
165 ._tmicr = &TM8ICR, 165 ._tmicr = &TM8ICR,
166 .tm_irq = TM8IRQ, 166 .tm_irq = TM8IRQ,
167 .div_timer = MNSCx_DIV_TIMER_16BIT, 167 .div_timer = MNSCx_DIV_TIMER_16BIT,
168#else /* CONFIG_MN10300_TTYSM0_TIMER2 */ 168#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
169 .tm_name = "ttySM0/Timer2", 169 .tm_name = "ttySM0:Timer2",
170 ._tmxmd = &TM2MD, 170 ._tmxmd = &TM2MD,
171 ._tmxbr = (volatile u16 *) &TM2BR, 171 ._tmxbr = (volatile u16 *) &TM2BR,
172 ._tmicr = &TM2ICR, 172 ._tmicr = &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
209 ._intr = &SC1ICR, 209 ._intr = &SC1ICR,
210 ._rxb = &SC1RXB, 210 ._rxb = &SC1RXB,
211 ._txb = &SC1TXB, 211 ._txb = &SC1TXB,
212 .rx_name = "ttySM1/Rx", 212 .rx_name = "ttySM1:Rx",
213 .tx_name = "ttySM1/Tx", 213 .tx_name = "ttySM1:Tx",
214#ifdef CONFIG_MN10300_TTYSM1_TIMER9 214#ifdef CONFIG_MN10300_TTYSM1_TIMER9
215 .tm_name = "ttySM1/Timer9", 215 .tm_name = "ttySM1:Timer9",
216 ._tmxmd = &TM9MD, 216 ._tmxmd = &TM9MD,
217 ._tmxbr = &TM9BR, 217 ._tmxbr = &TM9BR,
218 ._tmicr = &TM9ICR, 218 ._tmicr = &TM9ICR,
219 .tm_irq = TM9IRQ, 219 .tm_irq = TM9IRQ,
220 .div_timer = MNSCx_DIV_TIMER_16BIT, 220 .div_timer = MNSCx_DIV_TIMER_16BIT,
221#else /* CONFIG_MN10300_TTYSM1_TIMER3 */ 221#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
222 .tm_name = "ttySM1/Timer3", 222 .tm_name = "ttySM1:Timer3",
223 ._tmxmd = &TM3MD, 223 ._tmxmd = &TM3MD,
224 ._tmxbr = (volatile u16 *) &TM3BR, 224 ._tmxbr = (volatile u16 *) &TM3BR,
225 ._tmicr = &TM3ICR, 225 ._tmicr = &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
260 .uart.lock = 260 .uart.lock =
261 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), 261 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
262 .name = "ttySM2", 262 .name = "ttySM2",
263 .rx_name = "ttySM2/Rx", 263 .rx_name = "ttySM2:Rx",
264 .tx_name = "ttySM2/Tx", 264 .tx_name = "ttySM2:Tx",
265 .tm_name = "ttySM2/Timer10", 265 .tm_name = "ttySM2:Timer10",
266 ._iobase = &SC2CTR, 266 ._iobase = &SC2CTR,
267 ._control = &SC2CTR, 267 ._control = &SC2CTR,
268 ._status = &SC2STR, 268 ._status = &SC2STR,
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c
index 6aea7fd76993..196a111e2e29 100644
--- a/arch/mn10300/kernel/module.c
+++ b/arch/mn10300/kernel/module.c
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
206 const Elf_Shdr *sechdrs, 206 const Elf_Shdr *sechdrs,
207 struct module *me) 207 struct module *me)
208{ 208{
209 return module_bug_finalize(hdr, sechdrs, me); 209 return 0;
210} 210}
211 211
212/* 212/*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
214 */ 214 */
215void module_arch_cleanup(struct module *mod) 215void module_arch_cleanup(struct module *mod)
216{ 216{
217 module_bug_cleanup(mod);
218} 217}
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 717db14c2cc3..d4de05ab7864 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
65 old_sigset_t mask; 65 old_sigset_t mask;
66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
69 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
70 __get_user(mask, &act->sa_mask))
69 return -EFAULT; 71 return -EFAULT;
70 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
71 __get_user(mask, &act->sa_mask);
72 siginitset(&new_ka.sa.sa_mask, mask); 72 siginitset(&new_ka.sa.sa_mask, mask);
73 } 73 }
74 74
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
77 if (!ret && oact) { 77 if (!ret && oact) {
78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
81 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
82 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
81 return -EFAULT; 83 return -EFAULT;
82 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
83 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
84 } 84 }
85 85
86 return ret; 86 return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
102{ 102{
103 unsigned int err = 0; 103 unsigned int err = 0;
104 104
105 /* Always make any pending restarted system calls return -EINTR */
106 current_thread_info()->restart_block.fn = do_no_restart_syscall;
107
105 if (is_using_fpu(current)) 108 if (is_using_fpu(current))
106 fpu_kill_state(current); 109 fpu_kill_state(current);
107 110
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
330 regs->d0 = sig; 333 regs->d0 = sig;
331 regs->d1 = (unsigned long) &frame->sc; 334 regs->d1 = (unsigned long) &frame->sc;
332 335
333 set_fs(USER_DS);
334
335 /* the tracer may want to single-step inside the handler */ 336 /* the tracer may want to single-step inside the handler */
336 if (test_thread_flag(TIF_SINGLESTEP)) 337 if (test_thread_flag(TIF_SINGLESTEP))
337 ptrace_notify(SIGTRAP); 338 ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
345 return 0; 346 return 0;
346 347
347give_sigsegv: 348give_sigsegv:
348 force_sig(SIGSEGV, current); 349 force_sigsegv(sig, current);
349 return -EFAULT; 350 return -EFAULT;
350} 351}
351 352
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
413 regs->d0 = sig; 414 regs->d0 = sig;
414 regs->d1 = (long) &frame->info; 415 regs->d1 = (long) &frame->info;
415 416
416 set_fs(USER_DS);
417
418 /* the tracer may want to single-step inside the handler */ 417 /* the tracer may want to single-step inside the handler */
419 if (test_thread_flag(TIF_SINGLESTEP)) 418 if (test_thread_flag(TIF_SINGLESTEP))
420 ptrace_notify(SIGTRAP); 419 ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
428 return 0; 427 return 0;
429 428
430give_sigsegv: 429give_sigsegv:
431 force_sig(SIGSEGV, current); 430 force_sigsegv(sig, current);
432 return -EFAULT; 431 return -EFAULT;
433} 432}
434 433
434static inline void stepback(struct pt_regs *regs)
435{
436 regs->pc -= 2;
437 regs->orig_d0 = -1;
438}
439
435/* 440/*
436 * handle the actual delivery of a signal to userspace 441 * handle the actual delivery of a signal to userspace
437 */ 442 */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
459 /* fallthrough */ 464 /* fallthrough */
460 case -ERESTARTNOINTR: 465 case -ERESTARTNOINTR:
461 regs->d0 = regs->orig_d0; 466 regs->d0 = regs->orig_d0;
462 regs->pc -= 2; 467 stepback(regs);
463 } 468 }
464 } 469 }
465 470
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
527 case -ERESTARTSYS: 532 case -ERESTARTSYS:
528 case -ERESTARTNOINTR: 533 case -ERESTARTNOINTR:
529 regs->d0 = regs->orig_d0; 534 regs->d0 = regs->orig_d0;
530 regs->pc -= 2; 535 stepback(regs);
531 break; 536 break;
532 537
533 case -ERESTART_RESTARTBLOCK: 538 case -ERESTART_RESTARTBLOCK:
534 regs->d0 = __NR_restart_syscall; 539 regs->d0 = __NR_restart_syscall;
535 regs->pc -= 2; 540 stepback(regs);
536 break; 541 break;
537 } 542 }
538 } 543 }
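
Several fixes sit in this hunk: the syscall-restart rollback is centralised in the stepback() helper shown above, setup failures now raise SIGSEGV via force_sigsegv(), and the sys_sigaction() copy-in/copy-out folds every __get_user()/__put_user() into the short-circuit chain, whereas before the results of the later accessors were silently discarded. The corrected copy-in pattern in isolation, for reference:

#include <linux/signal.h>
#include <asm/uaccess.h>

static int example_copy_old_sigaction(struct k_sigaction *new_ka,
				      struct old_sigaction __user *act)
{
	old_sigset_t mask;

	/* Every accessor feeds the same short-circuit chain, so a fault
	 * in any field yields -EFAULT.  verify_area() mirrors the file's
	 * existing, if dated, idiom. */
	if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
	    __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
	    __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;

	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}
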
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 28b9d983db0c..1557277fbc5c 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,13 +2,11 @@
2# Makefile for the MN10300-specific memory management code 2# Makefile for the MN10300-specific memory management code
3# 3#
4 4
5cacheflush-y := cache.o cache-mn10300.o
6cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
7
8cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
9
5obj-y := \ 10obj-y := \
6 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 11 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
7 misalignment.o dma-alloc.o 12 misalignment.o dma-alloc.o $(cacheflush-y)
8
9ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
10obj-y += cache.o cache-mn10300.o
11ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
12obj-y += cache-flush-mn10300.o
13endif
14endif
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644
index 000000000000..f669ea42aba6
--- /dev/null
+++ b/arch/mn10300/mm/cache-disabled.c
@@ -0,0 +1,21 @@
1/* Handle the cache being disabled
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/mm.h>
12
13/*
14 * allow userspace to flush the instruction cache
15 */
16asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
17{
18 if (end < start)
19 return -EINVAL;
20 return 0;
21}
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 1b76719ec1c3..9261217e8d2c 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
54void flush_icache_range(unsigned long start, unsigned long end) 54void flush_icache_range(unsigned long start, unsigned long end)
55{ 55{
56#ifdef CONFIG_MN10300_CACHE_WBACK 56#ifdef CONFIG_MN10300_CACHE_WBACK
57 unsigned long addr, size, off; 57 unsigned long addr, size, base, off;
58 struct page *page; 58 struct page *page;
59 pgd_t *pgd; 59 pgd_t *pgd;
60 pud_t *pud; 60 pud_t *pud;
61 pmd_t *pmd; 61 pmd_t *pmd;
62 pte_t *ppte, pte; 62 pte_t *ppte, pte;
63 63
64 if (end > 0x80000000UL) {
65 /* addresses above 0xa0000000 do not go through the cache */
66 if (end > 0xa0000000UL) {
67 end = 0xa0000000UL;
68 if (start >= end)
69 return;
70 }
71
72 /* kernel addresses between 0x80000000 and 0x9fffffff do not
73 * require page tables, so we just map such addresses directly */
74 base = (start >= 0x80000000UL) ? start : 0x80000000UL;
75 mn10300_dcache_flush_range(base, end);
76 if (base == start)
77 goto invalidate;
78 end = base;
79 }
80
64 for (; start < end; start += size) { 81 for (; start < end; start += size) {
65 /* work out how much of the page to flush */ 82 /* work out how much of the page to flush */
66 off = start & (PAGE_SIZE - 1); 83 off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
104 } 121 }
105#endif 122#endif
106 123
124invalidate:
107 mn10300_icache_inv(); 125 mn10300_icache_inv();
108} 126}
109EXPORT_SYMBOL(flush_icache_range); 127EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 02b77baa5da6..efa0b60c63fe 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
147 return (u32)(unsigned long)uptr; 147 return (u32)(unsigned long)uptr;
148} 148}
149 149
150static __inline__ void __user *compat_alloc_user_space(long len) 150static __inline__ void __user *arch_compat_alloc_user_space(long len)
151{ 151{
152 struct pt_regs *regs = &current->thread.regs; 152 struct pt_regs *regs = &current->thread.regs;
153 return (void __user *)regs->gr[30]; 153 return (void __user *)regs->gr[30];
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 159a2b81e90c..6e81bb596e5b 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
941 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; 941 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
942 DEBUGP("NEW num_symtab %lu\n", nsyms); 942 DEBUGP("NEW num_symtab %lu\n", nsyms);
943 symhdr->sh_size = nsyms * sizeof(Elf_Sym); 943 symhdr->sh_size = nsyms * sizeof(Elf_Sym);
944 return module_bug_finalize(hdr, sechdrs, me); 944 return 0;
945} 945}
946 946
947void module_arch_cleanup(struct module *mod) 947void module_arch_cleanup(struct module *mod)
948{ 948{
949 deregister_unwind_table(mod); 949 deregister_unwind_table(mod);
950 module_bug_cleanup(mod);
951} 950}
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 396d21a80058..a11d4eac4f97 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
134 return (u32)(unsigned long)uptr; 134 return (u32)(unsigned long)uptr;
135} 135}
136 136
137static inline void __user *compat_alloc_user_space(long len) 137static inline void __user *arch_compat_alloc_user_space(long len)
138{ 138{
139 struct pt_regs *regs = current->thread.regs; 139 struct pt_regs *regs = current->thread.regs;
140 unsigned long usp = regs->gpr[1]; 140 unsigned long usp = regs->gpr[1];
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
index a67aeed17d40..debc5ed96d6e 100644
--- a/arch/powerpc/include/asm/fsldma.h
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -11,6 +11,7 @@
11#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ 11#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
12#define __ARCH_POWERPC_ASM_FSLDMA_H__ 12#define __ARCH_POWERPC_ASM_FSLDMA_H__
13 13
14#include <linux/slab.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15 16
16/* 17/*
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 477c663e0140..49cee9df225b 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
63 const Elf_Shdr *sechdrs, struct module *me) 63 const Elf_Shdr *sechdrs, struct module *me)
64{ 64{
65 const Elf_Shdr *sect; 65 const Elf_Shdr *sect;
66 int err;
67
68 err = module_bug_finalize(hdr, sechdrs, me);
69 if (err)
70 return err;
71 66
72 /* Apply feature fixups */ 67 /* Apply feature fixups */
73 sect = find_section(hdr, sechdrs, "__ftr_fixup"); 68 sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
101 96
102void module_arch_cleanup(struct module *mod) 97void module_arch_cleanup(struct module *mod)
103{ 98{
104 module_bug_cleanup(mod);
105} 99}
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7109f5b1baa8..2300426e531a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
138 ti->local_flags &= ~_TLF_RESTORE_SIGMASK; 138 ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
139 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 139 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
140 } 140 }
141 regs->trap = 0;
141 return 0; /* no signals delivered */ 142 return 0; /* no signals delivered */
142 } 143 }
143 144
@@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
164 ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); 165 ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
165 } 166 }
166 167
168 regs->trap = 0;
167 if (ret) { 169 if (ret) {
168 spin_lock_irq(&current->sighand->siglock); 170 spin_lock_irq(&current->sighand->siglock);
169 sigorsets(&current->blocked, &current->blocked, 171 sigorsets(&current->blocked, &current->blocked,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 266610119f66..b96a3a010c26 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs,
511 if (!sig) 511 if (!sig)
512 save_r2 = (unsigned int)regs->gpr[2]; 512 save_r2 = (unsigned int)regs->gpr[2];
513 err = restore_general_regs(regs, sr); 513 err = restore_general_regs(regs, sr);
514 regs->trap = 0;
514 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); 515 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
515 if (!sig) 516 if (!sig)
516 regs->gpr[2] = (unsigned long) save_r2; 517 regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
884 regs->nip = (unsigned long) ka->sa.sa_handler; 885 regs->nip = (unsigned long) ka->sa.sa_handler;
885 /* enter the signal handler in big-endian mode */ 886 /* enter the signal handler in big-endian mode */
886 regs->msr &= ~MSR_LE; 887 regs->msr &= ~MSR_LE;
887 regs->trap = 0;
888 return 1; 888 return 1;
889 889
890badframe: 890badframe:
@@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1228 regs->nip = (unsigned long) ka->sa.sa_handler; 1228 regs->nip = (unsigned long) ka->sa.sa_handler;
1229 /* enter the signal handler in big-endian mode */ 1229 /* enter the signal handler in big-endian mode */
1230 regs->msr &= ~MSR_LE; 1230 regs->msr &= ~MSR_LE;
1231 regs->trap = 0;
1232 1231
1233 return 1; 1232 return 1;
1234 1233
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2fe6fc64b614..27c4a4584f80 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
178 err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); 178 err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
179 err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); 179 err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
180 /* skip SOFTE */ 180 /* skip SOFTE */
181 err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); 181 regs->trap = 0;
182 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); 182 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
183 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); 183 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
184 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); 184 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 5b243bd3eb3b..3dc2a8d262b8 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
57 int id_match = 0; 57 int id_match = 0;
58 58
59 if (dev == NULL || id == NULL) 59 if (dev == NULL || id == NULL)
60 return NULL; 60 return clk;
61 61
62 mutex_lock(&clocks_mutex); 62 mutex_lock(&clocks_mutex);
63 list_for_each_entry(p, &clocks, node) { 63 list_for_each_entry(p, &clocks, node) {
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 45c0cb9b67e6..18c104820198 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
99 if (bus_range == NULL || len < 2 * sizeof(int)) { 99 if (bus_range == NULL || len < 2 * sizeof(int)) {
100 printk(KERN_WARNING EFIKA_PLATFORM_NAME 100 printk(KERN_WARNING EFIKA_PLATFORM_NAME
101 ": Can't get bus-range for %s\n", pcictrl->full_name); 101 ": Can't get bus-range for %s\n", pcictrl->full_name);
102 return; 102 goto out_put;
103 } 103 }
104 104
105 if (bus_range[1] == bus_range[0]) 105 if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
111 printk(" controlled by %s\n", pcictrl->full_name); 111 printk(" controlled by %s\n", pcictrl->full_name);
112 printk("\n"); 112 printk("\n");
113 113
114 hose = pcibios_alloc_controller(of_node_get(pcictrl)); 114 hose = pcibios_alloc_controller(pcictrl);
115 if (!hose) { 115 if (!hose) {
116 printk(KERN_WARNING EFIKA_PLATFORM_NAME 116 printk(KERN_WARNING EFIKA_PLATFORM_NAME
117 ": Can't allocate PCI controller structure for %s\n", 117 ": Can't allocate PCI controller structure for %s\n",
118 pcictrl->full_name); 118 pcictrl->full_name);
119 return; 119 goto out_put;
120 } 120 }
121 121
122 hose->first_busno = bus_range[0]; 122 hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
124 hose->ops = &rtas_pci_ops; 124 hose->ops = &rtas_pci_ops;
125 125
126 pci_process_bridge_OF_ranges(hose, pcictrl, 0); 126 pci_process_bridge_OF_ranges(hose, pcictrl, 0);
127 return;
128out_put:
129 of_node_put(pcictrl);
127} 130}
128 131
129#else 132#else
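
The efika.c hunk is a device_node reference-counting fix: the node held by the caller must be released on every early exit, and the explicit of_node_get() before pcibios_alloc_controller() goes away, presumably because that function now takes its own reference. The general discipline, sketched with an illustrative iterator and policy:

#include <linux/of.h>

static int example_scan_nodes(struct device_node *parent)
{
	struct device_node *np;

	for_each_child_of_node(parent, np) {	/* iterator holds a ref on np */
		if (!of_device_is_available(np)) {
			of_node_put(np);	/* drop the ref on every early exit */
			return -ENODEV;
		}
	}

	return 0;
}
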
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 6e905314ad5d..41f3a7eda1de 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
325 clrbits32(&simple_gpio->simple_dvo, sync | out); 325 clrbits32(&simple_gpio->simple_dvo, sync | out);
326 clrbits8(&wkup_gpio->wkup_dvo, reset); 326 clrbits8(&wkup_gpio->wkup_dvo, reset);
327 327
328 /* wait at lease 1 us */ 328 /* wait for 1 us */
329 udelay(2); 329 udelay(1);
330 330
331 /* Deassert reset */ 331 /* Deassert reset */
332 setbits8(&wkup_gpio->wkup_dvo, reset); 332 setbits8(&wkup_gpio->wkup_dvo, reset);
333 333
334 /* wait at least 200ns */
335 /* 7 ~= (200ns * timebase) / ns2sec */
336 __delay(7);
337
334 /* Restore pin-muxing */ 338 /* Restore pin-muxing */
335 out_be32(&simple_gpio->port_config, mux); 339 out_be32(&simple_gpio->port_config, mux);
336 340
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 104f2007f097..a875c2f542e1 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
181 181
182#endif 182#endif
183 183
184static inline void __user *compat_alloc_user_space(long len) 184static inline void __user *arch_compat_alloc_user_space(long len)
185{ 185{
186 unsigned long stack; 186 unsigned long stack;
187 187
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 22cfd634c355..f7167ee4604c 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
407{ 407{
408 vfree(me->arch.syminfo); 408 vfree(me->arch.syminfo);
409 me->arch.syminfo = NULL; 409 me->arch.syminfo = NULL;
410 return module_bug_finalize(hdr, sechdrs, me); 410 return 0;
411} 411}
412 412
413void module_arch_cleanup(struct module *mod) 413void module_arch_cleanup(struct module *mod)
414{ 414{
415 module_bug_cleanup(mod);
416} 415}
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 43adddfe4c04..ae0be697a89e 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
149 int ret = 0; 149 int ret = 0;
150 150
151 ret |= module_dwarf_finalize(hdr, sechdrs, me); 151 ret |= module_dwarf_finalize(hdr, sechdrs, me);
152 ret |= module_bug_finalize(hdr, sechdrs, me);
153 152
154 return ret; 153 return ret;
155} 154}
156 155
157void module_arch_cleanup(struct module *mod) 156void module_arch_cleanup(struct module *mod)
158{ 157{
159 module_bug_cleanup(mod);
160 module_dwarf_cleanup(mod); 158 module_dwarf_cleanup(mod);
161} 159}
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 5016f76ea98a..6f57325bb883 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
167 return (u32)(unsigned long)uptr; 167 return (u32)(unsigned long)uptr;
168} 168}
169 169
170static inline void __user *compat_alloc_user_space(long len) 170static inline void __user *arch_compat_alloc_user_space(long len)
171{ 171{
172 struct pt_regs *regs = current_thread_info()->kregs; 172 struct pt_regs *regs = current_thread_info()->kregs;
173 unsigned long usp = regs->u_regs[UREG_I6]; 173 unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3c33ff..6318e622cfb0 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event)
1038 if (atomic_read(&nmi_active) < 0) 1038 if (atomic_read(&nmi_active) < 0)
1039 return -ENODEV; 1039 return -ENODEV;
1040 1040
1041 pmap = NULL;
1041 if (attr->type == PERF_TYPE_HARDWARE) { 1042 if (attr->type == PERF_TYPE_HARDWARE) {
1042 if (attr->config >= sparc_pmu->max_events) 1043 if (attr->config >= sparc_pmu->max_events)
1043 return -EINVAL; 1044 return -EINVAL;
@@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event)
1046 pmap = sparc_map_cache_event(attr->config); 1047 pmap = sparc_map_cache_event(attr->config);
1047 if (IS_ERR(pmap)) 1048 if (IS_ERR(pmap))
1048 return PTR_ERR(pmap); 1049 return PTR_ERR(pmap);
1049 } else 1050 } else if (attr->type != PERF_TYPE_RAW)
1050 return -EOPNOTSUPP; 1051 return -EOPNOTSUPP;
1051 1052
1053 if (pmap) {
1054 hwc->event_base = perf_event_encode(pmap);
1055 } else {
1056 /* User gives us "(encoding << 16) | pic_mask" for
1057 * PERF_TYPE_RAW events.
1058 */
1059 hwc->event_base = attr->config;
1060 }
1061
1052 /* We save the enable bits in the config_base. */ 1062 /* We save the enable bits in the config_base. */
1053 hwc->config_base = sparc_pmu->irq_bit; 1063 hwc->config_base = sparc_pmu->irq_bit;
1054 if (!attr->exclude_user) 1064 if (!attr->exclude_user)
@@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event)
1058 if (!attr->exclude_hv) 1068 if (!attr->exclude_hv)
1059 hwc->config_base |= sparc_pmu->hv_bit; 1069 hwc->config_base |= sparc_pmu->hv_bit;
1060 1070
1061 hwc->event_base = perf_event_encode(pmap);
1062
1063 n = 0; 1071 n = 0;
1064 if (event->group_leader != event) { 1072 if (event->group_leader != event) {
1065 n = collect_events(event->group_leader, 1073 n = collect_events(event->group_leader,
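
The new PERF_TYPE_RAW branch lets userspace pass the PCR encoding straight through as "(encoding << 16) | pic_mask". A user-space sketch of opening such an event; the encoding and mask values are made up for illustration:

        #include <linux/perf_event.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        static int open_raw_sparc_event(void)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.type   = PERF_TYPE_RAW;
                attr.size   = sizeof(attr);
                attr.config = (0x12ULL << 16) | 0x0f;   /* (encoding << 16) | pic_mask */

                return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        }
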
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ea22cd373c64..75fad425e249 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
453 return err; 453 return err;
454} 454}
455 455
456static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, 456/* The I-cache flush instruction only works in the primary ASI, which
457 int signo, sigset_t *oldset) 457 * right now is the nucleus, aka. kernel space.
458 *
459 * Therefore we have to kick the instructions out using the kernel
460 * side linear mapping of the physical address backing the user
461 * instructions.
462 */
463static void flush_signal_insns(unsigned long address)
464{
465 unsigned long pstate, paddr;
466 pte_t *ptep, pte;
467 pgd_t *pgdp;
468 pud_t *pudp;
469 pmd_t *pmdp;
470
471 /* Commit all stores of the instructions we are about to flush. */
472 wmb();
473
474 /* Disable cross-call reception. In this way even a very wide
475 * munmap() on another cpu can't tear down the page table
476 * hierarchy from underneath us, since that can't complete
477 * until the IPI tlb flush returns.
478 */
479
480 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
481 __asm__ __volatile__("wrpr %0, %1, %%pstate"
482 : : "r" (pstate), "i" (PSTATE_IE));
483
484 pgdp = pgd_offset(current->mm, address);
485 if (pgd_none(*pgdp))
486 goto out_irqs_on;
487 pudp = pud_offset(pgdp, address);
488 if (pud_none(*pudp))
489 goto out_irqs_on;
490 pmdp = pmd_offset(pudp, address);
491 if (pmd_none(*pmdp))
492 goto out_irqs_on;
493
494 ptep = pte_offset_map(pmdp, address);
495 pte = *ptep;
496 if (!pte_present(pte))
497 goto out_unmap;
498
499 paddr = (unsigned long) page_address(pte_page(pte));
500
501 __asm__ __volatile__("flush %0 + %1"
502 : /* no outputs */
503 : "r" (paddr),
504 "r" (address & (PAGE_SIZE - 1))
505 : "memory");
506
507out_unmap:
508 pte_unmap(ptep);
509out_irqs_on:
510 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
511
512}
513
514static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
515 int signo, sigset_t *oldset)
458{ 516{
459 struct signal_frame32 __user *sf; 517 struct signal_frame32 __user *sf;
460 int sigframe_size; 518 int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
547 if (ka->ka_restorer) { 605 if (ka->ka_restorer) {
548 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 606 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
549 } else { 607 } else {
550 /* Flush instruction space. */
551 unsigned long address = ((unsigned long)&(sf->insns[0])); 608 unsigned long address = ((unsigned long)&(sf->insns[0]));
552 pgd_t *pgdp = pgd_offset(current->mm, address);
553 pud_t *pudp = pud_offset(pgdp, address);
554 pmd_t *pmdp = pmd_offset(pudp, address);
555 pte_t *ptep;
556 pte_t pte;
557 609
558 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); 610 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
559 611
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
562 if (err) 614 if (err)
563 goto sigsegv; 615 goto sigsegv;
564 616
565 preempt_disable(); 617 flush_signal_insns(address);
566 ptep = pte_offset_map(pmdp, address);
567 pte = *ptep;
568 if (pte_present(pte)) {
569 unsigned long page = (unsigned long)
570 page_address(pte_page(pte));
571
572 wmb();
573 __asm__ __volatile__("flush %0 + %1"
574 : /* no outputs */
575 : "r" (page),
576 "r" (address & (PAGE_SIZE - 1))
577 : "memory");
578 }
579 pte_unmap(ptep);
580 preempt_enable();
581 } 618 }
582 return; 619 return 0;
583 620
584sigill: 621sigill:
585 do_exit(SIGILL); 622 do_exit(SIGILL);
623 return -EINVAL;
624
586sigsegv: 625sigsegv:
587 force_sigsegv(signo, current); 626 force_sigsegv(signo, current);
627 return -EFAULT;
588} 628}
589 629
590static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, 630static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
591 unsigned long signr, sigset_t *oldset, 631 unsigned long signr, sigset_t *oldset,
592 siginfo_t *info) 632 siginfo_t *info)
593{ 633{
594 struct rt_signal_frame32 __user *sf; 634 struct rt_signal_frame32 __user *sf;
595 int sigframe_size; 635 int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
687 if (ka->ka_restorer) 727 if (ka->ka_restorer)
688 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 728 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
689 else { 729 else {
690 /* Flush instruction space. */
691 unsigned long address = ((unsigned long)&(sf->insns[0])); 730 unsigned long address = ((unsigned long)&(sf->insns[0]));
692 pgd_t *pgdp = pgd_offset(current->mm, address);
693 pud_t *pudp = pud_offset(pgdp, address);
694 pmd_t *pmdp = pmd_offset(pudp, address);
695 pte_t *ptep;
696 731
697 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); 732 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
698 733
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
704 if (err) 739 if (err)
705 goto sigsegv; 740 goto sigsegv;
706 741
707 preempt_disable(); 742 flush_signal_insns(address);
708 ptep = pte_offset_map(pmdp, address);
709 if (pte_present(*ptep)) {
710 unsigned long page = (unsigned long)
711 page_address(pte_page(*ptep));
712
713 wmb();
714 __asm__ __volatile__("flush %0 + %1"
715 : /* no outputs */
716 : "r" (page),
717 "r" (address & (PAGE_SIZE - 1))
718 : "memory");
719 }
720 pte_unmap(ptep);
721 preempt_enable();
722 } 743 }
723 return; 744 return 0;
724 745
725sigill: 746sigill:
726 do_exit(SIGILL); 747 do_exit(SIGILL);
748 return -EINVAL;
749
727sigsegv: 750sigsegv:
728 force_sigsegv(signr, current); 751 force_sigsegv(signr, current);
752 return -EFAULT;
729} 753}
730 754
731static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, 755static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
732 siginfo_t *info, 756 siginfo_t *info,
733 sigset_t *oldset, struct pt_regs *regs) 757 sigset_t *oldset, struct pt_regs *regs)
734{ 758{
759 int err;
760
735 if (ka->sa.sa_flags & SA_SIGINFO) 761 if (ka->sa.sa_flags & SA_SIGINFO)
736 setup_rt_frame32(ka, regs, signr, oldset, info); 762 err = setup_rt_frame32(ka, regs, signr, oldset, info);
737 else 763 else
738 setup_frame32(ka, regs, signr, oldset); 764 err = setup_frame32(ka, regs, signr, oldset);
765
766 if (err)
767 return err;
739 768
740 spin_lock_irq(&current->sighand->siglock); 769 spin_lock_irq(&current->sighand->siglock);
741 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 770 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
743 sigaddset(&current->blocked,signr); 772 sigaddset(&current->blocked,signr);
744 recalc_sigpending(); 773 recalc_sigpending();
745 spin_unlock_irq(&current->sighand->siglock); 774 spin_unlock_irq(&current->sighand->siglock);
775
776 tracehook_signal_handler(signr, info, ka, regs, 0);
777
778 return 0;
746} 779}
747 780
748static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, 781static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
789 if (signr > 0) { 822 if (signr > 0) {
790 if (restart_syscall) 823 if (restart_syscall)
791 syscall_restart32(orig_i0, regs, &ka.sa); 824 syscall_restart32(orig_i0, regs, &ka.sa);
792 handle_signal32(signr, &ka, &info, oldset, regs); 825 if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
793 826 /* A signal was successfully delivered; the saved
794 /* A signal was successfully delivered; the saved 827 * sigmask will have been stored in the signal frame,
795 * sigmask will have been stored in the signal frame, 828 * and will be restored by sigreturn, so we can simply
796 * and will be restored by sigreturn, so we can simply 829 * clear the TS_RESTORE_SIGMASK flag.
797 * clear the TS_RESTORE_SIGMASK flag. 830 */
798 */ 831 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
799 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 832 }
800
801 tracehook_signal_handler(signr, &info, &ka, regs, 0);
802 return; 833 return;
803 } 834 }
804 if (restart_syscall && 835 if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
809 regs->u_regs[UREG_I0] = orig_i0; 840 regs->u_regs[UREG_I0] = orig_i0;
810 regs->tpc -= 4; 841 regs->tpc -= 4;
811 regs->tnpc -= 4; 842 regs->tnpc -= 4;
843 pt_regs_clear_syscall(regs);
812 } 844 }
813 if (restart_syscall && 845 if (restart_syscall &&
814 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 846 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
815 regs->u_regs[UREG_G1] = __NR_restart_syscall; 847 regs->u_regs[UREG_G1] = __NR_restart_syscall;
816 regs->tpc -= 4; 848 regs->tpc -= 4;
817 regs->tnpc -= 4; 849 regs->tnpc -= 4;
850 pt_regs_clear_syscall(regs);
818 } 851 }
819 852
820 /* If there's no signal to deliver, we just put the saved sigmask 853 /* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9882df92ba0a..5e5c5fd03783 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
315 return err; 315 return err;
316} 316}
317 317
318static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, 318static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
319 int signo, sigset_t *oldset) 319 int signo, sigset_t *oldset)
320{ 320{
321 struct signal_frame __user *sf; 321 struct signal_frame __user *sf;
322 int sigframe_size, err; 322 int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
384 /* Flush instruction space. */ 384 /* Flush instruction space. */
385 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 385 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
386 } 386 }
387 return; 387 return 0;
388 388
389sigill_and_return: 389sigill_and_return:
390 do_exit(SIGILL); 390 do_exit(SIGILL);
391 return -EINVAL;
392
391sigsegv: 393sigsegv:
392 force_sigsegv(signo, current); 394 force_sigsegv(signo, current);
395 return -EFAULT;
393} 396}
394 397
395static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, 398static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
396 int signo, sigset_t *oldset, siginfo_t *info) 399 int signo, sigset_t *oldset, siginfo_t *info)
397{ 400{
398 struct rt_signal_frame __user *sf; 401 struct rt_signal_frame __user *sf;
399 int sigframe_size; 402 int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
466 /* Flush instruction space. */ 469 /* Flush instruction space. */
467 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 470 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
468 } 471 }
469 return; 472 return 0;
470 473
471sigill: 474sigill:
472 do_exit(SIGILL); 475 do_exit(SIGILL);
476 return -EINVAL;
477
473sigsegv: 478sigsegv:
474 force_sigsegv(signo, current); 479 force_sigsegv(signo, current);
480 return -EFAULT;
475} 481}
476 482
477static inline void 483static inline int
478handle_signal(unsigned long signr, struct k_sigaction *ka, 484handle_signal(unsigned long signr, struct k_sigaction *ka,
479 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 485 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
480{ 486{
487 int err;
488
481 if (ka->sa.sa_flags & SA_SIGINFO) 489 if (ka->sa.sa_flags & SA_SIGINFO)
482 setup_rt_frame(ka, regs, signr, oldset, info); 490 err = setup_rt_frame(ka, regs, signr, oldset, info);
483 else 491 else
484 setup_frame(ka, regs, signr, oldset); 492 err = setup_frame(ka, regs, signr, oldset);
493
494 if (err)
495 return err;
485 496
486 spin_lock_irq(&current->sighand->siglock); 497 spin_lock_irq(&current->sighand->siglock);
487 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 498 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
489 sigaddset(&current->blocked, signr); 500 sigaddset(&current->blocked, signr);
490 recalc_sigpending(); 501 recalc_sigpending();
491 spin_unlock_irq(&current->sighand->siglock); 502 spin_unlock_irq(&current->sighand->siglock);
503
504 tracehook_signal_handler(signr, info, ka, regs, 0);
505
506 return 0;
492} 507}
493 508
494static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 509static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
546 if (signr > 0) { 561 if (signr > 0) {
547 if (restart_syscall) 562 if (restart_syscall)
548 syscall_restart(orig_i0, regs, &ka.sa); 563 syscall_restart(orig_i0, regs, &ka.sa);
549 handle_signal(signr, &ka, &info, oldset, regs); 564 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
550 565 /* a signal was successfully delivered; the saved
551 /* a signal was successfully delivered; the saved 566 * sigmask will have been stored in the signal frame,
552 * sigmask will have been stored in the signal frame, 567 * and will be restored by sigreturn, so we can simply
553 * and will be restored by sigreturn, so we can simply 568 * clear the TIF_RESTORE_SIGMASK flag.
554 * clear the TIF_RESTORE_SIGMASK flag. 569 */
555 */ 570 if (test_thread_flag(TIF_RESTORE_SIGMASK))
556 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 571 clear_thread_flag(TIF_RESTORE_SIGMASK);
557 clear_thread_flag(TIF_RESTORE_SIGMASK); 572 }
558
559 tracehook_signal_handler(signr, &info, &ka, regs, 0);
560 return; 573 return;
561 } 574 }
562 if (restart_syscall && 575 if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
567 regs->u_regs[UREG_I0] = orig_i0; 580 regs->u_regs[UREG_I0] = orig_i0;
568 regs->pc -= 4; 581 regs->pc -= 4;
569 regs->npc -= 4; 582 regs->npc -= 4;
583 pt_regs_clear_syscall(regs);
570 } 584 }
571 if (restart_syscall && 585 if (restart_syscall &&
572 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 586 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
573 regs->u_regs[UREG_G1] = __NR_restart_syscall; 587 regs->u_regs[UREG_G1] = __NR_restart_syscall;
574 regs->pc -= 4; 588 regs->pc -= 4;
575 regs->npc -= 4; 589 regs->npc -= 4;
590 pt_regs_clear_syscall(regs);
576 } 591 }
577 592
578 /* if there's no signal to deliver, we just put the saved sigmask 593 /* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 9fa48c30037e..006fe4515886 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
409 return (void __user *) sp; 409 return (void __user *) sp;
410} 410}
411 411
412static inline void 412static inline int
413setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, 413setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
414 int signo, sigset_t *oldset, siginfo_t *info) 414 int signo, sigset_t *oldset, siginfo_t *info)
415{ 415{
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
483 } 483 }
484 /* 4. return to kernel instructions */ 484 /* 4. return to kernel instructions */
485 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 485 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
486 return; 486 return 0;
487 487
488sigill: 488sigill:
489 do_exit(SIGILL); 489 do_exit(SIGILL);
490 return -EINVAL;
491
490sigsegv: 492sigsegv:
491 force_sigsegv(signo, current); 493 force_sigsegv(signo, current);
494 return -EFAULT;
492} 495}
493 496
494static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, 497static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
495 siginfo_t *info, 498 siginfo_t *info,
496 sigset_t *oldset, struct pt_regs *regs) 499 sigset_t *oldset, struct pt_regs *regs)
497{ 500{
498 setup_rt_frame(ka, regs, signr, oldset, 501 int err;
499 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); 502
503 err = setup_rt_frame(ka, regs, signr, oldset,
504 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
505 if (err)
506 return err;
500 spin_lock_irq(&current->sighand->siglock); 507 spin_lock_irq(&current->sighand->siglock);
501 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 508 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
502 if (!(ka->sa.sa_flags & SA_NOMASK)) 509 if (!(ka->sa.sa_flags & SA_NOMASK))
503 sigaddset(&current->blocked,signr); 510 sigaddset(&current->blocked,signr);
504 recalc_sigpending(); 511 recalc_sigpending();
505 spin_unlock_irq(&current->sighand->siglock); 512 spin_unlock_irq(&current->sighand->siglock);
513
514 tracehook_signal_handler(signr, info, ka, regs, 0);
515
516 return 0;
506} 517}
507 518
508static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 519static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
571 if (signr > 0) { 582 if (signr > 0) {
572 if (restart_syscall) 583 if (restart_syscall)
573 syscall_restart(orig_i0, regs, &ka.sa); 584 syscall_restart(orig_i0, regs, &ka.sa);
574 handle_signal(signr, &ka, &info, oldset, regs); 585 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
575 586 /* A signal was successfully delivered; the saved
576 /* A signal was successfully delivered; the saved 587 * sigmask will have been stored in the signal frame,
577 * sigmask will have been stored in the signal frame, 588 * and will be restored by sigreturn, so we can simply
578 * and will be restored by sigreturn, so we can simply 589 * clear the TS_RESTORE_SIGMASK flag.
579 * clear the TS_RESTORE_SIGMASK flag. 590 */
580 */ 591 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
581 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 592 }
582
583 tracehook_signal_handler(signr, &info, &ka, regs, 0);
584 return; 593 return;
585 } 594 }
586 if (restart_syscall && 595 if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
591 regs->u_regs[UREG_I0] = orig_i0; 600 regs->u_regs[UREG_I0] = orig_i0;
592 regs->tpc -= 4; 601 regs->tpc -= 4;
593 regs->tnpc -= 4; 602 regs->tnpc -= 4;
603 pt_regs_clear_syscall(regs);
594 } 604 }
595 if (restart_syscall && 605 if (restart_syscall &&
596 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 606 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
597 regs->u_regs[UREG_G1] = __NR_restart_syscall; 607 regs->u_regs[UREG_G1] = __NR_restart_syscall;
598 regs->tpc -= 4; 608 regs->tpc -= 4;
599 regs->tnpc -= 4; 609 regs->tnpc -= 4;
610 pt_regs_clear_syscall(regs);
600 } 611 }
601 612
602 /* If there's no signal to deliver, we just put the saved sigmask 613 /* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 50794137d710..675c9e11ada5 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
166{ 166{
167 siginfo_t info; 167 siginfo_t info;
168 168
169 lock_kernel();
170#ifdef DEBUG_SPARC_BREAKPOINT 169#ifdef DEBUG_SPARC_BREAKPOINT
171 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); 170 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
172#endif 171#endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
180#ifdef DEBUG_SPARC_BREAKPOINT 179#ifdef DEBUG_SPARC_BREAKPOINT
181 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); 180 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
182#endif 181#endif
183 unlock_kernel();
184} 182}
185 183
186asmlinkage int 184asmlinkage int
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index f8514e291e15..12b9f352595f 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
323{ 323{
324 enum direction dir; 324 enum direction dir;
325 325
326 lock_kernel();
327 if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || 326 if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
328 (((insn >> 30) & 3) != 3)) 327 (((insn >> 30) & 3) != 3))
329 goto kill_user; 328 goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
377kill_user: 376kill_user:
378 user_mna_trap_fault(regs, insn); 377 user_mna_trap_fault(regs, insn);
379out: 378out:
380 unlock_kernel(); 379 ;
381} 380}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index f24d298bda29..b351770cbdd6 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
112 struct thread_info *tp = current_thread_info(); 112 struct thread_info *tp = current_thread_info();
113 int window; 113 int window;
114 114
115 lock_kernel();
116 flush_user_windows(); 115 flush_user_windows();
117 for(window = 0; window < tp->w_saved; window++) { 116 for(window = 0; window < tp->w_saved; window++) {
118 unsigned long sp = tp->rwbuf_stkptrs[window]; 117 unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
123 do_exit(SIGILL); 122 do_exit(SIGILL);
124 } 123 }
125 tp->w_saved = 0; 124 tp->w_saved = 0;
126 unlock_kernel();
127} 125}
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
index 1246573be59e..261aaba092d4 100644
--- a/arch/tile/include/arch/chip_tile64.h
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -150,6 +150,9 @@
150/** Is the PROC_STATUS SPR supported? */ 150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 0 151#define CHIP_HAS_PROC_STATUS_SPR() 0
152 152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 0
155
153/** Log of the number of mshims we have. */ 156/** Log of the number of mshims we have. */
154#define CHIP_LOG_NUM_MSHIMS() 2 157#define CHIP_LOG_NUM_MSHIMS() 2
155 158
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
index e864c47fc89c..70017699a74c 100644
--- a/arch/tile/include/arch/chip_tilepro.h
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -150,6 +150,9 @@
150/** Is the PROC_STATUS SPR supported? */ 150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 1 151#define CHIP_HAS_PROC_STATUS_SPR() 1
152 152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 0
155
153/** Log of the number of mshims we have. */ 156/** Log of the number of mshims we have. */
154#define CHIP_LOG_NUM_MSHIMS() 2 157#define CHIP_LOG_NUM_MSHIMS() 2
155 158
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 5a34da6cdd79..8b60ec8b2d19 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
195 return (long)(int)(long __force)uptr; 195 return (long)(int)(long __force)uptr;
196} 196}
197 197
198static inline void __user *compat_alloc_user_space(long len) 198static inline void __user *arch_compat_alloc_user_space(long len)
199{ 199{
200 struct pt_regs *regs = task_pt_regs(current); 200 struct pt_regs *regs = task_pt_regs(current);
201 return (void __user *)regs->sp - len; 201 return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
214struct compat_sigaction; 214struct compat_sigaction;
215struct compat_siginfo; 215struct compat_siginfo;
216struct compat_sigaltstack; 216struct compat_sigaltstack;
217long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 217long compat_sys_execve(const char __user *path,
218 compat_uptr_t __user *envp); 218 const compat_uptr_t __user *argv,
219 const compat_uptr_t __user *envp);
219long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, 220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
220 struct compat_sigaction __user *oact, 221 struct compat_sigaction __user *oact,
221 size_t sigsetsize); 222 size_t sigsetsize);
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 8c95bef3fa45..ee43328713ab 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
164#define iowrite32 writel 164#define iowrite32 writel
165#define iowrite64 writeq 165#define iowrite64 writeq
166 166
167static inline void *memcpy_fromio(void *dst, void *src, int len) 167static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
168 size_t len)
168{ 169{
169 int x; 170 int x;
170 BUG_ON((unsigned long)src & 0x3); 171 BUG_ON((unsigned long)src & 0x3);
171 for (x = 0; x < len; x += 4) 172 for (x = 0; x < len; x += 4)
172 *(u32 *)(dst + x) = readl(src + x); 173 *(u32 *)(dst + x) = readl(src + x);
173 return dst;
174} 174}
175 175
176static inline void *memcpy_toio(void *dst, void *src, int len) 176static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
177 size_t len)
177{ 178{
178 int x; 179 int x;
179 BUG_ON((unsigned long)dst & 0x3); 180 BUG_ON((unsigned long)dst & 0x3);
180 for (x = 0; x < len; x += 4) 181 for (x = 0; x < len; x += 4)
181 writel(*(u32 *)(src + x), dst + x); 182 writel(*(u32 *)(src + x), dst + x);
182 return dst;
183} 183}
184 184
185/* 185/*
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index d942d09b252e..ccd5f8425688 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -103,6 +103,18 @@ struct thread_struct {
103 /* Any other miscellaneous processor state bits */ 103 /* Any other miscellaneous processor state bits */
104 unsigned long proc_status; 104 unsigned long proc_status;
105#endif 105#endif
106#if !CHIP_HAS_FIXED_INTVEC_BASE()
107 /* Interrupt base for PL0 interrupts */
108 unsigned long interrupt_vector_base;
109#endif
110#if CHIP_HAS_TILE_RTF_HWM()
111 /* Tile cache retry fifo high-water mark */
112 unsigned long tile_rtf_hwm;
113#endif
114#if CHIP_HAS_DSTREAM_PF()
115 /* Data stream prefetch control */
116 unsigned long dstream_pf;
117#endif
106#ifdef CONFIG_HARDWALL 118#ifdef CONFIG_HARDWALL
107 /* Is this task tied to an activated hardwall? */ 119 /* Is this task tied to an activated hardwall? */
108 struct hardwall_info *hardwall; 120 struct hardwall_info *hardwall;
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index acdae814e016..4a02bb073979 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t;
51 51
52/* 52/*
53 * This struct defines the way the registers are stored on the stack during a 53 * This struct defines the way the registers are stored on the stack during a
54 * system call/exception. It should be a multiple of 8 bytes to preserve 54 * system call or exception. "struct sigcontext" has the same shape.
55 * normal stack alignment rules.
56 *
57 * Must track <sys/ucontext.h> and <sys/procfs.h>
58 */ 55 */
59struct pt_regs { 56struct pt_regs {
60 /* Saved main processor registers; 56..63 are special. */ 57 /* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@ struct pt_regs {
80 77
81#endif /* __ASSEMBLY__ */ 78#endif /* __ASSEMBLY__ */
82 79
83/* Flag bits in pt_regs.flags */
84#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
85#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
86#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
87
88#define PTRACE_GETREGS 12 80#define PTRACE_GETREGS 12
89#define PTRACE_SETREGS 13 81#define PTRACE_SETREGS 13
90#define PTRACE_GETFPREGS 14 82#define PTRACE_GETFPREGS 14
@@ -101,6 +93,11 @@ struct pt_regs {
101 93
102#ifdef __KERNEL__ 94#ifdef __KERNEL__
103 95
96/* Flag bits in pt_regs.flags */
97#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
98#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
99#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
100
104#ifndef __ASSEMBLY__ 101#ifndef __ASSEMBLY__
105 102
106#define instruction_pointer(regs) ((regs)->pc) 103#define instruction_pointer(regs) ((regs)->pc)
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h
index 7cd7672e3ad4..5e2d03336f53 100644
--- a/arch/tile/include/asm/sigcontext.h
+++ b/arch/tile/include/asm/sigcontext.h
@@ -15,13 +15,21 @@
15#ifndef _ASM_TILE_SIGCONTEXT_H 15#ifndef _ASM_TILE_SIGCONTEXT_H
16#define _ASM_TILE_SIGCONTEXT_H 16#define _ASM_TILE_SIGCONTEXT_H
17 17
18/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ 18#include <arch/abi.h>
19#include <asm/ptrace.h>
20
21/* Must track <sys/ucontext.h> */
22 19
20/*
21 * struct sigcontext has the same shape as struct pt_regs,
22 * but is simplified since we know the fault is from userspace.
23 */
23struct sigcontext { 24struct sigcontext {
24 struct pt_regs regs; 25 uint_reg_t gregs[53]; /* General-purpose registers. */
26 uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
27 uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
28 uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
29 uint_reg_t pc; /* Program counter. */
30 uint_reg_t ics; /* In Interrupt Critical Section? */
31 uint_reg_t faultnum; /* Fault number. */
32 uint_reg_t pad[5];
25}; 33};
26 34
27#endif /* _ASM_TILE_SIGCONTEXT_H */ 35#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index eb0253f32202..c1ee1d61d44c 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -24,6 +24,7 @@
24#include <asm-generic/signal.h> 24#include <asm-generic/signal.h>
25 25
26#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 26#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
27struct pt_regs;
27int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); 28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
28int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
29void do_signal(struct pt_regs *regs); 30void do_signal(struct pt_regs *regs);
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index af165a74537f..ce99ffefeacf 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -62,10 +62,12 @@ long sys_fork(void);
62long _sys_fork(struct pt_regs *regs); 62long _sys_fork(struct pt_regs *regs);
63long sys_vfork(void); 63long sys_vfork(void);
64long _sys_vfork(struct pt_regs *regs); 64long _sys_vfork(struct pt_regs *regs);
65long sys_execve(char __user *filename, char __user * __user *argv, 65long sys_execve(const char __user *filename,
66 char __user * __user *envp); 66 const char __user *const __user *argv,
67long _sys_execve(char __user *filename, char __user * __user *argv, 67 const char __user *const __user *envp);
68 char __user * __user *envp, struct pt_regs *regs); 68long _sys_execve(const char __user *filename,
69 const char __user *const __user *argv,
70 const char __user *const __user *envp, struct pt_regs *regs);
69 71
70/* kernel/signal.c */ 72/* kernel/signal.c */
71long sys_sigaltstack(const stack_t __user *, stack_t __user *); 73long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
86#endif 88#endif
87 89
88#ifdef CONFIG_COMPAT 90#ifdef CONFIG_COMPAT
89long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 91long compat_sys_execve(const char __user *path,
90 compat_uptr_t __user *envp); 92 const compat_uptr_t __user *argv,
91long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 93 const compat_uptr_t __user *envp);
92 compat_uptr_t __user *envp, struct pt_regs *regs); 94long _compat_sys_execve(const char __user *path,
95 const compat_uptr_t __user *argv,
96 const compat_uptr_t __user *envp,
97 struct pt_regs *regs);
93long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, 98long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
94 struct compat_sigaltstack __user *uoss_ptr); 99 struct compat_sigaltstack __user *uoss_ptr);
95long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, 100long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 84f296ca9e63..8f58bdff20d7 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1506,13 +1506,6 @@ handle_ill:
1506 } 1506 }
1507 STD_ENDPROC(handle_ill) 1507 STD_ENDPROC(handle_ill)
1508 1508
1509 .pushsection .rodata, "a"
1510 .align 8
1511bpt_code:
1512 bpt
1513 ENDPROC(bpt_code)
1514 .popsection
1515
1516/* Various stub interrupt handlers and syscall handlers */ 1509/* Various stub interrupt handlers and syscall handlers */
1517 1510
1518STD_ENTRY_LOCAL(_kernel_double_fault) 1511STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 985cc28c74c5..84c29111756c 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t)
408#if CHIP_HAS_PROC_STATUS_SPR() 408#if CHIP_HAS_PROC_STATUS_SPR()
409 t->proc_status = __insn_mfspr(SPR_PROC_STATUS); 409 t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
410#endif 410#endif
411#if !CHIP_HAS_FIXED_INTVEC_BASE()
412 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
413#endif
414#if CHIP_HAS_TILE_RTF_HWM()
415 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
416#endif
417#if CHIP_HAS_DSTREAM_PF()
418 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
419#endif
411} 420}
412 421
413static void restore_arch_state(const struct thread_struct *t) 422static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t)
428#if CHIP_HAS_PROC_STATUS_SPR() 437#if CHIP_HAS_PROC_STATUS_SPR()
429 __insn_mtspr(SPR_PROC_STATUS, t->proc_status); 438 __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
430#endif 439#endif
440#if !CHIP_HAS_FIXED_INTVEC_BASE()
441 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
442#endif
431#if CHIP_HAS_TILE_RTF_HWM() 443#if CHIP_HAS_TILE_RTF_HWM()
432 /* 444 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
433 * Clear this whenever we switch back to a process in case 445#endif
434 * the previous process was monkeying with it. Even if enabled 446#if CHIP_HAS_DSTREAM_PF()
435 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a 447 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
436 * performance hint, so isn't worth a full save/restore.
437 */
438 __insn_mtspr(SPR_TILE_RTF_HWM, 0);
439#endif 448#endif
440} 449}
441 450
@@ -561,8 +570,9 @@ out:
561} 570}
562 571
563#ifdef CONFIG_COMPAT 572#ifdef CONFIG_COMPAT
564long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 573long _compat_sys_execve(const char __user *path,
565 compat_uptr_t __user *envp, struct pt_regs *regs) 574 const compat_uptr_t __user *argv,
575 const compat_uptr_t __user *envp, struct pt_regs *regs)
566{ 576{
567 long error; 577 long error;
568 char *filename; 578 char *filename;
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs)
657 regs->regs[51], regs->regs[52], regs->tp); 667 regs->regs[51], regs->regs[52], regs->tp);
658 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); 668 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
659#else 669#else
660 for (i = 0; i < 52; i += 3) 670 for (i = 0; i < 52; i += 4)
661 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT 671 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
662 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 672 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
663 i, regs->regs[i], i+1, regs->regs[i+1], 673 i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45b66a3c991f..ce183aa1492c 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs,
61 /* Always make any pending restarted system calls return -EINTR */ 61 /* Always make any pending restarted system calls return -EINTR */
62 current_thread_info()->restart_block.fn = do_no_restart_syscall; 62 current_thread_info()->restart_block.fn = do_no_restart_syscall;
63 63
64 /*
65 * Enforce that sigcontext is like pt_regs, and doesn't mess
66 * up our stack alignment rules.
67 */
68 BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
69 BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
70
64 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
65 err |= __get_user(((long *)regs)[i], 72 err |= __get_user(regs->regs[i], &sc->gregs[i]);
66 &((long __user *)(&sc->regs))[i]);
67 73
68 regs->faultnum = INT_SWINT_1_SIGRETURN; 74 regs->faultnum = INT_SWINT_1_SIGRETURN;
69 75
70 err |= __get_user(*pr0, &sc->regs.regs[0]); 76 err |= __get_user(*pr0, &sc->gregs[0]);
71 return err; 77 return err;
72} 78}
73 79
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
112 int i, err = 0; 118 int i, err = 0;
113 119
114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 120 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
115 err |= __put_user(((long *)regs)[i], 121 err |= __put_user(regs->regs[i], &sc->gregs[i]);
116 &((long __user *)(&sc->regs))[i]);
117 122
118 return err; 123 return err;
119} 124}
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
203 * Set up registers for signal handler. 208 * Set up registers for signal handler.
204 * Registers that we don't modify keep the value they had from 209 * Registers that we don't modify keep the value they had from
205 * user-space at the time we took the signal. 210 * user-space at the time we took the signal.
211 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
212 * since some things rely on this (e.g. glibc's debug/segfault.c).
206 */ 213 */
207 regs->pc = (unsigned long) ka->sa.sa_handler; 214 regs->pc = (unsigned long) ka->sa.sa_handler;
208 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ 215 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
209 regs->sp = (unsigned long) frame; 216 regs->sp = (unsigned long) frame;
210 regs->lr = restorer; 217 regs->lr = restorer;
211 regs->regs[0] = (unsigned long) usig; 218 regs->regs[0] = (unsigned long) usig;
212 219 regs->regs[1] = (unsigned long) &frame->info;
213 if (ka->sa.sa_flags & SA_SIGINFO) { 220 regs->regs[2] = (unsigned long) &frame->uc;
214 /* Need extra arguments, so mark to restore caller-saves. */ 221 regs->flags |= PT_FLAGS_CALLER_SAVES;
215 regs->regs[1] = (unsigned long) &frame->info;
216 regs->regs[2] = (unsigned long) &frame->uc;
217 regs->flags |= PT_FLAGS_CALLER_SAVES;
218 }
219 222
220 /* 223 /*
221 * Notify any tracer that was single-stepping it. 224 * Notify any tracer that was single-stepping it.
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 38a68b0b4581..ea2e0ce28380 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
175 pr_err(" <received signal %d>\n", 175 pr_err(" <received signal %d>\n",
176 frame->info.si_signo); 176 frame->info.si_signo);
177 } 177 }
178 return &frame->uc.uc_mcontext.regs; 178 return (struct pt_regs *)&frame->uc.uc_mcontext;
179 } 179 }
180 return NULL; 180 return NULL;
181} 181}
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cd145eda3579..49b5e1eb3262 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -62,7 +62,7 @@ static long execve1(const char *file,
62 return error; 62 return error;
63} 63}
64 64
65long um_execve(const char *file, char __user *__user *argv, char __user *__user *env) 65long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
66{ 66{
67 long err; 67 long err;
68 68
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
72 return err; 72 return err;
73} 73}
74 74
75long sys_execve(const char __user *file, char __user *__user *argv, 75long sys_execve(const char __user *file, const char __user *const __user *argv,
76 char __user *__user *env) 76 const char __user *const __user *env)
77{ 77{
78 long error; 78 long error;
79 char *filename; 79 char *filename;
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h
index 1303a105fe91..5bf97db24a04 100644
--- a/arch/um/kernel/internal.h
+++ b/arch/um/kernel/internal.h
@@ -1 +1 @@
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env); extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 5ddb246626db..f958cb876ee3 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
60 60
61 fs = get_fs(); 61 fs = get_fs();
62 set_fs(KERNEL_DS); 62 set_fs(KERNEL_DS);
63 ret = um_execve(filename, (char __user *__user *)argv, 63 ret = um_execve(filename, (const char __user *const __user *)argv,
64 (char __user *__user *) envp); 64 (const char __user *const __user *) envp);
65 set_fs(fs); 65 set_fs(fs);
66 66
67 return ret; 67 return ret;
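
The const-ification running through the um (and tile) execve paths is what lets callers pass fully const argument vectors without casts. A caller-side sketch, assuming the const-qualified kernel_execve() prototype this series converges on; the path and strings are illustrative:

        static const char *const argv[] = { "/sbin/init", NULL };
        static const char *const envp[] = { "HOME=/", "TERM=linux", NULL };

        int ret = kernel_execve("/sbin/init", argv, envp);
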
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 8aa1b59b9074..e8c8881351b3 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -74,7 +74,7 @@ endif
74 74
75ifdef CONFIG_CC_STACKPROTECTOR 75ifdef CONFIG_CC_STACKPROTECTOR
76 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh 76 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
77 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y) 77 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
78 stackp-y := -fstack-protector 78 stackp-y := -fstack-protector
79 KBUILD_CFLAGS += $(stackp-y) 79 KBUILD_CFLAGS += $(stackp-y)
80 else 80 else
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 030f4b93e255..5df2869c874b 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
58 if (arg[pos] == ',') 58 if (arg[pos] == ',')
59 pos++; 59 pos++;
60 60
61 if (!strncmp(arg, "ttyS", 4)) { 61 /*
62 * make sure we have
63 * "serial,0x3f8,115200"
64 * "serial,ttyS0,115200"
65 * "ttyS0,115200"
66 */
67 if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
68 port = simple_strtoull(arg + pos, &e, 16);
69 if (port == 0 || arg + pos == e)
70 port = DEFAULT_SERIAL_PORT;
71 else
72 pos = e - arg;
73 } else if (!strncmp(arg + pos, "ttyS", 4)) {
62 static const int bases[] = { 0x3f8, 0x2f8 }; 74 static const int bases[] = { 0x3f8, 0x2f8 };
63 int idx = 0; 75 int idx = 0;
64 76
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index b86feabed69b..518bb99c3394 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -50,7 +50,12 @@
50 /* 50 /*
51 * Reload arg registers from stack in case ptrace changed them. 51 * Reload arg registers from stack in case ptrace changed them.
52 * We don't reload %eax because syscall_trace_enter() returned 52 * We don't reload %eax because syscall_trace_enter() returned
53 * the value it wants us to use in the table lookup. 53 * the %rax value we should see. Instead, we just truncate that
54 * value to 32 bits again as we did on entry from user mode.
55 * If it's a new value set by user_regset during entry tracing,
56 * this matches the normal truncation of the user-mode value.
57 * If it's -1 to make us punt the syscall, then (u32)-1 is still
58 * an appropriately invalid value.
54 */ 59 */
55 .macro LOAD_ARGS32 offset, _r9=0 60 .macro LOAD_ARGS32 offset, _r9=0
56 .if \_r9 61 .if \_r9
@@ -60,6 +65,7 @@
60 movl \offset+48(%rsp),%edx 65 movl \offset+48(%rsp),%edx
61 movl \offset+56(%rsp),%esi 66 movl \offset+56(%rsp),%esi
62 movl \offset+64(%rsp),%edi 67 movl \offset+64(%rsp),%edi
68 movl %eax,%eax /* zero extension */
63 .endm 69 .endm
64 70
65 .macro CFI_STARTPROC32 simple 71 .macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
153 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 159 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
154 CFI_REMEMBER_STATE 160 CFI_REMEMBER_STATE
155 jnz sysenter_tracesys 161 jnz sysenter_tracesys
156 cmpl $(IA32_NR_syscalls-1),%eax 162 cmpq $(IA32_NR_syscalls-1),%rax
157 ja ia32_badsys 163 ja ia32_badsys
158sysenter_do_call: 164sysenter_do_call:
159 IA32_ARG_FIXUP 165 IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
195 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ 201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
196 call audit_syscall_entry 202 call audit_syscall_entry
197 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ 203 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
198 cmpl $(IA32_NR_syscalls-1),%eax 204 cmpq $(IA32_NR_syscalls-1),%rax
199 ja ia32_badsys 205 ja ia32_badsys
200 movl %ebx,%edi /* reload 1st syscall arg */ 206 movl %ebx,%edi /* reload 1st syscall arg */
201 movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ 207 movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
248 call syscall_trace_enter 254 call syscall_trace_enter
249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ 255 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
250 RESTORE_REST 256 RESTORE_REST
251 cmpl $(IA32_NR_syscalls-1),%eax 257 cmpq $(IA32_NR_syscalls-1),%rax
252 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ 258 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
253 jmp sysenter_do_call 259 jmp sysenter_do_call
254 CFI_ENDPROC 260 CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
314 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 320 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
315 CFI_REMEMBER_STATE 321 CFI_REMEMBER_STATE
316 jnz cstar_tracesys 322 jnz cstar_tracesys
317 cmpl $IA32_NR_syscalls-1,%eax 323 cmpq $IA32_NR_syscalls-1,%rax
318 ja ia32_badsys 324 ja ia32_badsys
319cstar_do_call: 325cstar_do_call:
320 IA32_ARG_FIXUP 1 326 IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
367 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ 373 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
368 RESTORE_REST 374 RESTORE_REST
369 xchgl %ebp,%r9d 375 xchgl %ebp,%r9d
370 cmpl $(IA32_NR_syscalls-1),%eax 376 cmpq $(IA32_NR_syscalls-1),%rax
371 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ 377 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
372 jmp cstar_do_call 378 jmp cstar_do_call
373END(ia32_cstar_target) 379END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
425 orl $TS_COMPAT,TI_status(%r10) 431 orl $TS_COMPAT,TI_status(%r10)
426 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 432 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
427 jnz ia32_tracesys 433 jnz ia32_tracesys
428 cmpl $(IA32_NR_syscalls-1),%eax 434 cmpq $(IA32_NR_syscalls-1),%rax
429 ja ia32_badsys 435 ja ia32_badsys
430ia32_do_call: 436ia32_do_call:
431 IA32_ARG_FIXUP 437 IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
444 call syscall_trace_enter 450 call syscall_trace_enter
445 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ 451 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
446 RESTORE_REST 452 RESTORE_REST
447 cmpl $(IA32_NR_syscalls-1),%eax 453 cmpq $(IA32_NR_syscalls-1),%rax
448 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ 454 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
449 jmp ia32_do_call 455 jmp ia32_do_call
450END(ia32_syscall) 456END(ia32_syscall)
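
The cmpl -> cmpq switches, together with the zero-extension added to LOAD_ARGS32, make the bounds check cover the full 64-bit register: a ptrace-supplied %rax such as 0x0000000100000001 can no longer truncate to a small value for the compare while its untruncated value indexes the syscall table. A C analogue of the fixed path; the dispatch helper is only a stand-in for the real assembly:

        long ia32_dispatch(struct pt_regs *regs)
        {
                unsigned long nr = regs->ax;    /* may have been rewritten by ptrace */

                nr = (u32)nr;                   /* movl %eax,%eax after tracing */
                if (nr > IA32_NR_syscalls - 1)  /* cmpq checks the full register */
                        return -ENOSYS;         /* ia32_badsys */
                return ia32_call_syscall(nr, regs);     /* hypothetical helper */
        }
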
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1d705d..cb030374b90a 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
38 38
39#endif /* !CONFIG_AMD_IOMMU_STATS */ 39#endif /* !CONFIG_AMD_IOMMU_STATS */
40 40
41static inline bool is_rd890_iommu(struct pci_dev *pdev)
42{
43 return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
44 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
45}
46
41#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ 47#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
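
A likely consumer pattern for the new helper is walking the registered IOMMUs and gating a chipset-specific workaround; apply_rd890_workaround() is a made-up name:

        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                if (is_rd890_iommu(iommu->dev))
                        apply_rd890_workaround(iommu);
        }
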
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88bc779..08616180deaf 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -368,6 +368,9 @@ struct amd_iommu {
368 /* capabilities of that IOMMU read from ACPI */ 368 /* capabilities of that IOMMU read from ACPI */
369 u32 cap; 369 u32 cap;
370 370
371 /* flags read from acpi table */
372 u8 acpi_flags;
373
371 /* 374 /*
372 * Capability pointer. There could be more than one IOMMU per PCI 375 * Capability pointer. There could be more than one IOMMU per PCI
373 * device function if there are more than one AMD IOMMU capability 376 * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
411 414
412 /* default dma_ops domain for that IOMMU */ 415 /* default dma_ops domain for that IOMMU */
413 struct dma_ops_domain *default_dom; 416 struct dma_ops_domain *default_dom;
417
418 /*
419 * This array is required to work around a potential BIOS bug.
420 * The BIOS may miss to restore parts of the PCI configuration
421 * space when the system resumes from S3. The result is that the
422 * IOMMU does not execute commands anymore which leads to system
423 * failure.
424 */
425 u32 cache_cfg[4];
414}; 426};
415 427
416/* 428/*
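
One plausible way cache_cfg[] gets populated at init time, so it can be replayed after resume from S3, is to read four dwords of the IOMMU's PCI config space into the new array. The offsets below are placeholders, not the real RD890 registers:

        static void iommu_save_cache_cfg(struct amd_iommu *iommu)
        {
                int i;

                for (i = 0; i < 4; i++)
                        pci_read_config_dword(iommu->dev, 0x30 + 4 * i,
                                              &iommu->cache_cfg[i]);
        }
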
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776efeb16..bafd80defa43 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
310{ 310{
311 return ((1UL << (nr % BITS_PER_LONG)) & 311 return ((1UL << (nr % BITS_PER_LONG)) &
312 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 312 (addr[nr / BITS_PER_LONG])) != 0;
313} 313}
314 314
315static inline int variable_test_bit(int nr, volatile const unsigned long *addr) 315static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
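
Dropping the (unsigned long *) cast keeps the access volatile-qualified, which matters for polling loops: with the cast the compiler was free to read the word once and reuse the value forever. Illustrative loop (bit and word names invented); with the fix each iteration re-reads memory:

        while (!test_bit(READY_BIT, &status_word))  /* constant nr -> constant_test_bit() */
                cpu_relax();
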
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 306160e58b48..1d9cd27c2920 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
205 return (u32)(unsigned long)uptr; 205 return (u32)(unsigned long)uptr;
206} 206}
207 207
208static inline void __user *compat_alloc_user_space(long len) 208static inline void __user *arch_compat_alloc_user_space(long len)
209{ 209{
210 struct pt_regs *regs = task_pt_regs(current); 210 struct pt_regs *regs = task_pt_regs(current);
211 return (void __user *)regs->sp - len; 211 return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 781a50b29a49..3f76523589af 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ 168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ 169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
171#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
171 172
172/* Virtualization flags: Linux defined, word 8 */ 173/* Virtualization flags: Linux defined, word 8 */
173#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 174#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -296,6 +297,7 @@ extern const char * const x86_power_flags[32];
296 297
297#endif /* CONFIG_X86_64 */ 298#endif /* CONFIG_X86_64 */
298 299
300#if __GNUC__ >= 4
299/* 301/*
300 * Static testing of CPU features. Used the same as boot_cpu_has(). 302 * Static testing of CPU features. Used the same as boot_cpu_has().
301 * These are only valid after alternatives have run, but will statically 303 * These are only valid after alternatives have run, but will statically
@@ -304,7 +306,7 @@ extern const char * const x86_power_flags[32];
304 */ 306 */
305static __always_inline __pure bool __static_cpu_has(u16 bit) 307static __always_inline __pure bool __static_cpu_has(u16 bit)
306{ 308{
307#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) 309#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
308 asm goto("1: jmp %l[t_no]\n" 310 asm goto("1: jmp %l[t_no]\n"
309 "2:\n" 311 "2:\n"
310 ".section .altinstructions,\"a\"\n" 312 ".section .altinstructions,\"a\"\n"
@@ -345,7 +347,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
345#endif 347#endif
346} 348}
347 349
348#if __GNUC__ >= 4
349#define static_cpu_has(bit) \ 350#define static_cpu_has(bit) \
350( \ 351( \
351 __builtin_constant_p(boot_cpu_has(bit)) ? \ 352 __builtin_constant_p(boot_cpu_has(bit)) ? \
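Once the whole static_cpu_has() block sits under an outer GCC >= 4 guard, the inner asm-goto test only needs "major > 4 or minor >= 5". A tiny sketch of that preprocessor logic rewritten as a plain function; the version numbers are illustrative inputs:

#include <stdio.h>

/* With the surrounding block only compiled for GCC >= 4, the inner
 * "asm goto" availability test reduces to: major > 4, or minor >= 5. */
static int has_asm_goto(int major, int minor)
{
        if (major < 4)
                return 0;       /* outer #if: the block is not built at all */
        return major > 4 || minor >= 5;
}

int main(void)
{
        printf("gcc 4.4: %d, gcc 4.5: %d, gcc 5.0: %d\n",
               has_asm_goto(4, 4), has_asm_goto(4, 5), has_asm_goto(5, 0));
        return 0;
}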
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 004e6e25e913..1d5c08a1bdfd 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address;
68extern u8 hpet_blockid; 68extern u8 hpet_blockid;
69extern int hpet_force_user; 69extern int hpet_force_user;
70extern u8 hpet_msi_disable; 70extern u8 hpet_msi_disable;
71extern u8 hpet_readback_cmp;
72extern int is_hpet_enabled(void); 71extern int is_hpet_enabled(void);
73extern int hpet_enable(void); 72extern int hpet_enable(void);
74extern void hpet_disable(void); 73extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 528a11e8d3e3..824ca07860d0 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
20#include <linux/list.h> 20#include <linux/list.h>
21 21
22/* Available HW breakpoint length encodings */ 22/* Available HW breakpoint length encodings */
23#define X86_BREAKPOINT_LEN_X 0x00 23#define X86_BREAKPOINT_LEN_X 0x40
24#define X86_BREAKPOINT_LEN_1 0x40 24#define X86_BREAKPOINT_LEN_1 0x40
25#define X86_BREAKPOINT_LEN_2 0x44 25#define X86_BREAKPOINT_LEN_2 0x44
26#define X86_BREAKPOINT_LEN_4 0x4c 26#define X86_BREAKPOINT_LEN_4 0x4c
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45d6576..c4191b3b7056 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28 28
29void * 29void __iomem *
30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
31 31
32void 32void
33iounmap_atomic(void *kvaddr, enum km_type type); 33iounmap_atomic(void __iomem *kvaddr, enum km_type type);
34 34
35int 35int
36iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); 36iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 51cfd730ac5d..1f99ecfc48e1 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
152struct operand { 152struct operand {
153 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; 153 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
154 unsigned int bytes; 154 unsigned int bytes;
155 unsigned long orig_val, *ptr; 155 union {
156 unsigned long orig_val;
157 u64 orig_val64;
158 };
159 unsigned long *ptr;
156 union { 160 union {
157 unsigned long val; 161 unsigned long val;
162 u64 val64;
158 char valptr[sizeof(unsigned long) + 2]; 163 char valptr[sizeof(unsigned long) + 2];
159 }; 164 };
160}; 165};
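The operand structure above gains anonymous unions so the same storage can be read either as a native unsigned long or as a full 64-bit value, which the cmpxchg8b emulation needs on 32-bit hosts. A minimal sketch of the aliasing, assuming a C11 compiler for the anonymous union:

#include <stdio.h>
#include <stdint.h>

struct operand {
        union {
                unsigned long val;      /* natural word-sized view */
                uint64_t      val64;    /* full 64-bit view for cmpxchg8b */
        };
};

int main(void)
{
        struct operand op;

        op.val64 = 0x1122334455667788ULL;
        printf("val64=%llx val=%lx\n",
               (unsigned long long)op.val64, op.val);
        /* On a 32-bit host val shows only the low word; val64 keeps all
         * 64 bits, which is what the cmpxchg8b emulation compares. */
        return 0;
}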
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676266bd..fedf32a8c3ec 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
11CFLAGS_REMOVE_tsc.o = -pg 11CFLAGS_REMOVE_tsc.o = -pg
12CFLAGS_REMOVE_rtc.o = -pg 12CFLAGS_REMOVE_rtc.o = -pg
13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
14CFLAGS_REMOVE_pvclock.o = -pg
15CFLAGS_REMOVE_kvmclock.o = -pg
14CFLAGS_REMOVE_ftrace.o = -pg 16CFLAGS_REMOVE_ftrace.o = -pg
15CFLAGS_REMOVE_early_printk.o = -pg 17CFLAGS_REMOVE_early_printk.o = -pg
16endif 18endif
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index fb7a5f052e2b..fb16f17e59be 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -61,7 +61,7 @@ struct cstate_entry {
61 unsigned int ecx; 61 unsigned int ecx;
62 } states[ACPI_PROCESSOR_MAX_POWER]; 62 } states[ACPI_PROCESSOR_MAX_POWER];
63}; 63};
64static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ 64static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
65 65
66static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; 66static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
67 67
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1e30a2..679b6450382b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1953 size_t size, 1953 size_t size,
1954 int dir) 1954 int dir)
1955{ 1955{
1956 dma_addr_t flush_addr;
1956 dma_addr_t i, start; 1957 dma_addr_t i, start;
1957 unsigned int pages; 1958 unsigned int pages;
1958 1959
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1960 (dma_addr + size > dma_dom->aperture_size)) 1961 (dma_addr + size > dma_dom->aperture_size))
1961 return; 1962 return;
1962 1963
1964 flush_addr = dma_addr;
1963 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
1964 dma_addr &= PAGE_MASK; 1966 dma_addr &= PAGE_MASK;
1965 start = dma_addr; 1967 start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1974 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1976 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1975 1977
1976 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1977 iommu_flush_pages(&dma_dom->domain, dma_addr, size); 1979 iommu_flush_pages(&dma_dom->domain, flush_addr, size);
1978 dma_dom->need_flush = false; 1980 dma_dom->need_flush = false;
1979 } 1981 }
1980} 1982}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3cc63e2b8dd4..5a170cbbbed8 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
633 MMIO_GET_LD(range)); 633 MMIO_GET_LD(range));
634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
635
636 if (is_rd890_iommu(iommu->dev)) {
637 pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
638 pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
639 pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
640 pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
641 }
635} 642}
636 643
637/* 644/*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
649 struct ivhd_entry *e; 656 struct ivhd_entry *e;
650 657
651 /* 658 /*
652 * First set the recommended feature enable bits from ACPI 659 * First save the recommended feature enable bits from ACPI
653 * into the IOMMU control registers
654 */ 660 */
655 h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? 661 iommu->acpi_flags = h->flags;
656 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
657 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
658
659 h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
660 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
661 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
662
663 h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
664 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
665 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
666
667 h->flags & IVHD_FLAG_ISOC_EN_MASK ?
668 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
669 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
670
671 /*
672 * make IOMMU memory accesses cache coherent
673 */
674 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
675 662
676 /* 663 /*
677 * Done. Now parse the device entries 664 * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
1116 } 1103 }
1117} 1104}
1118 1105
1106static void iommu_init_flags(struct amd_iommu *iommu)
1107{
1108 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1109 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1110 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1111
1112 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1113 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1114 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1115
1116 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1117 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1118 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1119
1120 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1121 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1122 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1123
1124 /*
1125 * make IOMMU memory accesses cache coherent
1126 */
1127 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1128}
1129
1130static void iommu_apply_quirks(struct amd_iommu *iommu)
1131{
1132 if (is_rd890_iommu(iommu->dev)) {
1133 pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
1134 pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
1135 pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
1136 pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
1137 }
1138}
1139
1119/* 1140/*
1120 * This function finally enables all IOMMUs found in the system after 1141 * This function finally enables all IOMMUs found in the system after
1121 * they have been initialized 1142 * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
1126 1147
1127 for_each_iommu(iommu) { 1148 for_each_iommu(iommu) {
1128 iommu_disable(iommu); 1149 iommu_disable(iommu);
1150 iommu_apply_quirks(iommu);
1151 iommu_init_flags(iommu);
1129 iommu_set_device_table(iommu); 1152 iommu_set_device_table(iommu);
1130 iommu_enable_command_buffer(iommu); 1153 iommu_enable_command_buffer(iommu);
1131 iommu_enable_event_buffer(iommu); 1154 iommu_enable_event_buffer(iommu);
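The RD890 hunks above save the 0xf0..0xfc cache configuration dwords at probe time and write them back before the IOMMU is enabled, because the BIOS may fail to restore them after S3. A sketch of that save-and-restore pattern; the pci_* helpers and the fake config space below are stand-ins, not the kernel API:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_cfg_space[64];     /* stand-in for PCI config space */

static void pci_read_dword(int off, uint32_t *val)  { *val = fake_cfg_space[off / 4]; }
static void pci_write_dword(int off, uint32_t val)  { fake_cfg_space[off / 4] = val; }

struct iommu_state { uint32_t cache_cfg[4]; };

/* Save the four quirk registers once at probe time ... */
static void save_quirk_regs(struct iommu_state *s)
{
        for (int i = 0; i < 4; i++)
                pci_read_dword(0xf0 + 4 * i, &s->cache_cfg[i]);
}

/* ... and write them back before (re-)enabling the IOMMU. */
static void restore_quirk_regs(const struct iommu_state *s)
{
        for (int i = 0; i < 4; i++)
                pci_write_dword(0xf0 + 4 * i, s->cache_cfg[i]);
}

int main(void)
{
        struct iommu_state s;

        fake_cfg_space[0xf0 / 4] = 0xdeadbeef;  /* value set up by firmware */
        save_quirk_regs(&s);
        fake_cfg_space[0xf0 / 4] = 0;           /* "lost" across S3 resume */
        restore_quirk_regs(&s);
        printf("0x%x\n", (unsigned int)fake_cfg_space[0xf0 / 4]);
        return 0;
}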
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f1efebaf5510..5c5b8f3dddb5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
306 306
307 old_cfg = old_desc->chip_data; 307 old_cfg = old_desc->chip_data;
308 308
309 memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); 309 cfg->vector = old_cfg->vector;
310 cfg->move_in_progress = old_cfg->move_in_progress;
311 cpumask_copy(cfg->domain, old_cfg->domain);
312 cpumask_copy(cfg->old_domain, old_cfg->old_domain);
310 313
311 init_copy_irq_2_pin(old_cfg, cfg, node); 314 init_copy_irq_2_pin(old_cfg, cfg, node);
312} 315}
313 316
314static void free_irq_cfg(struct irq_cfg *old_cfg) 317static void free_irq_cfg(struct irq_cfg *cfg)
315{ 318{
316 kfree(old_cfg); 319 free_cpumask_var(cfg->domain);
320 free_cpumask_var(cfg->old_domain);
321 kfree(cfg);
317} 322}
318 323
319void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) 324void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7b598b84c902..f744f54cb248 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -698,9 +698,11 @@ void __init uv_system_init(void)
698 for (j = 0; j < 64; j++) { 698 for (j = 0; j < 64; j++) {
699 if (!test_bit(j, &present)) 699 if (!test_bit(j, &present))
700 continue; 700 continue;
701 uv_blade_info[blade].pnode = (i * 64 + j); 701 pnode = (i * 64 + j);
702 uv_blade_info[blade].pnode = pnode;
702 uv_blade_info[blade].nr_possible_cpus = 0; 703 uv_blade_info[blade].nr_possible_cpus = 0;
703 uv_blade_info[blade].nr_online_cpus = 0; 704 uv_blade_info[blade].nr_online_cpus = 0;
705 max_pnode = max(pnode, max_pnode);
704 blade++; 706 blade++;
705 } 707 }
706 } 708 }
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
738 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); 740 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
739 uv_node_to_blade[nid] = blade; 741 uv_node_to_blade[nid] = blade;
740 uv_cpu_to_blade[cpu] = blade; 742 uv_cpu_to_blade[cpu] = blade;
741 max_pnode = max(pnode, max_pnode);
742 } 743 }
743 744
744 /* Add blade/pnode info for nodes without cpus */ 745 /* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
750 pnode = (paddr >> m_val) & pnode_mask; 751 pnode = (paddr >> m_val) & pnode_mask;
751 blade = boot_pnode_to_blade(pnode); 752 blade = boot_pnode_to_blade(pnode);
752 uv_node_to_blade[nid] = blade; 753 uv_node_to_blade[nid] = blade;
753 max_pnode = max(pnode, max_pnode);
754 } 754 }
755 755
756 map_gru_high(max_pnode); 756 map_gru_high(max_pnode);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 490dac63c2d2..f2f9ac7da25c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
545 } 545 }
546} 546}
547 547
548static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 548void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
549{ 549{
550 u32 tfms, xlvl; 550 u32 tfms, xlvl;
551 u32 ebx; 551 u32 ebx;
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 3624e8a0f71b..f668bb1f7d43 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
33 *const __x86_cpu_dev_end[]; 33 *const __x86_cpu_dev_end[];
34 34
35extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); 35extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
36extern void get_cpu_cap(struct cpuinfo_x86 *c);
36 37
37#endif 38#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 994230d4dc4e..4f6f679f2799 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
368 return -ENODEV; 368 return -ENODEV;
369 369
370 out_obj = output.pointer; 370 out_obj = output.pointer;
371 if (out_obj->type != ACPI_TYPE_BUFFER) 371 if (out_obj->type != ACPI_TYPE_BUFFER) {
372 return -ENODEV; 372 ret = -ENODEV;
373 goto out_free;
374 }
373 375
374 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); 376 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
375 if (errors) 377 if (errors) {
376 return -ENODEV; 378 ret = -ENODEV;
379 goto out_free;
380 }
377 381
378 supported = *((u32 *)(out_obj->buffer.pointer + 4)); 382 supported = *((u32 *)(out_obj->buffer.pointer + 4));
379 if (!(supported & 0x1)) 383 if (!(supported & 0x1)) {
380 return -ENODEV; 384 ret = -ENODEV;
385 goto out_free;
386 }
381 387
382out_free: 388out_free:
383 kfree(output.pointer); 389 kfree(output.pointer);
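The hunk above converts early returns into jumps to the existing out_free label so the ACPI output buffer is freed on every error path. A sketch of the same goto-based cleanup pattern with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every failure jumps to one label, so the buffer allocated at the top is
 * freed on success and on each error path alike. */
static int parse_buffer(const char *src)
{
        int ret = 0;
        char *buf = malloc(16);

        if (!buf)
                return -1;

        strncpy(buf, src, 15);
        buf[15] = '\0';

        if (buf[0] != 'P') {            /* first sanity check */
                ret = -1;
                goto out_free;
        }
        if (strlen(buf) < 3) {          /* second sanity check */
                ret = -1;
                goto out_free;
        }

out_free:
        free(buf);
        return ret;
}

int main(void)
{
        printf("%d %d\n", parse_buffer("PCC"), parse_buffer("X"));
        return 0;
}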
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 85f69cdeae10..b4389441efbb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
39 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; 39 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
40 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 40 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
41 c->cpuid_level = cpuid_eax(0); 41 c->cpuid_level = cpuid_eax(0);
42 get_cpu_cap(c);
42 } 43 }
43 } 44 }
44 45
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d8fe8c..5e975298fa81 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
530 err = -ENOMEM; 530 err = -ENOMEM;
531 goto out; 531 goto out;
532 } 532 }
533 if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { 533 if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
534 kfree(b); 534 kfree(b);
535 err = -ENOMEM; 535 err = -ENOMEM;
536 goto out; 536 goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
543#ifndef CONFIG_SMP 543#ifndef CONFIG_SMP
544 cpumask_setall(b->cpus); 544 cpumask_setall(b->cpus);
545#else 545#else
546 cpumask_copy(b->cpus, c->llc_shared_map); 546 cpumask_set_cpu(cpu, b->cpus);
547#endif 547#endif
548 548
549 per_cpu(threshold_banks, cpu)[bank] = b; 549 per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26d4fea..d9368eeda309 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
202 202
203#ifdef CONFIG_SYSFS 203#ifdef CONFIG_SYSFS
204/* Add/Remove thermal_throttle interface for CPU device: */ 204/* Add/Remove thermal_throttle interface for CPU device: */
205static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) 205static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
206 unsigned int cpu)
206{ 207{
207 int err; 208 int err;
208 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 209 struct cpuinfo_x86 *c = &cpu_data(cpu);
209 210
210 err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); 211 err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
211 if (err) 212 if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
251 case CPU_UP_PREPARE: 252 case CPU_UP_PREPARE:
252 case CPU_UP_PREPARE_FROZEN: 253 case CPU_UP_PREPARE_FROZEN:
253 mutex_lock(&therm_cpu_lock); 254 mutex_lock(&therm_cpu_lock);
254 err = thermal_throttle_add_dev(sys_dev); 255 err = thermal_throttle_add_dev(sys_dev, cpu);
255 mutex_unlock(&therm_cpu_lock); 256 mutex_unlock(&therm_cpu_lock);
256 WARN_ON(err); 257 WARN_ON(err);
257 break; 258 break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
287#endif 288#endif
288 /* connect live CPUs to sysfs */ 289 /* connect live CPUs to sysfs */
289 for_each_online_cpu(cpu) { 290 for_each_online_cpu(cpu) {
290 err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); 291 err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
291 WARN_ON(err); 292 WARN_ON(err);
292 } 293 }
293#ifdef CONFIG_HOTPLUG_CPU 294#ifdef CONFIG_HOTPLUG_CPU
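The change above passes the hotplugged CPU into thermal_throttle_add_dev() instead of deriving it from smp_processor_id(), which names the CPU running the callback rather than the CPU being brought up. A toy model of the difference; the data and helper names are illustrative:

#include <stdio.h>

struct cpu_info { int model; };

static struct cpu_info cpu_data[4] = { {1}, {2}, {3}, {4} };
static int current_cpu;                 /* the CPU running the callback */

/* Buggy pattern: looks at the caller's CPU, not the CPU being brought up. */
static int add_dev_buggy(void)             { return cpu_data[current_cpu].model; }

/* Fixed pattern: the hotplugged CPU is an explicit parameter. */
static int add_dev_fixed(unsigned int cpu) { return cpu_data[cpu].model; }

int main(void)
{
        unsigned int hotplugged = 3;    /* CPU 3 coming online, handled on CPU 0 */

        printf("buggy sees model %d, fixed sees model %d\n",
               add_dev_buggy(), add_dev_fixed(hotplugged));
        return 0;
}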
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..03a5b0385ad6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@ struct cpu_hw_events {
102 */ 102 */
103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 int enabled; 106 int enabled;
106 107
107 int n_events; 108 int n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
1010 x86_perf_event_set_period(event); 1011 x86_perf_event_set_period(event);
1011 cpuc->events[idx] = event; 1012 cpuc->events[idx] = event;
1012 __set_bit(idx, cpuc->active_mask); 1013 __set_bit(idx, cpuc->active_mask);
1014 __set_bit(idx, cpuc->running);
1013 x86_pmu.enable(event); 1015 x86_pmu.enable(event);
1014 perf_event_update_userpage(event); 1016 perf_event_update_userpage(event);
1015 1017
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1141 cpuc = &__get_cpu_var(cpu_hw_events); 1143 cpuc = &__get_cpu_var(cpu_hw_events);
1142 1144
1143 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1145 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1144 if (!test_bit(idx, cpuc->active_mask)) 1146 if (!test_bit(idx, cpuc->active_mask)) {
1147 /*
1148 * Though we deactivated the counter, some cpus
1149 * might still deliver spurious interrupts that are
1150 * already in flight. Catch them:
1151 */
1152 if (__test_and_clear_bit(idx, cpuc->running))
1153 handled++;
1145 continue; 1154 continue;
1155 }
1146 1156
1147 event = cpuc->events[idx]; 1157 event = cpuc->events[idx];
1148 hwc = &event->hw; 1158 hwc = &event->hw;
@@ -1154,7 +1164,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1154 /* 1164 /*
1155 * event overflow 1165 * event overflow
1156 */ 1166 */
1157 handled = 1; 1167 handled++;
1158 data.period = event->hw.last_period; 1168 data.period = event->hw.last_period;
1159 1169
1160 if (!x86_perf_event_set_period(event)) 1170 if (!x86_perf_event_set_period(event))
@@ -1200,12 +1210,20 @@ void perf_events_lapic_init(void)
1200 apic_write(APIC_LVTPC, APIC_DM_NMI); 1210 apic_write(APIC_LVTPC, APIC_DM_NMI);
1201} 1211}
1202 1212
1213struct pmu_nmi_state {
1214 unsigned int marked;
1215 int handled;
1216};
1217
1218static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1219
1203static int __kprobes 1220static int __kprobes
1204perf_event_nmi_handler(struct notifier_block *self, 1221perf_event_nmi_handler(struct notifier_block *self,
1205 unsigned long cmd, void *__args) 1222 unsigned long cmd, void *__args)
1206{ 1223{
1207 struct die_args *args = __args; 1224 struct die_args *args = __args;
1208 struct pt_regs *regs; 1225 unsigned int this_nmi;
1226 int handled;
1209 1227
1210 if (!atomic_read(&active_events)) 1228 if (!atomic_read(&active_events))
1211 return NOTIFY_DONE; 1229 return NOTIFY_DONE;
@@ -1214,22 +1232,47 @@ perf_event_nmi_handler(struct notifier_block *self,
1214 case DIE_NMI: 1232 case DIE_NMI:
1215 case DIE_NMI_IPI: 1233 case DIE_NMI_IPI:
1216 break; 1234 break;
1217 1235 case DIE_NMIUNKNOWN:
1236 this_nmi = percpu_read(irq_stat.__nmi_count);
1237 if (this_nmi != __get_cpu_var(pmu_nmi).marked)
1238 /* let the kernel handle the unknown nmi */
1239 return NOTIFY_DONE;
1240 /*
1241 * This one is a PMU back-to-back nmi. Two events
1242 * trigger 'simultaneously' raising two back-to-back
1243 * NMIs. If the first NMI handles both, the latter
1244 * will be empty and daze the CPU. So, we drop it to
1245 * avoid false-positive 'unknown nmi' messages.
1246 */
1247 return NOTIFY_STOP;
1218 default: 1248 default:
1219 return NOTIFY_DONE; 1249 return NOTIFY_DONE;
1220 } 1250 }
1221 1251
1222 regs = args->regs;
1223
1224 apic_write(APIC_LVTPC, APIC_DM_NMI); 1252 apic_write(APIC_LVTPC, APIC_DM_NMI);
1225 /* 1253
1226 * Can't rely on the handled return value to say it was our NMI, two 1254 handled = x86_pmu.handle_irq(args->regs);
1227 * events could trigger 'simultaneously' raising two back-to-back NMIs. 1255 if (!handled)
1228 * 1256 return NOTIFY_DONE;
1229 * If the first NMI handles both, the latter will be empty and daze 1257
1230 * the CPU. 1258 this_nmi = percpu_read(irq_stat.__nmi_count);
1231 */ 1259 if ((handled > 1) ||
1232 x86_pmu.handle_irq(regs); 1260 /* the next nmi could be a back-to-back nmi */
1261 ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
1262 (__get_cpu_var(pmu_nmi).handled > 1))) {
1263 /*
1264 * We could have two subsequent back-to-back nmis: The
1265 * first handles more than one counter, the 2nd
1266 * handles only one counter and the 3rd handles no
1267 * counter.
1268 *
1269 * This is the 2nd nmi because the previous was
1270 * handling more than one counter. We will mark the
1271 * next (3rd) and then drop it if unhandled.
1272 */
1273 __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
1274 __get_cpu_var(pmu_nmi).handled = handled;
1275 }
1233 1276
1234 return NOTIFY_STOP; 1277 return NOTIFY_STOP;
1235} 1278}
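A small simulation of the back-to-back NMI bookkeeping introduced above: when one PMI services more than one counter, the next NMI is pre-marked so that an otherwise empty follow-up is swallowed instead of being reported as unknown. This is a plain C model, not the kernel code:

#include <stdio.h>

static unsigned int nmi_count;          /* stands in for irq_stat.__nmi_count */
static unsigned int marked;             /* pmu_nmi.marked */
static int          marked_handled;     /* pmu_nmi.handled */

static void pmu_nmi(int handled)
{
        nmi_count++;
        if (handled > 1 ||
            (marked == nmi_count && marked_handled > 1)) {
                marked = nmi_count + 1; /* the next NMI may be the empty twin */
                marked_handled = handled;
        }
        printf("NMI %u handled %d counter(s)\n", nmi_count, handled);
}

static void unknown_nmi(void)
{
        nmi_count++;
        if (nmi_count == marked)
                printf("NMI %u: empty back-to-back PMI, dropped\n", nmi_count);
        else
                printf("NMI %u: genuinely unknown, let the kernel warn\n", nmi_count);
}

int main(void)
{
        pmu_nmi(2);      /* both counters overflowed, serviced by one NMI */
        unknown_nmi();   /* its twin arrives with nothing to do and is dropped */
        unknown_nmi();   /* a later stray NMI is still reported */
        return 0;
}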
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
712 struct perf_sample_data data; 712 struct perf_sample_data data;
713 struct cpu_hw_events *cpuc; 713 struct cpu_hw_events *cpuc;
714 int bit, loops; 714 int bit, loops;
715 u64 ack, status; 715 u64 status;
716 int handled = 0;
716 717
717 perf_sample_data_init(&data, 0); 718 perf_sample_data_init(&data, 0);
718 719
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
728 729
729 loops = 0; 730 loops = 0;
730again: 731again:
732 intel_pmu_ack_status(status);
731 if (++loops > 100) { 733 if (++loops > 100) {
732 WARN_ONCE(1, "perfevents: irq loop stuck!\n"); 734 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
733 perf_event_print_debug(); 735 perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
736 } 738 }
737 739
738 inc_irq_stat(apic_perf_irqs); 740 inc_irq_stat(apic_perf_irqs);
739 ack = status;
740 741
741 intel_pmu_lbr_read(); 742 intel_pmu_lbr_read();
742 743
743 /* 744 /*
744 * PEBS overflow sets bit 62 in the global status register 745 * PEBS overflow sets bit 62 in the global status register
745 */ 746 */
746 if (__test_and_clear_bit(62, (unsigned long *)&status)) 747 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
748 handled++;
747 x86_pmu.drain_pebs(regs); 749 x86_pmu.drain_pebs(regs);
750 }
748 751
749 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 752 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
750 struct perf_event *event = cpuc->events[bit]; 753 struct perf_event *event = cpuc->events[bit];
751 754
755 handled++;
756
752 if (!test_bit(bit, cpuc->active_mask)) 757 if (!test_bit(bit, cpuc->active_mask))
753 continue; 758 continue;
754 759
@@ -761,8 +766,6 @@ again:
761 x86_pmu_stop(event); 766 x86_pmu_stop(event);
762 } 767 }
763 768
764 intel_pmu_ack_status(ack);
765
766 /* 769 /*
767 * Repeat if there is more work to be done: 770 * Repeat if there is more work to be done:
768 */ 771 */
@@ -772,7 +775,7 @@ again:
772 775
773done: 776done:
774 intel_pmu_enable_all(0); 777 intel_pmu_enable_all(0);
775 return 1; 778 return handled;
776} 779}
777 780
778static struct event_constraint * 781static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58b..249015173992 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -660,8 +660,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
660 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 660 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
661 int overflow; 661 int overflow;
662 662
663 if (!test_bit(idx, cpuc->active_mask)) 663 if (!test_bit(idx, cpuc->active_mask)) {
664 /* catch in-flight IRQs */
665 if (__test_and_clear_bit(idx, cpuc->running))
666 handled++;
664 continue; 667 continue;
668 }
665 669
666 event = cpuc->events[idx]; 670 event = cpuc->events[idx];
667 hwc = &event->hw; 671 hwc = &event->hw;
@@ -692,7 +696,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
692 inc_irq_stat(apic_perf_irqs); 696 inc_irq_stat(apic_perf_irqs);
693 } 697 }
694 698
695 return handled > 0; 699 return handled;
696} 700}
697 701
698/* 702/*
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 34b4dad6f0b8..d49079515122 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
34 { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
34 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
35 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
36 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index e5cc7e82e60d..ebdb85cf2686 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,7 +18,6 @@
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/iommu.h> 19#include <asm/iommu.h>
20#include <asm/gart.h> 20#include <asm/gart.h>
21#include <asm/hpet.h>
22 21
23static void __init fix_hypertransport_config(int num, int slot, int func) 22static void __init fix_hypertransport_config(int num, int slot, int func)
24{ 23{
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
192} 191}
193#endif 192#endif
194 193
195/*
196 * Force the read back of the CMP register in hpet_next_event()
197 * to work around the problem that the CMP register write seems to be
198 * delayed. See hpet_next_event() for details.
199 *
200 * We do this on all SMBUS incarnations for now until we have more
201 * information about the affected chipsets.
202 */
203static void __init ati_hpet_bugs(int num, int slot, int func)
204{
205#ifdef CONFIG_HPET_TIMER
206 hpet_readback_cmp = 1;
207#endif
208}
209
210#define QFLAG_APPLY_ONCE 0x1 194#define QFLAG_APPLY_ONCE 0x1
211#define QFLAG_APPLIED 0x2 195#define QFLAG_APPLIED 0x2
212#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) 196#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = {
236 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, 220 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
237 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, 221 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
238 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 222 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
239 { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
240 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
241 {} 223 {}
242}; 224};
243 225
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 351f9c0fea1f..7494999141b3 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -35,7 +35,6 @@
35unsigned long hpet_address; 35unsigned long hpet_address;
36u8 hpet_blockid; /* OS timer block num */ 36u8 hpet_blockid; /* OS timer block num */
37u8 hpet_msi_disable; 37u8 hpet_msi_disable;
38u8 hpet_readback_cmp;
39 38
40#ifdef CONFIG_PCI_MSI 39#ifdef CONFIG_PCI_MSI
41static unsigned long hpet_num_timers; 40static unsigned long hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
395 * at that point and we would wait for the next hpet interrupt 394 * at that point and we would wait for the next hpet interrupt
396 * forever. We found out that reading the CMP register back 395 * forever. We found out that reading the CMP register back
397 * forces the transfer so we can rely on the comparison with 396 * forces the transfer so we can rely on the comparison with
398 * the counter register below. 397 * the counter register below. If the read back from the
398 * compare register does not match the value we programmed
399 * then we might have a real hardware problem. We can not do
400 * much about it here, but at least alert the user/admin with
401 * a prominent warning.
399 * 402 *
400 * That works fine on those ATI chipsets, but on newer Intel 403 * An erratum on some chipsets (ICH9,..), results in
401 * chipsets (ICH9...) this triggers due to an erratum: Reading 404 * comparator read immediately following a write returning old
402 * the comparator immediately following a write is returning 405 * value. Workaround for this is to read this value second
403 * the old value. 406 * time, when first read returns old value.
404 * 407 *
405 * We restrict the read back to the affected ATI chipsets (set 408 * In fact the write to the comparator register is delayed up
 406 * by quirks) and also run it with hpet=verbose for debugging 409 * to two HPET cycles, so the workaround of restricting the
 407 * purposes. 410 * readback to the known-broken ATI chipsets failed
 411 * miserably. So we give up on that optimization altogether
412 * and penalize all HPET incarnations unconditionally.
408 */ 413 */
409 if (hpet_readback_cmp || hpet_verbose) { 414 if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
410 u32 cmp = hpet_readl(HPET_Tn_CMP(timer)); 415 if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
411
412 if (cmp != cnt)
413 printk_once(KERN_WARNING 416 printk_once(KERN_WARNING
414 "hpet: compare register read back failed.\n"); 417 "hpet: compare register read back failed.\n");
415 } 418 }
416 419
417 return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; 420 return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
@@ -503,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
503{ 506{
504 unsigned int irq; 507 unsigned int irq;
505 508
506 irq = create_irq(); 509 irq = create_irq_nr(0, -1);
507 if (!irq) 510 if (!irq)
508 return -EINVAL; 511 return -EINVAL;
509 512
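The rewritten comment and check above make the comparator read-back unconditional and only warn when a second read still disagrees. A sketch that models the posted-write lag and the double read; hpet_read/hpet_write below are stand-ins for the mmio accessors:

#include <stdio.h>
#include <stdint.h>

static uint32_t cmp_reg;
static int      stale = 1;              /* model: first read-back returns old data */

static void hpet_write(uint32_t v) { cmp_reg = v; }

static uint32_t hpet_read(void)
{
        if (stale) {                    /* erratum: read right after write lags */
                stale = 0;
                return cmp_reg - 1;
        }
        return cmp_reg;
}

static void program_comparator(uint32_t cnt)
{
        hpet_write(cnt);
        if (hpet_read() != cnt) {       /* read-back forces the posted write */
                if (hpet_read() != cnt) /* still wrong: a real hardware problem */
                        printf("hpet: compare register read back failed.\n");
                else
                        printf("hpet: first read stale, second read matches\n");
        }
}

int main(void)
{
        program_comparator(1000);
        return 0;
}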
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a474ec37c32f..ff15c9dcc25d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
206int arch_bp_generic_fields(int x86_len, int x86_type, 206int arch_bp_generic_fields(int x86_len, int x86_type,
207 int *gen_len, int *gen_type) 207 int *gen_len, int *gen_type)
208{ 208{
209 /* Len */ 209 /* Type */
210 switch (x86_len) { 210 switch (x86_type) {
211 case X86_BREAKPOINT_LEN_X: 211 case X86_BREAKPOINT_EXECUTE:
212 if (x86_len != X86_BREAKPOINT_LEN_X)
213 return -EINVAL;
214
215 *gen_type = HW_BREAKPOINT_X;
212 *gen_len = sizeof(long); 216 *gen_len = sizeof(long);
217 return 0;
218 case X86_BREAKPOINT_WRITE:
219 *gen_type = HW_BREAKPOINT_W;
213 break; 220 break;
221 case X86_BREAKPOINT_RW:
222 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
223 break;
224 default:
225 return -EINVAL;
226 }
227
228 /* Len */
229 switch (x86_len) {
214 case X86_BREAKPOINT_LEN_1: 230 case X86_BREAKPOINT_LEN_1:
215 *gen_len = HW_BREAKPOINT_LEN_1; 231 *gen_len = HW_BREAKPOINT_LEN_1;
216 break; 232 break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
229 return -EINVAL; 245 return -EINVAL;
230 } 246 }
231 247
232 /* Type */
233 switch (x86_type) {
234 case X86_BREAKPOINT_EXECUTE:
235 *gen_type = HW_BREAKPOINT_X;
236 break;
237 case X86_BREAKPOINT_WRITE:
238 *gen_type = HW_BREAKPOINT_W;
239 break;
240 case X86_BREAKPOINT_RW:
241 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
242 break;
243 default:
244 return -EINVAL;
245 }
246
247 return 0; 248 return 0;
248} 249}
249 250
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
316 ret = -EINVAL; 317 ret = -EINVAL;
317 318
318 switch (info->len) { 319 switch (info->len) {
319 case X86_BREAKPOINT_LEN_X:
320 align = sizeof(long) -1;
321 break;
322 case X86_BREAKPOINT_LEN_1: 320 case X86_BREAKPOINT_LEN_1:
323 align = 0; 321 align = 0;
324 break; 322 break;
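Reordering the checks above matters because X86_BREAKPOINT_LEN_X now shares the 0x40 encoding with LEN_1, so the type must be decoded first and LEN_X accepted only for execute breakpoints. A standalone sketch of that validation order; the constants mirror the header, the function is only a model:

#include <stdio.h>

#define BP_LEN_X   0x40                 /* same encoding as BP_LEN_1 */
#define BP_LEN_1   0x40
#define BP_LEN_2   0x44
#define BP_LEN_4   0x4c

enum bp_type { BP_EXECUTE, BP_WRITE, BP_RW };

static int decode_bp(enum bp_type type, int len)
{
        if (type == BP_EXECUTE)                 /* decode the type first */
                return len == BP_LEN_X ? 0 : -1;

        switch (len) {                          /* data breakpoints */
        case BP_LEN_1:
        case BP_LEN_2:
        case BP_LEN_4:
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               decode_bp(BP_EXECUTE, BP_LEN_X),         /* valid */
               decode_bp(BP_WRITE, BP_LEN_2),           /* valid */
               decode_bp(BP_WRITE, 0x48));              /* rejected */
        return 0;
}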
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e0bc186d7501..1c355c550960 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -239,11 +239,10 @@ int module_finalize(const Elf_Ehdr *hdr,
239 apply_paravirt(pseg, pseg + para->sh_size); 239 apply_paravirt(pseg, pseg + para->sh_size);
240 } 240 }
241 241
242 return module_bug_finalize(hdr, sechdrs, me); 242 return 0;
243} 243}
244 244
245void module_arch_cleanup(struct module *mod) 245void module_arch_cleanup(struct module *mod)
246{ 246{
247 alternatives_smp_module_del(mod); 247 alternatives_smp_module_del(mod);
248 module_bug_cleanup(mod);
249} 248}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495b3673..e2a595257390 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
45 /* Copy kernel address range */ 45 /* Copy kernel address range */
46 clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, 46 clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
47 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 47 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
48 min_t(unsigned long, KERNEL_PGD_PTRS, 48 KERNEL_PGD_PTRS);
49 KERNEL_PGD_BOUNDARY));
50 49
51 /* Initialize low mappings */ 50 /* Initialize low mappings */
52 clone_pgd_range(trampoline_pg_dir, 51 clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d632934cb638..26a863a9c2a8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
655 655
656 local_irq_save(flags); 656 local_irq_save(flags);
657 657
658 get_cpu_var(cyc2ns_offset) = 0; 658 __get_cpu_var(cyc2ns_offset) = 0;
659 offset = cyc2ns_suspend - sched_clock(); 659 offset = cyc2ns_suspend - sched_clock();
660 660
661 for_each_possible_cpu(cpu) 661 for_each_possible_cpu(cpu)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b38bd8b92aa6..66ca98aafdd6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1870 struct x86_emulate_ops *ops) 1870 struct x86_emulate_ops *ops)
1871{ 1871{
1872 struct decode_cache *c = &ctxt->decode; 1872 struct decode_cache *c = &ctxt->decode;
1873 u64 old = c->dst.orig_val; 1873 u64 old = c->dst.orig_val64;
1874 1874
1875 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || 1875 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1876 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { 1876 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1877
1878 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); 1877 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1879 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); 1878 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1880 ctxt->eflags &= ~EFLG_ZF; 1879 ctxt->eflags &= ~EFLG_ZF;
1881 } else { 1880 } else {
1882 c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | 1881 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1883 (u32) c->regs[VCPU_REGS_RBX]; 1882 (u32) c->regs[VCPU_REGS_RBX];
1884 1883
1885 ctxt->eflags |= EFLG_ZF; 1884 ctxt->eflags |= EFLG_ZF;
1886 } 1885 }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2616 c->src.valptr, c->src.bytes); 2615 c->src.valptr, c->src.bytes);
2617 if (rc != X86EMUL_CONTINUE) 2616 if (rc != X86EMUL_CONTINUE)
2618 goto done; 2617 goto done;
2619 c->src.orig_val = c->src.val; 2618 c->src.orig_val64 = c->src.val64;
2620 } 2619 }
2621 2620
2622 if (c->src2.type == OP_MEM) { 2621 if (c->src2.type == OP_MEM) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8d10c063d7f2..4b7b73ce2098 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
64 if (!found) 64 if (!found)
65 found = s->kvm->bsp_vcpu; 65 found = s->kvm->bsp_vcpu;
66 66
67 if (!found)
68 return;
69
67 kvm_vcpu_kick(found); 70 kvm_vcpu_kick(found);
68 } 71 }
69} 72}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ffed06871c5c..63c314502993 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
43 u8 irr; /* interrupt request register */ 43 u8 irr; /* interrupt request register */
44 u8 imr; /* interrupt mask register */ 44 u8 imr; /* interrupt mask register */
45 u8 isr; /* interrupt service register */ 45 u8 isr; /* interrupt service register */
46 u8 isr_ack; /* interrupt ack detection */
47 u8 priority_add; /* highest irq priority */ 46 u8 priority_add; /* highest irq priority */
48 u8 irq_base; 47 u8 irq_base;
49 u8 read_reg_select; 48 u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
56 u8 init4; /* true if 4 byte init */ 55 u8 init4; /* true if 4 byte init */
57 u8 elcr; /* PIIX edge/trigger selection */ 56 u8 elcr; /* PIIX edge/trigger selection */
58 u8 elcr_mask; 57 u8 elcr_mask;
58 u8 isr_ack; /* interrupt ack detection */
59 struct kvm_pic *pics_state; 59 struct kvm_pic *pics_state;
60}; 60};
61 61
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9257510b4836..9d5f55848455 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
324} 324}
325 325
326/* 326/*
327 * For a single GDT entry which changes, we do the lazy thing: alter our GDT, 327 * For a single GDT entry which changes, we simply change our copy and
328 * then tell the Host to reload the entire thing. This operation is so rare 328 * then tell the host about it.
329 * that this naive implementation is reasonable.
330 */ 329 */
331static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, 330static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
332 const void *desc, int type) 331 const void *desc, int type)
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
338} 337}
339 338
340/* 339/*
341 * OK, I lied. There are three "thread local storage" GDT entries which change 340 * There are three "thread local storage" GDT entries which change
342 * on every context switch (these three entries are how glibc implements 341 * on every context switch (these three entries are how glibc implements
343 * __thread variables). So we have a hypercall specifically for this case. 342 * __thread variables). As an optimization, we have a hypercall
343 * specifically for this case.
344 *
345 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
346 * which took a range of entries?
344 */ 347 */
345static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) 348static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
346{ 349{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236ce76ba..72fc70cf6184 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
74/* 74/*
75 * Map 'pfn' using fixed map 'type' and protections 'prot' 75 * Map 'pfn' using fixed map 'type' and protections 'prot'
76 */ 76 */
77void * 77void __iomem *
78iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 78iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
79{ 79{
80 /* 80 /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
86 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) 86 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
87 prot = PAGE_KERNEL_UC_MINUS; 87 prot = PAGE_KERNEL_UC_MINUS;
88 88
89 return kmap_atomic_prot_pfn(pfn, type, prot); 89 return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
90} 90}
91EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); 91EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
92 92
93void 93void
94iounmap_atomic(void *kvaddr, enum km_type type) 94iounmap_atomic(void __iomem *kvaddr, enum km_type type)
95{ 95{
96 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 96 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
97 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); 97 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..f1575c9a2572 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
568 int error; 568 int error;
569 569
570 error = sysdev_class_register(&oprofile_sysclass); 570 error = sysdev_class_register(&oprofile_sysclass);
571 if (!error) 571 if (error)
572 error = sysdev_register(&device_oprofile); 572 return error;
573
574 error = sysdev_register(&device_oprofile);
575 if (error)
576 sysdev_class_unregister(&oprofile_sysclass);
577
573 return error; 578 return error;
574} 579}
575 580
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
580} 585}
581 586
582#else 587#else
583#define init_sysfs() do { } while (0) 588
584#define exit_sysfs() do { } while (0) 589static inline int init_sysfs(void) { return 0; }
590static inline void exit_sysfs(void) { }
591
585#endif /* CONFIG_PM */ 592#endif /* CONFIG_PM */
586 593
587static int __init p4_init(char **cpu_type) 594static int __init p4_init(char **cpu_type)
@@ -664,7 +671,10 @@ static int __init ppro_init(char **cpu_type)
664 case 14: 671 case 14:
665 *cpu_type = "i386/core"; 672 *cpu_type = "i386/core";
666 break; 673 break;
667 case 15: case 23: 674 case 0x0f:
675 case 0x16:
676 case 0x17:
677 case 0x1d:
668 *cpu_type = "i386/core_2"; 678 *cpu_type = "i386/core_2";
669 break; 679 break;
670 case 0x1a: 680 case 0x1a:
@@ -695,6 +705,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
695 char *cpu_type = NULL; 705 char *cpu_type = NULL;
696 int ret = 0; 706 int ret = 0;
697 707
708 using_nmi = 0;
709
698 if (!cpu_has_apic) 710 if (!cpu_has_apic)
699 return -ENODEV; 711 return -ENODEV;
700 712
@@ -774,7 +786,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
774 786
775 mux_init(ops); 787 mux_init(ops);
776 788
777 init_sysfs(); 789 ret = init_sysfs();
790 if (ret)
791 return ret;
792
778 using_nmi = 1; 793 using_nmi = 1;
779 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 794 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
780 return 0; 795 return 0;
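The init_sysfs() change above propagates the error and unregisters the class when registering the device fails, instead of silently ignoring both. A sketch of that register-then-unwind pattern with stub functions:

#include <stdio.h>

static int class_registered;

static int  register_class(void)      { class_registered = 1; return 0; }
static void unregister_class(void)    { class_registered = 0; }
static int  register_device(int fail) { return fail ? -1 : 0; }

static int init_sysfs(int make_device_fail)
{
        int error = register_class();

        if (error)
                return error;

        error = register_device(make_device_fail);
        if (error)
                unregister_class();     /* roll back the first step on failure */

        return error;
}

int main(void)
{
        int ok = init_sysfs(0);
        printf("ok=%d class_registered=%d\n", ok, class_registered);

        int err = init_sysfs(1);
        printf("err=%d class_registered=%d\n", err, class_registered);
        return 0;
}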
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 1a5353a753fc..b2bb5aa3b054 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void)
489__init void xen_hvm_init_time_ops(void) 489__init void xen_hvm_init_time_ops(void)
490{ 490{
491 /* vector callback is needed otherwise we cannot receive interrupts 491 /* vector callback is needed otherwise we cannot receive interrupts
492 * on cpu > 0 */ 492 * on cpu > 0 and at this point we don't know how many cpus are
493 if (!xen_have_vector_callback && num_present_cpus() > 1) 493 * available */
494 if (!xen_have_vector_callback)
494 return; 495 return;
495 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 496 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
496 printk(KERN_INFO "Xen doesn't support pvclock on HVM," 497 printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a6809645d212..2fef1ef931a0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
966 966
967 /* Currently we do not support hierarchy deeper than two level (0,1) */ 967 /* Currently we do not support hierarchy deeper than two level (0,1) */
968 if (parent != cgroup->top_cgroup) 968 if (parent != cgroup->top_cgroup)
969 return ERR_PTR(-EINVAL); 969 return ERR_PTR(-EPERM);
970 970
971 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); 971 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
972 if (!blkcg) 972 if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7e63cc..32a1c123dfb3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1198 int el_ret; 1198 int el_ret;
1199 unsigned int bytes = bio->bi_size; 1199 unsigned int bytes = bio->bi_size;
1200 const unsigned short prio = bio_prio(bio); 1200 const unsigned short prio = bio_prio(bio);
1201 const bool sync = (bio->bi_rw & REQ_SYNC); 1201 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1202 const bool unplug = (bio->bi_rw & REQ_UNPLUG); 1202 const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
1203 const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1203 const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
1204 int rw_flags; 1204 int rw_flags;
1205 1205
1206 if ((bio->bi_rw & REQ_HARDBARRIER) && 1206 if ((bio->bi_rw & REQ_HARDBARRIER) &&
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d7593f7f1..ade0a08c9099 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
307 return PTR_ERR(bio); 307 return PTR_ERR(bio);
308 308
309 if (rq_data_dir(rq) == WRITE) 309 if (rq_data_dir(rq) == WRITE)
310 bio->bi_rw |= (1 << REQ_WRITE); 310 bio->bi_rw |= REQ_WRITE;
311 311
312 if (do_copy) 312 if (do_copy)
313 rq->cmd_flags |= REQ_COPY_USER; 313 rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd4249671..eafc94f68d79 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
362 return 0; 362 return 0;
363 363
364 /* 364 /*
365 * Don't merge file system requests and discard requests
366 */
367 if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
368 return 0;
369
370 /*
371 * Don't merge discard requests and secure discard requests
372 */
373 if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
374 return 0;
375
376 /*
365 * not contiguous 377 * not contiguous
366 */ 378 */
367 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) 379 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
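The two new checks above refuse to merge requests whose REQ_DISCARD or REQ_SECURE flags disagree. A minimal sketch of the flag-agreement test; the flag values below are illustrative, not the kernel's:

#include <stdio.h>

#define REQ_DISCARD  (1u << 0)
#define REQ_SECURE   (1u << 1)

static int flags_allow_merge(unsigned int a, unsigned int b)
{
        if ((a & REQ_DISCARD) != (b & REQ_DISCARD))
                return 0;               /* file system I/O vs. discard: no merge */
        if ((a & REQ_SECURE) != (b & REQ_SECURE))
                return 0;               /* discard vs. secure discard: no merge */
        return 1;
}

int main(void)
{
        printf("%d %d %d\n",
               flags_allow_merge(0, REQ_DISCARD),                   /* 0 */
               flags_allow_merge(REQ_DISCARD, REQ_DISCARD),         /* 1 */
               flags_allow_merge(REQ_DISCARD,
                                 REQ_DISCARD | REQ_SECURE));        /* 0 */
        return 0;
}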
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18078f5..0749b89c6885 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
511 kobject_uevent(&q->kobj, KOBJ_REMOVE); 511 kobject_uevent(&q->kobj, KOBJ_REMOVE);
512 kobject_del(&q->kobj); 512 kobject_del(&q->kobj);
513 blk_trace_remove_sysfs(disk_to_dev(disk)); 513 blk_trace_remove_sysfs(disk_to_dev(disk));
514 kobject_put(&dev->kobj);
514 return ret; 515 return ret;
515 } 516 }
516 517
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87141e4..d6b911ac002c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
142 142
143static inline int blk_cpu_to_group(int cpu) 143static inline int blk_cpu_to_group(int cpu)
144{ 144{
145 int group = NR_CPUS;
145#ifdef CONFIG_SCHED_MC 146#ifdef CONFIG_SCHED_MC
146 const struct cpumask *mask = cpu_coregroup_mask(cpu); 147 const struct cpumask *mask = cpu_coregroup_mask(cpu);
147 return cpumask_first(mask); 148 group = cpumask_first(mask);
148#elif defined(CONFIG_SCHED_SMT) 149#elif defined(CONFIG_SCHED_SMT)
149 return cpumask_first(topology_thread_cpumask(cpu)); 150 group = cpumask_first(topology_thread_cpumask(cpu));
150#else 151#else
151 return cpu; 152 return cpu;
152#endif 153#endif
154 if (likely(group < NR_CPUS))
155 return group;
156 return cpu;
153} 157}
154 158
155/* 159/*
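The guarded version above falls back to the cpu itself when cpumask_first() does not yield a valid CPU number, instead of returning an out-of-range group. A small model of the same fallback using a bitmask integer in place of a cpumask:

#include <stdio.h>

#define NR_CPUS 8

/* Return the first set bit, or NR_CPUS when the mask is empty. */
static int first_set(unsigned int mask)
{
        for (int i = 0; i < NR_CPUS; i++)
                if (mask & (1u << i))
                        return i;
        return NR_CPUS;
}

static int cpu_to_group(int cpu, unsigned int coregroup_mask)
{
        int group = first_set(coregroup_mask);

        if (group < NR_CPUS)
                return group;           /* a valid sibling was found */
        return cpu;                     /* mask not populated yet: use the cpu */
}

int main(void)
{
        printf("%d %d\n", cpu_to_group(3, 0x0c), cpu_to_group(3, 0x00));
        return 0;
}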
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..9eba291eb6fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
30static int cfq_slice_async = HZ / 25; 30static int cfq_slice_async = HZ / 25;
31static const int cfq_slice_async_rq = 2; 31static const int cfq_slice_async_rq = 2;
32static int cfq_slice_idle = HZ / 125; 32static int cfq_slice_idle = HZ / 125;
33static int cfq_group_idle = HZ / 125;
33static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ 34static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
34static const int cfq_hist_divisor = 4; 35static const int cfq_hist_divisor = 4;
35 36
@@ -147,6 +148,8 @@ struct cfq_queue {
147 struct cfq_queue *new_cfqq; 148 struct cfq_queue *new_cfqq;
148 struct cfq_group *cfqg; 149 struct cfq_group *cfqg;
149 struct cfq_group *orig_cfqg; 150 struct cfq_group *orig_cfqg;
151 /* Number of sectors dispatched from queue in single dispatch round */
152 unsigned long nr_sectors;
150}; 153};
151 154
152/* 155/*
@@ -198,6 +201,8 @@ struct cfq_group {
198 struct hlist_node cfqd_node; 201 struct hlist_node cfqd_node;
199 atomic_t ref; 202 atomic_t ref;
200#endif 203#endif
204 /* number of requests that are on the dispatch list or inside driver */
205 int dispatched;
201}; 206};
202 207
203/* 208/*
@@ -271,6 +276,7 @@ struct cfq_data {
271 unsigned int cfq_slice[2]; 276 unsigned int cfq_slice[2];
272 unsigned int cfq_slice_async_rq; 277 unsigned int cfq_slice_async_rq;
273 unsigned int cfq_slice_idle; 278 unsigned int cfq_slice_idle;
279 unsigned int cfq_group_idle;
274 unsigned int cfq_latency; 280 unsigned int cfq_latency;
275 unsigned int cfq_group_isolation; 281 unsigned int cfq_group_isolation;
276 282
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
378 &cfqg->service_trees[i][j]: NULL) \ 384 &cfqg->service_trees[i][j]: NULL) \
379 385
380 386
387static inline bool iops_mode(struct cfq_data *cfqd)
388{
389 /*
 390 * If we are not idling on queues and it is an NCQ drive, requests
 391 * execute in parallel and measuring time per queue is not meaningful
 392 * unless we drive shallow queue depths, which itself becomes a
 393 * performance bottleneck. In such cases switch to providing
 394 * fairness in terms of number of IOs instead.
395 */
396 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
397 return true;
398 else
399 return false;
400}
401
381static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) 402static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
382{ 403{
383 if (cfq_class_idle(cfqq)) 404 if (cfq_class_idle(cfqq))
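iops_mode() above switches group accounting from time to number of dispatched requests when idling is off on an NCQ drive. A sketch of how the charge is chosen; the struct and fields below are a simplified stand-in for the CFQ queue state:

#include <stdio.h>

struct queue_stats {
        unsigned int slice_used;        /* disk time consumed, in jiffies */
        unsigned int slice_dispatch;    /* requests dispatched this round */
};

static int iops_mode(int slice_idle, int hw_tag)
{
        return !slice_idle && hw_tag;   /* no idling on an NCQ drive */
}

static unsigned int charge_for(const struct queue_stats *q,
                               int slice_idle, int hw_tag)
{
        if (iops_mode(slice_idle, hw_tag))
                return q->slice_dispatch;   /* fairness in number of IOs */
        return q->slice_used;               /* fairness in disk time */
}

int main(void)
{
        struct queue_stats q = { .slice_used = 8, .slice_dispatch = 32 };

        printf("time mode charge: %u, iops mode charge: %u\n",
               charge_for(&q, 1, 1), charge_for(&q, 0, 1));
        return 0;
}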
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
906 slice_used = cfqq->allocated_slice; 927 slice_used = cfqq->allocated_slice;
907 } 928 }
908 929
909 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
910 return slice_used; 930 return slice_used;
911} 931}
912 932
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
914 struct cfq_queue *cfqq) 934 struct cfq_queue *cfqq)
915{ 935{
916 struct cfq_rb_root *st = &cfqd->grp_service_tree; 936 struct cfq_rb_root *st = &cfqd->grp_service_tree;
917 unsigned int used_sl, charge_sl; 937 unsigned int used_sl, charge;
918 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) 938 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
919 - cfqg->service_tree_idle.count; 939 - cfqg->service_tree_idle.count;
920 940
921 BUG_ON(nr_sync < 0); 941 BUG_ON(nr_sync < 0);
922 used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); 942 used_sl = charge = cfq_cfqq_slice_usage(cfqq);
923 943
924 if (!cfq_cfqq_sync(cfqq) && !nr_sync) 944 if (iops_mode(cfqd))
925 charge_sl = cfqq->allocated_slice; 945 charge = cfqq->slice_dispatch;
946 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
947 charge = cfqq->allocated_slice;
926 948
927 /* Can't update vdisktime while group is on service tree */ 949 /* Can't update vdisktime while group is on service tree */
928 cfq_rb_erase(&cfqg->rb_node, st); 950 cfq_rb_erase(&cfqg->rb_node, st);
929 cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); 951 cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
930 __cfq_group_service_tree_add(st, cfqg); 952 __cfq_group_service_tree_add(st, cfqg);
931 953
932 /* This group is being expired. Save the context */ 954 /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
940 962
941 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 963 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
942 st->min_vdisktime); 964 st->min_vdisktime);
965 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
966 " sect=%u", used_sl, cfqq->slice_dispatch, charge,
967 iops_mode(cfqd), cfqq->nr_sectors);
943 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); 968 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
944 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 969 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
945} 970}
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
994 */ 1019 */
995 atomic_set(&cfqg->ref, 1); 1020 atomic_set(&cfqg->ref, 1);
996 1021
997 /* Add group onto cgroup list */ 1022 /*
998 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1023 * Add group onto cgroup list. It might happen that bdi->dev is
 999 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 1024 * not initialized yet. Initialize this new group without major
1025 * and minor info and this info will be filled in once a new thread
1026 * comes for IO. See code above.
1027 */
1028 if (bdi->dev) {
1029 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1030 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1000 MKDEV(major, minor)); 1031 MKDEV(major, minor));
1032 } else
1033 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1034 0);
1035
1001 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); 1036 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1002 1037
1003 /* Add group on cfqd list */ 1038 /* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
1587 cfqq->allocated_slice = 0; 1622 cfqq->allocated_slice = 0;
1588 cfqq->slice_end = 0; 1623 cfqq->slice_end = 0;
1589 cfqq->slice_dispatch = 0; 1624 cfqq->slice_dispatch = 0;
1625 cfqq->nr_sectors = 0;
1590 1626
1591 cfq_clear_cfqq_wait_request(cfqq); 1627 cfq_clear_cfqq_wait_request(cfqq);
1592 cfq_clear_cfqq_must_dispatch(cfqq); 1628 cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1839 BUG_ON(!service_tree); 1875 BUG_ON(!service_tree);
1840 BUG_ON(!service_tree->count); 1876 BUG_ON(!service_tree->count);
1841 1877
1878 if (!cfqd->cfq_slice_idle)
1879 return false;
1880
1842 /* We never do for idle class queues. */ 1881 /* We never do for idle class queues. */
1843 if (prio == IDLE_WORKLOAD) 1882 if (prio == IDLE_WORKLOAD)
1844 return false; 1883 return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1863{ 1902{
1864 struct cfq_queue *cfqq = cfqd->active_queue; 1903 struct cfq_queue *cfqq = cfqd->active_queue;
1865 struct cfq_io_context *cic; 1904 struct cfq_io_context *cic;
1866 unsigned long sl; 1905 unsigned long sl, group_idle = 0;
1867 1906
1868 /* 1907 /*
1869 * SSD device without seek penalty, disable idling. But only do so 1908 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1879 /* 1918 /*
1880 * idle is disabled, either manually or by past process history 1919 * idle is disabled, either manually or by past process history
1881 */ 1920 */
1882 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) 1921 if (!cfq_should_idle(cfqd, cfqq)) {
1883 return; 1922 /* no queue idling. Check for group idling */
1923 if (cfqd->cfq_group_idle)
1924 group_idle = cfqd->cfq_group_idle;
1925 else
1926 return;
1927 }
1884 1928
1885 /* 1929 /*
1886 * still active requests from this queue, don't idle 1930 * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1907 return; 1951 return;
1908 } 1952 }
1909 1953
1954 /* There are other queues in the group, don't do group idle */
1955 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1956 return;
1957
1910 cfq_mark_cfqq_wait_request(cfqq); 1958 cfq_mark_cfqq_wait_request(cfqq);
1911 1959
1912 sl = cfqd->cfq_slice_idle; 1960 if (group_idle)
1961 sl = cfqd->cfq_group_idle;
1962 else
1963 sl = cfqd->cfq_slice_idle;
1913 1964
1914 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1965 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1915 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); 1966 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1916 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); 1967 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1968 group_idle ? 1 : 0);
1917} 1969}
1918 1970
1919/* 1971/*
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1929 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); 1981 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1930 cfq_remove_request(rq); 1982 cfq_remove_request(rq);
1931 cfqq->dispatched++; 1983 cfqq->dispatched++;
1984 (RQ_CFQG(rq))->dispatched++;
1932 elv_dispatch_sort(q, rq); 1985 elv_dispatch_sort(q, rq);
1933 1986
1934 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; 1987 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1988 cfqq->nr_sectors += blk_rq_sectors(rq);
1935 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), 1989 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1936 rq_data_dir(rq), rq_is_sync(rq)); 1990 rq_data_dir(rq), rq_is_sync(rq));
1937} 1991}
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2198 cfqq = NULL; 2252 cfqq = NULL;
2199 goto keep_queue; 2253 goto keep_queue;
2200 } else 2254 } else
2201 goto expire; 2255 goto check_group_idle;
2202 } 2256 }
2203 2257
2204 /* 2258 /*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2226 * flight or is idling for a new request, allow either of these 2280 * flight or is idling for a new request, allow either of these
2227 * conditions to happen (or time out) before selecting a new queue. 2281 * conditions to happen (or time out) before selecting a new queue.
2228 */ 2282 */
2229 if (timer_pending(&cfqd->idle_slice_timer) || 2283 if (timer_pending(&cfqd->idle_slice_timer)) {
2230 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { 2284 cfqq = NULL;
2285 goto keep_queue;
2286 }
2287
2288 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2289 cfqq = NULL;
2290 goto keep_queue;
2291 }
2292
2293 /*
2294 * If group idle is enabled and there are requests dispatched from
2295 * this group, wait for requests to complete.
2296 */
2297check_group_idle:
2298 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2299 && cfqq->cfqg->dispatched) {
2231 cfqq = NULL; 2300 cfqq = NULL;
2232 goto keep_queue; 2301 goto keep_queue;
2233 } 2302 }
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3375 WARN_ON(!cfqq->dispatched); 3444 WARN_ON(!cfqq->dispatched);
3376 cfqd->rq_in_driver--; 3445 cfqd->rq_in_driver--;
3377 cfqq->dispatched--; 3446 cfqq->dispatched--;
3447 (RQ_CFQG(rq))->dispatched--;
3378 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, 3448 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3379 rq_start_time_ns(rq), rq_io_start_time_ns(rq), 3449 rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3380 rq_data_dir(rq), rq_is_sync(rq)); 3450 rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3404 * the queue. 3474 * the queue.
3405 */ 3475 */
3406 if (cfq_should_wait_busy(cfqd, cfqq)) { 3476 if (cfq_should_wait_busy(cfqd, cfqq)) {
3407 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; 3477 unsigned long extend_sl = cfqd->cfq_slice_idle;
3478 if (!cfqd->cfq_slice_idle)
3479 extend_sl = cfqd->cfq_group_idle;
3480 cfqq->slice_end = jiffies + extend_sl;
3408 cfq_mark_cfqq_wait_busy(cfqq); 3481 cfq_mark_cfqq_wait_busy(cfqq);
3409 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); 3482 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3410 } 3483 }
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
3850 cfqd->cfq_slice[1] = cfq_slice_sync; 3923 cfqd->cfq_slice[1] = cfq_slice_sync;
3851 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 3924 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3852 cfqd->cfq_slice_idle = cfq_slice_idle; 3925 cfqd->cfq_slice_idle = cfq_slice_idle;
3926 cfqd->cfq_group_idle = cfq_group_idle;
3853 cfqd->cfq_latency = 1; 3927 cfqd->cfq_latency = 1;
3854 cfqd->cfq_group_isolation = 0; 3928 cfqd->cfq_group_isolation = 0;
3855 cfqd->hw_tag = -1; 3929 cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3922SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 3996SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3923SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 3997SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3924SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 3998SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3999SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3925SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 4000SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3926SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 4001SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3927SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 4002SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3954STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 4029STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3955 UINT_MAX, 0); 4030 UINT_MAX, 0);
3956STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 4031STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4032STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3957STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 4033STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3958STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 4034STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3959STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 4035STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
3975 CFQ_ATTR(slice_async), 4051 CFQ_ATTR(slice_async),
3976 CFQ_ATTR(slice_async_rq), 4052 CFQ_ATTR(slice_async_rq),
3977 CFQ_ATTR(slice_idle), 4053 CFQ_ATTR(slice_idle),
4054 CFQ_ATTR(group_idle),
3978 CFQ_ATTR(low_latency), 4055 CFQ_ATTR(low_latency),
3979 CFQ_ATTR(group_isolation), 4056 CFQ_ATTR(group_isolation),
3980 __ATTR_NULL 4057 __ATTR_NULL
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
4028 if (!cfq_slice_idle) 4105 if (!cfq_slice_idle)
4029 cfq_slice_idle = 1; 4106 cfq_slice_idle = 1;
4030 4107
4108#ifdef CONFIG_CFQ_GROUP_IOSCHED
4109 if (!cfq_group_idle)
4110 cfq_group_idle = 1;
4111#else
4112 cfq_group_idle = 0;
4113#endif
4031 if (cfq_slab_setup()) 4114 if (cfq_slab_setup())
4032 return -ENOMEM; 4115 return -ENOMEM;
4033 4116
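
The cfq-iosched.c changes above add an iops accounting mode and a group_idle tunable. The charge selection introduced in cfq_group_served() can be read as follows; this is a minimal sketch reusing identifiers from the hunk, and charge_for_queue itself is a hypothetical helper, not a function in cfq-iosched.c:

	/*
	 * Sketch of the charge selection added to cfq_group_served():
	 * in iops mode a group is charged by the number of requests it
	 * dispatched; an async queue with no sync siblings is charged
	 * its full allocated slice; otherwise the time actually used.
	 */
	static unsigned int charge_for_queue(struct cfq_data *cfqd,
					     struct cfq_queue *cfqq, int nr_sync)
	{
		unsigned int used_sl = cfq_cfqq_slice_usage(cfqq);

		if (iops_mode(cfqd))
			return cfqq->slice_dispatch;
		if (!cfq_cfqq_sync(cfqq) && !nr_sync)
			return cfqq->allocated_slice;
		return used_sl;
	}
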
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9554d3..205b09a5bd9e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1009{ 1009{
1010 struct elevator_queue *old_elevator, *e; 1010 struct elevator_queue *old_elevator, *e;
1011 void *data; 1011 void *data;
1012 int err;
1012 1013
1013 /* 1014 /*
1014 * Allocate new elevator 1015 * Allocate new elevator
1015 */ 1016 */
1016 e = elevator_alloc(q, new_e); 1017 e = elevator_alloc(q, new_e);
1017 if (!e) 1018 if (!e)
1018 return 0; 1019 return -ENOMEM;
1019 1020
1020 data = elevator_init_queue(q, e); 1021 data = elevator_init_queue(q, e);
1021 if (!data) { 1022 if (!data) {
1022 kobject_put(&e->kobj); 1023 kobject_put(&e->kobj);
1023 return 0; 1024 return -ENOMEM;
1024 } 1025 }
1025 1026
1026 /* 1027 /*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1043 1044
1044 __elv_unregister_queue(old_elevator); 1045 __elv_unregister_queue(old_elevator);
1045 1046
1046 if (elv_register_queue(q)) 1047 err = elv_register_queue(q);
1048 if (err)
1047 goto fail_register; 1049 goto fail_register;
1048 1050
1049 /* 1051 /*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1056 1058
1057 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); 1059 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1058 1060
1059 return 1; 1061 return 0;
1060 1062
1061fail_register: 1063fail_register:
1062 /* 1064 /*
@@ -1071,17 +1073,19 @@ fail_register:
1071 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 1073 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1072 spin_unlock_irq(q->queue_lock); 1074 spin_unlock_irq(q->queue_lock);
1073 1075
1074 return 0; 1076 return err;
1075} 1077}
1076 1078
1077ssize_t elv_iosched_store(struct request_queue *q, const char *name, 1079/*
1078 size_t count) 1080 * Switch this queue to the given IO scheduler.
1081 */
1082int elevator_change(struct request_queue *q, const char *name)
1079{ 1083{
1080 char elevator_name[ELV_NAME_MAX]; 1084 char elevator_name[ELV_NAME_MAX];
1081 struct elevator_type *e; 1085 struct elevator_type *e;
1082 1086
1083 if (!q->elevator) 1087 if (!q->elevator)
1084 return count; 1088 return -ENXIO;
1085 1089
1086 strlcpy(elevator_name, name, sizeof(elevator_name)); 1090 strlcpy(elevator_name, name, sizeof(elevator_name));
1087 e = elevator_get(strstrip(elevator_name)); 1091 e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1092 1096
1093 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { 1097 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1094 elevator_put(e); 1098 elevator_put(e);
1095 return count; 1099 return 0;
1096 } 1100 }
1097 1101
1098 if (!elevator_switch(q, e)) 1102 return elevator_switch(q, e);
1099 printk(KERN_ERR "elevator: switch to %s failed\n", 1103}
1100 elevator_name); 1104EXPORT_SYMBOL(elevator_change);
1101 return count; 1105
1106ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1107 size_t count)
1108{
1109 int ret;
1110
1111 if (!q->elevator)
1112 return count;
1113
1114 ret = elevator_change(q, name);
1115 if (!ret)
1116 return count;
1117
1118 printk(KERN_ERR "elevator: switch to %s failed\n", name);
1119 return ret;
1102} 1120}
1103 1121
1104ssize_t elv_iosched_show(struct request_queue *q, char *name) 1122ssize_t elv_iosched_show(struct request_queue *q, char *name)
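
elevator_change() is now exported, so kernel code can switch a queue's I/O scheduler directly instead of going through the sysfs store path. A minimal sketch of a caller, assuming a driver that already owns a struct request_queue (the function name and the choice of "noop" are illustrative):

	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* Prefer the noop scheduler for a device that orders requests itself.
	 * elevator_change() returns 0 or a -errno value, matching the
	 * reworked error handling in elevator_switch() above. */
	static int example_setup_queue(struct request_queue *q)
	{
		int err = elevator_change(q, "noop");

		if (err)
			printk(KERN_WARNING "example: switch to noop failed (%d)\n", err);
		return err;
	}
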
diff --git a/drivers/Makefile b/drivers/Makefile
index ae473445ad6d..a2aea53a75ed 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/
50obj-y += net/ 50obj-y += net/
51obj-$(CONFIG_ATM) += atm/ 51obj-$(CONFIG_ATM) += atm/
52obj-$(CONFIG_FUSION) += message/ 52obj-$(CONFIG_FUSION) += message/
53obj-$(CONFIG_FIREWIRE) += firewire/ 53obj-y += firewire/
54obj-y += ieee1394/ 54obj-y += ieee1394/
55obj-$(CONFIG_UIO) += uio/ 55obj-$(CONFIG_UIO) += uio/
56obj-y += cdrom/ 56obj-y += cdrom/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b811f2173f6f..88681aca88c5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
105 105
106 Be aware that using this interface can confuse your Embedded 106 Be aware that using this interface can confuse your Embedded
107 Controller in a way that a normal reboot is not enough. You then 107 Controller in a way that a normal reboot is not enough. You then
108 have to power of your system, and remove the laptop battery for 108 have to power off your system, and remove the laptop battery for
109 some seconds. 109 some seconds.
110 An Embedded Controller typically is available on laptops and reads 110 An Embedded Controller typically is available on laptops and reads
111 sensor values like battery state and temperature. 111 sensor values like battery state and temperature.
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b76848c80be3..6b115f6c4313 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
382 device_remove_file(&device->dev, &dev_attr_rrtime); 382 device_remove_file(&device->dev, &dev_attr_rrtime);
383} 383}
384 384
385/* Query firmware how many CPUs should be idle */ 385/*
386static int acpi_pad_pur(acpi_handle handle, int *num_cpus) 386 * Query firmware how many CPUs should be idle
387 * return -1 on failure
388 */
389static int acpi_pad_pur(acpi_handle handle)
387{ 390{
388 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 391 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
389 union acpi_object *package; 392 union acpi_object *package;
390 int rev, num, ret = -EINVAL; 393 int num = -1;
391 394
392 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) 395 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
393 return -EINVAL; 396 return num;
394 397
395 if (!buffer.length || !buffer.pointer) 398 if (!buffer.length || !buffer.pointer)
396 return -EINVAL; 399 return num;
397 400
398 package = buffer.pointer; 401 package = buffer.pointer;
399 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) 402
400 goto out; 403 if (package->type == ACPI_TYPE_PACKAGE &&
401 rev = package->package.elements[0].integer.value; 404 package->package.count == 2 &&
402 num = package->package.elements[1].integer.value; 405 package->package.elements[0].integer.value == 1) /* rev 1 */
403 if (rev != 1 || num < 0) 406
404 goto out; 407 num = package->package.elements[1].integer.value;
405 *num_cpus = num; 408
406 ret = 0;
407out:
408 kfree(buffer.pointer); 409 kfree(buffer.pointer);
409 return ret; 410 return num;
410} 411}
411 412
412/* Notify firmware how many CPUs are idle */ 413/* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
433 uint32_t idle_cpus; 434 uint32_t idle_cpus;
434 435
435 mutex_lock(&isolated_cpus_lock); 436 mutex_lock(&isolated_cpus_lock);
436 if (acpi_pad_pur(handle, &num_cpus)) { 437 num_cpus = acpi_pad_pur(handle);
438 if (num_cpus < 0) {
437 mutex_unlock(&isolated_cpus_lock); 439 mutex_unlock(&isolated_cpus_lock);
438 return; 440 return;
439 } 441 }
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index df85b53a674f..7dad9160f209 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
854 ACPI_BITMASK_POWER_BUTTON_STATUS | \ 854 ACPI_BITMASK_POWER_BUTTON_STATUS | \
855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ 855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
856 ACPI_BITMASK_RT_CLOCK_STATUS | \ 856 ACPI_BITMASK_RT_CLOCK_STATUS | \
857 ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
857 ACPI_BITMASK_WAKE_STATUS) 858 ACPI_BITMASK_WAKE_STATUS)
858 859
859#define ACPI_BITMASK_TIMER_ENABLE 0x0001 860#define ACPI_BITMASK_TIMER_ENABLE 0x0001
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 74c24d517f81..4093522eed45 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
109 * 109 *
110 * DESCRIPTION: Reacquire the interpreter execution region from within the 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a 111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjuction with 112 * fatal system error. Used in conjunction with
113 * relinquish_interpreter 113 * relinquish_interpreter
114 * 114 *
115 ******************************************************************************/ 115 ******************************************************************************/
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 22cfcfbd9fff..491191e6cf69 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
149 149
150 /* 150 /*
151 * 16-, 32-, and 64-bit cases must use the move macros that perform 151 * 16-, 32-, and 64-bit cases must use the move macros that perform
152 * endian conversion and/or accomodate hardware that cannot perform 152 * endian conversion and/or accommodate hardware that cannot perform
153 * misaligned memory transfers 153 * misaligned memory transfers
154 */ 154 */
155 case ACPI_RSC_MOVE16: 155 case ACPI_RSC_MOVE16:
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 907e350f1c7d..fca34ccfd294 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
34 depends on ACPI_APEI 34 depends on ACPI_APEI
35 help 35 help
36 ERST is a way provided by APEI to save and retrieve hardware 36 ERST is a way provided by APEI to save and retrieve hardware
37 error infomation to and from a persistent store. Enable this 37 error information to and from a persistent store. Enable this
 38 if you want to debug and test the ERST kernel support 38 if you want to debug and test the ERST kernel support
39 and firmware implementation. 39 and firmware implementation.
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 73fd0c7487c1..4a904a4bf05f 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
445int apei_resources_request(struct apei_resources *resources, 445int apei_resources_request(struct apei_resources *resources,
446 const char *desc) 446 const char *desc)
447{ 447{
448 struct apei_res *res, *res_bak; 448 struct apei_res *res, *res_bak = NULL;
449 struct resource *r; 449 struct resource *r;
450 int rc;
450 451
451 apei_resources_sub(resources, &apei_resources_all); 452 rc = apei_resources_sub(resources, &apei_resources_all);
453 if (rc)
454 return rc;
452 455
456 rc = -EINVAL;
453 list_for_each_entry(res, &resources->iomem, list) { 457 list_for_each_entry(res, &resources->iomem, list) {
454 r = request_mem_region(res->start, res->end - res->start, 458 r = request_mem_region(res->start, res->end - res->start,
455 desc); 459 desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
475 } 479 }
476 } 480 }
477 481
478 apei_resources_merge(&apei_resources_all, resources); 482 rc = apei_resources_merge(&apei_resources_all, resources);
483 if (rc) {
484 pr_err(APEI_PFX "Fail to merge resources!\n");
485 goto err_unmap_ioport;
486 }
479 487
480 return 0; 488 return 0;
481err_unmap_ioport: 489err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
491 break; 499 break;
492 release_mem_region(res->start, res->end - res->start); 500 release_mem_region(res->start, res->end - res->start);
493 } 501 }
494 return -EINVAL; 502 return rc;
495} 503}
496EXPORT_SYMBOL_GPL(apei_resources_request); 504EXPORT_SYMBOL_GPL(apei_resources_request);
497 505
498void apei_resources_release(struct apei_resources *resources) 506void apei_resources_release(struct apei_resources *resources)
499{ 507{
508 int rc;
500 struct apei_res *res; 509 struct apei_res *res;
501 510
502 list_for_each_entry(res, &resources->iomem, list) 511 list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
504 list_for_each_entry(res, &resources->ioport, list) 513 list_for_each_entry(res, &resources->ioport, list)
505 release_region(res->start, res->end - res->start); 514 release_region(res->start, res->end - res->start);
506 515
507 apei_resources_sub(&apei_resources_all, resources); 516 rc = apei_resources_sub(&apei_resources_all, resources);
517 if (rc)
518 pr_err(APEI_PFX "Fail to sub resources!\n");
508} 519}
509EXPORT_SYMBOL_GPL(apei_resources_release); 520EXPORT_SYMBOL_GPL(apei_resources_release);
510 521
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 465c885938ee..cf29df69380b 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
426 426
427static int einj_check_table(struct acpi_table_einj *einj_tab) 427static int einj_check_table(struct acpi_table_einj *einj_tab)
428{ 428{
429 if (einj_tab->header_length != sizeof(struct acpi_table_einj)) 429 if ((einj_tab->header_length !=
430 (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
431 && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
430 return -EINVAL; 432 return -EINVAL;
431 if (einj_tab->header.length < sizeof(struct acpi_table_einj)) 433 if (einj_tab->header.length < sizeof(struct acpi_table_einj))
432 return -EINVAL; 434 return -EINVAL;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 5281ddda2777..da1228a9a544 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -2,7 +2,7 @@
2 * APEI Error Record Serialization Table debug support 2 * APEI Error Record Serialization Table debug support
3 * 3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error 4 * ERST is a way provided by APEI to save and retrieve hardware error
 5 * infomation to and from a persistent store. This file provides the 5 * information to and from a persistent store. This file provides the
6 * debugging/testing support for ERST kernel support and firmware 6 * debugging/testing support for ERST kernel support and firmware
7 * implementation. 7 * implementation.
8 * 8 *
@@ -111,11 +111,13 @@ retry:
111 goto out; 111 goto out;
112 } 112 }
113 if (len > erst_dbg_buf_len) { 113 if (len > erst_dbg_buf_len) {
114 kfree(erst_dbg_buf); 114 void *p;
115 rc = -ENOMEM; 115 rc = -ENOMEM;
116 erst_dbg_buf = kmalloc(len, GFP_KERNEL); 116 p = kmalloc(len, GFP_KERNEL);
117 if (!erst_dbg_buf) 117 if (!p)
118 goto out; 118 goto out;
119 kfree(erst_dbg_buf);
120 erst_dbg_buf = p;
119 erst_dbg_buf_len = len; 121 erst_dbg_buf_len = len;
120 goto retry; 122 goto retry;
121 } 123 }
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
150 if (mutex_lock_interruptible(&erst_dbg_mutex)) 152 if (mutex_lock_interruptible(&erst_dbg_mutex))
151 return -EINTR; 153 return -EINTR;
152 if (usize > erst_dbg_buf_len) { 154 if (usize > erst_dbg_buf_len) {
153 kfree(erst_dbg_buf); 155 void *p;
154 rc = -ENOMEM; 156 rc = -ENOMEM;
155 erst_dbg_buf = kmalloc(usize, GFP_KERNEL); 157 p = kmalloc(usize, GFP_KERNEL);
156 if (!erst_dbg_buf) 158 if (!p)
157 goto out; 159 goto out;
160 kfree(erst_dbg_buf);
161 erst_dbg_buf = p;
158 erst_dbg_buf_len = usize; 162 erst_dbg_buf_len = usize;
159 } 163 }
160 rc = copy_from_user(erst_dbg_buf, ubuf, usize); 164 rc = copy_from_user(erst_dbg_buf, ubuf, usize);
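
Both erst-dbg.c hunks switch to the same buffer-grow pattern: allocate the larger buffer first and free the old one only after the allocation succeeds, so a failed kmalloc() no longer leaves erst_dbg_buf pointing at freed memory. A generic sketch of that pattern (the helper name is illustrative, not kernel API):

	#include <linux/slab.h>

	/* Grow *bufp to at least @want bytes without losing the old buffer
	 * when the allocation fails. */
	static int grow_buffer(void **bufp, size_t *lenp, size_t want)
	{
		void *p;

		if (want <= *lenp)
			return 0;
		p = kmalloc(want, GFP_KERNEL);
		if (!p)
			return -ENOMEM;	/* old buffer is still intact */
		kfree(*bufp);
		*bufp = p;
		*lenp = want;
		return 0;
	}
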
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 18645f4e83cd..1211c03149e8 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -2,7 +2,7 @@
2 * APEI Error Record Serialization Table support 2 * APEI Error Record Serialization Table support
3 * 3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error 4 * ERST is a way provided by APEI to save and retrieve hardware error
5 * infomation to and from a persistent store. 5 * information to and from a persistent store.
6 * 6 *
7 * For more information about ERST, please refer to ACPI Specification 7 * For more information about ERST, please refer to ACPI Specification
8 * version 4.0, section 17.4. 8 * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
266{ 266{
267 int rc; 267 int rc;
268 u64 offset; 268 u64 offset;
269 void *src, *dst;
270
271 /* ioremap does not work in interrupt context */
272 if (in_interrupt()) {
273 pr_warning(ERST_PFX
274 "MOVE_DATA can not be used in interrupt context");
275 return -EBUSY;
276 }
269 277
270 rc = __apei_exec_read_register(entry, &offset); 278 rc = __apei_exec_read_register(entry, &offset);
271 if (rc) 279 if (rc)
272 return rc; 280 return rc;
273 memmove((void *)ctx->dst_base + offset, 281
274 (void *)ctx->src_base + offset, 282 src = ioremap(ctx->src_base + offset, ctx->var2);
275 ctx->var2); 283 if (!src)
284 return -ENOMEM;
285 dst = ioremap(ctx->dst_base + offset, ctx->var2);
286 if (!dst)
287 return -ENOMEM;
288
289 memmove(dst, src, ctx->var2);
290
291 iounmap(src);
292 iounmap(dst);
276 293
277 return 0; 294 return 0;
278} 295}
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
750 767
751static int erst_check_table(struct acpi_table_erst *erst_tab) 768static int erst_check_table(struct acpi_table_erst *erst_tab)
752{ 769{
753 if (erst_tab->header_length != sizeof(struct acpi_table_erst)) 770 if ((erst_tab->header_length !=
771 (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
 772 && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
754 return -EINVAL; 773 return -EINVAL;
755 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 774 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
756 return -EINVAL; 775 return -EINVAL;
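
erst_exec_move_data() now maps the source and destination ranges with ioremap() on demand instead of assuming they are permanently mapped, and bails out in interrupt context because ioremap() may sleep. A condensed sketch of the remap-copy-unmap sequence; unlike the hunk above it also drops the source mapping when the destination remap fails (the function name is illustrative):

	#include <linux/io.h>
	#include <linux/string.h>

	/* Copy @len bytes at @offset between two physical ranges through
	 * temporary kernel mappings. Must not be called from interrupt
	 * context. */
	static int phys_move(u64 dst_base, u64 src_base, u64 offset, u64 len)
	{
		void *src, *dst;

		src = ioremap(src_base + offset, len);
		if (!src)
			return -ENOMEM;
		dst = ioremap(dst_base + offset, len);
		if (!dst) {
			iounmap(src);	/* do not leak the source mapping */
			return -ENOMEM;
		}
		memmove(dst, src, len);
		iounmap(dst);
		iounmap(src);
		return 0;
	}
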
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 385a6059714a..0d505e59214d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
302 struct ghes *ghes = NULL; 302 struct ghes *ghes = NULL;
303 int rc = -EINVAL; 303 int rc = -EINVAL;
304 304
305 generic = ghes_dev->dev.platform_data; 305 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
306 if (!generic->enabled) 306 if (!generic->enabled)
307 return -ENODEV; 307 return -ENODEV;
308 308
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 343168d18266..1a3508a7fe03 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
137 137
138static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 138static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
139{ 139{
140 struct acpi_hest_generic *generic;
141 struct platform_device *ghes_dev; 140 struct platform_device *ghes_dev;
142 struct ghes_arr *ghes_arr = data; 141 struct ghes_arr *ghes_arr = data;
143 int rc; 142 int rc;
144 143
145 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) 144 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
146 return 0; 145 return 0;
147 generic = (struct acpi_hest_generic *)hest_hdr; 146
148 if (!generic->enabled) 147 if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
149 return 0; 148 return 0;
150 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); 149 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
151 if (!ghes_dev) 150 if (!ghes_dev)
152 return -ENOMEM; 151 return -ENOMEM;
153 ghes_dev->dev.platform_data = generic; 152
153 rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
154 if (rc)
155 goto err;
156
154 rc = platform_device_add(ghes_dev); 157 rc = platform_device_add(ghes_dev);
155 if (rc) 158 if (rc)
156 goto err; 159 goto err;
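
The hest.c and ghes.c hunks work together: platform_device_add_data() copies the buffer it is given, so what gets copied here is a pointer to the HEST entry, and ghes_probe() recovers it with one extra dereference of platform_data. A minimal sketch of that pairing (both function names are illustrative):

	#include <linux/platform_device.h>
	#include <linux/acpi.h>

	/* Producer: platform_device_add_data() duplicates the buffer, so
	 * passing &generic stores the pointer value in platform_data. */
	static int example_attach_generic(struct platform_device *ghes_dev,
					  struct acpi_hest_header *hest_hdr)
	{
		struct acpi_hest_generic *generic =
			(struct acpi_hest_generic *)hest_hdr;

		return platform_device_add_data(ghes_dev, &generic,
						sizeof(generic));
	}

	/* Consumer: platform_data points at the copied pointer, so one
	 * extra dereference yields the HEST entry again. */
	static struct acpi_hest_generic *
	example_get_generic(struct platform_device *ghes_dev)
	{
		return *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	}
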
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 8f8bd736d4ff..542e53903891 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
142 list_add_tail_rcu(&map->list, &acpi_iomaps); 142 list_add_tail_rcu(&map->list, &acpi_iomaps);
143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
144 144
145 return vaddr + (paddr - pg_off); 145 return map->vaddr + (paddr - map->paddr);
146err_unmap: 146err_unmap:
147 iounmap(vaddr); 147 iounmap(vaddr);
148 return NULL; 148 return NULL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc58402b0a17..98417201e9ce 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
273 POWER_SUPPLY_PROP_CYCLE_COUNT, 273 POWER_SUPPLY_PROP_CYCLE_COUNT,
274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
275 POWER_SUPPLY_PROP_VOLTAGE_NOW, 275 POWER_SUPPLY_PROP_VOLTAGE_NOW,
276 POWER_SUPPLY_PROP_CURRENT_NOW,
277 POWER_SUPPLY_PROP_POWER_NOW, 276 POWER_SUPPLY_PROP_POWER_NOW,
278 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 277 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
279 POWER_SUPPLY_PROP_ENERGY_FULL, 278 POWER_SUPPLY_PROP_ENERGY_FULL,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 2bb28b9d91c4..f7619600270a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
183{ 183{
184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
185 acpi_osi_setup("!Windows 2006"); 185 acpi_osi_setup("!Windows 2006");
186 acpi_osi_setup("!Windows 2006 SP1");
187 acpi_osi_setup("!Windows 2006 SP2");
186 return 0; 188 return 0;
187} 189}
188static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) 190static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
226 }, 228 },
227 }, 229 },
228 { 230 {
231 .callback = dmi_disable_osi_vista,
232 .ident = "Toshiba Satellite L355",
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
235 DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
236 },
237 },
238 {
229 .callback = dmi_disable_osi_win7, 239 .callback = dmi_disable_osi_win7,
230 .ident = "ASUS K50IJ", 240 .ident = "ASUS K50IJ",
231 .matches = { 241 .matches = {
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
233 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), 243 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
234 }, 244 },
235 }, 245 },
246 {
247 .callback = dmi_disable_osi_vista,
248 .ident = "Toshiba P305D",
249 .matches = {
250 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
251 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
252 },
253 },
236 254
237 /* 255 /*
238 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 256 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5c221ab535d5..310e3b9749cb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
55static int set_power_nocheck(const struct dmi_system_id *id) 55static int set_power_nocheck(const struct dmi_system_id *id)
56{ 56{
57 printk(KERN_NOTICE PREFIX "%s detected - " 57 printk(KERN_NOTICE PREFIX "%s detected - "
58 "disable power check in power transistion\n", id->ident); 58 "disable power check in power transition\n", id->ident);
59 acpi_power_nocheck = 1; 59 acpi_power_nocheck = 1;
60 return 0; 60 return 0;
61} 61}
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
80 80
81static struct dmi_system_id dsdt_dmi_table[] __initdata = { 81static struct dmi_system_id dsdt_dmi_table[] __initdata = {
82 /* 82 /*
83 * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. 83 * Invoke DSDT corruption work-around on all Toshiba Satellite.
84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679 84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
85 */ 85 */
86 { 86 {
87 .callback = set_copy_dsdt, 87 .callback = set_copy_dsdt,
88 .ident = "TOSHIBA Satellite A505", 88 .ident = "TOSHIBA Satellite",
89 .matches = { 89 .matches = {
90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
91 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), 91 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
92 },
93 },
94 {
95 .callback = set_copy_dsdt,
96 .ident = "TOSHIBA Satellite L505D",
97 .matches = {
98 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
99 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
100 }, 92 },
101 }, 93 },
102 {} 94 {}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
1027 1019
1028 /* 1020 /*
1029 * If the laptop falls into the DMI check table, the power state check 1021 * If the laptop falls into the DMI check table, the power state check
1030 * will be disabled in the course of device power transistion. 1022 * will be disabled in the course of device power transition.
1031 */ 1023 */
1032 dmi_check_system(power_nocheck_dmi_table); 1024 dmi_check_system(power_nocheck_dmi_table);
1033 1025
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8a3b840c0bb2..d94d2953c974 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
369 369
370 acpi_bus_unregister_driver(&acpi_fan_driver); 370 acpi_bus_unregister_driver(&acpi_fan_driver);
371 371
372#ifdef CONFIG_ACPI_PROCFS
372 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); 373 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
374#endif
373 375
374 return; 376 return;
375} 377}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e9699aaed109..b618f888d66b 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
29 29
30static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 30static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
31 { 31 {
32 set_no_mwait, "IFL91 board", {
33 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
34 DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
35 DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
36 DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
37 {
38 set_no_mwait, "Extensa 5220", { 32 set_no_mwait, "Extensa 5220", {
39 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 33 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
40 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 34 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 156021892389..347eb21b2353 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
851 acpi_idle_driver.name); 851 acpi_idle_driver.name);
852 } else { 852 } else {
853 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", 853 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
854 cpuidle_get_driver()->name); 854 cpuidle_get_driver()->name);
855 } 855 }
856 856
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ba1bd263d903..3a73a93596e8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
447 if (!try_module_get(calling_module)) 447 if (!try_module_get(calling_module))
448 return -EINVAL; 448 return -EINVAL;
449 449
450 /* is_done is set to negative if an error occured, 450 /* is_done is set to negative if an error occurred,
451 * and to postitive if _no_ error occured, but SMM 451 * and to postitive if _no_ error occurred, but SMM
452 * was already notified. This avoids double notification 452 * was already notified. This avoids double notification
453 * which might lead to unexpected results... 453 * which might lead to unexpected results...
454 */ 454 */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index cf82989ae756..4754ff6e70e6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
363 return 0; 363 return 0;
364} 364}
365 365
366static int __init init_nvs_nosave(const struct dmi_system_id *d)
367{
368 acpi_nvs_nosave();
369 return 0;
370}
371
366static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 372static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
367 { 373 {
368 .callback = init_old_suspend_ordering, 374 .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
397 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 403 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
398 }, 404 },
399 }, 405 },
406 {
407 .callback = init_nvs_nosave,
408 .ident = "Sony Vaio VGN-SR11M",
409 .matches = {
410 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
411 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
412 },
413 },
414 {
415 .callback = init_nvs_nosave,
416 .ident = "Everex StepNote Series",
417 .matches = {
418 DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
419 DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
420 },
421 },
400 {}, 422 {},
401}; 423};
402#endif /* CONFIG_SUSPEND */ 424#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 68e2e4582fa2..f8588f81048a 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
101}; 101};
102 102
103static int param_get_debug_layer(char *buffer, struct kernel_param *kp) 103static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
104{ 104{
105 int result = 0; 105 int result = 0;
106 int i; 106 int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
128 return result; 128 return result;
129} 129}
130 130
131static int param_get_debug_level(char *buffer, struct kernel_param *kp) 131static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
132{ 132{
133 int result = 0; 133 int result = 0;
134 int i; 134 int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
149 return result; 149 return result;
150} 150}
151 151
152module_param_call(debug_layer, param_set_uint, param_get_debug_layer, 152static struct kernel_param_ops param_ops_debug_layer = {
153 &acpi_dbg_layer, 0644); 153 .set = param_set_uint,
154module_param_call(debug_level, param_set_uint, param_get_debug_level, 154 .get = param_get_debug_layer,
155 &acpi_dbg_level, 0644); 155};
156
157static struct kernel_param_ops param_ops_debug_level = {
158 .set = param_set_uint,
159 .get = param_get_debug_level,
160};
161
162module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
163module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
156 164
157static char trace_method_name[6]; 165static char trace_method_name[6];
158module_param_string(trace_method_name, trace_method_name, 6, 0644); 166module_param_string(trace_method_name, trace_method_name, 6, 0644);
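
The sysfs.c change moves the ACPI debug_layer/debug_level parameters from module_param_call() to module_param_cb() with a struct kernel_param_ops, the interface that replaces the old per-callback registration. A minimal sketch of the same registration for a hypothetical parameter:

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>

	static unsigned int example_mask;

	/* Custom formatter; the setter reuses the stock uint parser, just
	 * as the ACPI parameters above do. */
	static int example_get_mask(char *buffer, const struct kernel_param *kp)
	{
		return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
	}

	static struct kernel_param_ops example_mask_ops = {
		.set = param_set_uint,
		.get = example_get_mask,
	};

	module_param_cb(example_mask, &example_mask_ops, &example_mask, 0644);
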
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c5fef01b3c95..b83676126598 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
59 "support\n")); 59 "support\n"));
60 *cap |= ACPI_VIDEO_BACKLIGHT; 60 *cap |= ACPI_VIDEO_BACKLIGHT;
61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
62 printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " 62 printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
63 "control misses _BQC function\n"); 63 "cannot determine initial brightness\n");
64 /* We have backlight support, no need to scan further */ 64 /* We have backlight support, no need to scan further */
65 return AE_CTRL_TERMINATE; 65 return AE_CTRL_TERMINATE;
66 } 66 }
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 013727b20417..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
90static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
91#endif 91#endif
92 92
93static struct scsi_host_template ahci_sht = {
94 AHCI_SHT("ahci"),
95};
96
93static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
94 .inherits = &ahci_ops, 98 .inherits = &ahci_ops,
95 .hardreset = ahci_vt8251_hardreset, 99 .hardreset = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
253 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 257 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
254 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 258 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
255 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 259 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
256 263
257 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
258 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 474427b6f99f..e5fdeebf9ef0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -298,7 +298,17 @@ struct ahci_host_priv {
298 298
299extern int ahci_ignore_sss; 299extern int ahci_ignore_sss;
300 300
301extern struct scsi_host_template ahci_sht; 301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
302extern struct ata_port_operations ahci_ops; 312extern struct ata_port_operations ahci_ops;
303 313
304void ahci_save_initial_config(struct device *dev, 314void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33cca44..84b643270e7a 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
23#include <linux/ahci_platform.h> 23#include <linux/ahci_platform.h>
24#include "ahci.h" 24#include "ahci.h"
25 25
26static struct scsi_host_template ahci_platform_sht = {
27 AHCI_SHT("ahci_platform"),
28};
29
26static int __init ahci_probe(struct platform_device *pdev) 30static int __init ahci_probe(struct platform_device *pdev)
27{ 31{
28 struct device *dev = &pdev->dev; 32 struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
145 ahci_print_info(host, "platform"); 149 ahci_print_info(host, "platform");
146 150
147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
148 &ahci_sht); 152 &ahci_platform_sht);
149 if (rc) 153 if (rc)
150 goto err0; 154 goto err0;
151 155
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0a4838..d712675d0a96 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
303 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 { } /* terminate list */ 309 { } /* terminate list */
306}; 310};
307 311
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 666850d31df2..8eea309ea212 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124static struct device_attribute *ahci_shost_attrs[] = { 124struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
135 136
136static struct device_attribute *ahci_sdev_attrs[] = { 137struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 138 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 139 &dev_attr_unload_heads,
139 NULL 140 NULL
140}; 141};
141 142EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
142struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
149};
150EXPORT_SYMBOL_GPL(ahci_sht);
151 143
152struct ata_port_operations ahci_ops = { 144struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 145 .inherits = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1326 /* issue the first D2H Register FIS */ 1318 /* issue the first D2H Register FIS */
1327 msecs = 0; 1319 msecs = 0;
1328 now = jiffies; 1320 now = jiffies;
1329 if (time_after(now, deadline)) 1321 if (time_after(deadline, now))
1330 msecs = jiffies_to_msecs(deadline - now); 1322 msecs = jiffies_to_msecs(deadline - now);
1331 1323
1332 tf.ctl |= ATA_SRST; 1324 tf.ctl |= ATA_SRST;
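
The ahci_do_softreset() fix swaps the arguments of time_after(): a remaining budget only exists while the deadline is still ahead of the current jiffies value, which is exactly what time_after(deadline, now) tests in a wrap-around-safe way. A small sketch of the corrected computation (the helper name is illustrative):

	#include <linux/jiffies.h>

	/* Milliseconds remaining until @deadline, or 0 if it already passed. */
	static unsigned int msecs_until(unsigned long deadline)
	{
		unsigned long now = jiffies;

		if (time_after(deadline, now))
			return jiffies_to_msecs(deadline - now);
		return 0;
	}
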
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c035b3d041ee..932eaee50245 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5418 */ 5418 */
5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5420{ 5420{
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5421 int rc; 5422 int rc;
5422 5423
5423 /* 5424 /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5426 */ 5427 */
5427 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5428 5429
5429 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
5431 * On some hardware, device fails to respond after spun down
5432 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5430 if (rc == 0) 5442 if (rc == 0)
5431 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5432 return rc; 5444 return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299b8342..e48302eae55f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
3235 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3236 return 1; 3236 return 1;
3237 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3238 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 return 0; 3244 return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3b82d8ef76f0..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
418 if (ioaddr->ctl_addr) 418 if (ioaddr->ctl_addr)
419 iowrite8(tf->ctl, ioaddr->ctl_addr); 419 iowrite8(tf->ctl, ioaddr->ctl_addr);
420 ap->last_ctl = tf->ctl; 420 ap->last_ctl = tf->ctl;
421 ata_wait_idle(ap);
421 } 422 }
422 423
423 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 424 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
453 iowrite8(tf->device, ioaddr->device_addr); 454 iowrite8(tf->device, ioaddr->device_addr);
454 VPRINTK("device 0x%X\n", tf->device); 455 VPRINTK("device 0x%X\n", tf->device);
455 } 456 }
457
458 ata_wait_idle(ap);
456} 459}
457EXPORT_SYMBOL_GPL(ata_sff_tf_load); 460EXPORT_SYMBOL_GPL(ata_sff_tf_load);
458 461
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1042int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1043 u8 status, int in_wq) 1046 u8 status, int in_wq)
1044{ 1047{
1045 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1046 unsigned long flags = 0; 1050 unsigned long flags = 0;
1047 int poll_next; 1051 int poll_next;
1048 1052
@@ -1298,8 +1302,14 @@ fsm_start:
1298} 1302}
1299EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1300 1304
1301void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) 1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1302{ 1306{
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1303 /* may fail if ata_sff_flush_pio_task() in progress */ 1313 /* may fail if ata_sff_flush_pio_task() in progress */
1304 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1305 msecs_to_jiffies(delay)); 1315 msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
1321{ 1331{
1322 struct ata_port *ap = 1332 struct ata_port *ap =
1323 container_of(work, struct ata_port, sff_pio_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1334 struct ata_link *link = ap->sff_pio_task_link;
1324 struct ata_queued_cmd *qc; 1335 struct ata_queued_cmd *qc;
1325 u8 status; 1336 u8 status;
1326 int poll_next; 1337 int poll_next;
1327 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1328 /* qc can be NULL if timeout occurred */ 1340 /* qc can be NULL if timeout occurred */
1329 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1341 qc = ata_qc_from_tag(ap, link->active_tag);
1330 if (!qc) 1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1331 return; 1344 return;
1345 }
1332 1346
1333fsm_start: 1347fsm_start:
1334 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
1345 msleep(2); 1359 msleep(2);
1346 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1347 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1348 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1349 return; 1363 return;
1350 } 1364 }
1351 } 1365 }
1352 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * clean the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1353 /* move the HSM */ 1372 /* move the HSM */
1354 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1355 1374
@@ -1376,6 +1395,7 @@ fsm_start:
1376unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1377{ 1396{
1378 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1379 1399
1380 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1381 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1396 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1397 1417
1398 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1399 ata_sff_queue_pio_task(ap, 0); 1419 ata_sff_queue_pio_task(link, 0);
1400 1420
1401 break; 1421 break;
1402 1422
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1409 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1410 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1411 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1412 ata_sff_queue_pio_task(ap, 0); 1432 ata_sff_queue_pio_task(link, 0);
1413 1433
1414 /* always send first data block using the 1434 /* always send first data block using the
1415 * ata_sff_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1419 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1420 1440
1421 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1422 ata_sff_queue_pio_task(ap, 0); 1442 ata_sff_queue_pio_task(link, 0);
1423 1443
1424 /* if polling, ata_sff_pio_task() handles the 1444 /* if polling, ata_sff_pio_task() handles the
1425 * rest. otherwise, interrupt handler takes 1445 * rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1441 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1442 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1443 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1444 ata_sff_queue_pio_task(ap, 0); 1464 ata_sff_queue_pio_task(link, 0);
1445 break; 1465 break;
1446 1466
1447 default: 1467 default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2734unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2754unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735{ 2755{
2736 struct ata_port *ap = qc->ap; 2756 struct ata_port *ap = qc->ap;
2757 struct ata_link *link = qc->dev->link;
2737 2758
2738 /* defer PIO handling to sff_qc_issue */ 2759 /* defer PIO handling to sff_qc_issue */
2739 if (!ata_is_dma(qc->tf.protocol)) 2760 if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2762 2783
2763 /* send cdb by polling if no cdb interrupt */ 2784 /* send cdb by polling if no cdb interrupt */
2764 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2765 ata_sff_queue_pio_task(ap, 0); 2786 ata_sff_queue_pio_task(link, 0);
2766 break; 2787 break;
2767 2788
2768 default: 2789 default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f8c880..2215632e4b31 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 76 /* Odd numbered device ids are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
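
The pata_artop change above replaces pdev->device % 1 with pdev->device & 1. A minimal userspace sketch, offered purely as an illustration and not part of the patch, of why the old modulo test could never be true:

/* Illustrative only: x % 1 is always 0, so the old enable-bit test never
 * ran; x & 1 tests the low bit, i.e. whether the device id is odd.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ids[] = { 0x0005, 0x0006, 0x0008, 0x0009 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("id 0x%04x: id %% 1 = %u, id & 1 = %u\n",
		       ids[i], ids[i] % 1, ids[i] & 1);
	return 0;
}
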
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e659885de16..ac8d7d97e408 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
420} 422}
421 423
422static int via_port_start(struct ata_port *ap) 424static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 81982594a014..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2284 } 2284 }
2285 2285
2286 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2287 ata_sff_queue_pio_task(ap, 0); 2287 ata_sff_queue_pio_task(link, 0);
2288 return 0; 2288 return 0;
2289} 2289}
2290 2290
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49ff135..276d5a701dc3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.status = DPM_ON; 60 dev->power.status = DPM_ON;
61 init_completion(&dev->power.completion); 61 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion);
62 dev->power.wakeup_count = 0; 63 dev->power.wakeup_count = 0;
63 pm_runtime_init(dev); 64 pm_runtime_init(dev);
64} 65}
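
The device_pm_init() hunk above adds a complete_all() right after init_completion(), so power.completion starts out in the completed state; a waiter that runs before the device has ever been through a suspend pass then returns immediately instead of blocking. A kernel-style sketch of that pattern, using a made-up my_state structure (illustrative only, not the driver-core code):

/* Illustrative only: a completion that may be waited on before the first
 * complete() call is created "already completed".
 */
#include <linux/completion.h>

struct my_state {
	struct completion done;
};

static void my_state_init(struct my_state *s)
{
	init_completion(&s->done);
	complete_all(&s->done);		/* the first waiter must not block */
}

static void my_state_wait(struct my_state *s)
{
	wait_for_completion(&s->done);	/* returns at once until the
					 * completion is reinitialized */
}
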
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df1370a..5e4fadcdece9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
297 spin_lock_irqsave(&h->lock, flags); 297 spin_lock_irqsave(&h->lock, flags);
298 addQ(&h->reqQ, c); 298 addQ(&h->reqQ, c);
299 h->Qdepth++; 299 h->Qdepth++;
300 if (h->Qdepth > h->maxQsinceinit)
301 h->maxQsinceinit = h->Qdepth;
300 start_io(h); 302 start_io(h);
301 spin_unlock_irqrestore(&h->lock, flags); 303 spin_unlock_irqrestore(&h->lock, flags);
302} 304}
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4519 misc_fw_support = readl(&cfgtable->misc_fw_support); 4521 misc_fw_support = readl(&cfgtable->misc_fw_support);
4520 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 4522 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4521 4523
4524 /* The doorbell reset seems to cause lockups on some Smart
4525 * Arrays (e.g. P410, P410i, maybe others). Until this is
4526 * fixed or at least isolated, avoid the doorbell reset.
4527 */
4528 use_doorbell = 0;
4529
4522 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4530 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4523 if (rc) 4531 if (rc)
4524 goto unmap_cfgtable; 4532 goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4712 h->scatter_list = kmalloc(h->max_commands * 4720 h->scatter_list = kmalloc(h->max_commands *
4713 sizeof(struct scatterlist *), 4721 sizeof(struct scatterlist *),
4714 GFP_KERNEL); 4722 GFP_KERNEL);
4723 if (!h->scatter_list)
4724 goto clean4;
4725
4715 for (k = 0; k < h->nr_cmds; k++) { 4726 for (k = 0; k < h->nr_cmds; k++) {
4716 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4727 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4717 h->maxsgentries, 4728 h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4781clean4: 4792clean4:
4782 kfree(h->cmd_pool_bits); 4793 kfree(h->cmd_pool_bits);
4783 /* Free up sg elements */ 4794 /* Free up sg elements */
4784 for (k = 0; k < h->nr_cmds; k++) 4795 for (k-- ; k >= 0; k--)
4785 kfree(h->scatter_list[k]); 4796 kfree(h->scatter_list[k]);
4786 kfree(h->scatter_list); 4797 kfree(h->scatter_list);
4787 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4798 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
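
Two things change in the cciss probe path above: the outer scatter_list allocation gains a NULL check, and the clean4 unwind now frees only the entries that were actually allocated, counting down from the failing index instead of walking all nr_cmds slots. A reduced, self-contained sketch of that unwind pattern (illustrative only, not the driver code):

/* Illustrative only: on failure at index k, free entries [0, k-1] and
 * nothing past them.
 */
#include <stdlib.h>

static int alloc_table(void **slots, int n, size_t sz)
{
	int k;

	for (k = 0; k < n; k++) {
		slots[k] = malloc(sz);
		if (!slots[k])
			goto unwind;
	}
	return 0;

unwind:
	for (k--; k >= 0; k--)	/* only what was actually allocated */
		free(slots[k]);
	return -1;
}
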
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f3c636d23718..91797bbbe702 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
478 478
479 if (bio_rw(bio) == WRITE) { 479 if (bio_rw(bio) == WRITE) {
480 bool barrier = (bio->bi_rw & REQ_HARDBARRIER); 480 bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
481 struct file *file = lo->lo_backing_file; 481 struct file *file = lo->lo_backing_file;
482 482
483 if (barrier) { 483 if (barrier) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce5e9df..76fa3deaee84 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
974 host->breq->queuedata = host; 974 host->breq->queuedata = host;
975 975
976 /* mflash is random device, thanx for the noop */ 976 /* mflash is random device, thanx for the noop */
977 elevator_exit(host->breq->elevator); 977 err = elevator_change(host->breq, "noop");
978 err = elevator_init(host->breq, "noop");
979 if (err) { 978 if (err) {
980 printk(KERN_ERR "%s:%d (elevator_init) fail\n", 979 printk(KERN_ERR "%s:%d (elevator_init) fail\n",
981 __func__, __LINE__); 980 __func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb59bb76..37a2bb595076 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2369 pkt_shrink_pktlist(pd); 2369 pkt_shrink_pktlist(pd);
2370} 2370}
2371 2371
2372static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2372static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2373{ 2373{
2374 if (dev_minor >= MAX_WRITERS) 2374 if (dev_minor >= MAX_WRITERS)
2375 return NULL; 2375 return NULL;
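
The pktcdvd change above makes dev_minor unsigned so the single "dev_minor >= MAX_WRITERS" test also rejects negative values, which a signed comparison would let through into the array index. A small demonstration (illustrative only):

/* Illustrative only: with a signed minor, -1 passes the bounds test and
 * becomes a negative array index; as unsigned it is huge and rejected.
 */
#include <stdio.h>

#define MAX_WRITERS 8

static int rejected_signed(int minor)            { return minor >= MAX_WRITERS; }
static int rejected_unsigned(unsigned int minor) { return minor >= MAX_WRITERS; }

int main(void)
{
	printf("signed   rejects -1: %d\n", rejected_signed(-1));
	printf("unsigned rejects -1: %d\n", rejected_unsigned((unsigned int)-1));
	return 0;
}
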
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index eab58db5f91c..cd18493c9527 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -806,6 +806,8 @@ static const struct intel_driver_description {
806 "G45/G43", NULL, &intel_i965_driver }, 806 "G45/G43", NULL, &intel_i965_driver },
807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
808 "B43", NULL, &intel_i965_driver }, 808 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
810 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 811 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
810 "G41", NULL, &intel_i965_driver }, 812 "G41", NULL, &intel_i965_driver },
811 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 813 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index ee189c74d345..d09b1ab7e8ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -186,6 +186,8 @@
186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
189#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
190#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
189#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 191#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
190#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 192#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
191#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 193#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f49c84..7bd7c45b53ef 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@ static int num_force_kipmid;
305#ifdef CONFIG_PCI 305#ifdef CONFIG_PCI
306static int pci_registered; 306static int pci_registered;
307#endif 307#endif
308#ifdef CONFIG_ACPI
309static int pnp_registered;
310#endif
308#ifdef CONFIG_PPC_OF 311#ifdef CONFIG_PPC_OF
309static int of_registered; 312static int of_registered;
310#endif 313#endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2126{ 2129{
2127 struct acpi_device *acpi_dev; 2130 struct acpi_device *acpi_dev;
2128 struct smi_info *info; 2131 struct smi_info *info;
2129 struct resource *res; 2132 struct resource *res, *res_second;
2130 acpi_handle handle; 2133 acpi_handle handle;
2131 acpi_status status; 2134 acpi_status status;
2132 unsigned long long tmp; 2135 unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2182 info->io.addr_data = res->start; 2185 info->io.addr_data = res->start;
2183 2186
2184 info->io.regspacing = DEFAULT_REGSPACING; 2187 info->io.regspacing = DEFAULT_REGSPACING;
2185 res = pnp_get_resource(dev, 2188 res_second = pnp_get_resource(dev,
2186 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 2189 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2187 IORESOURCE_IO : IORESOURCE_MEM, 2190 IORESOURCE_IO : IORESOURCE_MEM,
2188 1); 2191 1);
2189 if (res) { 2192 if (res_second) {
2190 if (res->start > info->io.addr_data) 2193 if (res_second->start > info->io.addr_data)
2191 info->io.regspacing = res->start - info->io.addr_data; 2194 info->io.regspacing = res_second->start - info->io.addr_data;
2192 } 2195 }
2193 info->io.regsize = DEFAULT_REGSPACING; 2196 info->io.regsize = DEFAULT_REGSPACING;
2194 info->io.regshift = 0; 2197 info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
3359 3362
3360#ifdef CONFIG_ACPI 3363#ifdef CONFIG_ACPI
3361 pnp_register_driver(&ipmi_pnp_driver); 3364 pnp_register_driver(&ipmi_pnp_driver);
3365 pnp_registered = 1;
3362#endif 3366#endif
3363 3367
3364#ifdef CONFIG_DMI 3368#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
3526 pci_unregister_driver(&ipmi_pci_driver); 3530 pci_unregister_driver(&ipmi_pci_driver);
3527#endif 3531#endif
3528#ifdef CONFIG_ACPI 3532#ifdef CONFIG_ACPI
3529 pnp_unregister_driver(&ipmi_pnp_driver); 3533 if (pnp_registered)
3534 pnp_unregister_driver(&ipmi_pnp_driver);
3530#endif 3535#endif
3531 3536
3532#ifdef CONFIG_PPC_OF 3537#ifdef CONFIG_PPC_OF
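
The ipmi_si change above records whether pnp_register_driver() was actually called and only unregisters in that case, so an exit path that never reached the registration does not unregister a driver that was never registered. The shape of the pattern, reduced to plain C with stand-in helpers (illustrative only):

/* Illustrative only: register once, remember it, and guard the matching
 * unregister with the same flag.  fake_register()/fake_unregister() are
 * stand-ins, not real kernel APIs.
 */
#include <stdio.h>

static int registered;

static void fake_register(void)   { puts("registered"); }
static void fake_unregister(void) { puts("unregistered"); }

static void my_init(void)
{
	fake_register();
	registered = 1;
}

static void my_exit(void)
{
	if (registered)		/* never undo what was never done */
		fake_unregister();
}

int main(void) { my_init(); my_exit(); return 0; }
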
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecdbd758..1f528fad3516 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
788/* 788/*
789 * capabilities for /dev/zero 789 * capabilities for /dev/zero
790 * - permits private mappings, "copies" are taken of the source of zeros 790 * - permits private mappings, "copies" are taken of the source of zeros
791 * - no writeback happens
791 */ 792 */
792static struct backing_dev_info zero_bdi = { 793static struct backing_dev_info zero_bdi = {
793 .name = "char/mem", 794 .name = "char/mem",
794 .capabilities = BDI_CAP_MAP_COPY, 795 .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
795}; 796};
796 797
797static const struct file_operations full_fops = { 798static const struct file_operations full_fops = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a9826bd23..c810481a5bc2 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
596 ssize_t ret; 596 ssize_t ret;
597 bool nonblock; 597 bool nonblock;
598 598
599 /* Userspace could be out to fool us */
600 if (!count)
601 return 0;
602
599 port = filp->private_data; 603 port = filp->private_data;
600 604
601 nonblock = filp->f_flags & O_NONBLOCK; 605 nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
642 poll_wait(filp, &port->waitqueue, wait); 646 poll_wait(filp, &port->waitqueue, wait);
643 647
644 ret = 0; 648 ret = 0;
645 if (port->inbuf) 649 if (!will_read_block(port))
646 ret |= POLLIN | POLLRDNORM; 650 ret |= POLLIN | POLLRDNORM;
647 if (!will_write_block(port)) 651 if (!will_write_block(port))
648 ret |= POLLOUT; 652 ret |= POLLOUT;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaaea46e9..38df8c19e74c 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
533 case KIOCSOUND: 533 case KIOCSOUND:
534 if (!perm) 534 if (!perm)
535 goto eperm; 535 goto eperm;
536 /* FIXME: This is an old broken API but we need to keep it 536 /*
537 supported and somehow separate the historic advertised 537 * The use of PIT_TICK_RATE is historic, it used to be
538 tick rate from any real one */ 538 * the platform-dependent CLOCK_TICK_RATE between 2.6.12
539 * and 2.6.36, which was a minor but unfortunate ABI
540 * change.
541 */
539 if (arg) 542 if (arg)
540 arg = CLOCK_TICK_RATE / arg; 543 arg = PIT_TICK_RATE / arg;
541 kd_mksound(arg, 0); 544 kd_mksound(arg, 0);
542 break; 545 break;
543 546
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
553 */ 556 */
554 ticks = HZ * ((arg >> 16) & 0xffff) / 1000; 557 ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
555 count = ticks ? (arg & 0xffff) : 0; 558 count = ticks ? (arg & 0xffff) : 0;
556 /* FIXME: This is an old broken API but we need to keep it
557 supported and somehow separate the historic advertised
558 tick rate from any real one */
559 if (count) 559 if (count)
560 count = CLOCK_TICK_RATE / count; 560 count = PIT_TICK_RATE / count;
561 kd_mksound(count, ticks); 561 kd_mksound(count, ticks);
562 break; 562 break;
563 } 563 }
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c2408bbe9c2e..f508690eb958 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -80,7 +80,7 @@
80 * Limiting Performance Impact 80 * Limiting Performance Impact
81 * --------------------------- 81 * ---------------------------
82 * C states, especially those with large exit latencies, can have a real 82 * C states, especially those with large exit latencies, can have a real
83 * noticable impact on workloads, which is not acceptable for most sysadmins, 83 * noticeable impact on workloads, which is not acceptable for most sysadmins,
84 * and in addition, less performance has a power price of its own. 84 * and in addition, less performance has a power price of its own.
85 * 85 *
86 * As a general rule of thumb, menu assumes that the following heuristic 86 * As a general rule of thumb, menu assumes that the following heuristic
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde34..411d5bf50fc4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
162 162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{ 164{
165 u32 val = (1 << (1 + (chan->idx * 16))); 165 u32 val = ~(1 << (chan->idx * 16));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168} 168}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index fb64cf36ba61..eb6b54dbb806 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
580 580
581 sh_chan = to_sh_chan(chan); 581 sh_chan = to_sh_chan(chan);
582 param = chan->private; 582 param = chan->private;
583 slave_addr = param->config->addr;
584 583
585 /* Someone calling slave DMA on a public channel? */ 584 /* Someone calling slave DMA on a public channel? */
586 if (!param || !sg_len) { 585 if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
589 return NULL; 588 return NULL;
590 } 589 }
591 590
591 slave_addr = param->config->addr;
592
592 /* 593 /*
593 * if (param != NULL), this is a successfully requested slave channel, 594 * if (param != NULL), this is a successfully requested slave channel,
594 * therefore param->config != NULL too. 595 * therefore param->config != NULL too.
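
The shdma hunk above moves the param->config->addr read below the !param check; reading through param before validating it dereferences a possibly NULL pointer. The bug shape, reduced to plain C (illustrative only):

/* Illustrative only: dereference-before-NULL-check versus
 * check-then-dereference.
 */
struct cfg  { unsigned long addr; };
struct parm { struct cfg *config; };

/* Buggy shape: reads p->config->addr before validating p. */
static unsigned long get_addr_buggy(struct parm *p)
{
	unsigned long addr = p->config->addr;	/* crashes when p is NULL */

	if (!p)
		return 0;
	return addr;
}

/* Fixed shape: validate first, dereference afterwards. */
static unsigned long get_addr_fixed(struct parm *p)
{
	if (!p)
		return 0;
	return p->config->addr;
}
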
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b81..6b21e25f7a84 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
339{ 339{
340 int status; 340 int status;
341 341
342 if (mci->op_state != OP_RUNNING_POLL)
343 return;
344
342 status = cancel_delayed_work(&mci->work); 345 status = cancel_delayed_work(&mci->work);
343 if (status == 0) { 346 if (status == 0) {
344 debugf0("%s() not canceled, flush the queue\n", 347 debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e0187d16dd7c..0fd5b85a0f75 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1140 ATTR_COUNTER(0), 1140 ATTR_COUNTER(0),
1141 ATTR_COUNTER(1), 1141 ATTR_COUNTER(1),
1142 ATTR_COUNTER(2), 1142 ATTR_COUNTER(2),
1143 { .attr = { .name = NULL } }
1143}; 1144};
1144 1145
1145static struct mcidev_sysfs_group i7core_udimm_counters = { 1146static struct mcidev_sysfs_group i7core_udimm_counters = {
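
The i7core_edac hunk above terminates the counters table with an entry whose name is NULL; code that walks such attribute tables stops at the first NULL name, so without the sentinel the walk runs past the end of the array. A self-contained sketch of that convention (illustrative only):

/* Illustrative only: a sentinel-terminated table and the walk that
 * depends on it.
 */
#include <stdio.h>

struct attr { const char *name; };

static const struct attr counters[] = {
	{ "counter0" },
	{ "counter1" },
	{ "counter2" },
	{ NULL },			/* sentinel the loop stops on */
};

int main(void)
{
	for (const struct attr *a = counters; a->name; a++)
		printf("%s\n", a->name);
	return 0;
}
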
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index be29b0bb2471..1b05896648bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static const struct {
263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 267 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
267}; 268};
268 269
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42ca70c3..823559ab0e24 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
459 return err; 459 return err;
460} 460}
461 461
462static int sx150x_init_hw(struct sx150x_chip *chip, 462static int sx150x_reset(struct sx150x_chip *chip)
463 struct sx150x_platform_data *pdata)
464{ 463{
465 int err = 0; 464 int err;
466 465
467 err = i2c_smbus_write_word_data(chip->client, 466 err = i2c_smbus_write_byte_data(chip->client,
468 chip->dev_cfg->reg_reset, 467 chip->dev_cfg->reg_reset,
469 0x3412); 468 0x12);
470 if (err < 0) 469 if (err < 0)
471 return err; 470 return err;
472 471
472 err = i2c_smbus_write_byte_data(chip->client,
473 chip->dev_cfg->reg_reset,
474 0x34);
475 return err;
476}
477
478static int sx150x_init_hw(struct sx150x_chip *chip,
479 struct sx150x_platform_data *pdata)
480{
481 int err = 0;
482
483 if (pdata->reset_during_probe) {
484 err = sx150x_reset(chip);
485 if (err < 0)
486 return err;
487 }
488
473 err = sx150x_i2c_write(chip->client, 489 err = sx150x_i2c_write(chip->client,
474 chip->dev_cfg->reg_misc, 490 chip->dev_cfg->reg_misc,
475 0x01); 491 0x01);
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
98 * user_data: A pointer the data that is copied to the buffer. 98 * user_data: A pointer the data that is copied to the buffer.
99 * size: The Number of bytes to copy. 99 * size: The Number of bytes to copy.
100 */ 100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf, 101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size) 102 void __user *user_data, int size)
103{ 103{
104 int nr_pages = size / PAGE_SIZE + 1; 104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx; 105 int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
163{ 163{
164 int idx = drm_buffer_index(buf); 164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf); 165 int page = drm_buffer_page(buf);
166 void *obj = 0; 166 void *obj = NULL;
167 167
168 if (idx + objsize <= PAGE_SIZE) { 168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx]; 169 obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2ab01e90a96..dcbeb98f195a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
103 if (connector->funcs->force) 103 if (connector->funcs->force)
104 connector->funcs->force(connector); 104 connector->funcs->force(connector);
105 } else { 105 } else {
106 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector, true);
107 drm_helper_hpd_irq_event(dev); 107 drm_kms_helper_poll_enable(dev);
108 } 108 }
109 109
110 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
637 mode_changed = true; 637 mode_changed = true;
638 638
639 if (mode_changed) { 639 if (mode_changed) {
640 old_fb = set->crtc->fb;
641 set->crtc->fb = set->fb;
642 set->crtc->enabled = (set->mode != NULL); 640 set->crtc->enabled = (set->mode != NULL);
643 if (set->mode != NULL) { 641 if (set->mode != NULL) {
644 DRM_DEBUG_KMS("attempting to set mode from" 642 DRM_DEBUG_KMS("attempting to set mode from"
645 " userspace\n"); 643 " userspace\n");
646 drm_mode_debug_printmodeline(set->mode); 644 drm_mode_debug_printmodeline(set->mode);
645 old_fb = set->crtc->fb;
646 set->crtc->fb = set->fb;
647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
648 set->x, set->y, 648 set->x, set->y,
649 old_fb)) { 649 old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
866 !(connector->polled & DRM_CONNECTOR_POLL_HPD)) 866 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
867 continue; 867 continue;
868 868
869 status = connector->funcs->detect(connector); 869 status = connector->funcs->detect(connector, false);
870 if (old_status != status) 870 if (old_status != status)
871 changed = true; 871 changed = true;
872 } 872 }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07510df..5663d2719063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 kref_init(&obj->refcount); 150 kref_init(&obj->refcount);
151 kref_init(&obj->handlecount); 151 atomic_set(&obj->handle_count, 0);
152 obj->size = size; 152 obj->size = size;
153 153
154 atomic_inc(&dev->object_count); 154 atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
462} 462}
463EXPORT_SYMBOL(drm_gem_object_free); 463EXPORT_SYMBOL(drm_gem_object_free);
464 464
465/**
466 * Called after the last reference to the object has been lost.
467 * Must be called without holding struct_mutex
468 *
469 * Frees the object
470 */
471void
472drm_gem_object_free_unlocked(struct kref *kref)
473{
474 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
475 struct drm_device *dev = obj->dev;
476
477 if (dev->driver->gem_free_object_unlocked != NULL)
478 dev->driver->gem_free_object_unlocked(obj);
479 else if (dev->driver->gem_free_object != NULL) {
480 mutex_lock(&dev->struct_mutex);
481 dev->driver->gem_free_object(obj);
482 mutex_unlock(&dev->struct_mutex);
483 }
484}
485EXPORT_SYMBOL(drm_gem_object_free_unlocked);
486
487static void drm_gem_object_ref_bug(struct kref *list_kref) 465static void drm_gem_object_ref_bug(struct kref *list_kref)
488{ 466{
489 BUG(); 467 BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
496 * called before drm_gem_object_free or we'll be touching 474 * called before drm_gem_object_free or we'll be touching
497 * freed memory 475 * freed memory
498 */ 476 */
499void 477void drm_gem_object_handle_free(struct drm_gem_object *obj)
500drm_gem_object_handle_free(struct kref *kref)
501{ 478{
502 struct drm_gem_object *obj = container_of(kref,
503 struct drm_gem_object,
504 handlecount);
505 struct drm_device *dev = obj->dev; 479 struct drm_device *dev = obj->dev;
506 480
507 /* Remove any name for this object */ 481 /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
528 struct drm_gem_object *obj = vma->vm_private_data; 502 struct drm_gem_object *obj = vma->vm_private_data;
529 503
530 drm_gem_object_reference(obj); 504 drm_gem_object_reference(obj);
505
506 mutex_lock(&obj->dev->struct_mutex);
507 drm_vm_open_locked(vma);
508 mutex_unlock(&obj->dev->struct_mutex);
531} 509}
532EXPORT_SYMBOL(drm_gem_vm_open); 510EXPORT_SYMBOL(drm_gem_vm_open);
533 511
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
535{ 513{
536 struct drm_gem_object *obj = vma->vm_private_data; 514 struct drm_gem_object *obj = vma->vm_private_data;
537 515
538 drm_gem_object_unreference_unlocked(obj); 516 mutex_lock(&obj->dev->struct_mutex);
517 drm_vm_close_locked(vma);
518 drm_gem_object_unreference(obj);
519 mutex_unlock(&obj->dev->struct_mutex);
539} 520}
540EXPORT_SYMBOL(drm_gem_vm_close); 521EXPORT_SYMBOL(drm_gem_vm_close);
541 522
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c7827243..974e970ce3f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
255 255
256 seq_printf(m, "%6d %8zd %7d %8d\n", 256 seq_printf(m, "%6d %8zd %7d %8d\n",
257 obj->name, obj->size, 257 obj->name, obj->size,
258 atomic_read(&obj->handlecount.refcount), 258 atomic_read(&obj->handle_count),
259 atomic_read(&obj->refcount.refcount)); 259 atomic_read(&obj->refcount.refcount));
260 return 0; 260 return 0;
261} 261}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b542a7..f5bd9e590c80 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
164 dev->hose = pdev->sysdata; 164 dev->hose = pdev->sysdata;
165#endif 165#endif
166 166
167 mutex_lock(&drm_global_mutex);
168
167 if ((ret = drm_fill_in_dev(dev, ent, driver))) { 169 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
168 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 170 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
169 goto err_g2; 171 goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
199 driver->name, driver->major, driver->minor, driver->patchlevel, 201 driver->name, driver->major, driver->minor, driver->patchlevel,
200 driver->date, pci_name(pdev), dev->primary->index); 202 driver->date, pci_name(pdev), dev->primary->index);
201 203
204 mutex_unlock(&drm_global_mutex);
202 return 0; 205 return 0;
203 206
204err_g4: 207err_g4:
@@ -210,6 +213,7 @@ err_g2:
210 pci_disable_device(pdev); 213 pci_disable_device(pdev);
211err_g1: 214err_g1:
212 kfree(dev); 215 kfree(dev);
216 mutex_unlock(&drm_global_mutex);
213 return ret; 217 return ret;
214} 218}
215EXPORT_SYMBOL(drm_get_pci_dev); 219EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3afa8d..92d1d0fb7b75 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
53 dev->platformdev = platdev; 53 dev->platformdev = platdev;
54 dev->dev = &platdev->dev; 54 dev->dev = &platdev->dev;
55 55
56 mutex_lock(&drm_global_mutex);
57
56 ret = drm_fill_in_dev(dev, NULL, driver); 58 ret = drm_fill_in_dev(dev, NULL, driver);
57 59
58 if (ret) { 60 if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
87 89
88 list_add_tail(&dev->driver_item, &driver->device_list); 90 list_add_tail(&dev->driver_item, &driver->device_list);
89 91
92 mutex_unlock(&drm_global_mutex);
93
90 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 94 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
91 driver->name, driver->major, driver->minor, driver->patchlevel, 95 driver->name, driver->major, driver->minor, driver->patchlevel,
92 driver->date, dev->primary->index); 96 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
100 drm_put_minor(&dev->control); 104 drm_put_minor(&dev->control);
101err_g1: 105err_g1:
102 kfree(dev); 106 kfree(dev);
107 mutex_unlock(&drm_global_mutex);
103 return ret; 108 return ret;
104} 109}
105EXPORT_SYMBOL(drm_get_platform_dev); 110EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a742231..85da4c40694c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
159 struct drm_connector *connector = to_drm_connector(device); 159 struct drm_connector *connector = to_drm_connector(device);
160 enum drm_connector_status status; 160 enum drm_connector_status status;
161 161
162 status = connector->funcs->detect(connector); 162 status = connector->funcs->detect(connector, true);
163 return snprintf(buf, PAGE_SIZE, "%s\n", 163 return snprintf(buf, PAGE_SIZE, "%s\n",
164 drm_get_connector_status_name(status)); 164 drm_get_connector_status_name(status));
165} 165}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index fda67468e603..5df450683aab 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
433 mutex_unlock(&dev->struct_mutex); 433 mutex_unlock(&dev->struct_mutex);
434} 434}
435 435
436/** 436void drm_vm_close_locked(struct vm_area_struct *vma)
437 * \c close method for all virtual memory types.
438 *
439 * \param vma virtual memory area.
440 *
441 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
442 * free it.
443 */
444static void drm_vm_close(struct vm_area_struct *vma)
445{ 437{
446 struct drm_file *priv = vma->vm_file->private_data; 438 struct drm_file *priv = vma->vm_file->private_data;
447 struct drm_device *dev = priv->minor->dev; 439 struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
451 vma->vm_start, vma->vm_end - vma->vm_start); 443 vma->vm_start, vma->vm_end - vma->vm_start);
452 atomic_dec(&dev->vma_count); 444 atomic_dec(&dev->vma_count);
453 445
454 mutex_lock(&dev->struct_mutex);
455 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 446 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
456 if (pt->vma == vma) { 447 if (pt->vma == vma) {
457 list_del(&pt->head); 448 list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
459 break; 450 break;
460 } 451 }
461 } 452 }
453}
454
455/**
456 * \c close method for all virtual memory types.
457 *
458 * \param vma virtual memory area.
459 *
460 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
461 * free it.
462 */
463static void drm_vm_close(struct vm_area_struct *vma)
464{
465 struct drm_file *priv = vma->vm_file->private_data;
466 struct drm_device *dev = priv->minor->dev;
467
468 mutex_lock(&dev->struct_mutex);
469 drm_vm_close_locked(vma);
462 mutex_unlock(&dev->struct_mutex); 470 mutex_unlock(&dev->struct_mutex);
463} 471}
464 472
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220fa..fb07e73581e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
116static const struct file_operations i810_buffer_fops = { 116static const struct file_operations i810_buffer_fops = {
117 .open = drm_open, 117 .open = drm_open,
118 .release = drm_release, 118 .release = drm_release,
119 .unlocked_ioctl = drm_ioctl, 119 .unlocked_ioctl = i810_ioctl,
120 .mmap = i810_mmap_buffers, 120 .mmap = i810_mmap_buffers,
121 .fasync = drm_fasync, 121 .fasync = drm_fasync,
122}; 122};
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415ac..cc92c7e6236f 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
118static const struct file_operations i830_buffer_fops = { 118static const struct file_operations i830_buffer_fops = {
119 .open = drm_open, 119 .open = drm_open,
120 .release = drm_release, 120 .release = drm_release,
121 .unlocked_ioctl = drm_ioctl, 121 .unlocked_ioctl = i830_ioctl,
122 .mmap = i830_mmap_buffers, 122 .mmap = i830_mmap_buffers,
123 .fasync = drm_fasync, 123 .fasync = drm_fasync,
124}; 124};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..c74e4e8006d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1787 } 1787 }
1788 } 1788 }
1789 1789
1790 div_u64(diff, diff1); 1790 diff = div_u64(diff, diff1);
1791 ret = ((m * diff) + c); 1791 ret = ((m * diff) + c);
1792 div_u64(ret, 10); 1792 ret = div_u64(ret, 10);
1793 1793
1794 dev_priv->last_count1 = total_count; 1794 dev_priv->last_count1 = total_count;
1795 dev_priv->last_time1 = now; 1795 dev_priv->last_time1 = now;
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1858 1858
1859 /* More magic constants... */ 1859 /* More magic constants... */
1860 diff = diff * 1181; 1860 diff = diff * 1181;
1861 div_u64(diff, diffms * 10); 1861 diff = div_u64(diff, diffms * 10);
1862 dev_priv->gfx_power = diff; 1862 dev_priv->gfx_power = diff;
1863} 1863}
1864 1864
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 216deb579785..6dbe14cc4f74 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
173 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
173 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 174 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
174 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 175 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
175 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 176 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 16fca1d1799a..90b1d6753b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return -ENOMEM; 136 return -ENOMEM;
137 137
138 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
139 /* drop reference from allocate - handle holds it now */
140 drm_gem_object_unreference_unlocked(obj);
139 if (ret) { 141 if (ret) {
140 drm_gem_object_unreference_unlocked(obj);
141 return ret; 142 return ret;
142 } 143 }
143 144
144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
146
147 args->handle = handle; 145 args->handle = handle;
148 return 0; 146 return 0;
149} 147}
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
471 return -ENOENT; 469 return -ENOENT;
472 obj_priv = to_intel_bo(obj); 470 obj_priv = to_intel_bo(obj);
473 471
474 /* Bounds check source. 472 /* Bounds check source. */
475 * 473 if (args->offset > obj->size || args->size > obj->size - args->offset) {
476 * XXX: This could use review for overflow issues... 474 ret = -EINVAL;
477 */ 475 goto err;
478 if (args->offset > obj->size || args->size > obj->size || 476 }
479 args->offset + args->size > obj->size) { 477
480 drm_gem_object_unreference_unlocked(obj); 478 if (!access_ok(VERIFY_WRITE,
481 return -EINVAL; 479 (char __user *)(uintptr_t)args->data_ptr,
480 args->size)) {
481 ret = -EFAULT;
482 goto err;
482 } 483 }
483 484
484 if (i915_gem_object_needs_bit17_swizzle(obj)) { 485 if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
490 file_priv); 491 file_priv);
491 } 492 }
492 493
494err:
493 drm_gem_object_unreference_unlocked(obj); 495 drm_gem_object_unreference_unlocked(obj);
494
495 return ret; 496 return ret;
496} 497}
497 498
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
580 581
581 user_data = (char __user *) (uintptr_t) args->data_ptr; 582 user_data = (char __user *) (uintptr_t) args->data_ptr;
582 remain = args->size; 583 remain = args->size;
583 if (!access_ok(VERIFY_READ, user_data, remain))
584 return -EFAULT;
585 584
586 585
587 mutex_lock(&dev->struct_mutex); 586 mutex_lock(&dev->struct_mutex);
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
934 return -ENOENT; 933 return -ENOENT;
935 obj_priv = to_intel_bo(obj); 934 obj_priv = to_intel_bo(obj);
936 935
937 /* Bounds check destination. 936 /* Bounds check destination. */
938 * 937 if (args->offset > obj->size || args->size > obj->size - args->offset) {
939 * XXX: This could use review for overflow issues... 938 ret = -EINVAL;
940 */ 939 goto err;
941 if (args->offset > obj->size || args->size > obj->size || 940 }
942 args->offset + args->size > obj->size) { 941
943 drm_gem_object_unreference_unlocked(obj); 942 if (!access_ok(VERIFY_READ,
944 return -EINVAL; 943 (char __user *)(uintptr_t)args->data_ptr,
944 args->size)) {
945 ret = -EFAULT;
946 goto err;
945 } 947 }
946 948
947 /* We can only do the GTT pwrite on untiled buffers, as otherwise 949 /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
975 DRM_INFO("pwrite failed %d\n", ret); 977 DRM_INFO("pwrite failed %d\n", ret);
976#endif 978#endif
977 979
980err:
978 drm_gem_object_unreference_unlocked(obj); 981 drm_gem_object_unreference_unlocked(obj);
979
980 return ret; 982 return ret;
981} 983}
982 984
@@ -2351,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2351 2353
2352 reg->obj = obj; 2354 reg->obj = obj;
2353 2355
2354 if (IS_GEN6(dev)) 2356 switch (INTEL_INFO(dev)->gen) {
2357 case 6:
2355 sandybridge_write_fence_reg(reg); 2358 sandybridge_write_fence_reg(reg);
2356 else if (IS_I965G(dev)) 2359 break;
2360 case 5:
2361 case 4:
2357 i965_write_fence_reg(reg); 2362 i965_write_fence_reg(reg);
2358 else if (IS_I9XX(dev)) 2363 break;
2364 case 3:
2359 i915_write_fence_reg(reg); 2365 i915_write_fence_reg(reg);
2360 else 2366 break;
2367 case 2:
2361 i830_write_fence_reg(reg); 2368 i830_write_fence_reg(reg);
2369 break;
2370 }
2362 2371
2363 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2372 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2364 obj_priv->tiling_mode); 2373 obj_priv->tiling_mode);
@@ -2381,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2381 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2390 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2382 struct drm_i915_fence_reg *reg = 2391 struct drm_i915_fence_reg *reg =
2383 &dev_priv->fence_regs[obj_priv->fence_reg]; 2392 &dev_priv->fence_regs[obj_priv->fence_reg];
2393 uint32_t fence_reg;
2384 2394
2385 if (IS_GEN6(dev)) { 2395 switch (INTEL_INFO(dev)->gen) {
2396 case 6:
2386 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2397 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2387 (obj_priv->fence_reg * 8), 0); 2398 (obj_priv->fence_reg * 8), 0);
2388 } else if (IS_I965G(dev)) { 2399 break;
2400 case 5:
2401 case 4:
2389 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2402 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2390 } else { 2403 break;
2391 uint32_t fence_reg; 2404 case 3:
2392 2405 if (obj_priv->fence_reg >= 8)
2393 if (obj_priv->fence_reg < 8) 2406 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2394 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2395 else 2407 else
2396 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 2408 case 2:
2397 8) * 4; 2409 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2398 2410
2399 I915_WRITE(fence_reg, 0); 2411 I915_WRITE(fence_reg, 0);
2412 break;
2400 } 2413 }
2401 2414
2402 reg->obj = NULL; 2415 reg->obj = NULL;
@@ -3247,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3247 (int) reloc->offset, 3260 (int) reloc->offset,
3248 reloc->read_domains, 3261 reloc->read_domains,
3249 reloc->write_domain); 3262 reloc->write_domain);
3263 drm_gem_object_unreference(target_obj);
3264 i915_gem_object_unpin(obj);
3250 return -EINVAL; 3265 return -EINVAL;
3251 } 3266 }
3252 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3267 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
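
The i915_gem.c pread/pwrite hunks above switch the bounds test to "offset > size || size > size - offset", the usual non-wrapping form: once offset <= size is known, the subtraction cannot overflow, whereas an "offset + size" sum can wrap around and slip past a naive comparison. A reduced demonstration of the general idiom (illustrative only; the widths in the driver differ):

/* Illustrative only: "off + len <= size" can wrap; "len <= size - off"
 * after checking "off <= size" cannot.
 */
#include <stdint.h>
#include <stdio.h>

static int ok_naive(uint32_t off, uint32_t len, uint32_t size)
{
	return off + len <= size;		/* the sum may wrap */
}

static int ok_safe(uint32_t off, uint32_t len, uint32_t size)
{
	return off <= size && len <= size - off;
}

int main(void)
{
	uint32_t size = 0x1000, off = 0xfffff000u, len = 0x2000;

	printf("naive accepts the wrapped range: %d\n", ok_naive(off, len, size));
	printf("safe rejects it:                 %d\n", !ok_safe(off, len, size));
	return 0;
}
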
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3cccad8..5c428fa3e0b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
79 struct list_head *unwind) 79 struct list_head *unwind)
80{ 80{
81 list_add(&obj_priv->evict_list, unwind); 81 list_add(&obj_priv->evict_list, unwind);
82 drm_gem_object_reference(&obj_priv->base);
82 return drm_mm_scan_add_block(obj_priv->gtt_space); 83 return drm_mm_scan_add_block(obj_priv->gtt_space);
83} 84}
84 85
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
92{ 93{
93 drm_i915_private_t *dev_priv = dev->dev_private; 94 drm_i915_private_t *dev_priv = dev->dev_private;
94 struct list_head eviction_list, unwind_list; 95 struct list_head eviction_list, unwind_list;
95 struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; 96 struct drm_i915_gem_object *obj_priv;
96 struct list_head *render_iter, *bsd_iter; 97 struct list_head *render_iter, *bsd_iter;
97 int ret = 0; 98 int ret = 0;
98 99
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
165 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 166 list_for_each_entry(obj_priv, &unwind_list, evict_list) {
166 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 167 ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
167 BUG_ON(ret); 168 BUG_ON(ret);
169 drm_gem_object_unreference(&obj_priv->base);
168 } 170 }
169 171
170 /* We expect the caller to unpin, evict all and try again, or give up. 172 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
173 return -ENOSPC; 175 return -ENOSPC;
174 176
175found: 177found:
178 /* drm_mm doesn't allow any other other operations while
179 * scanning, therefore store to be evicted objects on a
180 * temporary list. */
176 INIT_LIST_HEAD(&eviction_list); 181 INIT_LIST_HEAD(&eviction_list);
177 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 182 while (!list_empty(&unwind_list)) {
178 &unwind_list, evict_list) { 183 obj_priv = list_first_entry(&unwind_list,
184 struct drm_i915_gem_object,
185 evict_list);
179 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 186 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
180 /* drm_mm doesn't allow any other other operations while
181 * scanning, therefore store to be evicted objects on a
182 * temporary list. */
183 list_move(&obj_priv->evict_list, &eviction_list); 187 list_move(&obj_priv->evict_list, &eviction_list);
188 continue;
184 } 189 }
190 list_del(&obj_priv->evict_list);
191 drm_gem_object_unreference(&obj_priv->base);
185 } 192 }
186 193
187 /* Unbinding will emit any required flushes */ 194 /* Unbinding will emit any required flushes */
188 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 195 while (!list_empty(&eviction_list)) {
189 &eviction_list, evict_list) { 196 obj_priv = list_first_entry(&eviction_list,
190#if WATCH_LRU 197 struct drm_i915_gem_object,
191 DRM_INFO("%s: evicting %p\n", __func__, obj); 198 evict_list);
192#endif 199 if (ret == 0)
193 ret = i915_gem_object_unbind(&obj_priv->base); 200 ret = i915_gem_object_unbind(&obj_priv->base);
194 if (ret) 201 list_del(&obj_priv->evict_list);
195 return ret; 202 drm_gem_object_unreference(&obj_priv->base);
196 } 203 }
197 204
198 /* The just created free hole should be on the top of the free stack 205 return ret;
199 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
200 * Furthermore all accessed data has just recently been used, so it
201 * should be really fast, too. */
202 BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
203 alignment, 0));
204
205 return 0;
206} 206}
207 207
208int 208int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 59457e83b011..744225ebb4b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
1350 i915_seqno_passed(i915_get_gem_seqno(dev, 1350 i915_seqno_passed(i915_get_gem_seqno(dev,
1351 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1352 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false;
1354
1353 dev_priv->hangcheck_count = 0; 1355 dev_priv->hangcheck_count = 0;
1354 1356
1355 /* Issue a wake-up to catch stuck h/w. */ 1357 /* Issue a wake-up to catch stuck h/w. */
1356 if (dev_priv->render_ring.waiting_gem_seqno | 1358 if (dev_priv->render_ring.waiting_gem_seqno &&
1357 dev_priv->bsd_ring.waiting_gem_seqno) { 1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1358 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); 1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1359 if (dev_priv->render_ring.waiting_gem_seqno) 1361 missed_wakeup = true;
1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1362 }
1361 if (dev_priv->bsd_ring.waiting_gem_seqno) 1363
1362 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1364 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true;
1363 } 1368 }
1369
1370 if (missed_wakeup)
1371 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1364 return; 1372 return;
1365 } 1373 }
1366 1374
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d094e9129223..4f5e15577e89 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2206,9 +2206,17 @@
2206#define WM1_LP_SR_EN (1<<31) 2206#define WM1_LP_SR_EN (1<<31)
2207#define WM1_LP_LATENCY_SHIFT 24 2207#define WM1_LP_LATENCY_SHIFT 24
2208#define WM1_LP_LATENCY_MASK (0x7f<<24) 2208#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20
2209#define WM1_LP_SR_MASK (0x1ff<<8) 2211#define WM1_LP_SR_MASK (0x1ff<<8)
2210#define WM1_LP_SR_SHIFT 8 2212#define WM1_LP_SR_SHIFT 8
2211#define WM1_LP_CURSOR_MASK (0x3f) 2213#define WM1_LP_CURSOR_MASK (0x3f)
2214#define WM2_LP_ILK 0x4510c
2215#define WM2_LP_EN (1<<31)
2216#define WM3_LP_ILK 0x45110
2217#define WM3_LP_EN (1<<31)
2218#define WM1S_LP_ILK 0x45120
2219#define WM1S_LP_EN (1<<31)
2212 2220
2213/* Memory latency timer register */ 2221/* Memory latency timer register */
2214#define MLTR_ILK 0x11222 2222#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f2440e..31f08581e93a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 790
791 /* Fences */ 791 /* Fences */
792 if (IS_I965G(dev)) { 792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
793 for (i = 0; i < 16; i++) 799 for (i = 0; i < 16; i++)
794 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
795 } else { 801 break;
796 for (i = 0; i < 8; i++) 802 case 3:
797 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
798
799 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
800 for (i = 0; i < 8; i++) 804 for (i = 0; i < 8; i++)
801 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
802 } 811 }
803 812
804 return 0; 813 return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
815 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 824 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
816 825
817 /* Fences */ 826 /* Fences */
818 if (IS_I965G(dev)) { 827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
819 for (i = 0; i < 16; i++) 834 for (i = 0; i < 16; i++)
820 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
821 } else { 836 break;
822 for (i = 0; i < 8; i++) 837 case 3:
823 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 838 case 2:
824 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
825 for (i = 0; i < 8; i++) 840 for (i = 0; i < 8; i++)
826 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
827 } 845 }
828 846
829 i915_restore_display(dev); 847 i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b7735196cd5..197d4f32585a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000, 1))
191 DRM_ERROR("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
194 I915_WRITE(PCH_ADPA, temp); 194 I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 246 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 247 1000, 1))
248 DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); 248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 249 }
250 250
251 stat = I915_READ(PORT_HOTPLUG_STAT); 251 stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
400 return status; 400 return status;
401} 401}
402 402
403static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 403static enum drm_connector_status
404intel_crt_detect(struct drm_connector *connector, bool force)
404{ 405{
405 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
406 struct drm_encoder *encoder = intel_attached_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
419 if (intel_crt_detect_ddc(encoder)) 420 if (intel_crt_detect_ddc(encoder))
420 return connector_status_connected; 421 return connector_status_connected;
421 422
423 if (!force)
424 return connector->status;
425
422 /* for pre-945g platforms use load detect */ 426 /* for pre-945g platforms use load detect */
423 if (encoder->crtc && encoder->crtc->enabled) { 427 if (encoder->crtc && encoder->crtc->enabled) {
424 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 428 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 40cc5da264a9..979228594599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1013 DRM_DEBUG_KMS("vblank wait timed out\n");
1014} 1014}
1015 1015
1016/** 1016/*
1017 * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1017 * intel_wait_for_pipe_off - wait for pipe to turn off
1018 * @dev: drm device 1018 * @dev: drm device
1019 * @pipe: pipe to wait for 1019 * @pipe: pipe to wait for
1020 * 1020 *
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1022 * spinning on the vblank interrupt status bit, since we won't actually 1022 * spinning on the vblank interrupt status bit, since we won't actually
1023 * see an interrupt when the pipe is disabled. 1023 * see an interrupt when the pipe is disabled.
1024 * 1024 *
1025 * So this function waits for the display line value to settle (it 1025 * On Gen4 and above:
1026 * usually ends up stopping at the start of the next frame). 1026 * wait for the pipe register state bit to turn off
1027 *
1028 * Otherwise:
1029 * wait for the display line value to settle (it usually
1030 * ends up stopping at the start of the next frame).
1031 *
1027 */ 1032 */
1028void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1033static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1029{ 1034{
1030 struct drm_i915_private *dev_priv = dev->dev_private; 1035 struct drm_i915_private *dev_priv = dev->dev_private;
1031 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1036
1032 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1037 if (INTEL_INFO(dev)->gen >= 4) {
1033 u32 last_line; 1038 int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
1034 1039
1035 /* Wait for the display line to settle */ 1040 /* Wait for the Pipe State to go off */
1036 do { 1041 if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
1037 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1042 100, 0))
1038 mdelay(5); 1043 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1039 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1044 } else {
1040 time_after(timeout, jiffies)); 1045 u32 last_line;
1041 1046 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
1042 if (time_after(jiffies, timeout)) 1047 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1043 DRM_DEBUG_KMS("vblank wait timed out\n"); 1048
1049 /* Wait for the display line to settle */
1050 do {
1051 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
1052 mdelay(5);
1053 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
1054 time_after(timeout, jiffies));
1055 if (time_after(jiffies, timeout))
1056 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1057 }
1044} 1058}
1045 1059
1046/* Parameters have changed, update FBC info */ 1060/* Parameters have changed, update FBC info */
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2328 I915_READ(dspbase_reg); 2342 I915_READ(dspbase_reg);
2329 } 2343 }
2330 2344
2331 /* Wait for vblank for the disable to take effect */
2332 intel_wait_for_vblank_off(dev, pipe);
2333
2334 /* Don't disable pipe A or pipe A PLLs if needed */ 2345 /* Don't disable pipe A or pipe A PLLs if needed */
2335 if (pipeconf_reg == PIPEACONF && 2346 if (pipeconf_reg == PIPEACONF &&
2336 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2347 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
2348 /* Wait for vblank for the disable to take effect */
2349 intel_wait_for_vblank(dev, pipe);
2337 goto skip_pipe_off; 2350 goto skip_pipe_off;
2351 }
2338 2352
2339 /* Next, disable display pipes */ 2353 /* Next, disable display pipes */
2340 temp = I915_READ(pipeconf_reg); 2354 temp = I915_READ(pipeconf_reg);
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2343 I915_READ(pipeconf_reg); 2357 I915_READ(pipeconf_reg);
2344 } 2358 }
2345 2359
2346 /* Wait for vblank for the disable to take effect. */ 2360 /* Wait for the pipe to turn off */
2347 intel_wait_for_vblank_off(dev, pipe); 2361 intel_wait_for_pipe_off(dev, pipe);
2348 2362
2349 temp = I915_READ(dpll_reg); 2363 temp = I915_READ(dpll_reg);
2350 if ((temp & DPLL_VCO_ENABLE) != 0) { 2364 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2463,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2463 struct drm_display_mode *adjusted_mode) 2477 struct drm_display_mode *adjusted_mode)
2464{ 2478{
2465 struct drm_device *dev = crtc->dev; 2479 struct drm_device *dev = crtc->dev;
2480
2466 if (HAS_PCH_SPLIT(dev)) { 2481 if (HAS_PCH_SPLIT(dev)) {
2467 /* FDI link clock is fixed at 2.7G */ 2482 /* FDI link clock is fixed at 2.7G */
2468 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 2483 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2469 return false; 2484 return false;
2470 } 2485 }
2486
2487 /* XXX some encoders set the crtcinfo, others don't.
2488 * Obviously we need some form of conflict resolution here...
2489 */
2490 if (adjusted_mode->crtc_htotal == 0)
2491 drm_mode_set_crtcinfo(adjusted_mode, 0);
2492
2471 return true; 2493 return true;
2472} 2494}
2473 2495
@@ -2767,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2767 /* Don't promote wm_size to unsigned... */ 2789 /* Don't promote wm_size to unsigned... */
2768 if (wm_size > (long)wm->max_wm) 2790 if (wm_size > (long)wm->max_wm)
2769 wm_size = wm->max_wm; 2791 wm_size = wm->max_wm;
2770 if (wm_size <= 0) { 2792 if (wm_size <= 0)
2771 wm_size = wm->default_wm; 2793 wm_size = wm->default_wm;
2772 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2773 " entries required = %ld, available = %lu.\n",
2774 entries_required + wm->guard_size,
2775 wm->fifo_size);
2776 }
2777
2778 return wm_size; 2794 return wm_size;
2779} 2795}
2780 2796
@@ -3388,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3388 reg_value = I915_READ(WM1_LP_ILK); 3404 reg_value = I915_READ(WM1_LP_ILK);
3389 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | 3405 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3390 WM1_LP_CURSOR_MASK); 3406 WM1_LP_CURSOR_MASK);
3391 reg_value |= WM1_LP_SR_EN | 3407 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3392 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3393 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; 3408 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3394 3409
3395 I915_WRITE(WM1_LP_ILK, reg_value); 3410 I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5675 I915_WRITE(DISP_ARB_CTL, 5690 I915_WRITE(DISP_ARB_CTL,
5676 (I915_READ(DISP_ARB_CTL) | 5691 (I915_READ(DISP_ARB_CTL) |
5677 DISP_FBC_WM_DIS)); 5692 DISP_FBC_WM_DIS));
5693 I915_WRITE(WM3_LP_ILK, 0);
5694 I915_WRITE(WM2_LP_ILK, 0);
5695 I915_WRITE(WM1_LP_ILK, 0);
5678 } 5696 }
5679 /* 5697 /*
5680 * Based on the document from hardware guys the following bits 5698 * Based on the document from hardware guys the following bits
@@ -5696,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5696 ILK_DPFC_DIS2 | 5714 ILK_DPFC_DIS2 |
5697 ILK_CLK_FBC); 5715 ILK_CLK_FBC);
5698 } 5716 }
5699 if (IS_GEN6(dev)) 5717 return;
5700 return;
5701 } else if (IS_G4X(dev)) { 5718 } else if (IS_G4X(dev)) {
5702 uint32_t dspclk_gate; 5719 uint32_t dspclk_gate;
5703 I915_WRITE(RENCLK_GATE_D1, 0); 5720 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5758 OUT_RING(MI_FLUSH); 5775 OUT_RING(MI_FLUSH);
5759 ADVANCE_LP_RING(); 5776 ADVANCE_LP_RING();
5760 } 5777 }
5761 } else { 5778 } else
5762 DRM_DEBUG_KMS("Failed to allocate render context." 5779 DRM_DEBUG_KMS("Failed to allocate render context."
5763 "Disable RC6\n"); 5780 "Disable RC6\n");
5764 return;
5765 }
5766 } 5781 }
5767 5782
5768 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5783 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
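Two things stand out in the intel_display.c hunks above. intel_wait_for_vblank_off() becomes a static intel_wait_for_pipe_off(): on gen4 and newer it polls the I965_PIPECONF_ACTIVE bit with a 100 ms budget, while older parts fall back to watching the PIPEADSL/PIPEBDSL scanline counter until it stops moving, and i9xx_crtc_dpms() then waits for the pipe to actually turn off instead of waiting for a vblank. Separately, intel_crtc_mode_fixup() now calls drm_mode_set_crtcinfo() when an encoder left crtc_htotal at zero. The pre-gen4 fallback loop, reduced to a stand-alone sketch (the register pointer and the 0xfff line mask are placeholders for I915_READ(PIPE*DSL) and DSL_LINEMASK; the timing logic follows the code above):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void wait_for_scanline_to_settle(void __iomem *dsl_reg)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	u32 last_line;

	/* "settled" = two reads 5 ms apart return the same line number */
	do {
		last_line = readl(dsl_reg) & 0xfff;
		mdelay(5);
	} while (((readl(dsl_reg) & 0xfff) != last_line) &&
		 time_after(timeout, jiffies));

	if (time_after(jiffies, timeout))
		pr_debug("pipe_off wait timed out\n");
}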
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51d142939a26..9ab8708ac6ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1138,18 +1138,14 @@ static bool
1138intel_dp_set_link_train(struct intel_dp *intel_dp, 1138intel_dp_set_link_train(struct intel_dp *intel_dp,
1139 uint32_t dp_reg_value, 1139 uint32_t dp_reg_value,
1140 uint8_t dp_train_pat, 1140 uint8_t dp_train_pat,
1141 uint8_t train_set[4], 1141 uint8_t train_set[4])
1142 bool first)
1143{ 1142{
1144 struct drm_device *dev = intel_dp->base.enc.dev; 1143 struct drm_device *dev = intel_dp->base.enc.dev;
1145 struct drm_i915_private *dev_priv = dev->dev_private; 1144 struct drm_i915_private *dev_priv = dev->dev_private;
1146 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1147 int ret; 1145 int ret;
1148 1146
1149 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1147 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1150 POSTING_READ(intel_dp->output_reg); 1148 POSTING_READ(intel_dp->output_reg);
1151 if (first)
1152 intel_wait_for_vblank(dev, intel_crtc->pipe);
1153 1149
1154 intel_dp_aux_native_write_1(intel_dp, 1150 intel_dp_aux_native_write_1(intel_dp,
1155 DP_TRAINING_PATTERN_SET, 1151 DP_TRAINING_PATTERN_SET,
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1174 uint8_t voltage; 1170 uint8_t voltage;
1175 bool clock_recovery = false; 1171 bool clock_recovery = false;
1176 bool channel_eq = false; 1172 bool channel_eq = false;
1177 bool first = true;
1178 int tries; 1173 int tries;
1179 u32 reg; 1174 u32 reg;
1180 uint32_t DP = intel_dp->DP; 1175 uint32_t DP = intel_dp->DP;
1176 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1177
1178 /* Enable output, wait for it to become active */
1179 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1180 POSTING_READ(intel_dp->output_reg);
1181 intel_wait_for_vblank(dev, intel_crtc->pipe);
1181 1182
1182 /* Write the link configuration data */ 1183 /* Write the link configuration data */
1183 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1184 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1210 reg = DP | DP_LINK_TRAIN_PAT_1; 1211 reg = DP | DP_LINK_TRAIN_PAT_1;
1211 1212
1212 if (!intel_dp_set_link_train(intel_dp, reg, 1213 if (!intel_dp_set_link_train(intel_dp, reg,
1213 DP_TRAINING_PATTERN_1, train_set, first)) 1214 DP_TRAINING_PATTERN_1, train_set))
1214 break; 1215 break;
1215 first = false;
1216 /* Set training pattern 1 */ 1216 /* Set training pattern 1 */
1217 1217
1218 udelay(100); 1218 udelay(100);
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1266 1266
1267 /* channel eq pattern */ 1267 /* channel eq pattern */
1268 if (!intel_dp_set_link_train(intel_dp, reg, 1268 if (!intel_dp_set_link_train(intel_dp, reg,
1269 DP_TRAINING_PATTERN_2, train_set, 1269 DP_TRAINING_PATTERN_2, train_set))
1270 false))
1271 break; 1270 break;
1272 1271
1273 udelay(400); 1272 udelay(400);
@@ -1386,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector)
1386 * \return false if DP port is disconnected. 1385 * \return false if DP port is disconnected.
1387 */ 1386 */
1388static enum drm_connector_status 1387static enum drm_connector_status
1389intel_dp_detect(struct drm_connector *connector) 1388intel_dp_detect(struct drm_connector *connector, bool force)
1390{ 1389{
1391 struct drm_encoder *encoder = intel_attached_encoder(connector); 1390 struct drm_encoder *encoder = intel_attached_encoder(connector);
1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1391 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
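The intel_dp.c change drops the bool first parameter that intel_dp_set_link_train() used for one-time setup: enabling the output and waiting for a vblank now happens once at the top of intel_dp_link_train(), before the link configuration is written, rather than on the first training-pattern write. A deliberately tiny stand-alone C sketch of that shape (stub functions, not driver code):

static void enable_output(void) { }
static void wait_vblank(void) { }
static int write_pattern(int pattern) { return 0; }

static void link_train(void)
{
	int tries;

	/* hoisted one-time setup; used to hide behind a "first" flag in the helper */
	enable_output();
	wait_vblank();

	for (tries = 0; tries < 5; tries++)	/* clock recovery loop */
		if (write_pattern(1) != 0)
			break;

	write_pattern(2);			/* channel equalization */
}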
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..8828b3ac6414 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
229 struct drm_crtc *crtc); 229 struct drm_crtc *crtc);
230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 231 struct drm_file *file_priv);
232extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
233extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 232extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
234extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 233extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
235extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 234extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b2c1c5..7c9ec1472d46 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
221 * 221 *
222 * Unimplemented. 222 * Unimplemented.
223 */ 223 */
224static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 224static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force)
225{ 226{
226 struct drm_encoder *encoder = intel_attached_encoder(connector); 227 struct drm_encoder *encoder = intel_attached_encoder(connector);
227 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc96256bf5..56ad9df2ccb5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
237 drm_fb_helper_fini(&ifbdev->helper); 237 drm_fb_helper_fini(&ifbdev->helper);
238 238
239 drm_framebuffer_cleanup(&ifb->base); 239 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) 240 if (ifb->obj) {
241 drm_gem_object_handle_unreference(ifb->obj);
241 drm_gem_object_unreference(ifb->obj); 242 drm_gem_object_unreference(ifb->obj);
243 }
242 244
243 return 0; 245 return 0;
244} 246}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97e6524..926934a482ec 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
139} 139}
140 140
141static enum drm_connector_status 141static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector) 142intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 143{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 144 struct drm_encoder *encoder = intel_attached_encoder(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c1081147..6ec39a86ed06 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
445 * connected and closed means disconnected. We also send hotplug events as 445 * connected and closed means disconnected. We also send hotplug events as
446 * needed, using lid status notification from the input layer. 446 * needed, using lid status notification from the input layer.
447 */ 447 */
448static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 448static enum drm_connector_status
449intel_lvds_detect(struct drm_connector *connector, bool force)
449{ 450{
450 struct drm_device *dev = connector->dev; 451 struct drm_device *dev = connector->dev;
451 enum drm_connector_status status = connector_status_connected; 452 enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
540 * the LID nofication event. 541 * the LID nofication event.
541 */ 542 */
542 if (connector) 543 if (connector)
543 connector->status = connector->funcs->detect(connector); 544 connector->status = connector->funcs->detect(connector,
545 false);
546
544 /* Don't force modeset on machines where it causes a GPU lockup */ 547 /* Don't force modeset on machines where it causes a GPU lockup */
545 if (dmi_check_system(intel_no_modeset_on_lid)) 548 if (dmi_check_system(intel_no_modeset_on_lid))
546 return NOTIFY_OK; 549 return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
875 878
876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
877 intel_encoder->crtc_mask = (1 << 1); 880 intel_encoder->crtc_mask = (1 << 1);
878 if (IS_I965G(dev))
879 intel_encoder->crtc_mask |= (1 << 0);
880 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 881 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
881 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 882 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
882 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 883 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e3b7a7ee39cb..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
1417 if (!analog_connector) 1417 if (!analog_connector)
1418 return false; 1418 return false;
1419 1419
1420 if (analog_connector->funcs->detect(analog_connector) == 1420 if (analog_connector->funcs->detect(analog_connector, false) ==
1421 connector_status_disconnected) 1421 connector_status_disconnected)
1422 return false; 1422 return false;
1423 1423
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1486 return status; 1486 return status;
1487} 1487}
1488 1488
1489static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1489static enum drm_connector_status
1490intel_sdvo_detect(struct drm_connector *connector, bool force)
1490{ 1491{
1491 uint16_t response; 1492 uint16_t response;
1492 struct drm_encoder *encoder = intel_attached_encoder(connector); 1493 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2169 return true; 2170 return true;
2170 2171
2171err: 2172err:
2172 intel_sdvo_destroy_enhance_property(connector); 2173 intel_sdvo_destroy(connector);
2173 kfree(intel_sdvo_connector);
2174 return false; 2174 return false;
2175} 2175}
2176 2176
@@ -2242,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2242 return true; 2242 return true;
2243 2243
2244err: 2244err:
2245 intel_sdvo_destroy_enhance_property(connector); 2245 intel_sdvo_destroy(connector);
2246 kfree(intel_sdvo_connector);
2247 return false; 2246 return false;
2248} 2247}
2249 2248
@@ -2521,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2521 uint16_t response; 2520 uint16_t response;
2522 } enhancements; 2521 } enhancements;
2523 2522
2524 if (!intel_sdvo_get_value(intel_sdvo, 2523 enhancements.response = 0;
2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2524 intel_sdvo_get_value(intel_sdvo,
2526 &enhancements, sizeof(enhancements))) 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2527 return false; 2526 &enhancements, sizeof(enhancements));
2528
2529 if (enhancements.response == 0) { 2527 if (enhancements.response == 0) {
2530 DRM_DEBUG_KMS("No enhancement is supported\n"); 2528 DRM_DEBUG_KMS("No enhancement is supported\n");
2531 return true; 2529 return true;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c671f60ce80b..4a117e318a73 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1341 * we have a pipe programmed in order to probe the TV. 1341 * we have a pipe programmed in order to probe the TV.
1342 */ 1342 */
1343static enum drm_connector_status 1343static enum drm_connector_status
1344intel_tv_detect(struct drm_connector *connector) 1344intel_tv_detect(struct drm_connector *connector, bool force)
1345{ 1345{
1346 struct drm_display_mode mode; 1346 struct drm_display_mode mode;
1347 struct drm_encoder *encoder = intel_attached_encoder(connector); 1347 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
1353 1353
1354 if (encoder->crtc && encoder->crtc->enabled) { 1354 if (encoder->crtc && encoder->crtc->enabled) {
1355 type = intel_tv_detect_type(intel_tv); 1355 type = intel_tv_detect_type(intel_tv);
1356 } else { 1356 } else if (force) {
1357 struct drm_crtc *crtc; 1357 struct drm_crtc *crtc;
1358 int dpms_mode; 1358 int dpms_mode;
1359 1359
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
1364 intel_release_load_detect_pipe(&intel_tv->base, connector, 1364 intel_release_load_detect_pipe(&intel_tv->base, connector,
1365 dpms_mode); 1365 dpms_mode);
1366 } else 1366 } else
1367 type = -1; 1367 return connector_status_unknown;
1368 } 1368 } else
1369 1369 return connector->status;
1370 intel_tv->type = type;
1371 1370
1372 if (type < 0) 1371 if (type < 0)
1373 return connector_status_disconnected; 1372 return connector_status_disconnected;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a1473fff06ac..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
168} 168}
169 169
170static enum drm_connector_status 170static enum drm_connector_status
171nouveau_connector_detect(struct drm_connector *connector) 171nouveau_connector_detect(struct drm_connector *connector, bool force)
172{ 172{
173 struct drm_device *dev = connector->dev; 173 struct drm_device *dev = connector->dev;
174 struct nouveau_connector *nv_connector = nouveau_connector(connector); 174 struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
246} 246}
247 247
248static enum drm_connector_status 248static enum drm_connector_status
249nouveau_connector_detect_lvds(struct drm_connector *connector) 249nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
250{ 250{
251 struct drm_device *dev = connector->dev; 251 struct drm_device *dev = connector->dev;
252 struct drm_nouveau_private *dev_priv = dev->dev_private; 252 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
267 267
268 /* Try retrieving EDID via DDC */ 268 /* Try retrieving EDID via DDC */
269 if (!dev_priv->vbios.fp_no_ddc) { 269 if (!dev_priv->vbios.fp_no_ddc) {
270 status = nouveau_connector_detect(connector); 270 status = nouveau_connector_detect(connector, force);
271 if (status == connector_status_connected) 271 if (status == connector_status_connected)
272 goto out; 272 goto out;
273 } 273 }
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
558 if (nv_encoder->dcb->type == OUTPUT_LVDS && 558 if (nv_encoder->dcb->type == OUTPUT_LVDS &&
559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
561 nv_connector->native_mode = drm_mode_create(dev); 561 struct drm_display_mode mode;
562 nouveau_bios_fp_mode(dev, nv_connector->native_mode); 562
563 nouveau_bios_fp_mode(dev, &mode);
564 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
563 } 565 }
564 566
565 /* Find the native mode if this is a digital panel, if we didn't 567 /* Find the native mode if this is a digital panel, if we didn't
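In the nouveau_connector.c hunk above, the forced LVDS mode is no longer built by drm_mode_create() followed by nouveau_bios_fp_mode() filling the object in place; instead a throwaway struct drm_display_mode on the stack is filled and then copied into a properly allocated mode with drm_mode_duplicate(), presumably so the BIOS helper can scribble over the whole structure without touching a registered mode object. The pattern, as a sketch (fill_mode_from_bios() stands in for nouveau_bios_fp_mode()):

#include <linux/string.h>
#include "drmP.h"
#include "drm_crtc.h"

bool fill_mode_from_bios(struct drm_device *dev, struct drm_display_mode *m);

static struct drm_display_mode *dup_bios_mode(struct drm_device *dev)
{
	struct drm_display_mode mode;

	memset(&mode, 0, sizeof(mode));
	if (!fill_mode_from_bios(dev, &mode))
		return NULL;

	/* allocates a fresh mode, assigns its object id, copies the timings */
	return drm_mode_duplicate(dev, &mode);
}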
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dbd30b2e43fd..d2047713dc59 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
352 352
353 if (nouveau_fb->nvbo) { 353 if (nouveau_fb->nvbo) {
354 nouveau_bo_unmap(nouveau_fb->nvbo); 354 nouveau_bo_unmap(nouveau_fb->nvbo);
355 drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
355 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 356 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
356 nouveau_fb->nvbo = NULL; 357 nouveau_fb->nvbo = NULL;
357 } 358 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ead7b8fc53fc..19620a6709f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
167 goto out; 167 goto out;
168 168
169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
170 /* drop reference from allocate - handle holds it now */
171 drm_gem_object_unreference_unlocked(nvbo->gem);
170out: 172out:
171 drm_gem_object_handle_unreference_unlocked(nvbo->gem);
172
173 if (ret)
174 drm_gem_object_unreference_unlocked(nvbo->gem);
175 return ret; 173 return ret;
176} 174}
177 175
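The nouveau_gem.c hunk above (and the matching radeon_gem.c one further down) settles the GEM reference ownership for the create ioctl: drm_gem_handle_create() makes the handle hold its own reference, so the single reference returned by the allocator is dropped unconditionally right afterwards, and the old drm_gem_object_handle_unreference_unlocked() dance disappears. The resulting shape, as a sketch (my_bo_new() is a stand-in for the driver allocator):

#include "drmP.h"

struct drm_gem_object *my_bo_new(struct drm_device *dev);	/* returns one ref */

static int my_gem_create(struct drm_device *dev, struct drm_file *file_priv,
			 u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = my_bo_new(dev);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handle);
	/* drop the allocation reference - on success the handle holds its own,
	 * on failure this frees the object */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}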
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181ff50ce..3c9964a8fbad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
79 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
80 nouveau_bo_unpin(chan->notifier_bo); 80 nouveau_bo_unpin(chan->notifier_bo);
81 mutex_unlock(&dev->struct_mutex); 81 mutex_unlock(&dev->struct_mutex);
82 drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 83 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
83 drm_mm_takedown(&chan->notifier_heap); 84 drm_mm_takedown(&chan->notifier_heap);
84} 85}
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
4999#define SW_I2C_CNTL_WRITE1BIT 6 4999#define SW_I2C_CNTL_WRITE1BIT 6
5000 5000
5001//==============================VESA definition Portion=============================== 5001//==============================VESA definition Portion===============================
5002#define VESA_OEM_PRODUCT_REV '01.00' 5002#define VESA_OEM_PRODUCT_REV "01.00"
5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support 5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
5004#define VESA_MODE_WIN_ATTRIBUTE 7 5004#define VESA_MODE_WIN_ATTRIBUTE 7
5005#define VESA_WIN_SIZE 64 5005#define VESA_WIN_SIZE 64
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 464a81a1990f..cd0290f946cf 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
539 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
541 } 541 }
542 /* There is some evidence (often anecdotal) that RV515 LVDS 542 /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not 543 * (on some boards at least) prefers the legacy algo. I'm not
544 * sure whether this should handled generically or on a 544 * sure whether this should handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the 545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases. 546 * majority of cases.
547 */ 547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && 548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 (rdev->family == CHIP_RV515)) { 549 ((rdev->family == CHIP_RV515) ||
550 (rdev->family == CHIP_RV620))) {
550 /* allow the user to overrride just in case */ 551 /* allow the user to overrride just in case */
551 if (radeon_new_pll == 1) 552 if (radeon_new_pll == 1)
552 pll->algo = PLL_ALGO_NEW; 553 pll->algo = PLL_ALGO_NEW;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b8b7f010b25f..79082d4398ae 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1160 EVERGREEN_MAX_BACKENDS_MASK)); 1160 EVERGREEN_MAX_BACKENDS_MASK));
1161 break; 1161 break;
1162 } 1162 }
1163 } else 1163 } else {
1164 gb_backend_map = 1164 switch (rdev->family) {
1165 evergreen_get_tile_pipe_to_backend_map(rdev, 1165 case CHIP_CYPRESS:
1166 rdev->config.evergreen.max_tile_pipes, 1166 case CHIP_HEMLOCK:
1167 rdev->config.evergreen.max_backends, 1167 gb_backend_map = 0x66442200;
1168 ((EVERGREEN_MAX_BACKENDS_MASK << 1168 break;
1169 rdev->config.evergreen.max_backends) & 1169 case CHIP_JUNIPER:
1170 EVERGREEN_MAX_BACKENDS_MASK)); 1170 gb_backend_map = 0x00006420;
1171 break;
1172 default:
1173 gb_backend_map =
1174 evergreen_get_tile_pipe_to_backend_map(rdev,
1175 rdev->config.evergreen.max_tile_pipes,
1176 rdev->config.evergreen.max_backends,
1177 ((EVERGREEN_MAX_BACKENDS_MASK <<
1178 rdev->config.evergreen.max_backends) &
1179 EVERGREEN_MAX_BACKENDS_MASK));
1180 }
1181 }
1171 1182
1172 rdev->config.evergreen.tile_config = gb_addr_config; 1183 rdev->config.evergreen.tile_config = gb_addr_config;
1173 WREG32(GB_BACKEND_MAP, gb_backend_map); 1184 WREG32(GB_BACKEND_MAP, gb_backend_map);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0bb5eb4..e151f16a8f86 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
2020 return false; 2020 return false;
2021 } 2021 }
2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); 2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2023 if (elapsed >= 3000) { 2023 if (elapsed >= 10000) {
2024 /* very likely the improbable case where current
2025 * rptr is equal to last recorded, a while ago, rptr
2026 * this is more likely a false positive update tracking
2027 * information which should force us to be recall at
2028 * latter point
2029 */
2030 lockup->last_cp_rptr = cp->rptr;
2031 lockup->last_jiffies = jiffies;
2032 return false;
2033 }
2034 if (elapsed >= 1000) {
2035 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); 2024 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2036 return true; 2025 return true;
2037 } 2026 }
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3308 unsigned long size; 3297 unsigned long size;
3309 unsigned prim_walk; 3298 unsigned prim_walk;
3310 unsigned nverts; 3299 unsigned nverts;
3300 unsigned num_cb = track->num_cb;
3311 3301
3312 for (i = 0; i < track->num_cb; i++) { 3302 if (!track->zb_cb_clear && !track->color_channel_mask &&
3303 !track->blend_read_enable)
3304 num_cb = 0;
3305
3306 for (i = 0; i < num_cb; i++) {
3313 if (track->cb[i].robj == NULL) { 3307 if (track->cb[i].robj == NULL) {
3314 if (!(track->zb_cb_clear || track->color_channel_mask ||
3315 track->blend_read_enable)) {
3316 continue;
3317 }
3318 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 3308 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3319 return -EINVAL; 3309 return -EINVAL;
3320 } 3310 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..7a04959ba0ee 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
2729 if (i < rdev->usec_timeout) { 2729 if (i < rdev->usec_timeout) {
2730 DRM_INFO("ib test succeeded in %u usecs\n", i); 2730 DRM_INFO("ib test succeeded in %u usecs\n", i);
2731 } else { 2731 } else {
2732 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2732 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2733 scratch, tmp); 2733 scratch, tmp);
2734 r = -EINVAL; 2734 r = -EINVAL;
2735 } 2735 }
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3530 */ 3530 */
3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3532 rdev->vram_scratch.ptr) {
3532 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3533 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3533 u32 tmp; 3534 u32 tmp;
3534 3535
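The r600.c change narrows the r7xx workaround in r600_ioctl_wait_idle(): per the in-code comment it consists of a write to HDP_DEBUG1 followed by a framebuffer read, and that read goes through rdev->vram_scratch.ptr, so the whole thing is now skipped when the scratch mapping is absent. The shape of the added guard in isolation (the HDP_DEBUG1 write is omitted; readl() goes through a stand-in pointer):

#include <linux/io.h>
#include <linux/types.h>

static void hdp_flush_via_fb_read(void __iomem *scratch)
{
	u32 tmp;

	if (!scratch)		/* scratch BO not mapped: nothing to read through */
		return;
	tmp = readl(scratch);	/* the fb read that completes the flush */
	(void)tmp;
}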
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622ae74e9..9ceb2a1ce799 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
1#include "drmP.h" 26#include "drmP.h"
2#include "drm.h" 27#include "drm.h"
3#include "radeon_drm.h" 28#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b378cbb0..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
1 25
2#ifndef R600_BLIT_SHADERS_H 26#ifndef R600_BLIT_SHADERS_H
3#define R600_BLIT_SHADERS_H 27#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d8864949e387..250a3a918193 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1170 /* using get ib will give us the offset into the mipmap bo */ 1170 /* using get ib will give us the offset into the mipmap bo */
1171 word0 = radeon_get_ib_value(p, idx + 3) << 8; 1171 word0 = radeon_get_ib_value(p, idx + 3) << 8;
1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { 1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1173 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1173 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); 1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
1175 return -EINVAL;
1176 } 1175 }
1177 return 0; 1176 return 0;
1178} 1177}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index ebae14c4b768..68932ba7b8a4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
317 *connector_type = DRM_MODE_CONNECTOR_DVID; 317 *connector_type = DRM_MODE_CONNECTOR_DVID;
318 } 318 }
319 319
320 /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
321 if ((dev->pdev->device == 0x796e) &&
322 (dev->pdev->subsystem_vendor == 0x1462) &&
323 (dev->pdev->subsystem_device == 0x7302)) {
324 if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
325 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
326 return false;
327 }
328
320 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 329 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
321 if ((dev->pdev->device == 0x7941) && 330 if ((dev->pdev->device == 0x7941) &&
322 (dev->pdev->subsystem_vendor == 0x147b) && 331 (dev->pdev->subsystem_vendor == 0x147b) &&
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index bd74e428bd14..a04b7a6ad95f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1485 /* PowerMac8,1 ? */ 1485 /* PowerMac8,1 ? */
1486 /* imac g5 isight */ 1486 /* imac g5 isight */
1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; 1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1488 } else if ((rdev->pdev->device == 0x4a48) &&
1489 (rdev->pdev->subsystem_vendor == 0x1002) &&
1490 (rdev->pdev->subsystem_device == 0x4a48)) {
1491 /* Mac X800 */
1492 rdev->mode_info.connector_table = CT_MAC_X800;
1488 } else 1493 } else
1489#endif /* CONFIG_PPC_PMAC */ 1494#endif /* CONFIG_PPC_PMAC */
1490#ifdef CONFIG_PPC64 1495#ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1961 CONNECTOR_OBJECT_ID_VGA, 1966 CONNECTOR_OBJECT_ID_VGA,
1962 &hpd); 1967 &hpd);
1963 break; 1968 break;
1969 case CT_MAC_X800:
1970 DRM_INFO("Connector Table: %d (mac x800)\n",
1971 rdev->mode_info.connector_table);
1972 /* DVI - primary dac, internal tmds */
1973 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
1974 hpd.hpd = RADEON_HPD_1; /* ??? */
1975 radeon_add_legacy_encoder(dev,
1976 radeon_get_encoder_enum(dev,
1977 ATOM_DEVICE_DFP1_SUPPORT,
1978 0),
1979 ATOM_DEVICE_DFP1_SUPPORT);
1980 radeon_add_legacy_encoder(dev,
1981 radeon_get_encoder_enum(dev,
1982 ATOM_DEVICE_CRT1_SUPPORT,
1983 1),
1984 ATOM_DEVICE_CRT1_SUPPORT);
1985 radeon_add_legacy_connector(dev, 0,
1986 ATOM_DEVICE_DFP1_SUPPORT |
1987 ATOM_DEVICE_CRT1_SUPPORT,
1988 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1989 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1990 &hpd);
1991 /* DVI - tv dac, dvo */
1992 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
1993 hpd.hpd = RADEON_HPD_2; /* ??? */
1994 radeon_add_legacy_encoder(dev,
1995 radeon_get_encoder_enum(dev,
1996 ATOM_DEVICE_DFP2_SUPPORT,
1997 0),
1998 ATOM_DEVICE_DFP2_SUPPORT);
1999 radeon_add_legacy_encoder(dev,
2000 radeon_get_encoder_enum(dev,
2001 ATOM_DEVICE_CRT2_SUPPORT,
2002 2),
2003 ATOM_DEVICE_CRT2_SUPPORT);
2004 radeon_add_legacy_connector(dev, 1,
2005 ATOM_DEVICE_DFP2_SUPPORT |
2006 ATOM_DEVICE_CRT2_SUPPORT,
2007 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2008 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2009 &hpd);
2010 break;
1964 default: 2011 default:
1965 DRM_INFO("Connector table: %d (invalid)\n", 2012 DRM_INFO("Connector table: %d (invalid)\n",
1966 rdev->mode_info.connector_table); 2013 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a9dd7847d96e..ecc1a8fafbfd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
481 return MODE_OK; 481 return MODE_OK;
482} 482}
483 483
484static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) 484static enum drm_connector_status
485radeon_lvds_detect(struct drm_connector *connector, bool force)
485{ 486{
486 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 487 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
487 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 488 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
594 return MODE_OK; 595 return MODE_OK;
595} 596}
596 597
597static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) 598static enum drm_connector_status
599radeon_vga_detect(struct drm_connector *connector, bool force)
598{ 600{
599 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 601 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
600 struct drm_encoder *encoder; 602 struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
691 return MODE_OK; 693 return MODE_OK;
692} 694}
693 695
694static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) 696static enum drm_connector_status
697radeon_tv_detect(struct drm_connector *connector, bool force)
695{ 698{
696 struct drm_encoder *encoder; 699 struct drm_encoder *encoder;
697 struct drm_encoder_helper_funcs *encoder_funcs; 700 struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
748 * we have to check if this analog encoder is shared with anyone else (TV) 751 * we have to check if this analog encoder is shared with anyone else (TV)
749 * if its shared we have to set the other connector to disconnected. 752 * if its shared we have to set the other connector to disconnected.
750 */ 753 */
751static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) 754static enum drm_connector_status
755radeon_dvi_detect(struct drm_connector *connector, bool force)
752{ 756{
753 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 757 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
754 struct drm_encoder *encoder = NULL; 758 struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
972 return ret; 976 return ret;
973} 977}
974 978
975static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) 979static enum drm_connector_status
980radeon_dp_detect(struct drm_connector *connector, bool force)
976{ 981{
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 982 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 enum drm_connector_status ret = connector_status_disconnected; 983 enum drm_connector_status ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6dd434ad2429..b92d2f2fcbed 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_DFP6_SUPPORT)
353 DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_TV1_SUPPORT) 354 if (devices & ATOM_DEVICE_TV1_SUPPORT)
353 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 355 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
354 if (devices & ATOM_DEVICE_CV_SUPPORT) 356 if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
841{ 843{
842 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 844 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
843 845
844 if (radeon_fb->obj) 846 if (radeon_fb->obj) {
845 drm_gem_object_unreference_unlocked(radeon_fb->obj); 847 drm_gem_object_unreference_unlocked(radeon_fb->obj);
848 }
846 drm_framebuffer_cleanup(fb); 849 drm_framebuffer_cleanup(fb);
847 kfree(radeon_fb); 850 kfree(radeon_fb);
848} 851}
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1140 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 1143 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
1141 else 1144 else
1142 radeon_crtc->rmx_type = RMX_OFF; 1145 radeon_crtc->rmx_type = RMX_OFF;
1143 src_v = crtc->mode.vdisplay;
1144 dst_v = radeon_crtc->native_mode.vdisplay;
1145 src_h = crtc->mode.hdisplay;
1146 dst_h = radeon_crtc->native_mode.vdisplay;
1147 /* copy native mode */ 1146 /* copy native mode */
1148 memcpy(&radeon_crtc->native_mode, 1147 memcpy(&radeon_crtc->native_mode,
1149 &radeon_encoder->native_mode, 1148 &radeon_encoder->native_mode,
1150 sizeof(struct drm_display_mode)); 1149 sizeof(struct drm_display_mode));
1150 src_v = crtc->mode.vdisplay;
1151 dst_v = radeon_crtc->native_mode.vdisplay;
1152 src_h = crtc->mode.hdisplay;
1153 dst_h = radeon_crtc->native_mode.hdisplay;
1151 1154
1152 /* fix up for overscan on hdmi */ 1155 /* fix up for overscan on hdmi */
1153 if (ASIC_IS_AVIVO(rdev) && 1156 if (ASIC_IS_AVIVO(rdev) &&
1157 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1154 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1158 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1155 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1159 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1156 drm_detect_hdmi_monitor(radeon_connector->edid) && 1160 drm_detect_hdmi_monitor(radeon_connector->edid) &&
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index c74a8b20d941..9cdf6a35bc2c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
94 ret = radeon_bo_reserve(rbo, false); 94 ret = radeon_bo_reserve(rbo, false);
95 if (likely(ret == 0)) { 95 if (likely(ret == 0)) {
96 radeon_bo_kunmap(rbo); 96 radeon_bo_kunmap(rbo);
97 radeon_bo_unpin(rbo);
97 radeon_bo_unreserve(rbo); 98 radeon_bo_unreserve(rbo);
98 } 99 }
100 drm_gem_object_handle_unreference(gobj);
99 drm_gem_object_unreference_unlocked(gobj); 101 drm_gem_object_unreference_unlocked(gobj);
100} 102}
101 103
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
325{ 327{
326 struct fb_info *info; 328 struct fb_info *info;
327 struct radeon_framebuffer *rfb = &rfbdev->rfb; 329 struct radeon_framebuffer *rfb = &rfbdev->rfb;
328 struct radeon_bo *rbo;
329 int r;
330 330
331 if (rfbdev->helper.fbdev) { 331 if (rfbdev->helper.fbdev) {
332 info = rfbdev->helper.fbdev; 332 info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
338 } 338 }
339 339
340 if (rfb->obj) { 340 if (rfb->obj) {
341 rbo = rfb->obj->driver_private; 341 radeonfb_destroy_pinned_object(rfb->obj);
342 r = radeon_bo_reserve(rbo, false); 342 rfb->obj = NULL;
343 if (likely(r == 0)) {
344 radeon_bo_kunmap(rbo);
345 radeon_bo_unpin(rbo);
346 radeon_bo_unreserve(rbo);
347 }
348 drm_gem_object_unreference_unlocked(rfb->obj);
349 } 343 }
350 drm_fb_helper_fini(&rfbdev->helper); 344 drm_fb_helper_fini(&rfbdev->helper);
351 drm_framebuffer_cleanup(&rfb->base); 345 drm_framebuffer_cleanup(&rfb->base);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c578f265b24c..d1e595d91723 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
201 return r; 201 return r;
202 } 202 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 203 r = drm_gem_handle_create(filp, gobj, &handle);
204 /* drop reference from allocate - handle holds it now */
205 drm_gem_object_unreference_unlocked(gobj);
204 if (r) { 206 if (r) {
205 drm_gem_object_unreference_unlocked(gobj);
206 return r; 207 return r;
207 } 208 }
208 drm_gem_object_handle_unreference_unlocked(gobj);
209 args->handle = handle; 209 args->handle = handle;
210 return 0; 210 return 0;
211} 211}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
203 */ 203 */
204int radeon_driver_firstopen_kms(struct drm_device *dev) 204int radeon_driver_firstopen_kms(struct drm_device *dev)
205{ 205{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
206 return 0; 210 return 0;
207} 211}
208 212
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index efbe975312dc..17a6602b5885 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
204 204
205/* mostly for macs, but really any system without connector tables */ 205/* mostly for macs, but really any system without connector tables */
206enum radeon_connector_table { 206enum radeon_connector_table {
207 CT_NONE, 207 CT_NONE = 0,
208 CT_GENERIC, 208 CT_GENERIC,
209 CT_IBOOK, 209 CT_IBOOK,
210 CT_POWERBOOK_EXTERNAL, 210 CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
215 CT_IMAC_G5_ISIGHT, 215 CT_IMAC_G5_ISIGHT,
216 CT_EMAC, 216 CT_EMAC,
217 CT_RN50_POWER, 217 CT_RN50_POWER,
218 CT_MAC_X800,
218}; 219};
219 220
220enum radeon_dvo_chip { 221enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
351 INIT_LIST_HEAD(&fbo->lru); 351 INIT_LIST_HEAD(&fbo->lru);
352 INIT_LIST_HEAD(&fbo->swap); 352 INIT_LIST_HEAD(&fbo->swap);
353 fbo->vm_node = NULL; 353 fbo->vm_node = NULL;
354 atomic_set(&fbo->cpu_writers, 0);
354 355
355 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 356 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
356 kref_init(&fbo->list_kref); 357 kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
69 spinlock_t lock; 69 spinlock_t lock;
70 bool fill_lock; 70 bool fill_lock;
71 struct list_head list; 71 struct list_head list;
72 int gfp_flags; 72 gfp_t gfp_flags;
73 unsigned npages; 73 unsigned npages;
74 char *name; 74 char *name;
75 unsigned long nfrees; 75 unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
475 * This function is reentrant if caller updates count depending on number of 475 * This function is reentrant if caller updates count depending on number of
476 * pages returned in pages array. 476 * pages returned in pages array.
477 */ 477 */
478static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, 478static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
479 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 479 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
480{ 480{
481 struct page **caching_array; 481 struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
666{ 666{
667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
668 struct page *p = NULL; 668 struct page *p = NULL;
669 int gfp_flags = GFP_USER; 669 gfp_t gfp_flags = GFP_USER;
670 int r; 670 int r;
671 671
672 /* set zero flag for page allocation if required */ 672 /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 return 0; 818 return 0;
819} 819}
820 820
821void ttm_page_alloc_fini() 821void ttm_page_alloc_fini(void)
822{ 822{
823 int i; 823 int i;
824 824
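Two small correctness fixes sit in the ttm_page_alloc.c hunks: allocation-flag variables and parameters move from int to gfp_t, which is a sparse-checked __bitwise type, and ttm_page_alloc_fini() gains an explicit (void), since an empty parameter list in a C definition means "unspecified arguments" rather than "none". A minimal illustration of both points (generic code, not TTM):

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *alloc_user_buf(size_t len, bool zero)
{
	gfp_t gfp_flags = GFP_USER;	/* gfp_t, not int, so sparse can check it */

	if (zero)
		gfp_flags |= __GFP_ZERO;
	return kmalloc(len, gfp_flags);
}

void my_pool_fini(void)			/* (void): genuinely takes no arguments */
{
}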
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 72ec2e2b6e97..a96ed6d9d010 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
148 {0, 0, 0} 148 {0, 0, 0}
149}; 149};
150 150
151static char *vmw_devname = "vmwgfx"; 151static int enable_fbdev;
152 152
153static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 153static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
154static void vmw_master_init(struct vmw_master *); 154static void vmw_master_init(struct vmw_master *);
155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
156 void *ptr); 156 void *ptr);
157 157
158MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
159module_param_named(enable_fbdev, enable_fbdev, int, 0600);
160
158static void vmw_print_capabilities(uint32_t capabilities) 161static void vmw_print_capabilities(uint32_t capabilities)
159{ 162{
160 DRM_INFO("Capabilities:\n"); 163 DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
192{ 195{
193 int ret; 196 int ret;
194 197
195 vmw_kms_save_vga(dev_priv);
196
197 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 198 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
198 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
199 DRM_ERROR("Unable to initialize FIFO.\n"); 200 DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
206static void vmw_release_device(struct vmw_private *dev_priv) 207static void vmw_release_device(struct vmw_private *dev_priv)
207{ 208{
208 vmw_fifo_release(dev_priv, &dev_priv->fifo); 209 vmw_fifo_release(dev_priv, &dev_priv->fifo);
209 vmw_kms_restore_vga(dev_priv);
210} 210}
211 211
212int vmw_3d_resource_inc(struct vmw_private *dev_priv)
213{
214 int ret = 0;
215
216 mutex_lock(&dev_priv->release_mutex);
217 if (unlikely(dev_priv->num_3d_resources++ == 0)) {
218 ret = vmw_request_device(dev_priv);
219 if (unlikely(ret != 0))
220 --dev_priv->num_3d_resources;
221 }
222 mutex_unlock(&dev_priv->release_mutex);
223 return ret;
224}
225
226
227void vmw_3d_resource_dec(struct vmw_private *dev_priv)
228{
229 int32_t n3d;
230
231 mutex_lock(&dev_priv->release_mutex);
232 if (unlikely(--dev_priv->num_3d_resources == 0))
233 vmw_release_device(dev_priv);
234 n3d = (int32_t) dev_priv->num_3d_resources;
235 mutex_unlock(&dev_priv->release_mutex);
236
237 BUG_ON(n3d < 0);
238}
212 239
213static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 240static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
214{ 241{
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
228 dev_priv->last_read_sequence = (uint32_t) -100; 255 dev_priv->last_read_sequence = (uint32_t) -100;
229 mutex_init(&dev_priv->hw_mutex); 256 mutex_init(&dev_priv->hw_mutex);
230 mutex_init(&dev_priv->cmdbuf_mutex); 257 mutex_init(&dev_priv->cmdbuf_mutex);
258 mutex_init(&dev_priv->release_mutex);
231 rwlock_init(&dev_priv->resource_lock); 259 rwlock_init(&dev_priv->resource_lock);
232 idr_init(&dev_priv->context_idr); 260 idr_init(&dev_priv->context_idr);
233 idr_init(&dev_priv->surface_idr); 261 idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
244 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 272 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
245 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 273 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
246 274
275 dev_priv->enable_fb = enable_fbdev;
276
247 mutex_lock(&dev_priv->hw_mutex); 277 mutex_lock(&dev_priv->hw_mutex);
248 278
249 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 279 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
343 373
344 dev->dev_private = dev_priv; 374 dev->dev_private = dev_priv;
345 375
346 if (!dev->devname)
347 dev->devname = vmw_devname;
348
349 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
350 ret = drm_irq_install(dev);
351 if (unlikely(ret != 0)) {
352 DRM_ERROR("Failed installing irq: %d\n", ret);
353 goto out_no_irq;
354 }
355 }
356
357 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 376 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
358 dev_priv->stealth = (ret != 0); 377 dev_priv->stealth = (ret != 0);
359 if (dev_priv->stealth) { 378 if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
369 goto out_no_device; 388 goto out_no_device;
370 } 389 }
371 } 390 }
372 ret = vmw_request_device(dev_priv); 391 ret = vmw_kms_init(dev_priv);
373 if (unlikely(ret != 0)) 392 if (unlikely(ret != 0))
374 goto out_no_device; 393 goto out_no_kms;
375 vmw_kms_init(dev_priv);
376 vmw_overlay_init(dev_priv); 394 vmw_overlay_init(dev_priv);
377 vmw_fb_init(dev_priv); 395 if (dev_priv->enable_fb) {
396 ret = vmw_3d_resource_inc(dev_priv);
397 if (unlikely(ret != 0))
398 goto out_no_fifo;
399 vmw_kms_save_vga(dev_priv);
400 vmw_fb_init(dev_priv);
401 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
402 "Detected device 3D availability.\n" :
403 "Detected no device 3D availability.\n");
404 } else {
405 DRM_INFO("Delayed 3D detection since we're not "
406 "running the device in SVGA mode yet.\n");
407 }
408
409 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
410 ret = drm_irq_install(dev);
411 if (unlikely(ret != 0)) {
412 DRM_ERROR("Failed installing irq: %d\n", ret);
413 goto out_no_irq;
414 }
415 }
378 416
379 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 417 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
380 register_pm_notifier(&dev_priv->pm_nb); 418 register_pm_notifier(&dev_priv->pm_nb);
381 419
382 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
383
384 return 0; 420 return 0;
385 421
386out_no_device:
387 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
388 drm_irq_uninstall(dev_priv->dev);
389 if (dev->devname == vmw_devname)
390 dev->devname = NULL;
391out_no_irq: 422out_no_irq:
423 if (dev_priv->enable_fb) {
424 vmw_fb_close(dev_priv);
425 vmw_kms_restore_vga(dev_priv);
426 vmw_3d_resource_dec(dev_priv);
427 }
428out_no_fifo:
429 vmw_overlay_close(dev_priv);
430 vmw_kms_close(dev_priv);
431out_no_kms:
432 if (dev_priv->stealth)
433 pci_release_region(dev->pdev, 2);
434 else
435 pci_release_regions(dev->pdev);
436out_no_device:
392 ttm_object_device_release(&dev_priv->tdev); 437 ttm_object_device_release(&dev_priv->tdev);
393out_err4: 438out_err4:
394 iounmap(dev_priv->mmio_virt); 439 iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
415 460
416 unregister_pm_notifier(&dev_priv->pm_nb); 461 unregister_pm_notifier(&dev_priv->pm_nb);
417 462
418 vmw_fb_close(dev_priv); 463 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
464 drm_irq_uninstall(dev_priv->dev);
465 if (dev_priv->enable_fb) {
466 vmw_fb_close(dev_priv);
467 vmw_kms_restore_vga(dev_priv);
468 vmw_3d_resource_dec(dev_priv);
469 }
419 vmw_kms_close(dev_priv); 470 vmw_kms_close(dev_priv);
420 vmw_overlay_close(dev_priv); 471 vmw_overlay_close(dev_priv);
421 vmw_release_device(dev_priv);
422 if (dev_priv->stealth) 472 if (dev_priv->stealth)
423 pci_release_region(dev->pdev, 2); 473 pci_release_region(dev->pdev, 2);
424 else 474 else
425 pci_release_regions(dev->pdev); 475 pci_release_regions(dev->pdev);
426 476
427 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
428 drm_irq_uninstall(dev_priv->dev);
429 if (dev->devname == vmw_devname)
430 dev->devname = NULL;
431 ttm_object_device_release(&dev_priv->tdev); 477 ttm_object_device_release(&dev_priv->tdev);
432 iounmap(dev_priv->mmio_virt); 478 iounmap(dev_priv->mmio_virt);
433 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, 479 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
500 struct drm_ioctl_desc *ioctl = 546 struct drm_ioctl_desc *ioctl =
501 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 547 &vmw_ioctls[nr - DRM_COMMAND_BASE];
502 548
503 if (unlikely(ioctl->cmd != cmd)) { 549 if (unlikely(ioctl->cmd_drv != cmd)) {
504 DRM_ERROR("Invalid command format, ioctl %d\n", 550 DRM_ERROR("Invalid command format, ioctl %d\n",
505 nr - DRM_COMMAND_BASE); 551 nr - DRM_COMMAND_BASE);
506 return -EINVAL; 552 return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
589 struct vmw_master *vmaster = vmw_master(file_priv->master); 635 struct vmw_master *vmaster = vmw_master(file_priv->master);
590 int ret = 0; 636 int ret = 0;
591 637
638 if (!dev_priv->enable_fb) {
639 ret = vmw_3d_resource_inc(dev_priv);
640 if (unlikely(ret != 0))
641 return ret;
642 vmw_kms_save_vga(dev_priv);
643 mutex_lock(&dev_priv->hw_mutex);
644 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
645 mutex_unlock(&dev_priv->hw_mutex);
646 }
647
592 if (active) { 648 if (active) {
593 BUG_ON(active != &dev_priv->fbdev_master); 649 BUG_ON(active != &dev_priv->fbdev_master);
594 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 650 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
617 return 0; 673 return 0;
618 674
619out_no_active_lock: 675out_no_active_lock:
620 vmw_release_device(dev_priv); 676 if (!dev_priv->enable_fb) {
677 mutex_lock(&dev_priv->hw_mutex);
678 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
679 mutex_unlock(&dev_priv->hw_mutex);
680 vmw_kms_restore_vga(dev_priv);
681 vmw_3d_resource_dec(dev_priv);
682 }
621 return ret; 683 return ret;
622} 684}
623 685
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
645 707
646 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 708 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
647 709
710 if (!dev_priv->enable_fb) {
711 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
712 if (unlikely(ret != 0))
713 DRM_ERROR("Unable to clean VRAM on master drop.\n");
714 mutex_lock(&dev_priv->hw_mutex);
715 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
716 mutex_unlock(&dev_priv->hw_mutex);
717 vmw_kms_restore_vga(dev_priv);
718 vmw_3d_resource_dec(dev_priv);
719 }
720
648 dev_priv->active_master = &dev_priv->fbdev_master; 721 dev_priv->active_master = &dev_priv->fbdev_master;
649 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 722 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
650 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 723 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
651 724
652 vmw_fb_on(dev_priv); 725 if (dev_priv->enable_fb)
726 vmw_fb_on(dev_priv);
653} 727}
654 728
655 729
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
722 .irq_postinstall = vmw_irq_postinstall, 796 .irq_postinstall = vmw_irq_postinstall,
723 .irq_uninstall = vmw_irq_uninstall, 797 .irq_uninstall = vmw_irq_uninstall,
724 .irq_handler = vmw_irq_handler, 798 .irq_handler = vmw_irq_handler,
799 .get_vblank_counter = vmw_get_vblank_counter,
725 .reclaim_buffers_locked = NULL, 800 .reclaim_buffers_locked = NULL,
726 .get_map_ofs = drm_core_get_map_ofs, 801 .get_map_ofs = drm_core_get_map_ofs,
727 .get_reg_ofs = drm_core_get_reg_ofs, 802 .get_reg_ofs = drm_core_get_reg_ofs,
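
Note on the vmwgfx_drv.c changes above: SVGA/3D bring-up becomes lazy. An enable_fbdev module parameter decides whether the fbdev layer holds a device reference from load time, and vmw_3d_resource_inc()/vmw_3d_resource_dec() count users under release_mutex so only the first user requests the device and only the last one releases it; the master set/drop and resource creation paths then take and drop references as 3D users appear and disappear. A condensed sketch of that first-on/last-off pattern, with hypothetical names:

#include <linux/mutex.h>

struct dev_state {
        struct mutex release_mutex;
        unsigned int users;
};

static int hw_up(struct dev_state *s)    { return 0; }  /* vmw_request_device() stand-in */
static void hw_down(struct dev_state *s) { }            /* vmw_release_device() stand-in */

static int resource_get(struct dev_state *s)
{
        int ret = 0;

        mutex_lock(&s->release_mutex);
        if (s->users++ == 0) {
                ret = hw_up(s);
                if (ret)
                        --s->users;     /* roll back on failure */
        }
        mutex_unlock(&s->release_mutex);
        return ret;
}

static void resource_put(struct dev_state *s)
{
        mutex_lock(&s->release_mutex);
        if (--s->users == 0)
                hw_down(s);
        mutex_unlock(&s->release_mutex);
}
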
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 429f917b60bf..58de6393f611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -277,6 +277,7 @@ struct vmw_private {
277 277
278 bool stealth; 278 bool stealth;
279 bool is_opened; 279 bool is_opened;
280 bool enable_fb;
280 281
281 /** 282 /**
282 * Master management. 283 * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
285 struct vmw_master *active_master; 286 struct vmw_master *active_master;
286 struct vmw_master fbdev_master; 287 struct vmw_master fbdev_master;
287 struct notifier_block pm_nb; 288 struct notifier_block pm_nb;
289
290 struct mutex release_mutex;
291 uint32_t num_3d_resources;
288}; 292};
289 293
290static inline struct vmw_private *vmw_priv(struct drm_device *dev) 294static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
319 return val; 323 return val;
320} 324}
321 325
326int vmw_3d_resource_inc(struct vmw_private *dev_priv);
327void vmw_3d_resource_dec(struct vmw_private *dev_priv);
328
322/** 329/**
323 * GMR utilities - vmwgfx_gmr.c 330 * GMR utilities - vmwgfx_gmr.c
324 */ 331 */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
511 unsigned bbp, unsigned depth); 518 unsigned bbp, unsigned depth);
512int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 519int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv); 520 struct drm_file *file_priv);
521u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
514 522
515/** 523/**
516 * Overlay control - vmwgfx_overlay.c 524 * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 870967a97c15..409e172f4abf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
615 if (unlikely(ret != 0)) 615 if (unlikely(ret != 0))
616 goto err_unlock; 616 goto err_unlock;
617 617
618 if (bo->mem.mem_type == TTM_PL_VRAM &&
619 bo->mem.mm_node->start < bo->num_pages)
620 (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
621 false, false);
622
618 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 623 ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
619 624
620 /* Could probably bug on */ 625 /* Could probably bug on */
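
Note on the vmwgfx_fb.c hunk above: before pinning the framebuffer at the start of VRAM, a buffer that already sits in VRAM and overlaps the first num_pages is first validated into system memory, so the final ne_placement validation is a clean move rather than an overlap with its own old location. A loose sketch of that ordering, with hypothetical types and helpers:

struct fake_bo {
        int in_vram;
        unsigned long start;
        unsigned long num_pages;
};

static void move_to_system(struct fake_bo *bo)      { bo->in_vram = 0; }
static int place_at_vram_start(struct fake_bo *bo)
{
        bo->in_vram = 1;
        bo->start = 0;
        return 0;
}

static int pin_at_start_of_vram(struct fake_bo *bo)
{
        /* Buffer overlaps the target range: push it out first,
         * best effort, like the (void) ttm_bo_validate() above. */
        if (bo->in_vram && bo->start < bo->num_pages)
                move_to_system(bo);

        return place_at_vram_start(bo);
}
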
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e6a1eb7ea954..0fe31766e4cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
106 mutex_lock(&dev_priv->hw_mutex); 106 mutex_lock(&dev_priv->hw_mutex);
107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
109 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
109 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
110 111
111 min = 4; 112 min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
175 dev_priv->config_done_state); 176 dev_priv->config_done_state);
176 vmw_write(dev_priv, SVGA_REG_ENABLE, 177 vmw_write(dev_priv, SVGA_REG_ENABLE,
177 dev_priv->enable_state); 178 dev_priv->enable_state);
179 vmw_write(dev_priv, SVGA_REG_TRACES,
180 dev_priv->traces_state);
178 181
179 mutex_unlock(&dev_priv->hw_mutex); 182 mutex_unlock(&dev_priv->hw_mutex);
180 vmw_fence_queue_takedown(&fifo->fence_queue); 183 vmw_fence_queue_takedown(&fifo->fence_queue);
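
Note on the vmwgfx_fifo.c hunks above: SVGA_REG_TRACES is now treated like the ENABLE and CONFIG_DONE registers, captured before the FIFO is brought up and written back on release, so the device is handed back the way the driver found it. A small sketch of that save/restore discipline with a fake register:

struct fake_hw {
        unsigned int traces;            /* the live "register" */
        unsigned int traces_state;      /* saved copy */
};

static void fifo_bring_up(struct fake_hw *hw)
{
        hw->traces_state = hw->traces;  /* capture before the driver touches it */
}

static void fifo_release(struct fake_hw *hw)
{
        hw->traces = hw->traces_state;  /* hand the register back unchanged */
}
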
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 64d7f47df868..e882ba099f0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
901 if (i == 0 && vmw_priv->num_displays == 1 &&
902 save->width == 0 && save->height == 0) {
903
904 /*
905 * It should be fairly safe to assume that these
906 * values are uninitialized.
907 */
908
909 save->width = vmw_priv->vga_width - save->pos_x;
910 save->height = vmw_priv->vga_height - save->pos_y;
911 }
901 } 912 }
913
902 return 0; 914 return 0;
903} 915}
904 916
@@ -984,3 +996,8 @@ out_unlock:
984 ttm_read_unlock(&vmaster->lock); 996 ttm_read_unlock(&vmaster->lock);
985 return ret; 997 return ret;
986} 998}
999
1000u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1001{
1002 return 0;
1003}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf78235f..11cb39e3accb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,8 @@
27 27
28#include "vmwgfx_kms.h" 28#include "vmwgfx_kms.h"
29 29
30#define VMWGFX_LDU_NUM_DU 8
31
30#define vmw_crtc_to_ldu(x) \ 32#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc) 33 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \ 34#define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
335} 337}
336 338
337static enum drm_connector_status 339static enum drm_connector_status
338 vmw_ldu_connector_detect(struct drm_connector *connector) 340 vmw_ldu_connector_detect(struct drm_connector *connector,
341 bool force)
339{ 342{
340 if (vmw_connector_to_ldu(connector)->pref_active) 343 if (vmw_connector_to_ldu(connector)->pref_active)
341 return connector_status_connected; 344 return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
516 519
517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 520 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
518 DRM_MODE_CONNECTOR_LVDS); 521 DRM_MODE_CONNECTOR_LVDS);
519 connector->status = vmw_ldu_connector_detect(connector); 522 connector->status = vmw_ldu_connector_detect(connector, true);
520 523
521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 524 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
522 DRM_MODE_ENCODER_LVDS); 525 DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
535 538
536int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 539int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
537{ 540{
541 struct drm_device *dev = dev_priv->dev;
542 int i;
543 int ret;
544
538 if (dev_priv->ldu_priv) { 545 if (dev_priv->ldu_priv) {
539 DRM_INFO("ldu system already on\n"); 546 DRM_INFO("ldu system already on\n");
540 return -EINVAL; 547 return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
552 559
553 drm_mode_create_dirty_info_property(dev_priv->dev); 560 drm_mode_create_dirty_info_property(dev_priv->dev);
554 561
555 vmw_ldu_init(dev_priv, 0);
556 /* for old hardware without multimon only enable one display */
557 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 562 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
558 vmw_ldu_init(dev_priv, 1); 563 for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
559 vmw_ldu_init(dev_priv, 2); 564 vmw_ldu_init(dev_priv, i);
560 vmw_ldu_init(dev_priv, 3); 565 ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
561 vmw_ldu_init(dev_priv, 4); 566 } else {
562 vmw_ldu_init(dev_priv, 5); 567 /* for old hardware without multimon only enable one display */
563 vmw_ldu_init(dev_priv, 6); 568 vmw_ldu_init(dev_priv, 0);
564 vmw_ldu_init(dev_priv, 7); 569 ret = drm_vblank_init(dev, 1);
565 } 570 }
566 571
567 return 0; 572 return ret;
568} 573}
569 574
570int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 575int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
571{ 576{
577 struct drm_device *dev = dev_priv->dev;
578
579 drm_vblank_cleanup(dev);
572 if (!dev_priv->ldu_priv) 580 if (!dev_priv->ldu_priv)
573 return -ENOSYS; 581 return -ENOSYS;
574 582
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
610 ldu->pref_height = 600; 618 ldu->pref_height = 600;
611 ldu->pref_active = false; 619 ldu->pref_active = false;
612 } 620 }
613 con->status = vmw_ldu_connector_detect(con); 621 con->status = vmw_ldu_connector_detect(con, true);
614 } 622 }
615 623
616 mutex_unlock(&dev->mode_config.mutex); 624 mutex_unlock(&dev->mode_config.mutex);
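
Note on the vmwgfx_ldu.c changes above: eight hand-written vmw_ldu_init() calls become a loop bounded by VMWGFX_LDU_NUM_DU, drm_vblank_init() is sized to the number of display units actually created (one on non-multimon hardware), and the connector detect callback gains the bool force argument of the updated DRM prototype. A sketch of sizing the vblank bookkeeping to the unit count, with hypothetical stand-ins:

#define EXAMPLE_NUM_DU 8

static void unit_init(int unit)      { (void)unit; }                /* vmw_ldu_init() stand-in */
static int  vblank_init(int n_crtcs) { (void)n_crtcs; return 0; }   /* drm_vblank_init() stand-in */

static int init_display_units(int multimon)
{
        int n = multimon ? EXAMPLE_NUM_DU : 1;
        int i;

        for (i = 0; i < n; i++)
                unit_init(i);

        return vblank_init(n);  /* one vblank counter per created CRTC */
}
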
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5f2d5df01e5c..c8c40e9979db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
211 cmd->body.cid = cpu_to_le32(res->id); 211 cmd->body.cid = cpu_to_le32(res->id);
212 212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214 vmw_3d_resource_dec(dev_priv);
214} 215}
215 216
216static int vmw_context_init(struct vmw_private *dev_priv, 217static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
247 cmd->body.cid = cpu_to_le32(res->id); 248 cmd->body.cid = cpu_to_le32(res->id);
248 249
249 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 250 vmw_fifo_commit(dev_priv, sizeof(*cmd));
251 (void) vmw_3d_resource_inc(dev_priv);
250 vmw_resource_activate(res, vmw_hw_context_destroy); 252 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0; 253 return 0;
252} 254}
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
406 cmd->body.sid = cpu_to_le32(res->id); 408 cmd->body.sid = cpu_to_le32(res->id);
407 409
408 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 410 vmw_fifo_commit(dev_priv, sizeof(*cmd));
411 vmw_3d_resource_dec(dev_priv);
409} 412}
410 413
411void vmw_surface_res_free(struct vmw_resource *res) 414void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
473 } 476 }
474 477
475 vmw_fifo_commit(dev_priv, submit_size); 478 vmw_fifo_commit(dev_priv, submit_size);
479 (void) vmw_3d_resource_inc(dev_priv);
476 vmw_resource_activate(res, vmw_hw_surface_destroy); 480 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0; 481 return 0;
478} 482}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b16..f366f968155a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
599} 599}
600 600
601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
602{ 602{
603 struct vga_device *vgadev; 603 struct vga_device *vgadev;
604 unsigned long flags; 604 unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899be964..3f7292486024 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1290 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
1291 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1578 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, 1581 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
1579 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1582 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
1580 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1583 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
1581 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
1582 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, 1584 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
1583 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, 1585 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
1584 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1586 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
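
Note on the hid-core.c hunks above: more device IDs are registered in hid_blacklist (the list of IDs claimed by a dedicated HID driver) and the Asus T91MT is dropped from hid_ignore_list so hid-mosart can bind it. A new ID normally touches three places: the #define in hid-ids.h, the specific driver's id table, and hid_blacklist. A sketch of the id-table part, with made-up values:

#include <linux/hid.h>
#include <linux/module.h>

/* Made-up vendor/product values; the real ones belong in hid-ids.h. */
#define USB_VENDOR_ID_EXAMPLE   0x1234
#define USB_DEVICE_ID_EXAMPLE   0x5678

static const struct hid_device_id example_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE, USB_DEVICE_ID_EXAMPLE) },
        { }
};
MODULE_DEVICE_TABLE(hid, example_devices);
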
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13c9ffa..765a4f53eb5c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
105 105
106#define USB_VENDOR_ID_ASUS 0x0486 106#define USB_VENDOR_ID_ASUS 0x0486
107#define USB_DEVICE_ID_ASUS_T91MT 0x0185 107#define USB_DEVICE_ID_ASUS_T91MT 0x0185
108#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
108 109
109#define USB_VENDOR_ID_ASUSTEK 0x0b05 110#define USB_VENDOR_ID_ASUSTEK 0x0b05
110#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 111#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
128 129
129#define USB_VENDOR_ID_BTC 0x046e 130#define USB_VENDOR_ID_BTC 0x046e
130#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 131#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
132#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
131 133
132#define USB_VENDOR_ID_CANDO 0x2087 134#define USB_VENDOR_ID_CANDO 0x2087
133#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
149 151
150#define USB_VENDOR_ID_CHICONY 0x04f2 152#define USB_VENDOR_ID_CHICONY 0x04f2
151#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 153#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
154#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
152 155
153#define USB_VENDOR_ID_CIDC 0x1677 156#define USB_VENDOR_ID_CIDC 0x1677
154 157
@@ -507,6 +510,7 @@
507#define USB_VENDOR_ID_UCLOGIC 0x5543 510#define USB_VENDOR_ID_UCLOGIC 0x5543
508#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 511#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
509#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 512#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
513#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
510 514
511#define USB_VENDOR_ID_VERNIER 0x08f7 515#define USB_VENDOR_ID_VERNIER 0x08f7
512#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 516#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c18906..ac5421d568f1 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
239 239
240static const struct hid_device_id mosart_devices[] = { 240static const struct hid_device_id mosart_devices[] = {
241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, 241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
242 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
242 { } 243 { }
243}; 244};
244MODULE_DEVICE_TABLE(hid, mosart_devices); 245MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f851f856..956ed9ac19d4 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
64static const struct hid_device_id ts_devices[] = { 64static const struct hid_device_id ts_devices[] = {
65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 68 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
68 { } 69 { }
69}; 70};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c0286679..599041a7f670 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
828 } 828 }
829 } else { 829 } else {
830 int skipped_report_id = 0; 830 int skipped_report_id = 0;
831 int report_id = buf[0];
831 if (buf[0] == 0x0) { 832 if (buf[0] == 0x0) {
832 /* Don't send the Report ID */ 833 /* Don't send the Report ID */
833 buf++; 834 buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
837 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 838 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
838 HID_REQ_SET_REPORT, 839 HID_REQ_SET_REPORT,
839 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 840 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
840 ((report_type + 1) << 8) | *buf, 841 ((report_type + 1) << 8) | report_id,
841 interface->desc.bInterfaceNumber, buf, count, 842 interface->desc.bInterfaceNumber, buf, count,
842 USB_CTRL_SET_TIMEOUT); 843 USB_CTRL_SET_TIMEOUT);
843 /* count also the report id, if this was a numbered report. */ 844 /* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
1445 { } 1446 { }
1446}; 1447};
1447 1448
1449struct usb_interface *usbhid_find_interface(int minor)
1450{
1451 return usb_find_interface(&hid_driver, minor);
1452}
1453
1448static struct hid_driver hid_usb_driver = { 1454static struct hid_driver hid_usb_driver = {
1449 .name = "generic-usb", 1455 .name = "generic-usb",
1450 .id_table = hid_usb_table, 1456 .id_table = hid_usb_table,
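
Note on the usbhid/hid-core.c hunks above: usbhid_output_raw_report() now latches buf[0] into report_id before the unnumbered-report path advances buf, so the SET_REPORT control message still carries the intended report ID in its wValue; the same file also adds usbhid_find_interface() so hiddev can resolve minors through the hid driver. A sketch of the ordering the fix restores, with a hypothetical send helper:

static int ctrl_msg(int wvalue, unsigned char *data, int len)
{
        (void)wvalue; (void)data;
        return len;                     /* stand-in for usb_control_msg() */
}

static int send_set_report(unsigned char *buf, int count, int report_type)
{
        int report_id = buf[0];         /* capture before buf may move */

        if (buf[0] == 0x0) {            /* unnumbered report: strip the ID byte */
                buf++;
                count--;
        }

        return ctrl_msg(((report_type + 1) << 8) | report_id, buf, count);
}
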
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d3147621..70da3181c8a0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, 33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
37 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
69 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
70 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
72 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 74 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
73 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
74 76
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
77 79
78 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, 80 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
79 81
82 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
83
80 { 0, 0 } 84 { 0, 0 }
81}; 85};
82 86
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51114aa..681e620eb95b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
270 struct hiddev *hiddev; 270 struct hiddev *hiddev;
271 int res; 271 int res;
272 272
273 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usbhid_find_interface(iminor(inode));
274 if (!intf) 274 if (!intf)
275 return -ENODEV; 275 return -ENODEV;
276 hid = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e720df..89d2e847dcc6 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@ void usbhid_submit_report
42(struct hid_device *hid, struct hid_report *report, unsigned char dir); 42(struct hid_device *hid, struct hid_report *report, unsigned char dir);
43int usbhid_get_power(struct hid_device *hid); 43int usbhid_get_power(struct hid_device *hid);
44void usbhid_put_power(struct hid_device *hid); 44void usbhid_put_power(struct hid_device *hid);
45struct usb_interface *usbhid_find_interface(int minor);
45 46
46/* iofl flags */ 47/* iofl flags */
47#define HID_CTRL_RUNNING 1 48#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09bdec0a..97499d00615a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
409 409
410config SENSORS_PKGTEMP 410config SENSORS_PKGTEMP
411 tristate "Intel processor package temperature sensor" 411 tristate "Intel processor package temperature sensor"
412 depends on X86 && PCI && EXPERIMENTAL 412 depends on X86 && EXPERIMENTAL
413 help 413 help
414 If you say yes here you get support for the package level temperature 414 If you say yes here you get support for the package level temperature
415 sensor inside your CPU. Check documentation/driver for details. 415 sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a9616af3..0683e6be662c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@ struct adm1031_data {
79 int chip_type; 79 int chip_type;
80 char valid; /* !=0 if following fields are valid */ 80 char valid; /* !=0 if following fields are valid */
81 unsigned long last_updated; /* In jiffies */ 81 unsigned long last_updated; /* In jiffies */
82 unsigned int update_rate; /* In milliseconds */ 82 unsigned int update_interval; /* In milliseconds */
83 /* The chan_select_table contains the possible configurations for 83 /* The chan_select_table contains the possible configurations for
84 * auto fan control. 84 * auto fan control.
85 */ 85 */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
745 745
746/* Update Rate */ 746/* Update Interval */
747static const unsigned int update_rates[] = { 747static const unsigned int update_intervals[] = {
748 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 748 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
749}; 749};
750 750
751static ssize_t show_update_rate(struct device *dev, 751static ssize_t show_update_interval(struct device *dev,
752 struct device_attribute *attr, char *buf) 752 struct device_attribute *attr, char *buf)
753{ 753{
754 struct i2c_client *client = to_i2c_client(dev); 754 struct i2c_client *client = to_i2c_client(dev);
755 struct adm1031_data *data = i2c_get_clientdata(client); 755 struct adm1031_data *data = i2c_get_clientdata(client);
756 756
757 return sprintf(buf, "%u\n", data->update_rate); 757 return sprintf(buf, "%u\n", data->update_interval);
758} 758}
759 759
760static ssize_t set_update_rate(struct device *dev, 760static ssize_t set_update_interval(struct device *dev,
761 struct device_attribute *attr, 761 struct device_attribute *attr,
762 const char *buf, size_t count) 762 const char *buf, size_t count)
763{ 763{
764 struct i2c_client *client = to_i2c_client(dev); 764 struct i2c_client *client = to_i2c_client(dev);
765 struct adm1031_data *data = i2c_get_clientdata(client); 765 struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
771 if (err) 771 if (err)
772 return err; 772 return err;
773 773
774 /* find the nearest update rate from the table */ 774 /*
775 for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { 775 * Find the nearest update interval from the table.
776 if (val >= update_rates[i]) 776 * Use it to determine the matching update rate.
777 */
778 for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
779 if (val >= update_intervals[i])
777 break; 780 break;
778 } 781 }
779 /* if not found, we point to the last entry (lowest update rate) */ 782 /* if not found, we point to the last entry (lowest update interval) */
780 783
781 /* set the new update rate while preserving other settings */ 784 /* set the new update rate while preserving other settings */
782 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 785 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
785 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); 788 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
786 789
787 mutex_lock(&data->update_lock); 790 mutex_lock(&data->update_lock);
788 data->update_rate = update_rates[i]; 791 data->update_interval = update_intervals[i];
789 mutex_unlock(&data->update_lock); 792 mutex_unlock(&data->update_lock);
790 793
791 return count; 794 return count;
792} 795}
793 796
794static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, 797static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
795 set_update_rate); 798 set_update_interval);
796 799
797static struct attribute *adm1031_attributes[] = { 800static struct attribute *adm1031_attributes[] = {
798 &sensor_dev_attr_fan1_input.dev_attr.attr, 801 &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
830 833
831 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, 834 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
832 835
833 &dev_attr_update_rate.attr, 836 &dev_attr_update_interval.attr,
834 &dev_attr_alarms.attr, 837 &dev_attr_alarms.attr,
835 838
836 NULL 839 NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
981 mask = ADM1031_UPDATE_RATE_MASK; 984 mask = ADM1031_UPDATE_RATE_MASK;
982 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 985 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
983 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; 986 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
984 data->update_rate = update_rates[i]; 987 /* Save it as update interval */
988 data->update_interval = update_intervals[i];
985} 989}
986 990
987static struct adm1031_data *adm1031_update_device(struct device *dev) 991static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
993 997
994 mutex_lock(&data->update_lock); 998 mutex_lock(&data->update_lock);
995 999
996 next_update = data->last_updated + msecs_to_jiffies(data->update_rate); 1000 next_update = data->last_updated
1001 + msecs_to_jiffies(data->update_interval);
997 if (time_after(jiffies, next_update) || !data->valid) { 1002 if (time_after(jiffies, next_update) || !data->valid) {
998 1003
999 dev_dbg(&client->dev, "Starting adm1031 update\n"); 1004 dev_dbg(&client->dev, "Starting adm1031 update\n");
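
Note on the adm1031.c changes above: the non-standard update_rate attribute is renamed to the hwmon-standard update_interval, and the store path keeps a nearest-match lookup: the table runs from the longest interval to the shortest, the first entry the request is not shorter than wins, and falling off the end selects the shortest. A standalone sketch of that lookup:

#include <linux/kernel.h>

static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,   /* milliseconds */
};

static unsigned int pick_update_interval(unsigned long requested_ms)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++)
                if (requested_ms >= update_intervals[i])
                        break;

        return update_intervals[i];     /* last entry if nothing matched */
}
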
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de8111114f46..a23b17a78ace 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <asm/msr.h> 37#include <asm/msr.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/smp.h>
39 40
40#define DRVNAME "coretemp" 41#define DRVNAME "coretemp"
41 42
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
423 int err; 424 int err;
424 struct platform_device *pdev; 425 struct platform_device *pdev;
425 struct pdev_entry *pdev_entry; 426 struct pdev_entry *pdev_entry;
426#ifdef CONFIG_SMP
427 struct cpuinfo_x86 *c = &cpu_data(cpu); 427 struct cpuinfo_x86 *c = &cpu_data(cpu);
428#endif 428
429 /*
430 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
431 * sensors. We check this bit only, all the early CPUs
432 * without thermal sensors will be filtered out.
433 */
434 if (!cpu_has(c, X86_FEATURE_DTS)) {
435 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
436 " has no thermal sensor.\n", c->x86_model);
437 return 0;
438 }
429 439
430 mutex_lock(&pdev_list_mutex); 440 mutex_lock(&pdev_list_mutex);
431 441
@@ -482,14 +492,22 @@ exit:
482 492
483static void coretemp_device_remove(unsigned int cpu) 493static void coretemp_device_remove(unsigned int cpu)
484{ 494{
485 struct pdev_entry *p, *n; 495 struct pdev_entry *p;
496 unsigned int i;
497
486 mutex_lock(&pdev_list_mutex); 498 mutex_lock(&pdev_list_mutex);
487 list_for_each_entry_safe(p, n, &pdev_list, list) { 499 list_for_each_entry(p, &pdev_list, list) {
488 if (p->cpu == cpu) { 500 if (p->cpu != cpu)
489 platform_device_unregister(p->pdev); 501 continue;
490 list_del(&p->list); 502
491 kfree(p); 503 platform_device_unregister(p->pdev);
492 } 504 list_del(&p->list);
505 mutex_unlock(&pdev_list_mutex);
506 kfree(p);
507 for_each_cpu(i, cpu_sibling_mask(cpu))
508 if (i != cpu && !coretemp_device_add(i))
509 break;
510 return;
493 } 511 }
494 mutex_unlock(&pdev_list_mutex); 512 mutex_unlock(&pdev_list_mutex);
495} 513}
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
527 if (err) 545 if (err)
528 goto exit; 546 goto exit;
529 547
530 for_each_online_cpu(i) { 548 for_each_online_cpu(i)
531 struct cpuinfo_x86 *c = &cpu_data(i); 549 coretemp_device_add(i);
532 /* 550
533 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 551#ifndef CONFIG_HOTPLUG_CPU
534 * sensors. We check this bit only, all the early CPUs
535 * without thermal sensors will be filtered out.
536 */
537 if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
538 coretemp_device_add(i);
539 else {
540 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
541 " has no thermal sensor.\n", c->x86_model);
542 }
543 }
544 if (list_empty(&pdev_list)) { 552 if (list_empty(&pdev_list)) {
545 err = -ENODEV; 553 err = -ENODEV;
546 goto exit_driver_unreg; 554 goto exit_driver_unreg;
547 } 555 }
556#endif
548 557
549 register_hotcpu_notifier(&coretemp_cpu_notifier); 558 register_hotcpu_notifier(&coretemp_cpu_notifier);
550 return 0; 559 return 0;
551 560
552exit_driver_unreg:
553#ifndef CONFIG_HOTPLUG_CPU 561#ifndef CONFIG_HOTPLUG_CPU
562exit_driver_unreg:
554 platform_driver_unregister(&coretemp_driver); 563 platform_driver_unregister(&coretemp_driver);
555#endif 564#endif
556exit: 565exit:
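
Note on the coretemp.c changes above: the thermal-sensor capability test moves into coretemp_device_add(), so hot-plugged CPUs are filtered the same way as those present at load time, and coretemp_device_remove() now tries to re-add a sibling of the departing CPU so the package keeps a sensor device. The gate itself is a CPUID feature check; a minimal sketch, assuming the standard x86 helpers:

#include <asm/processor.h>
#include <asm/cpufeature.h>

/* True when this CPU advertises a digital thermal sensor
 * (CPUID.06H:EAX[0], exposed by the kernel as X86_FEATURE_DTS). */
static int cpu_has_thermal_sensor(struct cpuinfo_x86 *c)
{
        return cpu_has(c, X86_FEATURE_DTS);
}
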
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20dead1..8dee3f38fdfb 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); 308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
309 if (res) { 309 if (res) {
310 dev_warn(&client->dev, "create group failed\n"); 310 dev_warn(&client->dev, "create group failed\n");
311 hwmon_device_unregister(data->hwmon_dev);
312 goto thermal_error1; 311 goto thermal_error1;
313 } 312 }
314 data->hwmon_dev = hwmon_device_register(&client->dev); 313 data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 537841ef44b9..75afb3b0e076 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
111/* Super-I/O Function prototypes */ 111/* Super-I/O Function prototypes */
112static inline int superio_inb(int base, int reg); 112static inline int superio_inb(int base, int reg);
113static inline int superio_inw(int base, int reg); 113static inline int superio_inw(int base, int reg);
114static inline void superio_enter(int base); 114static inline int superio_enter(int base);
115static inline void superio_select(int base, int ld); 115static inline void superio_select(int base, int ld);
116static inline void superio_exit(int base); 116static inline void superio_exit(int base);
117 117
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
861 return val; 861 return val;
862} 862}
863 863
864static inline void superio_enter(int base) 864static inline int superio_enter(int base)
865{ 865{
866 /* Don't step on other drivers' I/O space by accident */
867 if (!request_muxed_region(base, 2, DRVNAME)) {
868 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
869 base);
870 return -EBUSY;
871 }
872
866 /* according to the datasheet the key must be send twice! */ 873 /* according to the datasheet the key must be send twice! */
867 outb(SIO_UNLOCK_KEY, base); 874 outb(SIO_UNLOCK_KEY, base);
868 outb(SIO_UNLOCK_KEY, base); 875 outb(SIO_UNLOCK_KEY, base);
876
877 return 0;
869} 878}
870 879
871static inline void superio_select(int base, int ld) 880static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
877static inline void superio_exit(int base) 886static inline void superio_exit(int base)
878{ 887{
879 outb(SIO_LOCK_KEY, base); 888 outb(SIO_LOCK_KEY, base);
889 release_region(base, 2);
880} 890}
881 891
882static inline int fan_from_reg(u16 reg) 892static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
2175static int __init f71882fg_find(int sioaddr, unsigned short *address, 2185static int __init f71882fg_find(int sioaddr, unsigned short *address,
2176 struct f71882fg_sio_data *sio_data) 2186 struct f71882fg_sio_data *sio_data)
2177{ 2187{
2178 int err = -ENODEV;
2179 u16 devid; 2188 u16 devid;
2180 2189 int err = superio_enter(sioaddr);
2181 /* Don't step on other drivers' I/O space by accident */ 2190 if (err)
2182 if (!request_region(sioaddr, 2, DRVNAME)) { 2191 return err;
2183 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
2184 (int)sioaddr);
2185 return -EBUSY;
2186 }
2187
2188 superio_enter(sioaddr);
2189 2192
2190 devid = superio_inw(sioaddr, SIO_REG_MANID); 2193 devid = superio_inw(sioaddr, SIO_REG_MANID);
2191 if (devid != SIO_FINTEK_ID) { 2194 if (devid != SIO_FINTEK_ID) {
2192 pr_debug(DRVNAME ": Not a Fintek device\n"); 2195 pr_debug(DRVNAME ": Not a Fintek device\n");
2196 err = -ENODEV;
2193 goto exit; 2197 goto exit;
2194 } 2198 }
2195 2199
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2213 default: 2217 default:
2214 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2218 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
2215 (unsigned int)devid); 2219 (unsigned int)devid);
2220 err = -ENODEV;
2216 goto exit; 2221 goto exit;
2217 } 2222 }
2218 2223
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2223 2228
2224 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2229 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
2225 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2230 printk(KERN_WARNING DRVNAME ": Device not activated\n");
2231 err = -ENODEV;
2226 goto exit; 2232 goto exit;
2227 } 2233 }
2228 2234
2229 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2235 *address = superio_inw(sioaddr, SIO_REG_ADDR);
2230 if (*address == 0) { 2236 if (*address == 0) {
2231 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2237 printk(KERN_WARNING DRVNAME ": Base address not set\n");
2238 err = -ENODEV;
2232 goto exit; 2239 goto exit;
2233 } 2240 }
2234 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ 2241 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2246 (int)superio_inb(sioaddr, SIO_REG_DEVREV));
2240exit: 2247exit:
2241 superio_exit(sioaddr); 2248 superio_exit(sioaddr);
2242 release_region(sioaddr, 2);
2243 return err; 2249 return err;
2244} 2250}
2245 2251
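
Note on the f71882fg.c changes above: superio_enter() now reserves the two Super-I/O ports with request_muxed_region() and returns an error code instead of leaving the reservation to its caller, superio_exit() releases the region, and every bail-out in f71882fg_find() sets an explicit -ENODEV. A sketch of the enter/exit pairing (illustrative key values; the real ones come from the datasheet):

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_SIO_UNLOCK_KEY  0x87    /* illustrative */
#define EXAMPLE_SIO_LOCK_KEY    0xaa    /* illustrative */

static int example_superio_enter(int base)
{
        /* Serialize with other drivers (gpio, wdt, hwmon) sharing the ports. */
        if (!request_muxed_region(base, 2, "example-sio"))
                return -EBUSY;

        outb(EXAMPLE_SIO_UNLOCK_KEY, base);     /* key is sent twice */
        outb(EXAMPLE_SIO_UNLOCK_KEY, base);
        return 0;
}

static void example_superio_exit(int base)
{
        outb(EXAMPLE_SIO_LOCK_KEY, base);
        release_region(base, 2);
}
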
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc5334d..9638d58f99fd 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
79#define F75375_REG_PWM2_DROP_DUTY 0x6C 79#define F75375_REG_PWM2_DROP_DUTY 0x6C
80 80
81#define FAN_CTRL_LINEAR(nr) (4 + nr) 81#define FAN_CTRL_LINEAR(nr) (4 + nr)
82#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) 82#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
83 83
84/* 84/*
85 * Data structures and manipulation thereof 85 * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
298 return -EINVAL; 298 return -EINVAL;
299 299
300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); 300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
301 fanmode = ~(3 << FAN_CTRL_MODE(nr)); 301 fanmode &= ~(3 << FAN_CTRL_MODE(nr));
302 302
303 switch (val) { 303 switch (val) {
304 case 0: /* Full speed */ 304 case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
350 350
351 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
352 conf = f75375_read8(client, F75375_REG_CONFIG1); 352 conf = f75375_read8(client, F75375_REG_CONFIG1);
353 conf = ~(1 << FAN_CTRL_LINEAR(nr)); 353 conf &= ~(1 << FAN_CTRL_LINEAR(nr));
354 354
355 if (val == 0) 355 if (val == 0)
356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ; 356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
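
Note on the f75375s.c hunks above: two real bugs are fixed. FAN_CTRL_MODE() was off by one bit position, and the mode/linear updates used `=` where `&=` was intended, so the whole register was replaced by an inverted mask instead of having one field cleared. A generic sketch of the intended read-modify-write:

/* Clear only the field being changed, then OR in the new value. */
static unsigned char set_reg_field(unsigned char reg, int shift,
                                   unsigned char mask, unsigned char val)
{
        reg &= ~(mask << shift);        /* e.g. fanmode &= ~(3 << FAN_CTRL_MODE(nr)) */
        reg |= (val & mask) << shift;
        return reg;
}
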
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55e67e3..36e957532230 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
224 AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
225 AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
224 { NULL, } 226 { NULL, }
225/* Laptop models without axis info (yet): 227/* Laptop models without axis info (yet):
226 * "NC6910" "HP Compaq 6910" 228 * "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b159..fc591ae53107 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
277 wake_up_interruptible(&lis3_dev.misc_wait); 277 wake_up_interruptible(&lis3_dev.misc_wait);
278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); 278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
279out: 279out:
280 if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && 280 if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
281 lis3_dev.idev->input->users) 281 lis3_dev.idev->input->users)
282 return IRQ_WAKE_THREAD; 282 return IRQ_WAKE_THREAD;
283 return IRQ_HANDLED; 283 return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
718 * io-apic is not configurable (and generates a warning) but I keep it 718 * io-apic is not configurable (and generates a warning) but I keep it
719 * in case of support for other hardware. 719 * in case of support for other hardware.
720 */ 720 */
721 if (dev->whoami == WAI_8B) 721 if (dev->pdata && dev->whoami == WAI_8B)
722 thread_fn = lis302dl_interrupt_thread1_8b; 722 thread_fn = lis302dl_interrupt_thread1_8b;
723 else 723 else
724 thread_fn = NULL; 724 thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f5402c1d7..8e5933b72d19 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{ 121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123 123
124 if (!lis3->pdata->wakeup_flags) 124 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3); 125 lis3lv02d_poweroff(lis3);
126 return 0; 126 return 0;
127} 127}
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{ 130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132 132
133 if (!lis3->pdata->wakeup_flags) 133 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3); 134 lis3lv02d_poweron(lis3);
135 return 0; 135 return 0;
136} 136}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b16808a274..b9be5e3a22b3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
92{ 92{
93 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 93 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
94 94
95 if (!lis3->pdata->wakeup_flags) 95 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
96 lis3lv02d_poweroff(&lis3_dev); 96 lis3lv02d_poweroff(&lis3_dev);
97 97
98 return 0; 98 return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
102{ 102{
103 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 103 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
104 104
105 if (!lis3->pdata->wakeup_flags) 105 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
106 lis3lv02d_poweron(lis3); 106 lis3lv02d_poweron(lis3);
107 107
108 return 0; 108 return 0;
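
Note on the three lis3lv02d hunks above: they add the same guard. Boards probed without platform data leave lis3->pdata NULL, so the interrupt setup and the suspend/resume wakeup checks must test the pointer before dereferencing it. A minimal sketch of the guard, with hypothetical structures mirroring lis3->pdata:

struct example_pdata {
        unsigned int wakeup_flags;
};

struct example_chip {
        struct example_pdata *pdata;    /* may be NULL without board data */
};

static int keep_powered_for_wakeup(const struct example_chip *lis3)
{
        /* Same shape as the checks above: pointer first, then the field. */
        return lis3->pdata && lis3->pdata->wakeup_flags;
}
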
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d42112d..464340f25496 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
91struct lm95241_data { 91struct lm95241_data {
92 struct device *hwmon_dev; 92 struct device *hwmon_dev;
93 struct mutex update_lock; 93 struct mutex update_lock;
94 unsigned long last_updated, rate; /* in jiffies */ 94 unsigned long last_updated, interval; /* in jiffies */
95 char valid; /* zero until following fields are valid */ 95 char valid; /* zero until following fields are valid */
96 /* registers values */ 96 /* registers values */
97 u8 local_h, local_l; /* local */ 97 u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
114show_temp(remote1); 114show_temp(remote1);
115show_temp(remote2); 115show_temp(remote2);
116 116
117static ssize_t show_rate(struct device *dev, struct device_attribute *attr, 117static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
118 char *buf) 118 char *buf)
119{ 119{
120 struct lm95241_data *data = lm95241_update_device(dev); 120 struct lm95241_data *data = lm95241_update_device(dev);
121 121
122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); 122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
123 return strlen(buf); 123 return strlen(buf);
124} 124}
125 125
126static ssize_t set_rate(struct device *dev, struct device_attribute *attr, 126static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
127 const char *buf, size_t count) 127 const char *buf, size_t count)
128{ 128{
129 struct i2c_client *client = to_i2c_client(dev); 129 struct i2c_client *client = to_i2c_client(dev);
130 struct lm95241_data *data = i2c_get_clientdata(client); 130 struct lm95241_data *data = i2c_get_clientdata(client);
131 131
132 strict_strtol(buf, 10, &data->rate); 132 strict_strtol(buf, 10, &data->interval);
133 data->rate = data->rate * HZ / 1000; 133 data->interval = data->interval * HZ / 1000;
134 134
135 return count; 135 return count;
136} 136}
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); 286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); 287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); 288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
289static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); 289static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
290 set_interval);
290 291
291static struct attribute *lm95241_attributes[] = { 292static struct attribute *lm95241_attributes[] = {
292 &dev_attr_temp1_input.attr, 293 &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
298 &dev_attr_temp3_min.attr, 299 &dev_attr_temp3_min.attr,
299 &dev_attr_temp2_max.attr, 300 &dev_attr_temp2_max.attr,
300 &dev_attr_temp3_max.attr, 301 &dev_attr_temp3_max.attr,
301 &dev_attr_rate.attr, 302 &dev_attr_update_interval.attr,
302 NULL 303 NULL
303}; 304};
304 305
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
376{ 377{
377 struct lm95241_data *data = i2c_get_clientdata(client); 378 struct lm95241_data *data = i2c_get_clientdata(client);
378 379
379 data->rate = HZ; /* 1 sec default */ 380 data->interval = HZ; /* 1 sec default */
380 data->valid = 0; 381 data->valid = 0;
381 data->config = CFG_CR0076; 382 data->config = CFG_CR0076;
382 data->model = 0; 383 data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
410 411
411 mutex_lock(&data->update_lock); 412 mutex_lock(&data->update_lock);
412 413
413 if (time_after(jiffies, data->last_updated + data->rate) || 414 if (time_after(jiffies, data->last_updated + data->interval) ||
414 !data->valid) { 415 !data->valid) {
415 dev_dbg(&client->dev, "Updating lm95241 data.\n"); 416 dev_dbg(&client->dev, "Updating lm95241 data.\n");
416 data->local_h = 417 data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fcda6ed..f11903936c8b 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
37#include <asm/msr.h> 36#include <asm/msr.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
39 38
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
224 223
225 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); 224 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
226 if (err) 225 if (err)
227 goto exit_free; 226 goto exit_dev;
228 227
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 228 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 229 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
238 237
239exit_class: 238exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 239 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
240exit_dev:
241 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 242exit_free:
242 kfree(data); 243 kfree(data);
243exit: 244exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
250 251
251 hwmon_device_unregister(data->hwmon_dev); 252 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 253 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
254 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 255 platform_set_drvdata(pdev, NULL);
254 kfree(data); 256 kfree(data);
255 return 0; 257 return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
281 int err; 283 int err;
282 struct platform_device *pdev; 284 struct platform_device *pdev;
283 struct pdev_entry *pdev_entry; 285 struct pdev_entry *pdev_entry;
284#ifdef CONFIG_SMP
285 struct cpuinfo_x86 *c = &cpu_data(cpu); 286 struct cpuinfo_x86 *c = &cpu_data(cpu);
286#endif 287
288 if (!cpu_has(c, X86_FEATURE_PTS))
289 return 0;
287 290
288 mutex_lock(&pdev_list_mutex); 291 mutex_lock(&pdev_list_mutex);
289 292
@@ -339,17 +342,18 @@ exit:
339#ifdef CONFIG_HOTPLUG_CPU 342#ifdef CONFIG_HOTPLUG_CPU
340static void pkgtemp_device_remove(unsigned int cpu) 343static void pkgtemp_device_remove(unsigned int cpu)
341{ 344{
342 struct pdev_entry *p, *n; 345 struct pdev_entry *p;
343 unsigned int i; 346 unsigned int i;
344 int err; 347 int err;
345 348
346 mutex_lock(&pdev_list_mutex); 349 mutex_lock(&pdev_list_mutex);
347 list_for_each_entry_safe(p, n, &pdev_list, list) { 350 list_for_each_entry(p, &pdev_list, list) {
348 if (p->cpu != cpu) 351 if (p->cpu != cpu)
349 continue; 352 continue;
350 353
351 platform_device_unregister(p->pdev); 354 platform_device_unregister(p->pdev);
352 list_del(&p->list); 355 list_del(&p->list);
356 mutex_unlock(&pdev_list_mutex);
353 kfree(p); 357 kfree(p);
354 for_each_cpu(i, cpu_core_mask(cpu)) { 358 for_each_cpu(i, cpu_core_mask(cpu)) {
355 if (i != cpu) { 359 if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
358 break; 362 break;
359 } 363 }
360 } 364 }
361 break; 365 return;
362 } 366 }
363 mutex_unlock(&pdev_list_mutex); 367 mutex_unlock(&pdev_list_mutex);
364} 368}
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
399 goto exit; 403 goto exit;
400 404
401 for_each_online_cpu(i) { 405 for_each_online_cpu(i) {
402 struct cpuinfo_x86 *c = &cpu_data(i);
403
404 if (!cpu_has(c, X86_FEATURE_PTS))
405 continue;
406
407 err = pkgtemp_device_add(i); 406 err = pkgtemp_device_add(i);
408 if (err) 407 if (err)
409 goto exit_devices_unreg; 408 goto exit_devices_unreg;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69dd36fb..072c58008a63 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
127static inline void 127static inline void
128superio_exit(int ioreg) 128superio_exit(int ioreg)
129{ 129{
130 outb(0xaa, ioreg);
130 outb(0x02, ioreg); 131 outb(0x02, ioreg);
131 outb(0x02, ioreg + 1); 132 outb(0x02, ioreg + 1);
132} 133}
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 2222c87876b9..b8feac5f2ef4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
357 357
358 dev->terminate = 0; 358 dev->terminate = 0;
359 359
360 /* write the data into mode register */
361 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
362
363 /* 360 /*
364 * First byte should be set here, not after interrupt, 361 * First byte should be set here, not after interrupt,
365 * because transmit-data-ready interrupt can come before 362 * because transmit-data-ready interrupt can come before
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
371 dev->buf_len--; 368 dev->buf_len--;
372 } 369 }
373 370
371 /* write the data into mode register; start transmitting */
372 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
373
374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
375 dev->adapter.timeout); 375 dev->adapter.timeout);
376 if (r == 0) { 376 if (r == 0) {
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 0e9f85d0a835..56dbe54e8811 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
218 return result; 218 return result;
219 } else if (result == 0) { 219 } else if (result == 0) {
220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__);
221 result = -ETIMEDOUT; 221 return -ETIMEDOUT;
222 } 222 }
223 223
224 return 0; 224 return 0;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb55378..b33c78586bfc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
680 680
681 if (r == 0) 681 if (r == 0)
682 r = num; 682 r = num;
683
684 omap_i2c_wait_for_bb(dev);
683out: 685out:
684 omap_i2c_idle(dev); 686 omap_i2c_idle(dev);
685 return r; 687 return r;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 72902e0bbfa7..bf831bf81587 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
662 unsigned long sda_delay; 662 unsigned long sda_delay;
663 663
664 if (pdata->sda_delay) { 664 if (pdata->sda_delay) {
665 sda_delay = (freq / 1000) * pdata->sda_delay; 665 sda_delay = clkin * pdata->sda_delay;
666 sda_delay /= 1000000; 666 sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 667 sda_delay = DIV_ROUND_UP(sda_delay, 5);
668 if (sda_delay > 3) 668 if (sda_delay > 3)
669 sda_delay = 3; 669 sda_delay = 3;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfec0c5..068cef0a987a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1448 if (hwif == NULL) 1448 if (hwif == NULL)
1449 continue; 1449 continue;
1450 1450
1451 if (hwif->present)
1452 hwif_register_devices(hwif);
1453 }
1454
1455 ide_host_for_each_port(i, hwif, host) {
1456 if (hwif == NULL)
1457 continue;
1458
1459 ide_sysfs_register_port(hwif); 1451 ide_sysfs_register_port(hwif);
1460 ide_proc_register_port(hwif); 1452 ide_proc_register_port(hwif);
1461 1453
1462 if (hwif->present) 1454 if (hwif->present) {
1463 ide_proc_port_register_devices(hwif); 1455 ide_proc_port_register_devices(hwif);
1456 hwif_register_devices(hwif);
1457 }
1464 } 1458 }
1465 1459
1466 return j ? 0 : -1; 1460 return j ? 0 : -1;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a10152bb1427..0906fc5b69b9 100755..100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -83,7 +83,7 @@ static unsigned int mwait_substates;
83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
84static unsigned int lapic_timer_reliable_states; 84static unsigned int lapic_timer_reliable_states;
85 85
86static struct cpuidle_device *intel_idle_cpuidle_devices; 86static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
88 88
89static struct cpuidle_state *cpuidle_state_table; 89static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
108 .name = "NHM-C3", 108 .name = "NHM-C3",
109 .desc = "MWAIT 0x10", 109 .desc = "MWAIT 0x10",
110 .driver_data = (void *) 0x10, 110 .driver_data = (void *) 0x10,
111 .flags = CPUIDLE_FLAG_TIME_VALID, 111 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
112 .exit_latency = 20, 112 .exit_latency = 20,
113 .power_usage = 500, 113 .power_usage = 500,
114 .target_residency = 80, 114 .target_residency = 80,
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
117 .name = "NHM-C6", 117 .name = "NHM-C6",
118 .desc = "MWAIT 0x20", 118 .desc = "MWAIT 0x20",
119 .driver_data = (void *) 0x20, 119 .driver_data = (void *) 0x20,
120 .flags = CPUIDLE_FLAG_TIME_VALID, 120 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
121 .exit_latency = 200, 121 .exit_latency = 200,
122 .power_usage = 350, 122 .power_usage = 350,
123 .target_residency = 800, 123 .target_residency = 800,
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
149 .name = "ATM-C4", 149 .name = "ATM-C4",
150 .desc = "MWAIT 0x30", 150 .desc = "MWAIT 0x30",
151 .driver_data = (void *) 0x30, 151 .driver_data = (void *) 0x30,
152 .flags = CPUIDLE_FLAG_TIME_VALID, 152 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
153 .exit_latency = 100, 153 .exit_latency = 100,
154 .power_usage = 250, 154 .power_usage = 250,
155 .target_residency = 400, 155 .target_residency = 400,
@@ -159,7 +159,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
159 .name = "ATM-C6", 159 .name = "ATM-C6",
160 .desc = "MWAIT 0x40", 160 .desc = "MWAIT 0x40",
161 .driver_data = (void *) 0x40, 161 .driver_data = (void *) 0x40,
162 .flags = CPUIDLE_FLAG_TIME_VALID, 162 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
163 .exit_latency = 200, 163 .exit_latency = 200,
164 .power_usage = 150, 164 .power_usage = 150,
165 .target_residency = 800, 165 .target_residency = 800,
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
185 185
186 local_irq_disable(); 186 local_irq_disable();
187 187
188 /*
189 * If the state flag indicates that the TLB will be flushed or if this
190 * is the deepest c-state supported, do a voluntary leave mm to avoid
 191	 * costly and mostly unnecessary wakeups for flushing the user TLBs
192 * associated with the active mm.
193 */
194 if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
195 (&dev->states[dev->state_count - 1] == state))
196 leave_mm(cpu);
197
188 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 198 if (!(lapic_timer_reliable_states & (1 << (cstate))))
189 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 199 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
190 200
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7d4482..78fbe9ffe7f0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 262144 56#define T3_MAX_CQ_DEPTH 65536
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a21994..13c88871dc3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
463 V_MSS_IDX(mtu_idx) | 463 V_MSS_IDX(mtu_idx) |
464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
466 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467 V_CONG_CONTROL_FLAVOR(cong_flavor);
467 skb->priority = CPL_PRIORITY_SETUP; 468 skb->priority = CPL_PRIORITY_SETUP;
468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 set_arp_failure_handler(skb, act_open_req_arp_failure);
469 470
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1280 V_MSS_IDX(mtu_idx) | 1281 V_MSS_IDX(mtu_idx) |
1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1283 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1284 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285 V_CONG_CONTROL_FLAVOR(cong_flavor);
1284 1286
1285 rpl = cplhdr(skb); 1287 rpl = cplhdr(skb);
1286 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1288 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea55daac..61e0efd4ccfb 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
502static void nes_retrans_expired(struct nes_cm_node *cm_node) 502static void nes_retrans_expired(struct nes_cm_node *cm_node)
503{ 503{
504 struct iw_cm_id *cm_id = cm_node->cm_id; 504 struct iw_cm_id *cm_id = cm_node->cm_id;
505 switch (cm_node->state) { 505 enum nes_cm_node_state state = cm_node->state;
506 cm_node->state = NES_CM_STATE_CLOSED;
507 switch (state) {
506 case NES_CM_STATE_SYN_RCVD: 508 case NES_CM_STATE_SYN_RCVD:
507 case NES_CM_STATE_CLOSING: 509 case NES_CM_STATE_CLOSING:
508 rem_ref_cm_node(cm_node->cm_core, cm_node); 510 rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
511 case NES_CM_STATE_FIN_WAIT1: 513 case NES_CM_STATE_FIN_WAIT1:
512 if (cm_node->cm_id) 514 if (cm_node->cm_id)
513 cm_id->rem_ref(cm_id); 515 cm_id->rem_ref(cm_id);
514 cm_node->state = NES_CM_STATE_CLOSED;
515 send_reset(cm_node, NULL); 516 send_reset(cm_node, NULL);
516 break; 517 break;
517 default: 518 default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1439 break; 1440 break;
1440 case NES_CM_STATE_MPAREQ_RCVD: 1441 case NES_CM_STATE_MPAREQ_RCVD:
1441 passive_state = atomic_add_return(1, &cm_node->passive_state); 1442 passive_state = atomic_add_return(1, &cm_node->passive_state);
1442 if (passive_state == NES_SEND_RESET_EVENT)
1443 create_event(cm_node, NES_CM_EVENT_RESET);
1444 cm_node->state = NES_CM_STATE_CLOSED;
1445 dev_kfree_skb_any(skb); 1443 dev_kfree_skb_any(skb);
1446 break; 1444 break;
1447 case NES_CM_STATE_ESTABLISHED: 1445 case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1456 case NES_CM_STATE_CLOSED: 1454 case NES_CM_STATE_CLOSED:
1457 drop_packet(skb); 1455 drop_packet(skb);
1458 break; 1456 break;
1457 case NES_CM_STATE_FIN_WAIT2:
1459 case NES_CM_STATE_FIN_WAIT1: 1458 case NES_CM_STATE_FIN_WAIT1:
1460 case NES_CM_STATE_LAST_ACK: 1459 case NES_CM_STATE_LAST_ACK:
1461 cm_node->cm_id->rem_ref(cm_node->cm_id); 1460 cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2777 return -EINVAL; 2776 return -EINVAL;
2778 } 2777 }
2779 2778
2779 passive_state = atomic_add_return(1, &cm_node->passive_state);
2780 if (passive_state == NES_SEND_RESET_EVENT) {
2781 rem_ref_cm_node(cm_node->cm_core, cm_node);
2782 return -ECONNRESET;
2783 }
2784
2780 /* associate the node with the QP */ 2785 /* associate the node with the QP */
2781 nesqp->cm_node = (void *)cm_node; 2786 nesqp->cm_node = (void *)cm_node;
2782 cm_node->nesqp = nesqp; 2787 cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2979 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " 2984 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2980 "ret=%d\n", __func__, __LINE__, ret); 2985 "ret=%d\n", __func__, __LINE__, ret);
2981 2986
2982 passive_state = atomic_add_return(1, &cm_node->passive_state);
2983 if (passive_state == NES_SEND_RESET_EVENT)
2984 create_event(cm_node, NES_CM_EVENT_RESET);
2985 return 0; 2987 return 0;
2986} 2988}
2987 2989
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c851c69..1980a461c499 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3468 return; /* Ignore it, wait for close complete */ 3468 return; /* Ignore it, wait for close complete */
3469 3469
3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3471 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
3472 (nesqp->ibqp_state == IB_QPS_RTS) &&
3473 ((nesadapter->eeprom_version >> 16) != NES_A0)) {
3474 spin_lock_irqsave(&nesqp->lock, flags);
3475 nesqp->hw_iwarp_state = iwarp_state;
3476 nesqp->hw_tcp_state = tcp_state;
3477 nesqp->last_aeq = async_event_id;
3478 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3479 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3480 spin_unlock_irqrestore(&nesqp->lock, flags);
3481 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3482 nes_cm_disconn(nesqp);
3483 }
3471 nesqp->cm_id->add_ref(nesqp->cm_id); 3484 nesqp->cm_id->add_ref(nesqp->cm_id);
3472 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3485 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
3473 NES_TIMER_TYPE_CLOSE, 1, 0); 3486 NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3477 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3490 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3478 async_event_id, nesqp->last_aeq, tcp_state); 3491 async_event_id, nesqp->last_aeq, tcp_state);
3479 } 3492 }
3480
3481 break; 3493 break;
3482 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3494 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3483 if (nesqp->term_flags) { 3495 if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183db32b1..1204c3432b63 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
45#define NES_PHY_TYPE_KR 9 45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48#define NES_A0 3
48 49
49enum pci_regs { 50enum pci_regs {
50 NES_INT_STAT = 0x0000, 51 NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49cdbcf..10560c796fd6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1448 nes_write_indexed(nesdev, 1448 nes_write_indexed(nesdev,
1449 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1449 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1450 nesdev->disable_tx_flow_control = 0; 1450 nesdev->disable_tx_flow_control = 0;
1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { 1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
1452 u32temp = nes_read_indexed(nesdev, 1452 u32temp = nes_read_indexed(nesdev,
1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1455 nes_write_indexed(nesdev, 1455 nes_write_indexed(nesdev,
1456 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1456 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1457 nesdev->disable_tx_flow_control = 1; 1457 nesdev->disable_tx_flow_control = 1;
1458 } 1458 }
1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { 1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f4147a..ab6982056518 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
1599 * @dev: input device supporting MT events and finger tracking 1599 * @dev: input device supporting MT events and finger tracking
1600 * @num_slots: number of slots used by the device 1600 * @num_slots: number of slots used by the device
1601 * 1601 *
1602 * This function allocates all necessary memory for MT slot handling 1602 * This function allocates all necessary memory for MT slot handling in the
1603 * in the input device, and adds ABS_MT_SLOT to the device capabilities. 1603 * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
 1604	 * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
1604 */ 1605 */
1605int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) 1606int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1606{ 1607{
1608 int i;
1609
1607 if (!num_slots) 1610 if (!num_slots)
1608 return 0; 1611 return 0;
1609 1612
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1614 dev->mtsize = num_slots; 1617 dev->mtsize = num_slots;
1615 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); 1618 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
1616 1619
1620 /* Mark slots as 'unused' */
1621 for (i = 0; i < num_slots; i++)
1622 dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
1623
1617 return 0; 1624 return 0;
1618} 1625}
1619EXPORT_SYMBOL(input_mt_create_slots); 1626EXPORT_SYMBOL(input_mt_create_slots);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49146a3..b95231763911 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
337 const struct bcm5974_config *cfg, 337 const struct bcm5974_config *cfg,
338 const struct tp_finger *f) 338 const struct tp_finger *f)
339{ 339{
340 input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); 340 input_report_abs(input, ABS_MT_TOUCH_MAJOR,
341 input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); 341 raw2int(f->force_major) << 1);
342 input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); 342 input_report_abs(input, ABS_MT_TOUCH_MINOR,
343 input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); 343 raw2int(f->force_minor) << 1);
344 input_report_abs(input, ABS_MT_WIDTH_MAJOR,
345 raw2int(f->size_major) << 1);
346 input_report_abs(input, ABS_MT_WIDTH_MINOR,
347 raw2int(f->size_minor) << 1);
344 input_report_abs(input, ABS_MT_ORIENTATION, 348 input_report_abs(input, ABS_MT_ORIENTATION,
345 MAX_FINGER_ORIENTATION - raw2int(f->orientation)); 349 MAX_FINGER_ORIENTATION - raw2int(f->orientation));
346 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); 350 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0b9246..f58513160480 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
1485 1485
1486static void __exit i8042_exit(void) 1486static void __exit i8042_exit(void)
1487{ 1487{
1488 platform_driver_unregister(&i8042_driver);
1489 platform_device_unregister(i8042_platform_device); 1488 platform_device_unregister(i8042_platform_device);
1489 platform_driver_unregister(&i8042_driver);
1490 i8042_platform_exit(); 1490 i8042_platform_exit();
1491 1491
1492 panic_blink = NULL; 1492 panic_blink = NULL;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba8fdc1..6e29badb969e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
243 if (features->type == WACOM_G4 || 243 if (features->type == WACOM_G4 ||
244 features->type == WACOM_MO) { 244 features->type == WACOM_MO) {
245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); 245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
246 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); 246 rw = (data[7] & 0x04) - (data[7] & 0x03);
247 } else { 247 } else {
248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); 248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
249 rw = -(signed)data[6]; 249 rw = -(signed char)data[6];
250 } 250 }
251 input_report_rel(input, REL_WHEEL, rw); 251 input_report_rel(input, REL_WHEEL, rw);
252 } 252 }
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba0262..350eb34f049c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
81 int cmd_level; 81 int cmd_level;
82 int slow_level; 82 int slow_level;
83 83
84 read_lock(&led_dat->rw_lock); 84 read_lock_irq(&led_dat->rw_lock);
85 85
86 cmd_level = gpio_get_value(led_dat->cmd); 86 cmd_level = gpio_get_value(led_dat->cmd);
87 slow_level = gpio_get_value(led_dat->slow); 87 slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
95 } 95 }
96 } 96 }
97 97
98 read_unlock(&led_dat->rw_lock); 98 read_unlock_irq(&led_dat->rw_lock);
99 99
100 return ret; 100 return ret;
101} 101}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
104 enum ns2_led_modes mode) 104 enum ns2_led_modes mode)
105{ 105{
106 int i; 106 int i;
107 unsigned long flags;
107 108
108 write_lock(&led_dat->rw_lock); 109 write_lock_irqsave(&led_dat->rw_lock, flags);
109 110
110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
111 if (mode == ns2_led_modval[i].mode) { 112 if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
116 } 117 }
117 } 118 }
118 119
119 write_unlock(&led_dat->rw_lock); 120 write_unlock_irqrestore(&led_dat->rw_lock, flags);
120} 121}
121 122
122static void ns2_led_set(struct led_classdev *led_cdev, 123static void ns2_led_set(struct led_classdev *led_cdev,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 43cf9cc9c1df..f20d13e717d5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1644 if (rdev->sb_size & bmask) 1644 if (rdev->sb_size & bmask)
1645 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1645 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1646 } 1646 } else
1647 max_dev = le32_to_cpu(sb->max_dev);
1648
1647 for (i=0; i<max_dev;i++) 1649 for (i=0; i<max_dev;i++)
1648 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1650 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1649 1651
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
7069 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7071 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7070 return; 7072 return;
7071 if ( ! ( 7073 if ( ! (
7072 (mddev->flags && !mddev->external) || 7074 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
7073 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7075 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7074 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7076 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7075 (mddev->external == 0 && mddev->safemode == 1) || 7077 (mddev->external == 0 && mddev->safemode == 1) ||
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 04028a9ee082..428377a5a6f5 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
429 irq_tsc = cache_tsc; 429 irq_tsc = cache_tsc;
430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
431 irq_data = &max8925_irqs[i]; 431 irq_data = &max8925_irqs[i];
432 /* 1 -- disable, 0 -- enable */
432 switch (irq_data->mask_reg) { 433 switch (irq_data->mask_reg) {
433 case MAX8925_CHG_IRQ1_MASK: 434 case MAX8925_CHG_IRQ1_MASK:
434 irq_chg[0] &= irq_data->enable; 435 irq_chg[0] &= ~irq_data->enable;
435 break; 436 break;
436 case MAX8925_CHG_IRQ2_MASK: 437 case MAX8925_CHG_IRQ2_MASK:
437 irq_chg[1] &= irq_data->enable; 438 irq_chg[1] &= ~irq_data->enable;
438 break; 439 break;
439 case MAX8925_ON_OFF_IRQ1_MASK: 440 case MAX8925_ON_OFF_IRQ1_MASK:
440 irq_on[0] &= irq_data->enable; 441 irq_on[0] &= ~irq_data->enable;
441 break; 442 break;
442 case MAX8925_ON_OFF_IRQ2_MASK: 443 case MAX8925_ON_OFF_IRQ2_MASK:
443 irq_on[1] &= irq_data->enable; 444 irq_on[1] &= ~irq_data->enable;
444 break; 445 break;
445 case MAX8925_RTC_IRQ_MASK: 446 case MAX8925_RTC_IRQ_MASK:
446 irq_rtc &= irq_data->enable; 447 irq_rtc &= ~irq_data->enable;
447 break; 448 break;
448 case MAX8925_TSC_IRQ_MASK: 449 case MAX8925_TSC_IRQ_MASK:
449 irq_tsc &= irq_data->enable; 450 irq_tsc &= ~irq_data->enable;
450 break; 451 break;
451 default: 452 default:
452 dev_err(chip->dev, "wrong IRQ\n"); 453 dev_err(chip->dev, "wrong IRQ\n");
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 7dabe4dbd373..294183b6260b 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
394 394
395 irq = irq - wm831x->irq_base; 395 irq = irq - wm831x->irq_base;
396 396
397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) 397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
398 return -EINVAL; 398 /* Ignore internal-only IRQs */
399 if (irq >= 0 && irq < WM831X_NUM_IRQS)
400 return 0;
401 else
402 return -EINVAL;
403 }
399 404
400 switch (type) { 405 switch (type) {
401 case IRQ_TYPE_EDGE_BOTH: 406 case IRQ_TYPE_EDGE_BOTH:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b658243..b74331260744 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
368 If unsure, say N. 368 If unsure, say N.
369 369
370 To compile this driver as a module, choose M here: the 370 To compile this driver as a module, choose M here: the
371 module will be called vmware_balloon. 371 module will be called vmw_balloon.
372 372
373config ARM_CHARLCD 373config ARM_CHARLCD
374 bool "ARM Ltd. Character LCD Driver" 374 bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80dc9d73..42eab95cde2a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
33obj-$(CONFIG_HMC6352) += hmc6352.o 33obj-$(CONFIG_HMC6352) += hmc6352.o
34obj-y += eeprom/ 34obj-y += eeprom/
35obj-y += cb710/ 35obj-y += cb710/
36obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o 36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71aa..2a1e804a71aa 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e8d9a3..f332c52968b7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
362 goto err; 362 goto err;
363 } 363 }
364 364
365 err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); 365 if (ocr & R4_MEMORY_PRESENT
366 366 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
367 if (!err) {
368 card->type = MMC_TYPE_SD_COMBO; 367 card->type = MMC_TYPE_SD_COMBO;
369 368
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 369 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599ead07..87226cd202a5 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
66#include <linux/clk.h> 66#include <linux/clk.h>
67#include <linux/atmel_pdc.h> 67#include <linux/atmel_pdc.h>
68#include <linux/gfp.h> 68#include <linux/gfp.h>
69#include <linux/highmem.h>
69 70
70#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
71 72
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4353a2..5a950b16d9e6 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
148 148
149 while (delay--) { 149 while (delay--) {
150 reg = readw(host->base + MMC_REG_STATUS); 150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN) 151 if (reg & STATUS_CARD_BUS_CLK_RUN) {
152 /* Check twice before cut */ 152 /* Check twice before cut */
153 reg = readw(host->base + MMC_REG_STATUS); 153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN) 154 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 return 0; 155 return 0;
156 }
156 157
157 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 158 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
158 return 0; 159 return 0;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f8afdd..4526d2791f29 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
2305 int ret = 0; 2305 int ret = 0;
2306 struct platform_device *pdev = to_platform_device(dev); 2306 struct platform_device *pdev = to_platform_device(dev);
2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2308 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2309 2308
2310 if (host && host->suspended) 2309 if (host && host->suspended)
2311 return 0; 2310 return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
2324 } 2323 }
2325 } 2324 }
2326 cancel_work_sync(&host->mmc_carddetect_work); 2325 cancel_work_sync(&host->mmc_carddetect_work);
2327 mmc_host_enable(host->mmc);
2328 ret = mmc_suspend_host(host->mmc); 2326 ret = mmc_suspend_host(host->mmc);
2327 mmc_host_enable(host->mmc);
2329 if (ret == 0) { 2328 if (ret == 0) {
2330 omap_hsmmc_disable_irq(host); 2329 omap_hsmmc_disable_irq(host);
2331 OMAP_HSMMC_WRITE(host->base, HCTL, 2330 OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a90a5e..976330de379e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1600 host->pio_active = XFER_NONE; 1600 host->pio_active = XFER_NONE;
1601 1601
1602#ifdef CONFIG_MMC_S3C_PIODMA 1602#ifdef CONFIG_MMC_S3C_PIODMA
1603 host->dodma = host->pdata->dma; 1603 host->dodma = host->pdata->use_dma;
1604#endif 1604#endif
1605 1605
1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..aacb862ecc8a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 241static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 unsigned long flags;
245
244 if (host) { 246 if (host) {
245 spin_lock(&host->lock); 247 spin_lock_irqsave(&host->lock, flags);
246 if (state) { 248 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 249 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 250 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 256 }
255 tasklet_schedule(&host->card_tasklet); 257 tasklet_schedule(&host->card_tasklet);
256 spin_unlock(&host->lock); 258 spin_unlock_irqrestore(&host->lock, flags);
257 } 259 }
258} 260}
259 261
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
481 sdhci_remove_host(host, 1); 483 sdhci_remove_host(host, 1);
482 484
483 for (ptr = 0; ptr < 3; ptr++) { 485 for (ptr = 0; ptr < 3; ptr++) {
484 clk_disable(sc->clk_bus[ptr]); 486 if (sc->clk_bus[ptr]) {
485 clk_put(sc->clk_bus[ptr]); 487 clk_disable(sc->clk_bus[ptr]);
488 clk_put(sc->clk_bus[ptr]);
489 }
486 } 490 }
487 clk_disable(sc->clk_io); 491 clk_disable(sc->clk_io);
488 clk_put(sc->clk_io); 492 clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5a51c4..69d98e3bf6ab 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{ 165{
166 struct mmc_data *data = host->data; 166 struct mmc_data *data = host->data;
167 void *sg_virt;
167 unsigned short *buf; 168 unsigned short *buf;
168 unsigned int count; 169 unsigned int count;
169 unsigned long flags; 170 unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
173 return; 174 return;
174 } 175 }
175 176
176 buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + 177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
177 host->sg_off); 178 buf = (unsigned short *)(sg_virt + host->sg_off);
178 179
179 count = host->sg_ptr->length - host->sg_off; 180 count = host->sg_ptr->length - host->sg_off;
180 if (count > data->blksz) 181 if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
191 192
192 host->sg_off += count; 193 host->sg_off += count;
193 194
194 tmio_mmc_kunmap_atomic(host, &flags); 195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
195 196
196 if (host->sg_off == host->sg_ptr->length) 197 if (host->sg_off == host->sg_ptr->length)
197 tmio_mmc_next_sg(host); 198 tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5dfc106..0fedc78e3ea5 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
82 82
83#define ack_mmc_irqs(host, i) \ 83#define ack_mmc_irqs(host, i) \
84 do { \ 84 do { \
85 u32 mask;\ 85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 mask = sd_ctrl_read32((host), CTL_STATUS); \
87 mask &= ~((i) & TMIO_MASK_IRQ); \
88 sd_ctrl_write32((host), CTL_STATUS, mask); \
89 } while (0) 86 } while (0)
90 87
91 88
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
177 return --host->sg_len; 174 return --host->sg_len;
178} 175}
179 176
180static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, 177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
181 unsigned long *flags) 178 unsigned long *flags)
182{ 179{
183 struct scatterlist *sg = host->sg_ptr;
184
185 local_irq_save(*flags); 180 local_irq_save(*flags);
186 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
187} 182}
188 183
189static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, 184static inline void tmio_mmc_kunmap_atomic(void *virt,
190 unsigned long *flags) 185 unsigned long *flags)
191{ 186{
192 kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); 187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
193 local_irq_restore(*flags); 188 local_irq_restore(*flags);
194} 189}
195 190
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3dd0a5d..6fbeefa3a766 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
682static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 682static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
683{ 683{
684 struct bf5xx_nand_info *info = to_nand_info(pdev); 684 struct bf5xx_nand_info *info = to_nand_info(pdev);
685 struct mtd_info *mtd = NULL;
686 685
687 platform_set_drvdata(pdev, NULL); 686 platform_set_drvdata(pdev, NULL);
688 687
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
690 * and their partitions, then go through freeing the 689 * and their partitions, then go through freeing the
691 * resources used 690 * resources used
692 */ 691 */
693 mtd = &info->mtd; 692 nand_release(&info->mtd);
694 if (mtd) {
695 nand_release(mtd);
696 kfree(mtd);
697 }
698 693
699 peripheral_free_list(bfin_nfc_pin_req); 694 peripheral_free_list(bfin_nfc_pin_req);
700 bf5xx_nand_dma_remove(info); 695 bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
710 struct nand_chip *chip = mtd->priv; 705 struct nand_chip *chip = mtd->priv;
711 int ret; 706 int ret;
712 707
713 ret = nand_scan_ident(mtd, 1); 708 ret = nand_scan_ident(mtd, 1, NULL);
714 if (ret) 709 if (ret)
715 return ret; 710 return ret;
716 711
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb277d4..b2828e84d243 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -67,7 +67,9 @@
67#define NFC_V1_V2_CONFIG1_BIG (1 << 5) 67#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
68#define NFC_V1_V2_CONFIG1_RST (1 << 6) 68#define NFC_V1_V2_CONFIG1_RST (1 << 6)
69#define NFC_V1_V2_CONFIG1_CE (1 << 7) 69#define NFC_V1_V2_CONFIG1_CE (1 << 7)
70#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) 70#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
71#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
72#define NFC_V2_CONFIG1_FP_INT (1 << 11)
71 73
72#define NFC_V1_V2_CONFIG2_INT (1 << 15) 74#define NFC_V1_V2_CONFIG2_INT (1 << 15)
73 75
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
402 /* Wait for operation to complete */ 404 /* Wait for operation to complete */
403 wait_op_done(host, true); 405 wait_op_done(host, true);
404 406
407 memcpy(host->data_buf, host->main_area0, 16);
408
405 if (this->options & NAND_BUSWIDTH_16) { 409 if (this->options & NAND_BUSWIDTH_16) {
406 void __iomem *main_buf = host->main_area0;
407 /* compress the ID info */ 410 /* compress the ID info */
408 writeb(readb(main_buf + 2), main_buf + 1); 411 host->data_buf[1] = host->data_buf[2];
409 writeb(readb(main_buf + 4), main_buf + 2); 412 host->data_buf[2] = host->data_buf[4];
410 writeb(readb(main_buf + 6), main_buf + 3); 413 host->data_buf[3] = host->data_buf[6];
411 writeb(readb(main_buf + 8), main_buf + 4); 414 host->data_buf[4] = host->data_buf[8];
412 writeb(readb(main_buf + 10), main_buf + 5); 415 host->data_buf[5] = host->data_buf[10];
413 } 416 }
414 memcpy(host->data_buf, host->main_area0, 16);
415} 417}
416 418
417static uint16_t get_dev_status_v3(struct mxc_nand_host *host) 419static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
729{ 731{
730 struct nand_chip *nand_chip = mtd->priv; 732 struct nand_chip *nand_chip = mtd->priv;
731 struct mxc_nand_host *host = nand_chip->priv; 733 struct mxc_nand_host *host = nand_chip->priv;
732 uint16_t tmp; 734 uint16_t config1 = 0;
733 735
734 /* enable interrupt, disable spare enable */ 736 if (nand_chip->ecc.mode == NAND_ECC_HW)
735 tmp = readw(NFC_V1_V2_CONFIG1); 737 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
736 tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; 738
737 tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; 739 if (nfc_is_v21())
738 if (nand_chip->ecc.mode == NAND_ECC_HW) { 740 config1 |= NFC_V2_CONFIG1_FP_INT;
739 tmp |= NFC_V1_V2_CONFIG1_ECC_EN; 741
740 } else { 742 if (!cpu_is_mx21())
741 tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; 743 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
742 }
743 744
744 if (nfc_is_v21() && mtd->writesize) { 745 if (nfc_is_v21() && mtd->writesize) {
746 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
747
745 host->eccsize = get_eccsize(mtd); 748 host->eccsize = get_eccsize(mtd);
746 if (host->eccsize == 4) 749 if (host->eccsize == 4)
747 tmp |= NFC_V2_CONFIG1_ECC_MODE_4; 750 config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
751
752 config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
748 } else { 753 } else {
749 host->eccsize = 1; 754 host->eccsize = 1;
750 } 755 }
751 756
752 writew(tmp, NFC_V1_V2_CONFIG1); 757 writew(config1, NFC_V1_V2_CONFIG1);
753 /* preset operation */ 758 /* preset operation */
754 759
755 /* Unlock the internal RAM Buffer */ 760 /* Unlock the internal RAM Buffer */
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 133d51528f8d..513e0a76a4a7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
414 } while (prefetch_status); 414 } while (prefetch_status);
415 /* disable and stop the PFPW engine */ 415 /* disable and stop the PFPW engine */
416 gpmc_prefetch_reset(); 416 gpmc_prefetch_reset(info->gpmc_cs);
417 417
418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
419 return 0; 419 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f3780207..4d01cda68844 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323#ifdef CONFIG_MTD_PARTITIONS
1323 if (mtd_has_cmdlinepart()) { 1324 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL }; 1325 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts; 1326 struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1332 } 1333 }
1333 1334
1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1335 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1336#else
1337 return 0;
1338#endif
1335 1339
1336fail_free_irq: 1340fail_free_irq:
1337 free_irq(irq, info); 1341 free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1364 platform_set_drvdata(pdev, NULL); 1368 platform_set_drvdata(pdev, NULL);
1365 1369
1366 del_mtd_device(mtd); 1370 del_mtd_device(mtd);
1371#ifdef CONFIG_MTD_PARTITIONS
1367 del_mtd_partitions(mtd); 1372 del_mtd_partitions(mtd);
1373#endif
1368 irq = platform_get_irq(pdev, 0); 1374 irq = platform_get_irq(pdev, 0);
1369 if (irq >= 0) 1375 if (irq >= 0)
1370 free_irq(irq, info); 1376 free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af3d45f..a460f1b748c2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
554 554
555 do { 555 do {
556 status = readl(base + S5PC110_DMA_TRANS_STATUS); 556 status = readl(base + S5PC110_DMA_TRANS_STATUS);
557 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
558 writel(S5PC110_DMA_TRANS_CMD_TEC,
559 base + S5PC110_DMA_TRANS_CMD);
560 return -EIO;
561 }
557 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 562 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
558 563
559 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
560 writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
561 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
562 return -EIO;
563 }
564
565 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 564 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
566 565
567 return 0; 566 return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
571 unsigned char *buffer, int offset, size_t count) 570 unsigned char *buffer, int offset, size_t count)
572{ 571{
573 struct onenand_chip *this = mtd->priv; 572 struct onenand_chip *this = mtd->priv;
574 void __iomem *bufferram;
575 void __iomem *p; 573 void __iomem *p;
576 void *buf = (void *) buffer; 574 void *buf = (void *) buffer;
577 dma_addr_t dma_src, dma_dst; 575 dma_addr_t dma_src, dma_dst;
578 int err; 576 int err;
579 577
580 p = bufferram = this->base + area; 578 p = this->base + area;
581 if (ONENAND_CURRENT_BUFFERRAM(this)) { 579 if (ONENAND_CURRENT_BUFFERRAM(this)) {
582 if (area == ONENAND_DATARAM) 580 if (area == ONENAND_DATARAM)
583 p += this->writesize; 581 p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
621normal: 619normal:
622 if (count != mtd->writesize) { 620 if (count != mtd->writesize) {
623 /* Copy the bufferram to memory to prevent unaligned access */ 621 /* Copy the bufferram to memory to prevent unaligned access */
624 memcpy(this->page_buf, bufferram, mtd->writesize); 622 memcpy(this->page_buf, p, mtd->writesize);
625 p = this->page_buf + offset; 623 p = this->page_buf + offset;
626 } 624 }
627 625
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index bdf2149e5296..87f0a93b165c 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
38#include <linux/of_device.h> 38#include <linux/of_device.h>
39#include <linux/of_mdio.h> 39#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 40#include <linux/of_platform.h>
41#include <linux/of_address.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/tcp.h> /* needed for sizeof(tcphdr) */ 44#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c975b38..8cf9d4f56bb2 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
10#include <linux/phy.h> 10#include <linux/phy.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_device.h> 12#include <linux/of_device.h>
13#include <linux/of_address.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/of_mdio.h> 15#include <linux/of_mdio.h>
15 16
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 49279b0ee526..f9b509a6b09a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
508 unsigned int vcc, 508 unsigned int vcc,
509 void *priv_data) 509 void *priv_data)
510{ 510{
511 int *has_shmem = priv_data; 511 int *priv = priv_data;
512 int try = (*priv & 0x1);
512 int i; 513 int i;
513 cistpl_io_t *io = &cfg->io; 514 cistpl_io_t *io = &cfg->io;
514 515
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
525 i = p_dev->resource[1]->end = 0; 526 i = p_dev->resource[1]->end = 0;
526 } 527 }
527 528
528 *has_shmem = ((cfg->mem.nwin == 1) && 529 *priv &= ((cfg->mem.nwin == 1) &&
529 (cfg->mem.win[0].len >= 0x4000)); 530 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
531
530 p_dev->resource[0]->start = io->win[i].base; 532 p_dev->resource[0]->start = io->win[i].base;
531 p_dev->resource[0]->end = io->win[i].len; 533 p_dev->resource[0]->end = io->win[i].len;
532 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; 534 if (!try)
535 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
536 else
537 p_dev->io_lines = 16;
533 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) 538 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
534 return try_io_port(p_dev); 539 return try_io_port(p_dev);
535 540
536 return 0; 541 return -EINVAL;
542}
543
544static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
545 int *has_shmem, int try)
546{
547 struct net_device *dev = link->priv;
548 hw_info_t *local_hw_info;
549 pcnet_dev_t *info = PRIV(dev);
550 int priv = try;
551 int ret;
552
553 ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
554 if (ret) {
555 dev_warn(&link->dev, "no useable port range found\n");
556 return NULL;
557 }
558 *has_shmem = (priv & 0x10);
559
560 if (!link->irq)
561 return NULL;
562
563 if (resource_size(link->resource[1]) == 8) {
564 link->conf.Attributes |= CONF_ENABLE_SPKR;
565 link->conf.Status = CCSR_AUDIO_ENA;
566 }
567 if ((link->manf_id == MANFID_IBM) &&
568 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
569 link->conf.ConfigIndex |= 0x10;
570
571 ret = pcmcia_request_configuration(link, &link->conf);
572 if (ret)
573 return NULL;
574
575 dev->irq = link->irq;
576 dev->base_addr = link->resource[0]->start;
577
578 if (info->flags & HAS_MISC_REG) {
579 if ((if_port == 1) || (if_port == 2))
580 dev->if_port = if_port;
581 else
582 dev_notice(&link->dev, "invalid if_port requested\n");
583 } else
584 dev->if_port = 0;
585
586 if ((link->conf.ConfigBase == 0x03c0) &&
587 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
588 dev_info(&link->dev,
589 "this is an AX88190 card - use axnet_cs instead.\n");
590 return NULL;
591 }
592
593 local_hw_info = get_hwinfo(link);
594 if (!local_hw_info)
595 local_hw_info = get_prom(link);
596 if (!local_hw_info)
597 local_hw_info = get_dl10019(link);
598 if (!local_hw_info)
599 local_hw_info = get_ax88190(link);
600 if (!local_hw_info)
601 local_hw_info = get_hwired(link);
602
603 return local_hw_info;
537} 604}
538 605
539static int pcnet_config(struct pcmcia_device *link) 606static int pcnet_config(struct pcmcia_device *link)
540{ 607{
541 struct net_device *dev = link->priv; 608 struct net_device *dev = link->priv;
542 pcnet_dev_t *info = PRIV(dev); 609 pcnet_dev_t *info = PRIV(dev);
543 int ret, start_pg, stop_pg, cm_offset; 610 int start_pg, stop_pg, cm_offset;
544 int has_shmem = 0; 611 int has_shmem = 0;
545 hw_info_t *local_hw_info; 612 hw_info_t *local_hw_info;
546 613
547 dev_dbg(&link->dev, "pcnet_config\n"); 614 dev_dbg(&link->dev, "pcnet_config\n");
548 615
549 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 616 local_hw_info = pcnet_try_config(link, &has_shmem, 0);
550 if (ret) 617 if (!local_hw_info) {
551 goto failed; 618 /* check whether forcing io_lines to 16 helps... */
552 619 pcmcia_disable_device(link);
553 if (!link->irq) 620 local_hw_info = pcnet_try_config(link, &has_shmem, 1);
554 goto failed; 621 if (local_hw_info == NULL) {
555 622 dev_notice(&link->dev, "unable to read hardware net"
556 if (resource_size(link->resource[1]) == 8) { 623 " address for io base %#3lx\n", dev->base_addr);
557 link->conf.Attributes |= CONF_ENABLE_SPKR; 624 goto failed;
558 link->conf.Status = CCSR_AUDIO_ENA; 625 }
559 }
560 if ((link->manf_id == MANFID_IBM) &&
561 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
562 link->conf.ConfigIndex |= 0x10;
563
564 ret = pcmcia_request_configuration(link, &link->conf);
565 if (ret)
566 goto failed;
567 dev->irq = link->irq;
568 dev->base_addr = link->resource[0]->start;
569 if (info->flags & HAS_MISC_REG) {
570 if ((if_port == 1) || (if_port == 2))
571 dev->if_port = if_port;
572 else
573 printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
574 } else {
575 dev->if_port = 0;
576 }
577
578 if ((link->conf.ConfigBase == 0x03c0) &&
579 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
580 printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
581 printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
582 goto failed;
583 }
584
585 local_hw_info = get_hwinfo(link);
586 if (local_hw_info == NULL)
587 local_hw_info = get_prom(link);
588 if (local_hw_info == NULL)
589 local_hw_info = get_dl10019(link);
590 if (local_hw_info == NULL)
591 local_hw_info = get_ax88190(link);
592 if (local_hw_info == NULL)
593 local_hw_info = get_hwired(link);
594
595 if (local_hw_info == NULL) {
596 printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
597 " address for io base %#3lx\n", dev->base_addr);
598 goto failed;
599 } 626 }
600 627
601 info->flags = local_hw_info->flags; 628 info->flags = local_hw_info->flags;
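
Note on the pcnet_cs rework above: two pieces of state travel through one int. Bit 0 carries the caller's "second try, force 16 I/O lines" request into pcnet_confcheck(), and bit 4 carries the "card has shared memory" answer back out; pcnet_try_config() wraps the whole configure sequence so pcnet_config() can disable the device and retry once with io_lines forced to 16 before giving up. A generic sketch of the bit-packed in/out parameter pattern (names and bit meanings are illustrative, not the pcmcia API):

    #include <stdio.h>

    static int probe_one(int *priv, int looks_like_shmem)
    {
        int second_try = (*priv & 0x1);          /* input: retry requested? */

        /* record the probe result in bit 4, leaving bit 0 untouched */
        if (looks_like_shmem)
            *priv |= 0x10;
        else
            *priv &= ~0x10;

        return second_try ? 0 : -1;              /* pretend only the retry works */
    }

    int main(void)
    {
        int priv = 1;                            /* ask for the "try harder" path */
        int ret = probe_one(&priv, 1);
        int has_shmem = (priv & 0x10) != 0;

        printf("ret=%d has_shmem=%d\n", ret, has_shmem);
        return 0;
    }
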
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac4..b7e755f4178a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144
145static void end_sync(void)
146{
147 end_cpu_work();
148 /* make sure we don't leak task structs */
149 process_task_mortuary();
150 process_task_mortuary();
151}
152
153
154int sync_start(void) 144int sync_start(void)
155{ 145{
156 int err; 146 int err;
@@ -158,7 +148,7 @@ int sync_start(void)
158 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
159 return -ENOMEM; 149 return -ENOMEM;
160 150
161 start_cpu_work(); 151 mutex_lock(&buffer_mutex);
162 152
163 err = task_handoff_register(&task_free_nb); 153 err = task_handoff_register(&task_free_nb);
164 if (err) 154 if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
173 if (err) 163 if (err)
174 goto out4; 164 goto out4;
175 165
166 start_cpu_work();
167
176out: 168out:
169 mutex_unlock(&buffer_mutex);
177 return err; 170 return err;
178out4: 171out4:
179 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
182out2: 175out2:
183 task_handoff_unregister(&task_free_nb); 176 task_handoff_unregister(&task_free_nb);
184out1: 177out1:
185 end_sync();
186 free_cpumask_var(marked_cpus); 178 free_cpumask_var(marked_cpus);
187 goto out; 179 goto out;
188} 180}
@@ -190,11 +182,20 @@ out1:
190 182
191void sync_stop(void) 183void sync_stop(void)
192{ 184{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work();
193 unregister_module_notifier(&module_load_nb); 188 unregister_module_notifier(&module_load_nb);
194 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
195 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
196 task_handoff_unregister(&task_free_nb); 191 task_handoff_unregister(&task_free_nb);
197 end_sync(); 192 mutex_unlock(&buffer_mutex);
193 flush_scheduled_work();
194
195 /* make sure we don't leak task structs */
196 process_task_mortuary();
197 process_task_mortuary();
198
198 free_cpumask_var(marked_cpus); 199 free_cpumask_var(marked_cpus);
199} 200}
200 201
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210a..f179ac2ea801 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
120 120
121 cancel_delayed_work(&b->work); 121 cancel_delayed_work(&b->work);
122 } 122 }
123
124 flush_scheduled_work();
125} 123}
126 124
127/* 125/*
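
Note on the oprofile hunks above: the likely motivation is that the queued per-CPU sync work itself needs buffer_mutex, so flushing it while holding that lock risks deadlock. The rework therefore cancels the work and changes state under buffer_mutex, but calls flush_scheduled_work() only after dropping the lock, and drains the task mortuary afterwards; start_cpu_work() likewise moves to after the notifier registrations succeed. A pthread sketch of that "request stop under the lock, wait for the worker after dropping it" shape (illustrative only, not the oprofile code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool stop_requested;

    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) {
            pthread_mutex_lock(&buffer_mutex);   /* worker needs the same lock */
            bool stop = stop_requested;
            pthread_mutex_unlock(&buffer_mutex);
            if (stop)
                break;
            usleep(1000);                        /* pretend to sync buffers */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&buffer_mutex);
        stop_requested = true;                   /* state change under the lock */
        pthread_mutex_unlock(&buffer_mutex);

        /* Waiting for the worker while still holding buffer_mutex would
         * deadlock, since the worker blocks on that lock. Wait after unlock. */
        pthread_join(t, NULL);
        puts("worker drained");
        return 0;
    }
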
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb5be84..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
73 73
74/* page table handling */
75#define LEVEL_STRIDE (9)
76#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
77
78static inline int agaw_to_level(int agaw)
79{
80 return agaw + 2;
81}
82
83static inline int agaw_to_width(int agaw)
84{
85 return 30 + agaw * LEVEL_STRIDE;
86}
87
88static inline int width_to_agaw(int width)
89{
90 return (width - 30) / LEVEL_STRIDE;
91}
92
93static inline unsigned int level_to_offset_bits(int level)
94{
95 return (level - 1) * LEVEL_STRIDE;
96}
97
98static inline int pfn_level_offset(unsigned long pfn, int level)
99{
100 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
101}
102
103static inline unsigned long level_mask(int level)
104{
105 return -1UL << level_to_offset_bits(level);
106}
107
108static inline unsigned long level_size(int level)
109{
110 return 1UL << level_to_offset_bits(level);
111}
112
113static inline unsigned long align_to_level(unsigned long pfn, int level)
114{
115 return (pfn + level_size(level) - 1) & level_mask(level);
116}
74 117
75/* VT-d pages must always be _smaller_ than MM pages. Otherwise things 118/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
76 are never going to work. */ 119 are never going to work. */
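
Note on the hunk above: the page-table helpers move ahead of their first user (the later hunks delete the forward declaration and the old copy). The arithmetic is a fixed 9-bit stride per level, 512 entries each: level 1 indexes on pfn bits 0..8, level 2 on bits 9..17, and so on, and an AGAW value maps to an address width of 30 + 9*agaw bits. The same formulas recomputed standalone for a sample PFN (userspace types, same math):

    #include <stdio.h>

    #define LEVEL_STRIDE 9
    #define LEVEL_MASK   (((unsigned long long)1 << LEVEL_STRIDE) - 1)

    static unsigned int level_to_offset_bits(int level)
    {
        return (level - 1) * LEVEL_STRIDE;
    }

    static unsigned long long pfn_level_offset(unsigned long long pfn, int level)
    {
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
    }

    static unsigned long long level_size(int level)
    {
        return 1ULL << level_to_offset_bits(level);
    }

    static unsigned long long align_to_level(unsigned long long pfn, int level)
    {
        /* same as masking with level_mask(): round pfn up to a level boundary */
        return (pfn + level_size(level) - 1) & ~(level_size(level) - 1);
    }

    int main(void)
    {
        unsigned long long pfn = 0x12345;        /* arbitrary sample PFN */

        for (int level = 1; level <= 3; level++)
            printf("level %d: index %llu, level_size %llu, aligned 0x%llx\n",
                   level, pfn_level_offset(pfn, level),
                   level_size(level), align_to_level(pfn, level));
        return 0;
    }
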
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
434} 477}
435 478
436 479
437static inline int width_to_agaw(int width);
438
439static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) 480static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
440{ 481{
441 unsigned long sagaw; 482 unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
646 spin_unlock_irqrestore(&iommu->lock, flags); 687 spin_unlock_irqrestore(&iommu->lock, flags);
647} 688}
648 689
649/* page table handling */
650#define LEVEL_STRIDE (9)
651#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
652
653static inline int agaw_to_level(int agaw)
654{
655 return agaw + 2;
656}
657
658static inline int agaw_to_width(int agaw)
659{
660 return 30 + agaw * LEVEL_STRIDE;
661
662}
663
664static inline int width_to_agaw(int width)
665{
666 return (width - 30) / LEVEL_STRIDE;
667}
668
669static inline unsigned int level_to_offset_bits(int level)
670{
671 return (level - 1) * LEVEL_STRIDE;
672}
673
674static inline int pfn_level_offset(unsigned long pfn, int level)
675{
676 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
677}
678
679static inline unsigned long level_mask(int level)
680{
681 return -1UL << level_to_offset_bits(level);
682}
683
684static inline unsigned long level_size(int level)
685{
686 return 1UL << level_to_offset_bits(level);
687}
688
689static inline unsigned long align_to_level(unsigned long pfn, int level)
690{
691 return (pfn + level_size(level) - 1) & level_mask(level);
692}
693
694static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 690static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
695 unsigned long pfn) 691 unsigned long pfn)
696{ 692{
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3761 3757
3762DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3763 3759
3760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
3770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
3774 if (pci_read_config_word(dev, GGC, &ggc))
3775 return;
3776
3777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
3764/* On Tylersburg chipsets, some BIOSes have been known to enable the 3787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3765 ISOCH DMAR unit for the Azalia sound device, but not give it any 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3766 TLB entries, which causes it to deadlock. Check for that. We do 3789 TLB entries, which causes it to deadlock. Check for that. We do
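
Note on the Calpella quirk above: it reads the 16-bit GGC word at config offset 0x52 and keys off GGC_MEMORY_VT_ENABLED (0x8 << 8, i.e. bit 11); when the BIOS has not enabled a VT-aware stolen-memory mode there is no shadow GTT, so dmar_map_gfx is cleared and the IOMMU is left off for graphics, as the printk states. Decoding a field that lives in bits 8..11 of a config word, using the same mask values (generic sketch):

    #include <stdio.h>

    #define GGC_MEMORY_SIZE_MASK   (0xf << 8)
    #define GGC_MEMORY_VT_ENABLED  (0x8 << 8)

    static void decode_ggc(unsigned short ggc)
    {
        unsigned int mode = (ggc & GGC_MEMORY_SIZE_MASK) >> 8;

        printf("GGC=0x%04x mode=0x%x vt_enabled=%s\n",
               ggc, mode,
               (ggc & GGC_MEMORY_VT_ENABLED) ? "yes"
                                             : "no (quirk would disable gfx IOMMU)");
    }

    int main(void)
    {
        decode_ggc(0x0130);   /* sample word: 1M mode, VT bit clear -> quirk fires */
        decode_ggc(0x0930);   /* sample word: 2M+VT mode, VT bit set -> no-op      */
        return 0;
    }
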
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
608 * the VF BAR size multiplied by the number of VFs. The alignment 608 * the VF BAR size multiplied by the number of VFs. The alignment
609 * is just the VF BAR size. 609 * is just the VF BAR size.
610 */ 610 */
611int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{ 612{
613 struct resource tmp; 613 struct resource tmp;
614 enum pci_bar_type type; 614 enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a678ab15..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
264extern void pci_iov_release(struct pci_dev *dev); 264extern void pci_iov_release(struct pci_dev *dev);
265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
266 enum pci_bar_type *type); 266 enum pci_bar_type *type);
267extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
268 int resno);
268extern void pci_restore_iov_state(struct pci_dev *dev); 269extern void pci_restore_iov_state(struct pci_dev *dev);
269extern int pci_iov_bus_range(struct pci_bus *bus); 270extern int pci_iov_bus_range(struct pci_bus *bus);
270 271
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
320} 321}
321#endif /* CONFIG_PCI_IOV */ 322#endif /* CONFIG_PCI_IOV */
322 323
323static inline int pci_resource_alignment(struct pci_dev *dev, 324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
324 struct resource *res) 325 struct resource *res)
325{ 326{
326#ifdef CONFIG_PCI_IOV 327#ifdef CONFIG_PCI_IOV
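
Note on the pci/iov.c and pci.h hunks above: the comment in the hunk says the SR-IOV alignment "is just the VF BAR size", and a 64-bit VF BAR size can exceed what a 32-bit signed int represents, so returning it as int can truncate or go negative; resource_size_t is sized like phys_addr_t and avoids that. A two-line demonstration of the truncation (plain C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t alignment = 4ULL * 1024 * 1024 * 1024;  /* e.g. a 4 GiB BAR */

        int as_int = (int)alignment;                     /* old return type */
        uint64_t as_u64 = alignment;                     /* resource_size_t-like */

        /* the int result is truncated (typically 0); the 64-bit one is intact */
        printf("as int: %d, as 64-bit: %llu\n",
               as_int, (unsigned long long)as_u64);
        return 0;
    }
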
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 89ed181cd90c..857ae01734a6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
164 164
165/* 165/*
166 * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
167 * for some HT machines to use C4 w/o hanging.
168 */
169static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
170{
171 u32 pmbase;
172 u16 pm1a;
173
174 pci_read_config_dword(dev, 0x40, &pmbase);
175 pmbase = pmbase & 0xff80;
176 pm1a = inw(pmbase);
177
178 if (pm1a & 0x10) {
179 dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
180 outw(0x10, pmbase);
181 }
182}
183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
184
185/*
166 * Chipsets where PCI->PCI transfers vanish or hang 186 * Chipsets where PCI->PCI transfers vanish or hang
167 */ 187 */
168static void __devinit quirk_nopcipci(struct pci_dev *dev) 188static void __devinit quirk_nopcipci(struct pci_dev *dev)
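
Note on the TigerPoint quirk above: it reads the PM base from LPC config offset 0x40, masks it down to the I/O port base with 0xff80, and checks bit 4 (BM_STS) of PM1a_STS; PM1 status bits are write-one-to-clear, so writing 0x10 back clears just that bit and leaves the rest of the register alone. The write-one-to-clear idiom against a simulated register (illustrative, no real inw/outw):

    #include <stdio.h>
    #include <stdint.h>

    #define BM_STS 0x10                    /* bit 4 of PM1a_STS */

    static uint16_t pm1a_sts = 0x0011;     /* pretend BM_STS and bit 0 are set */

    static void w1c_write(uint16_t val)
    {
        pm1a_sts &= ~val;                  /* W1C: written 1s clear those bits */
    }

    int main(void)
    {
        if (pm1a_sts & BM_STS) {
            w1c_write(BM_STS);             /* clear only BM_STS */
            printf("cleared BM_STS, PM1a_STS now 0x%04x\n", pm1a_sts);
        }
        return 0;
    }
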
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c238cb3..9ba4dade69a4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
163 c = p_dev->function_config; 163 c = p_dev->function_config;
164 164
165 if (!(c->state & CONFIG_LOCKED)) { 165 if (!(c->state & CONFIG_LOCKED)) {
166 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 166 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
167 mutex_unlock(&s->ops_mutex); 167 mutex_unlock(&s->ops_mutex);
168 return -EACCES; 168 return -EACCES;
169 } 169 }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
220 s->win[w].card_start = offset; 220 s->win[w].card_start = offset;
221 ret = s->ops->set_mem_map(s, &s->win[w]); 221 ret = s->ops->set_mem_map(s, &s->win[w]);
222 if (ret) 222 if (ret)
223 dev_warn(&s->dev, "failed to set_mem_map\n"); 223 dev_warn(&p_dev->dev, "failed to set_mem_map\n");
224 mutex_unlock(&s->ops_mutex); 224 mutex_unlock(&s->ops_mutex);
225 return ret; 225 return ret;
226} /* pcmcia_map_mem_page */ 226} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
244 c = p_dev->function_config; 244 c = p_dev->function_config;
245 245
246 if (!(s->state & SOCKET_PRESENT)) { 246 if (!(s->state & SOCKET_PRESENT)) {
247 dev_dbg(&s->dev, "No card present\n"); 247 dev_dbg(&p_dev->dev, "No card present\n");
248 ret = -ENODEV; 248 ret = -ENODEV;
249 goto unlock; 249 goto unlock;
250 } 250 }
251 if (!(c->state & CONFIG_LOCKED)) { 251 if (!(c->state & CONFIG_LOCKED)) {
252 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 252 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
253 ret = -EACCES; 253 ret = -EACCES;
254 goto unlock; 254 goto unlock;
255 } 255 }
256 256
257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { 257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
258 dev_dbg(&s->dev, 258 dev_dbg(&p_dev->dev,
259 "changing Vcc or IRQ is not allowed at this time\n"); 259 "changing Vcc or IRQ is not allowed at this time\n");
260 ret = -EINVAL; 260 ret = -EINVAL;
261 goto unlock; 261 goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
267 if (mod->Vpp1 != mod->Vpp2) { 267 if (mod->Vpp1 != mod->Vpp2) {
268 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 268 dev_dbg(&p_dev->dev,
269 "Vpp1 and Vpp2 must be the same\n");
269 ret = -EINVAL; 270 ret = -EINVAL;
270 goto unlock; 271 goto unlock;
271 } 272 }
272 s->socket.Vpp = mod->Vpp1; 273 s->socket.Vpp = mod->Vpp1;
273 if (s->ops->set_socket(s, &s->socket)) { 274 if (s->ops->set_socket(s, &s->socket)) {
274 dev_printk(KERN_WARNING, &s->dev, 275 dev_printk(KERN_WARNING, &p_dev->dev,
275 "Unable to set VPP\n"); 276 "Unable to set VPP\n");
276 ret = -EIO; 277 ret = -EIO;
277 goto unlock; 278 goto unlock;
278 } 279 }
279 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 280 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
280 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 281 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
281 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 282 dev_dbg(&p_dev->dev,
283 "changing Vcc is not allowed at this time\n");
282 ret = -EINVAL; 284 ret = -EINVAL;
283 goto unlock; 285 goto unlock;
284 } 286 }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
401 win = &s->win[w]; 403 win = &s->win[w];
402 404
403 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { 405 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
404 dev_dbg(&s->dev, "not releasing unknown window\n"); 406 dev_dbg(&p_dev->dev, "not releasing unknown window\n");
405 mutex_unlock(&s->ops_mutex); 407 mutex_unlock(&s->ops_mutex);
406 return -EINVAL; 408 return -EINVAL;
407 } 409 }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
439 return -ENODEV; 441 return -ENODEV;
440 442
441 if (req->IntType & INT_CARDBUS) { 443 if (req->IntType & INT_CARDBUS) {
442 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); 444 dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
443 return -EINVAL; 445 return -EINVAL;
444 } 446 }
445 447
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
447 c = p_dev->function_config; 449 c = p_dev->function_config;
448 if (c->state & CONFIG_LOCKED) { 450 if (c->state & CONFIG_LOCKED) {
449 mutex_unlock(&s->ops_mutex); 451 mutex_unlock(&s->ops_mutex);
450 dev_dbg(&s->dev, "Configuration is locked\n"); 452 dev_dbg(&p_dev->dev, "Configuration is locked\n");
451 return -EACCES; 453 return -EACCES;
452 } 454 }
453 455
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
455 s->socket.Vpp = req->Vpp; 457 s->socket.Vpp = req->Vpp;
456 if (s->ops->set_socket(s, &s->socket)) { 458 if (s->ops->set_socket(s, &s->socket)) {
457 mutex_unlock(&s->ops_mutex); 459 mutex_unlock(&s->ops_mutex);
458 dev_printk(KERN_WARNING, &s->dev, 460 dev_printk(KERN_WARNING, &p_dev->dev,
459 "Unable to set socket state\n"); 461 "Unable to set socket state\n");
460 return -EINVAL; 462 return -EINVAL;
461 } 463 }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
569 int ret = -EINVAL; 571 int ret = -EINVAL;
570 572
571 mutex_lock(&s->ops_mutex); 573 mutex_lock(&s->ops_mutex);
572 dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); 574 dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
575 &c->io[0], &c->io[1]);
573 576
574 if (!(s->state & SOCKET_PRESENT)) { 577 if (!(s->state & SOCKET_PRESENT)) {
575 dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); 578 dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
576 goto out; 579 goto out;
577 } 580 }
578 581
579 if (c->state & CONFIG_LOCKED) { 582 if (c->state & CONFIG_LOCKED) {
580 dev_dbg(&s->dev, "Configuration is locked\n"); 583 dev_dbg(&p_dev->dev, "Configuration is locked\n");
581 goto out; 584 goto out;
582 } 585 }
583 if (c->state & CONFIG_IO_REQ) { 586 if (c->state & CONFIG_IO_REQ) {
584 dev_dbg(&s->dev, "IO already configured\n"); 587 dev_dbg(&p_dev->dev, "IO already configured\n");
585 goto out; 588 goto out;
586 } 589 }
587 590
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
592 if (c->io[1].end) { 595 if (c->io[1].end) {
593 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
594 if (ret) { 597 if (ret) {
598 struct resource tmp = c->io[0];
599 /* release the previously allocated resource */
595 release_io_space(s, &c->io[0]); 600 release_io_space(s, &c->io[0]);
601 /* but preserve the settings, for they worked... */
602 c->io[0].end = resource_size(&tmp);
603 c->io[0].start = tmp.start;
604 c->io[0].flags = tmp.flags;
596 goto out; 605 goto out;
597 } 606 }
598 } else 607 } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
601 c->state |= CONFIG_IO_REQ; 610 c->state |= CONFIG_IO_REQ;
602 p_dev->_io = 1; 611 p_dev->_io = 1;
603 612
604 dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", 613 dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
605 &c->io[0], &c->io[1]); 614 &c->io[0], &c->io[1]);
606out: 615out:
607 mutex_unlock(&s->ops_mutex); 616 mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
800 int w; 809 int w;
801 810
802 if (!(s->state & SOCKET_PRESENT)) { 811 if (!(s->state & SOCKET_PRESENT)) {
803 dev_dbg(&s->dev, "No card present\n"); 812 dev_dbg(&p_dev->dev, "No card present\n");
804 return -ENODEV; 813 return -ENODEV;
805 } 814 }
806 815
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
809 req->Size = s->map_size; 818 req->Size = s->map_size;
810 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; 819 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
811 if (req->Size & (s->map_size-1)) { 820 if (req->Size & (s->map_size-1)) {
812 dev_dbg(&s->dev, "invalid map size\n"); 821 dev_dbg(&p_dev->dev, "invalid map size\n");
813 return -EINVAL; 822 return -EINVAL;
814 } 823 }
815 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 824 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
816 (req->Base & (align-1))) { 825 (req->Base & (align-1))) {
817 dev_dbg(&s->dev, "invalid base address\n"); 826 dev_dbg(&p_dev->dev, "invalid base address\n");
818 return -EINVAL; 827 return -EINVAL;
819 } 828 }
820 if (req->Base) 829 if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
826 if (!(s->state & SOCKET_WIN_REQ(w))) 835 if (!(s->state & SOCKET_WIN_REQ(w)))
827 break; 836 break;
828 if (w == MAX_WIN) { 837 if (w == MAX_WIN) {
829 dev_dbg(&s->dev, "all windows are used already\n"); 838 dev_dbg(&p_dev->dev, "all windows are used already\n");
830 mutex_unlock(&s->ops_mutex); 839 mutex_unlock(&s->ops_mutex);
831 return -EINVAL; 840 return -EINVAL;
832 } 841 }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
837 win->res = pcmcia_find_mem_region(req->Base, req->Size, align, 846 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 0, s); 847 0, s);
839 if (!win->res) { 848 if (!win->res) {
840 dev_dbg(&s->dev, "allocating mem region failed\n"); 849 dev_dbg(&p_dev->dev, "allocating mem region failed\n");
841 mutex_unlock(&s->ops_mutex); 850 mutex_unlock(&s->ops_mutex);
842 return -EINVAL; 851 return -EINVAL;
843 } 852 }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
851 win->card_start = 0; 860 win->card_start = 0;
852 861
853 if (s->ops->set_mem_map(s, win) != 0) { 862 if (s->ops->set_mem_map(s, win) != 0) {
854 dev_dbg(&s->dev, "failed to set memory mapping\n"); 863 dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
855 mutex_unlock(&s->ops_mutex); 864 mutex_unlock(&s->ops_mutex);
856 return -EIO; 865 return -EIO;
857 } 866 }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
874 if (win->res) 883 if (win->res)
875 request_resource(&iomem_resource, res); 884 request_resource(&iomem_resource, res);
876 885
877 dev_dbg(&s->dev, "request_window results in %pR\n", res); 886 dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
878 887
879 mutex_unlock(&s->ops_mutex); 888 mutex_unlock(&s->ops_mutex);
880 *wh = res; 889 *wh = res;
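
Note on the pcmcia_resource.c hunks above: two threads of change run through the file. Debug output is re-parented from the socket device to the requesting PCMCIA device so messages name the card function, and pcmcia_request_io() now snapshots the first I/O window's start, size and flags before release_io_space(), then puts them back, so a later retry can reuse settings that are known to have worked (per the added comment) instead of losing them with the release. A small sketch of the save-around-a-clobbering-call pattern (struct and helper names are stand-ins, not the PCMCIA ones):

    #include <stdio.h>

    struct io_window {
        unsigned long start;
        unsigned long end;      /* used here as a size, mirroring resource_size() */
        unsigned long flags;
    };

    static void release_window(struct io_window *w)
    {
        w->start = w->end = w->flags = 0;   /* pretend the core wipes it */
    }

    int main(void)
    {
        struct io_window io0 = { .start = 0x300, .end = 0x20, .flags = 0x1 };

        struct io_window tmp = io0;         /* save the working settings */
        release_window(&io0);               /* release clobbers them */
        io0 = tmp;                          /* keep them for the retry */

        printf("io0 restored: start=0x%lx size=0x%lx flags=0x%lx\n",
               io0.start, io0.end, io0.flags);
        return 0;
    }
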
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869af0f44..deef6656ab7b 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
646 if (!pci_resource_start(dev, 0)) { 646 if (!pci_resource_start(dev, 0)) {
647 dev_warn(&dev->dev, "refusing to load the driver as the " 647 dev_warn(&dev->dev, "refusing to load the driver as the "
648 "io_base is NULL.\n"); 648 "io_base is NULL.\n");
649 goto err_out_free_mem; 649 goto err_out_disable;
650 } 650 }
651 651
652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed128bdef..2d61186ad5a2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3094}; 3094};
3095 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; 3096typedef u16 tpacpi_keymap_entry_t;
3097typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097 3098
3098static int __init hotkey_init(struct ibm_init_struct *iibm) 3099static int __init hotkey_init(struct ibm_init_struct *iibm)
3099{ 3100{
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3230 }; 3231 };
3231 3232
3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) 3233#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) 3234#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
3234 3235
3235 int res, i; 3236 int res, i;
3236 int status; 3237 int status;
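
Note on the thinkpad_acpi hunks above: the keymap is a typedef'd fixed-size array, and sizeof(tpacpi_keymap_t[0]) applies sizeof to a derived array type spelled from that typedef rather than to one entry of a keymap object, so it does not reliably yield the per-entry u16 size the macro name suggests; the added tpacpi_keymap_entry_t typedef states the element size directly. A compile-and-print comparison with hypothetical names:

    #include <stdio.h>
    #include <stdint.h>

    #define MAP_LEN 32

    typedef uint16_t keymap_entry_t;
    typedef keymap_entry_t keymap_t[MAP_LEN];

    int main(void)
    {
        keymap_t map;

        printf("whole map:  %zu bytes\n", sizeof(keymap_t));       /* 64 */
        printf("one entry:  %zu bytes\n", sizeof(keymap_entry_t)); /* 2  */
        printf("via object: %zu bytes\n", sizeof(map[0]));         /* also 2 */
        return 0;
    }
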
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae560fa1..dc628cb2e762 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; 233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; 234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; 235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
236 break;
236 case SOURCE_VOLTAGE: 237 case SOURCE_VOLTAGE:
237 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; 238 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
238 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; 239 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
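
Note on the apm_power hunk above: the SOURCE_ENERGY case fell straight through into SOURCE_VOLTAGE, so every property it had just selected was overwritten by the voltage ones; the added break stops the fall-through. The classic shape of the bug, reduced to a few lines (enum and strings are illustrative only):

    #include <stdio.h>

    enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

    static const char *pick_prop(enum source s)
    {
        const char *prop = "none";

        switch (s) {
        case SOURCE_ENERGY:
            prop = "energy_now";
            /* without this break, execution falls through and the
             * voltage case silently overrides the choice */
            break;
        case SOURCE_VOLTAGE:
            prop = "voltage_now";
            break;
        }
        return prop;
    }

    int main(void)
    {
        printf("SOURCE_ENERGY -> %s\n", pick_prop(SOURCE_ENERGY));
        return 0;
    }
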
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec2ff10..2a10cd361181 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
185{ 185{
186 u32 data[3]; 186 u32 data[3];
187 u8 *p = (u8 *)&data[1]; 187 u8 *p = (u8 *)&data[1];
188 int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, 188 int err = intel_scu_ipc_command(IPCMSG_BATTERY,
189 IPCMSG_BATTERY, NULL, 0, data, 3); 189 IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
190 190
191 prop->capacity = data[0]; 191 prop->capacity = data[0];
192 prop->crnt = *p++; 192 prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
207 207
208static int pmic_scu_ipc_set_charger(int charger) 208static int pmic_scu_ipc_set_charger(int charger)
209{ 209{
210 return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); 210 return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
211} 211}
212 212
213/** 213/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8d8d9b..2ce2eb71d0f5 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
216 int ret = -EINVAL; 216 int ret = -EINVAL;
217 217
218 if (info->vol_table && (index < (2 << info->vol_nbits))) { 218 if (info->vol_table && (index < (1 << info->vol_nbits))) {
219 ret = info->vol_table[index]; 219 ret = info->vol_table[index];
220 if (info->slope_double) 220 if (info->slope_double)
221 ret <<= 1; 221 ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
233 max_uV = max_uV >> 1; 233 max_uV = max_uV >> 1;
234 } 234 }
235 if (info->vol_table) { 235 if (info->vol_table) {
236 for (i = 0; i < (2 << info->vol_nbits); i++) { 236 for (i = 0; i < (1 << info->vol_nbits); i++) {
237 if (!info->vol_table[i]) 237 if (!info->vol_table[i])
238 break; 238 break;
239 if ((min_uV <= info->vol_table[i]) 239 if ((min_uV <= info->vol_table[i])
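
Note on the 88pm8607 hunks above: a selector encoded in vol_nbits bits can take 2^n values, that is 1 << n table entries; the old bound of 2 << n accepted indexes twice past the end of vol_table and let choose_voltage() read beyond it. Quick arithmetic check:

    #include <stdio.h>

    int main(void)
    {
        int nbits = 3;                       /* example: a 3-bit voltage selector */

        printf("valid entries: %d\n", 1 << nbits);   /* 8  */
        printf("old bound:     %d\n", 2 << nbits);   /* 16: indexes 8..15 overrun */
        return 0;
    }
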
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 11790990277a..b349266a43de 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
634 "%s: failed to register regulator %s err %d\n", 634 "%s: failed to register regulator %s err %d\n",
635 __func__, ab3100_regulator_desc[i].name, 635 __func__, ab3100_regulator_desc[i].name,
636 err); 636 err);
637 i--;
638 /* remove the already registered regulators */ 637 /* remove the already registered regulators */
639 while (i > 0) { 638 while (--i >= 0)
640 regulator_unregister(ab3100_regulators[i].rdev); 639 regulator_unregister(ab3100_regulators[i].rdev);
641 i--;
642 }
643 return err; 640 return err;
644 } 641 }
645 642
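
Note on the ab3100 hunk above (and the matching ab8500 fix that follows): the old unwind decremented i and then looped only while i > 0, so whenever at least one regulator had registered successfully, index 0 was never unregistered; while (--i >= 0) walks back over exactly the entries that succeeded, including the first. The partial-failure unwind pattern, simulated:

    #include <stdbool.h>
    #include <stdio.h>

    #define N 4

    static bool registered[N];

    static int register_one(int i)
    {
        if (i == 2)                      /* pretend the third registration fails */
            return -1;
        registered[i] = true;
        return 0;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            if (register_one(i) < 0) {
                while (--i >= 0)         /* unwind 1, then 0: index 0 included */
                    registered[i] = false;
                break;
            }
        }

        for (i = 0; i < N; i++)
            printf("slot %d registered=%d\n", i, registered[i]);
        return 0;
    }
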
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a491675..28c7ae67cec9 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
157 if (info->fixed_uV) 157 if (info->fixed_uV)
158 return info->fixed_uV; 158 return info->fixed_uV;
159 159
160 if (selector > info->voltages_len) 160 if (selector >= info->voltages_len)
161 return -EINVAL; 161 return -EINVAL;
162 162
163 return info->supported_voltages[selector]; 163 return info->supported_voltages[selector];
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
344static __devinit int ab8500_regulator_probe(struct platform_device *pdev) 344static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
345{ 345{
346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
347 struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); 347 struct ab8500_platform_data *pdata;
348 int i, err; 348 int i, err;
349 349
350 if (!ab8500) { 350 if (!ab8500) {
351 dev_err(&pdev->dev, "null mfd parent\n"); 351 dev_err(&pdev->dev, "null mfd parent\n");
352 return -EINVAL; 352 return -EINVAL;
353 } 353 }
354 pdata = dev_get_platdata(ab8500->dev);
354 355
355 /* register all regulators */ 356 /* register all regulators */
356 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { 357 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
368 dev_err(&pdev->dev, "failed to register regulator %s\n", 369 dev_err(&pdev->dev, "failed to register regulator %s\n",
369 info->desc.name); 370 info->desc.name);
370 /* when we fail, un-register all earlier regulators */ 371 /* when we fail, un-register all earlier regulators */
371 i--; 372 while (--i >= 0) {
372 while (i > 0) {
373 info = &ab8500_regulator_info[i]; 373 info = &ab8500_regulator_info[i];
374 regulator_unregister(info->regulator); 374 regulator_unregister(info->regulator);
375 i--;
376 } 375 }
377 return err; 376 return err;
378 } 377 }
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2314af..df1fb53c09d2 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
25 unsigned int current_level; 25 unsigned int current_level;
26 unsigned int current_mask; 26 unsigned int current_mask;
27 unsigned int current_offset; 27 unsigned int current_offset;
28 struct regulator_dev rdev; 28 struct regulator_dev *rdev;
29}; 29};
30 30
31static int ad5398_calc_current(struct ad5398_chip_info *chip, 31static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
211static int __devinit ad5398_probe(struct i2c_client *client, 211static int __devinit ad5398_probe(struct i2c_client *client,
212 const struct i2c_device_id *id) 212 const struct i2c_device_id *id)
213{ 213{
214 struct regulator_dev *rdev;
215 struct regulator_init_data *init_data = client->dev.platform_data; 214 struct regulator_init_data *init_data = client->dev.platform_data;
216 struct ad5398_chip_info *chip; 215 struct ad5398_chip_info *chip;
217 const struct ad5398_current_data_format *df = 216 const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
233 chip->current_offset = df->current_offset; 232 chip->current_offset = df->current_offset;
234 chip->current_mask = (chip->current_level - 1) << chip->current_offset; 233 chip->current_mask = (chip->current_level - 1) << chip->current_offset;
235 234
236 rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); 235 chip->rdev = regulator_register(&ad5398_reg, &client->dev,
237 if (IS_ERR(rdev)) { 236 init_data, chip);
238 ret = PTR_ERR(rdev); 237 if (IS_ERR(chip->rdev)) {
238 ret = PTR_ERR(chip->rdev);
239 dev_err(&client->dev, "failed to register %s %s\n", 239 dev_err(&client->dev, "failed to register %s %s\n",
240 id->name, ad5398_reg.name); 240 id->name, ad5398_reg.name);
241 goto err; 241 goto err;
@@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client)
254{ 254{
255 struct ad5398_chip_info *chip = i2c_get_clientdata(client); 255 struct ad5398_chip_info *chip = i2c_get_clientdata(client);
256 256
257 regulator_unregister(&chip->rdev); 257 regulator_unregister(chip->rdev);
258 kfree(chip); 258 kfree(chip);
259 i2c_set_clientdata(client, NULL); 259 i2c_set_clientdata(client, NULL);
260 260
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 422a709d271d..cc8b337b9119 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
700 constraints->min_uA != constraints->max_uA) { 700 constraints->min_uA != constraints->max_uA) {
701 ret = _regulator_get_current_limit(rdev); 701 ret = _regulator_get_current_limit(rdev);
702 if (ret > 0) 702 if (ret > 0)
703 count += sprintf(buf + count, "at %d uA ", ret / 1000); 703 count += sprintf(buf + count, "at %d mA ", ret / 1000);
704 } 704 }
705 705
706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2302 dev_set_name(&rdev->dev, "regulator.%d", 2302 dev_set_name(&rdev->dev, "regulator.%d",
2303 atomic_inc_return(&regulator_no) - 1); 2303 atomic_inc_return(&regulator_no) - 1);
2304 ret = device_register(&rdev->dev); 2304 ret = device_register(&rdev->dev);
2305 if (ret != 0) 2305 if (ret != 0) {
2306 put_device(&rdev->dev);
2306 goto clean; 2307 goto clean;
2308 }
2307 2309
2308 dev_set_drvdata(&rdev->dev, rdev); 2310 dev_set_drvdata(&rdev->dev, rdev);
2309 2311
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd393f2..d61ecb885a8c 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
165 mutex_init(&pmic->mtx); 165 mutex_init(&pmic->mtx);
166 166
167 for (i = 0; i < 3; i++) { 167 for (i = 0; i < 3; i++) {
168 pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, 168 pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
169 init_data, pmic); 169 init_data, pmic);
170 if (IS_ERR(pmic->rdev[i])) { 170 if (IS_ERR(pmic->rdev[i])) {
171 dev_err(&i2c->dev, "failed to register %s\n", id->name); 171 dev_err(&i2c->dev, "failed to register %s\n", id->name);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c2710a6d..559cfa271a44 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) 121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
122 return -EINVAL; 122 return -EINVAL;
123 123
124 if (min_uV >= 3000000)
125 selector = 3;
126 if (min_uV < 3000000)
127 selector = 2;
128 if (min_uV < 2500000)
129 selector = 1;
130 if (min_uV < 1800000) 124 if (min_uV < 1800000)
131 selector = 0; 125 selector = 0;
126 else if (min_uV < 2500000)
127 selector = 1;
128 else if (min_uV < 3000000)
129 selector = 2;
130 else if (min_uV >= 3000000)
131 selector = 3;
132 132
133 if (max1586_v6_calc_voltage(selector) > max_uV) 133 if (max1586_v6_calc_voltage(selector) > max_uV)
134 return -EINVAL; 134 return -EINVAL;
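
Note on the max1586 hunk above: the four independent ifs are restructured into an ascending else-if ladder, so exactly one branch is taken and the intent, mapping the requested minimum voltage to the smallest selector whose range covers it, is explicit. The same mapping as a standalone helper (thresholds copied from the hunk, helper name made up):

    #include <stdio.h>

    static int uv_to_selector(int min_uV)
    {
        int selector;

        if (min_uV < 1800000)
            selector = 0;
        else if (min_uV < 2500000)
            selector = 1;
        else if (min_uV < 3000000)
            selector = 2;
        else
            selector = 3;

        return selector;
    }

    int main(void)
    {
        int samples[] = { 1500000, 2000000, 2700000, 3100000 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%d uV -> selector %d\n", samples[i], uv_to_selector(samples[i]));
        return 0;
    }
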
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 4520ace3f7e7..6b60a9c0366b 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
330 /* set external clock frequency */ 330 /* set external clock frequency */
331 info->extclk_freq = pdata->extclk_freq; 331 info->extclk_freq = pdata->extclk_freq;
332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
333 info->extclk_freq); 333 info->extclk_freq << 6);
334 } 334 }
335 335
336 if (pdata->ramp_timing) { 336 if (pdata->ramp_timing) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298799f9..a1baf1fbe004 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
549 if (!max8998) 549 if (!max8998)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
552 size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); 552 size = sizeof(struct regulator_dev *) * pdata->num_regulators;
553 max8998->rdev = kzalloc(size, GFP_KERNEL); 553 max8998->rdev = kzalloc(size, GFP_KERNEL);
554 if (!max8998->rdev) { 554 if (!max8998->rdev) {
555 kfree(max8998); 555 kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
557 } 557 }
558 558
559 rdev = max8998->rdev; 559 rdev = max8998->rdev;
560 max8998->dev = &pdev->dev;
560 max8998->iodev = iodev; 561 max8998->iodev = iodev;
562 max8998->num_regulators = pdata->num_regulators;
561 platform_set_drvdata(pdev, max8998); 563 platform_set_drvdata(pdev, max8998);
562 564
563 for (i = 0; i < pdata->num_regulators; i++) { 565 for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
583 585
584 return 0; 586 return 0;
585err: 587err:
586 for (i = 0; i <= max8998->num_regulators; i++) 588 for (i = 0; i < max8998->num_regulators; i++)
587 if (rdev[i]) 589 if (rdev[i])
588 regulator_unregister(rdev[i]); 590 regulator_unregister(rdev[i]);
589 591
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
599 struct regulator_dev **rdev = max8998->rdev; 601 struct regulator_dev **rdev = max8998->rdev;
600 int i; 602 int i;
601 603
602 for (i = 0; i <= max8998->num_regulators; i++) 604 for (i = 0; i < max8998->num_regulators; i++)
603 if (rdev[i]) 605 if (rdev[i])
604 regulator_unregister(rdev[i]); 606 regulator_unregister(rdev[i]);
605 607
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42aa4a3..020f5878d7ff 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@ fail:
626 return error; 626 return error;
627} 627}
628 628
629/**
630 * tps6507x_remove - TPS6507x driver i2c remove handler
631 * @client: i2c driver client device structure
632 *
633 * Unregister TPS driver as an i2c client device driver
634 */
635static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) 629static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
636{ 630{
637 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); 631 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff1413a147..51237fbb1bbb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; 133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
134 val = (val & mask) >> ri->volt_shift; 134 val = (val & mask) >> ri->volt_shift;
135 135
136 if (val > ri->desc.n_voltages) 136 if (val >= ri->desc.n_voltages)
137 BUG(); 137 BUG();
138 138
139 return ri->voltages[val] * 1000; 139 return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); 153 return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
154} 154}
155 155
156static int tps6586x_regulator_enable(struct regulator_dev *rdev) 156static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb61b97..9edf8f692341 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
215 215
216 case REGULATOR_MODE_IDLE: 216 case REGULATOR_MODE_IDLE:
217 ret = wm831x_set_bits(wm831x, ctrl_reg, 217 ret = wm831x_set_bits(wm831x, ctrl_reg,
218 WM831X_LDO1_LP_MODE, 218 WM831X_LDO1_LP_MODE, 0);
219 WM831X_LDO1_LP_MODE);
220 if (ret < 0) 219 if (ret < 0)
221 return ret; 220 return ret;
222 221
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
225 WM831X_LDO1_ON_MODE); 224 WM831X_LDO1_ON_MODE);
226 if (ret < 0) 225 if (ret < 0)
227 return ret; 226 return ret;
227 break;
228 228
229 case REGULATOR_MODE_STANDBY: 229 case REGULATOR_MODE_STANDBY:
230 ret = wm831x_set_bits(wm831x, ctrl_reg, 230 ret = wm831x_set_bits(wm831x, ctrl_reg,
231 WM831X_LDO1_LP_MODE, 0); 231 WM831X_LDO1_LP_MODE,
232 WM831X_LDO1_LP_MODE);
232 if (ret < 0) 233 if (ret < 0)
233 return ret; 234 return ret;
234 235
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7db9364..fe4b8a8a9dfd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
1129 mode = REGULATOR_MODE_NORMAL; 1129 mode = REGULATOR_MODE_NORMAL;
1130 } else if (!active && !sleep) 1130 } else if (!active && !sleep)
1131 mode = REGULATOR_MODE_IDLE; 1131 mode = REGULATOR_MODE_IDLE;
1132 else if (!sleep) 1132 else if (sleep)
1133 mode = REGULATOR_MODE_STANDBY; 1133 mode = REGULATOR_MODE_STANDBY;
1134 1134
1135 return mode; 1135 return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780ea254b..261a07e0fb24 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
235 err = PTR_ERR(rtc); 235 err = PTR_ERR(rtc);
236 return err; 236 return err;
237 } 237 }
238 platform_set_drvdata(pdev, rtc);
238 239
239 return 0; 240 return 0;
240} 241}
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
244 struct rtc_device *rtc = platform_get_drvdata(pdev); 245 struct rtc_device *rtc = platform_get_drvdata(pdev);
245 246
246 rtc_device_unregister(rtc); 247 rtc_device_unregister(rtc);
248 platform_set_drvdata(pdev, NULL);
247 return 0; 249 return 0;
248} 250}
249 251
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc2c224..d4fb82d85e9b 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
426 enable_irq_wake(IRQ_RTC); 426 enable_irq_wake(IRQ_RTC);
427 bfin_rtc_sync_pending(&pdev->dev); 427 bfin_rtc_sync_pending(&pdev->dev);
428 } else 428 } else
429 bfin_rtc_int_clear(-1); 429 bfin_rtc_int_clear(0);
430 430
431 return 0; 431 return 0;
432} 432}
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
435{ 435{
436 if (device_may_wakeup(&pdev->dev)) 436 if (device_may_wakeup(&pdev->dev))
437 disable_irq_wake(IRQ_RTC); 437 disable_irq_wake(IRQ_RTC);
438 else 438
439 bfin_write_RTC_ISTAT(-1); 439 /*
440 * Since only some of the RTC bits are maintained externally in the
441 * Vbat domain, we need to wait for the RTC MMRs to be synced into
442 * the core after waking up. This happens every RTC 1HZ. Once that
443 * has happened, we can go ahead and re-enable the important write
444 * complete interrupt event.
445 */
446 while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
447 continue;
448 bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
440 449
441 return 0; 450 return 0;
442} 451}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3e28b8..d60557cae8ef 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
364 t->time.tm_isdst = -1; 364 t->time.tm_isdst = -1;
365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); 365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); 366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
367 return rtc_valid_tm(t); 367 return 0;
368} 368}
369 369
370static struct rtc_class_ops m41t80_rtc_ops = { 370static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe7f288..b7a6690e5b35 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
403 } 403 }
404 404
405 if (request_irq(adev->irq[0], pl031_interrupt, 405 if (request_irq(adev->irq[0], pl031_interrupt,
406 IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { 406 IRQF_DISABLED, "rtc-pl031", ldata)) {
407 ret = -EIO; 407 ret = -EIO;
408 goto out_no_irq; 408 goto out_no_irq;
409 } 409 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec89d412..f57a87f4ae96 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
310 310
311 s3c_rtc_setaie(alrm->enabled); 311 s3c_rtc_setaie(alrm->enabled);
312 312
313 if (alrm->enabled)
314 enable_irq_wake(s3c_rtc_alarmno);
315 else
316 disable_irq_wake(s3c_rtc_alarmno);
317
318 return 0; 313 return 0;
319} 314}
320 315
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
587 ticnt_en_save &= S3C64XX_RTCCON_TICEN; 582 ticnt_en_save &= S3C64XX_RTCCON_TICEN;
588 } 583 }
589 s3c_rtc_enable(pdev, 0); 584 s3c_rtc_enable(pdev, 0);
585
586 if (device_may_wakeup(&pdev->dev))
587 enable_irq_wake(s3c_rtc_alarmno);
588
590 return 0; 589 return 0;
591} 590}
592 591
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
600 tmp = readb(s3c_rtc_base + S3C2410_RTCCON); 599 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
601 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 600 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
602 } 601 }
602
603 if (device_may_wakeup(&pdev->dev))
604 disable_irq_wake(s3c_rtc_alarmno);
605
603 return 0; 606 return 0;
604} 607}
605#else 608#else
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de02525ec9..85cf607fc78f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
217 if (!blkdat->request_queue) 217 if (!blkdat->request_queue)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
220 elevator_exit(blkdat->request_queue->elevator); 220 rc = elevator_change(blkdat->request_queue, "noop");
221 rc = elevator_init(blkdat->request_queue, "noop");
222 if (rc) 221 if (rc)
223 goto cleanup_queue; 222 goto cleanup_queue;
224 223
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d2275573c..7f11f3e48e12 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
300 enum iscsi_host_param param, char *buf) 300 enum iscsi_host_param param, char *buf)
301{ 301{
302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
303 int len = 0; 303 int status = 0;
304 int status;
305 304
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 305 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
307 switch (param) { 306 switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
315 default: 314 default:
316 return iscsi_host_get_param(shost, param, buf); 315 return iscsi_host_get_param(shost, param, buf);
317 } 316 }
318 return len; 317 return status;
319} 318}
320 319
321int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) 320int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e470bcc..877324fc594c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
368 memset(req, 0, sizeof(*req)); 368 memset(req, 0, sizeof(*req));
369 wrb->tag0 |= tag; 369 wrb->tag0 |= tag;
370 370
371 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); 371 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
374 sizeof(*req)); 374 sizeof(*req));
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e049d5f6..d0c82340f0e2 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1404{ 1404{
1405 struct scsi_sense_hdr sshdr; 1405 struct scsi_sense_hdr sshdr;
1406 1406
1407 scmd_printk(KERN_INFO, cmd, ""); 1407 scmd_printk(KERN_INFO, cmd, " ");
1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1409 &sshdr); 1409 &sshdr);
1410 scsi_show_sense_hdr(&sshdr); 1410 scsi_show_sense_hdr(&sshdr);
1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1412 &sshdr); 1412 &sshdr);
1413 scmd_printk(KERN_INFO, cmd, ""); 1413 scmd_printk(KERN_INFO, cmd, " ");
1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1415} 1415}
1416EXPORT_SYMBOL(scsi_print_sense); 1416EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
1453 1453
1454void scsi_print_result(struct scsi_cmnd *cmd) 1454void scsi_print_result(struct scsi_cmnd *cmd)
1455{ 1455{
1456 scmd_printk(KERN_INFO, cmd, ""); 1456 scmd_printk(KERN_INFO, cmd, " ");
1457 scsi_show_result(cmd->result); 1457 scsi_show_result(cmd->result);
1458} 1458}
1459EXPORT_SYMBOL(scsi_print_result); 1459EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b5fe53..c5d0606ad097 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3231 misc_fw_support = readl(&cfgtable->misc_fw_support); 3231 misc_fw_support = readl(&cfgtable->misc_fw_support);
3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3233 3233
3234 /* The doorbell reset seems to cause lockups on some Smart
3235 * Arrays (e.g. P410, P410i, maybe others). Until this is
3236 * fixed or at least isolated, avoid the doorbell reset.
3237 */
3238 use_doorbell = 0;
3239
3234 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3240 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3235 if (rc) 3241 if (rc)
3236 goto unmap_cfgtable; 3242 goto unmap_cfgtable;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3440c4..e88bbdde49c5 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
 	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
 	WARN_ON(or->in.bio || or->in.total_bytes);
-	WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+	WARN_ON(bio->bi_rw & REQ_WRITE);
 	or->in.bio = bio;
 	or->in.total_bytes = len;
 }
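
A note on the osd_req_read() change above: REQ_WRITE is a flag bit somewhere in bi_rw, not necessarily bit 0, so comparing the masked value against 1 can be false even when the flag is set; the fixed WARN_ON() tests the mask directly. Below is a minimal, self-contained userspace sketch of that pitfall (the flag position used here is made up for illustration).

/* Illustrative only: EX_REQ_WRITE stands in for a flag that is not bit 0. */
#include <assert.h>

#define EX_REQ_WRITE (1UL << 5)	/* hypothetical flag position */

int main(void)
{
	unsigned long bi_rw = EX_REQ_WRITE;	/* "write" flag set */

	assert(bi_rw & EX_REQ_WRITE);		/* fixed test: fires as intended */
	assert(!(1 == (bi_rw & EX_REQ_WRITE)));	/* old test: would never trigger */
	return 0;
}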
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238cc794e..114bc5a81171 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	qla24xx_disable_vp(vha);
 
+	vha->flags.delete_progress = 1;
+
 	fc_remove_host(vha->host);
 
 	scsi_remove_host(vha->host);
 
-	qla2x00_free_fcports(vha);
+	if (vha->timer_active) {
+		qla2x00_vp_stop_timer(vha);
+		DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+		" = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+	}
 
 	qla24xx_deallocate_vp_id(vha);
 
+	/* No pending activities shall be there on the vha now */
+	DEBUG(msleep(random32()%10));  /* Just to see if something falls on
+					* the net we have placed below */
+
+	BUG_ON(atomic_read(&vha->vref_count));
+
+	qla2x00_free_fcports(vha);
+
 	mutex_lock(&ha->vport_lock);
 	ha->cur_vport_count--;
 	clear_bit(vha->vp_idx, ha->vp_idx_map);
 	mutex_unlock(&ha->vport_lock);
 
-	if (vha->timer_active) {
-		qla2x00_vp_stop_timer(vha);
-		DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
-			"has stopped\n",
-			vha->host_no, vha->vp_idx, vha));
-	}
-
 	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
 		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
 			qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a25eb3..b74e6b5743dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
 /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
 /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
 
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
 /*
 * Macros use for debugging the driver.
 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea0c7a3..d2a4e1530708 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
 #define MBX_UPDATE_FLASH_ACTIVE	3
 
 	struct mutex vport_lock;	/* Virtual port synchronization */
+	spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
 	struct completion mbx_cmd_comp; /* Serialize mbx access */
 	struct completion mbx_intr_comp;  /* Used for completion notification */
 	struct completion dcbx_comp;	/* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
 		uint32_t management_server_logged_in :1;
 		uint32_t process_response_queue :1;
 		uint32_t difdix_supported:1;
+		uint32_t delete_progress:1;
 	} flags;
 
 	atomic_t loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
 	struct req_que *req;
 	int fw_heartbeat_counter;
 	int seconds_since_last_heartbeat;
+
+	atomic_t vref_count;
 } scsi_qla_host_t;
 
 /*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
 	test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
 	atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {	\
+	atomic_inc(&__vha->vref_count);		\
+	mb();					\
+	if (__vha->flags.delete_progress) {	\
+		atomic_dec(&__vha->vref_count);	\
+		__bail = 1;			\
+	} else {				\
+		__bail = 0;			\
+	}					\
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {	\
+	atomic_dec(&__vha->vref_count);		\
+} while (0)
+
+
 #define qla_printk(level, ha, format, arg...) \
 	dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
 
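
The new vref_count/delete_progress pair and the QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() macros above implement a small "reference count plus shutdown flag" scheme for virtual ports. The sketch below shows the intended calling pattern only; the function name is made up, while the macros and fields are the ones added in this patch.

/* Hedged sketch of a caller: pin the vport before using it, bail out if
 * the delete path has already started, and drop the pin when finished. */
static int example_start_work(scsi_qla_host_t *vha)
{
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);	/* vref_count++ unless delete_progress is set */
	if (bail)
		return -ENODEV;		/* vport is being torn down, back off */

	/* ... do work that dereferences vha ... */

	QLA_VHA_MARK_NOT_BUSY(vha);	/* release the reference */
	return 0;
}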
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2619b5..9c383baebe27 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
 {
 	struct srb_ctx *ctx = sp->ctx;
 	struct srb_iocb *iocb = ctx->u.iocb_cmd;
+	struct scsi_qla_host *vha = sp->fcport->vha;
 
 	del_timer_sync(&iocb->timer);
 	kfree(iocb);
 	kfree(ctx);
 	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+	QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
 inline srb_t *
 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
 	unsigned long tmo)
 {
-	srb_t *sp;
+	srb_t *sp = NULL;
 	struct qla_hw_data *ha = vha->hw;
 	struct srb_ctx *ctx;
 	struct srb_iocb *iocb;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (bail)
+		return NULL;
 
 	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
 	if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
 	iocb->timer.function = qla2x00_ctx_sp_timeout;
 	add_timer(&iocb->timer);
 done:
+	if (!sp)
+		QLA_VHA_MARK_NOT_BUSY(vha);
 	return sp;
 }
 
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 		qla2x00_init_response_q_entries(rsp);
 	}
 
+	spin_lock_irqsave(&ha->vport_slock, flags);
 	/* Clear RSCN queue. */
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		vp->rscn_in_ptr = 0;
 		vp->rscn_out_ptr = 0;
 	}
+
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	ha->isp_ops->config_rings(vha);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 		/* Bypass virtual ports of the same host. */
 		found = 0;
 		if (ha->num_vhosts) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
 			list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
 				if (new_fcport->d_id.b24 == vp->d_id.b24) {
 					found = 1;
 					break;
 				}
 			}
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 			if (found)
 				continue;
 		}
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *vp;
 	struct scsi_qla_host *tvp;
+	unsigned long flags = 0;
 
 	rval = QLA_SUCCESS;
 
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 		/* Check for loop ID being already in use. */
 		found = 0;
 		fcport = NULL;
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
 		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
 			list_for_each_entry(fcport, &vp->vp_fcports, list) {
 				if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 			if (found)
 				break;
 		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 		/* If not in use then it is free to use. */
 		if (!found) {
@@ -3791,14 +3814,27 @@ void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
 	fc_port_t *fcport;
-	struct scsi_qla_host *tvp, *vha;
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = base_vha->hw;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ha->vport_slock, flags);
 	/* Go with deferred removal of rport references. */
-	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
-		list_for_each_entry(fcport, &vha->vp_fcports, list)
+	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+		atomic_inc(&vha->vref_count);
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport && fcport->drport &&
-			    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 				qla2x00_rport_del(fcport);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+			}
+		}
+		atomic_dec(&vha->vref_count);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-	struct scsi_qla_host *tvp;
+	unsigned long flags;
 
 	vha->flags.online = 0;
 	ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
 		qla2x00_mark_all_devices_lost(vha, 0);
-		list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+			atomic_inc(&vp->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 			qla2x00_mark_all_devices_lost(vp, 0);
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vp->vref_count);
+		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
 	} else {
 		if (!atomic_read(&vha->loop_down_timer))
 			atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 	uint8_t        status = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *vp;
-	struct scsi_qla_host *tvp;
 	struct req_que *req = ha->req_q_map[0];
+	unsigned long flags;
 
 	if (vha->flags.online) {
 		qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 		DEBUG(printk(KERN_INFO
 				"qla2x00_abort_isp(%ld): succeeded.\n",
 				vha->host_no));
-		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-			if (vp->vp_idx)
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx) {
+				atomic_inc(&vp->vref_count);
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 				qla2x00_vp_abort_isp(vp);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+				atomic_dec(&vp->vref_count);
+			}
 		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	} else {
 		qla_printk(KERN_INFO, ha,
 			"qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 	struct req_que *req = ha->req_q_map[0];
 	struct rsp_que *rsp = ha->rsp_q_map[0];
 	struct scsi_qla_host *vp;
-	struct scsi_qla_host *tvp;
+	unsigned long flags;
 
 	status = qla2x00_init_rings(vha);
 	if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 		DEBUG(printk(KERN_INFO
 				"qla82xx_restart_isp(%ld): succeeded.\n",
 				vha->host_no));
-		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-			if (vp->vp_idx)
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx) {
+				atomic_inc(&vp->vref_count);
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 				qla2x00_vp_abort_isp(vp);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+				atomic_dec(&vp->vref_count);
+			}
 		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	} else {
 		qla_printk(KERN_INFO, ha,
 			"qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba70e12a..28f65be19dad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				cp->result = DID_ERROR << 16;
 				break;
 			}
-		} else if (!lscsi_status) {
+		} else {
 			DEBUG2(qla_printk(KERN_INFO, ha,
 			    "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
 			    "of 0x%x bytes).\n", vha->host_no, cp->device->id,
 			    cp->device->lun, resid, scsi_bufflen(cp)));
 
-			cp->result = DID_ERROR << 16;
-			break;
+			cp->result = DID_ERROR << 16 | lscsi_status;
+			goto check_scsi_status;
 		}
 
 		cp->result = DID_OK << 16 | lscsi_status;
 		logit = 0;
 
+check_scsi_status:
 		/*
 		 * Check to see if SCSI Status is non zero. If so report SCSI
 		 * Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c69488..a595ec8264f8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *vp;
-	scsi_qla_host_t *tvp;
+	unsigned long   flags;
 
 	if (rptid_entry->entry_status != 0)
 		return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		return;
 	}
 
-	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list)
 		if (vp_idx == vp->vp_idx)
 			break;
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	if (!vp)
 		return;
 
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0ca78e..2b69392a71a1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 {
 	uint32_t vp_id;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	/* Find an empty slot and assign an vp_id */
 	mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 	set_bit(vp_id, ha->vp_idx_map);
 	ha->num_vhosts++;
 	vha->vp_idx = vp_id;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_add_tail(&vha->list, &ha->vp_list);
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	mutex_unlock(&ha->vport_lock);
 	return vp_id;
 }
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 {
 	uint16_t vp_id;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
 
 	mutex_lock(&ha->vport_lock);
+	/*
+	 * Wait for all pending activities to finish before removing vport from
+	 * the list.
+	 * Lock needs to be held for safe removal from the list (it
+	 * ensures no active vp_list traversal while the vport is removed
+	 * from the queue)
+	 */
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	while (atomic_read(&vha->vref_count)) {
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+		msleep(500);
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+	}
+	list_del(&vha->list);
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 	vp_id = vha->vp_idx;
 	ha->num_vhosts--;
 	clear_bit(vp_id, ha->vp_idx_map);
-	list_del(&vha->list);
+
 	mutex_unlock(&ha->vport_lock);
 }
 
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 {
 	scsi_qla_host_t *vha;
 	struct scsi_qla_host *tvha;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ha->vport_slock, flags);
 	/* Locate matching device in database. */
 	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
-		if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
 			return vha;
+		}
 	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 	return NULL;
 }
 
79 108
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 static void
 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 {
+	/*
+	 * !!! NOTE !!!
+	 * This function, if called in contexts other than vp create, disable
+	 * or delete, please make sure this is synchronized with the
+	 * delete thread.
+	 */
 	fc_port_t *fcport;
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
100 "loop_id=0x%04x :%x\n", 135 "loop_id=0x%04x :%x\n",
101 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
102 137
103 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
104 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
105 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 atomic_set(&fcport->state, FCS_UNCONFIGURED);
106 } 140 }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-	scsi_qla_host_t *vha, *tvha;
+	scsi_qla_host_t *vha;
 	struct qla_hw_data *ha = rsp->hw;
 	int i = 0;
+	unsigned long flags;
 
-	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vha, &ha->vp_list, list) {
 		if (vha->vp_idx) {
+			atomic_inc(&vha->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 			switch (mb[0]) {
 			case MBA_LIP_OCCURRED:
 			case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 				qla2x00_async_event(vha, rsp, mb);
 				break;
 			}
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vha->vref_count);
 		}
 		i++;
 	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 	int ret;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *vp;
-	struct scsi_qla_host *tvp;
+	unsigned long flags = 0;
 
 	if (vha->vp_idx)
 		return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 	if (!(ha->current_topology & ISP_CFG_F))
 		return;
 
-	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-		if (vp->vp_idx)
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		if (vp->vp_idx) {
+			atomic_inc(&vp->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 			ret = qla2x00_do_dpc_vp(vp);
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vp->vref_count);
+		}
 	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a6e193..0a71cc71eab2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
 sufficient_dsds:
 		req_cnt = 1;
 
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+				&reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
 		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 		if (!sp->ctx) {
 			DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		}
 		qla2xxx_wake_dpc(vha);
+		ha->flags.fw_hung = 1;
 		if (ha->flags.mbox_busy) {
-			ha->flags.fw_hung = 1;
 			ha->flags.mbox_int = 1;
 			DEBUG2(qla_printk(KERN_ERR, ha,
 				"Due to fw hung, doing premature "
 				"completion of mbx command\n"));
-			complete(&ha->mbx_intr_comp);
+			if (test_bit(MBX_INTR_WAIT,
+			    &ha->mbx_cmd_flags))
+				complete(&ha->mbx_intr_comp);
 		}
 		}
-	}
+	} else
+		vha->seconds_since_last_heartbeat = 0;
 	vha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
 
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3418 "%s(): Adapter reset needed!\n", __func__); 3434 "%s(): Adapter reset needed!\n", __func__);
3419 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3420 qla2xxx_wake_dpc(vha); 3436 qla2xxx_wake_dpc(vha);
3437 ha->flags.fw_hung = 1;
3421 if (ha->flags.mbox_busy) { 3438 if (ha->flags.mbox_busy) {
3422 ha->flags.fw_hung = 1;
3423 ha->flags.mbox_int = 1; 3439 ha->flags.mbox_int = 1;
3424 DEBUG2(qla_printk(KERN_ERR, ha, 3440 DEBUG2(qla_printk(KERN_ERR, ha,
3425 "Need reset, doing premature " 3441 "Need reset, doing premature "
3426 "completion of mbx command\n")); 3442 "completion of mbx command\n"));
3427 complete(&ha->mbx_intr_comp); 3443 if (test_bit(MBX_INTR_WAIT,
3444 &ha->mbx_cmd_flags))
3445 complete(&ha->mbx_intr_comp);
3428 } 3446 }
3429 } else { 3447 } else {
3430 qla82xx_check_fw_alive(vha); 3448 qla82xx_check_fw_alive(vha);
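
The new block in qla82xx_start_scsi() above recomputes free request-ring slots from the hardware consumer pointer before reserving an IOCB; the arithmetic is the usual circular-ring free-space formula. A standalone sketch of that calculation, with names chosen for illustration ("in" is the driver's producer index, "out" the consumer index read back from the chip, "length" the ring size in entries):

/* Hedged sketch of the circular request-ring accounting used above. */
static unsigned int ring_free_entries(unsigned int in, unsigned int out,
				      unsigned int length)
{
	if (in < out)
		return out - in;		/* free span up to the consumer */
	return length - (in - out);		/* wraps around the end of the ring */
}

The driver version stores the result in req->cnt and bails out with queuing_error when fewer than req_cnt + 2 entries remain.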
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49ac1c4..1e4bff695254 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@ probe_out:
 static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
-	scsi_qla_host_t *base_vha, *vha, *temp;
+	scsi_qla_host_t *base_vha, *vha;
 	struct qla_hw_data  *ha;
+	unsigned long flags;
 
 	base_vha = pci_get_drvdata(pdev);
 	ha = base_vha->hw;
 
-	list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
-		if (vha && vha->fc_vport)
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vha, &ha->vp_list, list) {
+		atomic_inc(&vha->vref_count);
+
+		if (vha && vha->fc_vport) {
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
 			fc_vport_terminate(vha->fc_vport);
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+		}
+
+		atomic_dec(&vha->vref_count);
 	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	set_bit(UNLOADING, &base_vha->dpc_flags);
 
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 {
 	struct qla_work_evt *e;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (bail)
+		return NULL;
 
 	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
-	if (!e)
+	if (!e) {
+		QLA_VHA_MARK_NOT_BUSY(vha);
 		return NULL;
+	}
 
 	INIT_LIST_HEAD(&e->list);
 	e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
+
+		/* For each work completed decrement vha ref count */
+		QLA_VHA_MARK_NOT_BUSY(vha);
 	}
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb91317d..8edbccb3232d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.03-k0"
+#define QLA2XXX_VERSION      "8.03.04-k0"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
-#define QLA_DRIVER_PATCH_VER	3
+#define QLA_DRIVER_PATCH_VER	4
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..ee02d3838a0a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 err_exit:
 	scsi_release_buffers(cmd);
-	scsi_put_command(cmd);
 	cmd->request->special = NULL;
+	scsi_put_command(cmd);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714becc2eaf..ffa0689ee840 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
 
-	if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
+	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
 		if (scsi_block_when_processing_errors(sdev))
 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
 	}
@@ -2625,15 +2625,15 @@ module_exit(exit_sd);
 static void sd_print_sense_hdr(struct scsi_disk *sdkp,
 			       struct scsi_sense_hdr *sshdr)
 {
-	sd_printk(KERN_INFO, sdkp, "");
+	sd_printk(KERN_INFO, sdkp, " ");
 	scsi_show_sense_hdr(sshdr);
-	sd_printk(KERN_INFO, sdkp, "");
+	sd_printk(KERN_INFO, sdkp, " ");
 	scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
 }
 
 static void sd_print_result(struct scsi_disk *sdkp, int result)
 {
-	sd_printk(KERN_INFO, sdkp, "");
+	sd_printk(KERN_INFO, sdkp, " ");
 	scsi_show_result(result);
 }
 
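
The sd_release() fix earlier in this file hinges on atomic_dec_return() yielding the post-decrement value: the old test allowed medium removal whenever the count was still non-zero (i.e. while other openers remained), while the new "== 0" test does so only on the last close. A tiny userspace illustration, with a plain int standing in for atomic_t:

/* Illustrative only: dec_return() models atomic_dec_return(). */
#include <stdio.h>

static int dec_return(int *v) { return --*v; }

int main(void)
{
	int openers = 2;

	printf("first close:  %s\n", dec_return(&openers) == 0 ? "last" : "still open");
	printf("second close: %s\n", dec_return(&openers) == 0 ? "last" : "still open");
	return 0;	/* prints "still open", then "last" */
}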
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7b09ac..2c3e89ddf069 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
 
 static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
 {
-	if (label)
-		sym_print_addr(cp->cmd, "%s: ", label);
-	else
-		sym_print_addr(cp->cmd, "");
+	sym_print_addr(cp->cmd, "%s: ", label);
 
 	spi_print_msg(msg);
 	printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
 		switch (np->msgin [2]) {
 		case M_X_MODIFY_DP:
 			if (DEBUG_FLAGS & DEBUG_POINTER)
-				sym_print_msg(cp, NULL, np->msgin);
+				sym_print_msg(cp, "extended msg ",
+					np->msgin);
 			tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
 				(np->msgin[5]<<8) + (np->msgin[6]);
 			sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
 		 */
 		case M_IGN_RESIDUE:
 			if (DEBUG_FLAGS & DEBUG_POINTER)
-				sym_print_msg(cp, NULL, np->msgin);
+				sym_print_msg(cp, "1 or 2 byte ", np->msgin);
 			if (cp->host_flags & HF_SENSE)
 				OUTL_DSP(np, SCRIPTA_BA(np, clrack));
 			else
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ffe8e38..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
 	spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
 {
-	int line = port->line;
-
-	if (line >= port->state->port.tty->driver->num)
-		return;
-
-	if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+	if (new == N_PPS) {
 		port->flags |= UPF_HARDPPS_CD;
 		pl010_enable_ms(port);
 	} else
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af503907f..5dff45c76d32 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
+#include <linux/slab.h>
 #include <linux/serial_reg.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void)
 	}
 
 	phsu = hsu;
-
 	hsu_debugfs_init(hsu);
 	return;
 
@@ -1435,18 +1435,20 @@ err_free_region:
 
 static void serial_hsu_remove(struct pci_dev *pdev)
 {
-	struct hsu_port *hsu;
-	int i;
+	void *priv = pci_get_drvdata(pdev);
+	struct uart_hsu_port *up;
 
-	hsu = pci_get_drvdata(pdev);
-	if (!hsu)
+	if (!priv)
 		return;
 
-	for (i = 0; i < 3; i++)
-		uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
+	/* For port 0/1/2, priv is the address of uart_hsu_port */
+	if (pdev->device != 0x081E) {
+		up = priv;
+		uart_remove_one_port(&serial_hsu_reg, &up->port);
+	}
 
 	pci_set_drvdata(pdev, NULL);
-	free_irq(hsu->irq, hsu);
+	free_irq(pdev->irq, priv);
 	pci_disable_device(pdev);
 }
 
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb266f143..c4399e23565a 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
 	psc_fifoc = of_iomap(np, 0);
 	if (!psc_fifoc) {
 		pr_err("%s: Can't map FIFOC\n", __func__);
+		of_node_put(np);
 		return -ENODEV;
 	}
 
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
index f6ad1ecbff79..51c15f58e01e 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/serial/mrst_max3110.c
@@ -29,6 +29,7 @@
 
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c69554bd4..7d475b2a79e8 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
 	info->p_dev = link;
 	link->priv = info;
 
-	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-	link->resource[0]->end = 8;
 	link->conf.Attributes = CONF_ENABLE_IRQ;
 	if (do_sound) {
 		link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
 
 /*====================================================================*/
 
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+	unsigned int port = 0;
+	struct serial_info *info = p_dev->priv;
+
+	if ((p_dev->resource[1]->end != 0) &&
+		(resource_size(p_dev->resource[1]) == 8)) {
+		port = p_dev->resource[1]->start;
+		info->slave = 1;
+	} else if ((info->manfid == MANFID_OSITECH) &&
+		(resource_size(p_dev->resource[0]) == 0x40)) {
+		port = p_dev->resource[0]->start + 0x28;
+		info->slave = 1;
+	}
+	if (info->slave)
+		return setup_serial(p_dev, info, port, p_dev->irq);
+
+	dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+	return -ENODEV;
+}
+
 static int simple_config_check(struct pcmcia_device *p_dev,
 			       cistpl_cftable_entry_t *cf,
 			       cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
 	struct serial_info *info = link->priv;
 	int i = -ENODEV, try;
 
-	/* If the card is already configured, look up the port and irq */
-	if (link->function_config) {
-		unsigned int port = 0;
-		if ((link->resource[1]->end != 0) &&
-			(resource_size(link->resource[1]) == 8)) {
-			port = link->resource[1]->end;
-			info->slave = 1;
-		} else if ((info->manfid == MANFID_OSITECH) &&
-			(resource_size(link->resource[0]) == 0x40)) {
-			port = link->resource[0]->start + 0x28;
-			info->slave = 1;
-		}
-		if (info->slave) {
-			return setup_serial(link, info, port,
-					    link->irq);
-		}
-	}
+	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+	link->resource[0]->end = 8;
 
 	/* First pass: look for a config entry that looks normal.
 	 * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
 	if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
 		goto found_port;
 
-	printk(KERN_NOTICE
-	       "serial_cs: no usable port range found, giving up\n");
+	dev_warn(&link->dev, "no usable port range found, giving up\n");
 	return -1;
 
 found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
 	int i, base2 = 0;
 
 	/* First, look for a generic full-sized window */
+	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
 	link->resource[0]->end = info->multi * 8;
 	if (pcmcia_loop_config(link, multi_config_check, &base2)) {
 		/* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
 		info->multi = 2;
 		if (pcmcia_loop_config(link, multi_config_check_notpicky,
 				       &base2)) {
-			printk(KERN_NOTICE "serial_cs: no usable port range"
-			       "found, giving up\n");
+			dev_warn(&link->dev, "no usable port range "
+				 "found, giving up\n");
 			return -ENODEV;
 		}
 	}
 
 	if (!link->irq)
-		dev_warn(&link->dev,
-			 "serial_cs: no usable IRQ found, continuing...\n");
+		dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
 
 	/*
 	 * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
 	   multifunction cards that ask for appropriate IO port ranges */
 	if ((info->multi == 0) &&
 	    (link->has_func_id) &&
+	    (link->socket->pcmcia_pfc == 0) &&
 	    ((link->func_id == CISTPL_FUNCID_MULTI) ||
 	     (link->func_id == CISTPL_FUNCID_SERIAL)))
 		pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
 	if (info->quirk && info->quirk->multi != -1)
 		info->multi = info->quirk->multi;
 
-	if (info->multi > 1)
+	dev_info(&link->dev,
+		"trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+		link->manf_id, link->card_id,
+		link->socket->pcmcia_pfc, info->multi, info->quirk);
+	if (link->socket->pcmcia_pfc)
+		i = pfc_config(link);
+	else if (info->multi > 1)
 		i = multi_config(link);
 	else
 		i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
 	return 0;
 
 failed:
-	dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+	dev_warn(&link->dev, "failed to initialize\n");
 	serial_remove(link);
 	return -ENODEV;
 }
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1ebd12..4c37c4e28647 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
 	msg->state = NULL;
 	if (msg->complete)
 		msg->complete(msg->context);
-	/* This message is completed, so let's turn off the clock! */
+	/* This message is completed, so let's turn off the clocks! */
 	clk_disable(pl022->clk);
+	amba_pclk_disable(pl022->adev);
 }
509 510
510/** 511/**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
 	/* Setup the SPI using the per chip configuration */
 	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
 	/*
-	 * We enable the clock here, then the clock will be disabled when
+	 * We enable the clocks here, then the clocks will be disabled when
 	 * giveback() is called in each method (poll/interrupt/DMA)
 	 */
+	amba_pclk_enable(pl022->adev);
 	clk_enable(pl022->clk);
 	restore_state(pl022);
 	flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
 	}
 
 	/* Disable SSP */
-	clk_enable(pl022->clk);
 	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
 	       SSP_CR1(pl022->virtbase));
 	load_ssp_default_config(pl022);
-	clk_disable(pl022->clk);
 
 	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
 			     pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
 		goto err_spi_register;
 	}
 	dev_dbg(dev, "probe succeded\n");
+	/* Disable the silicon block pclk and clock it when needed */
+	amba_pclk_disable(adev);
 	return 0;
 
 err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
 		return status;
 	}
 
-	clk_enable(pl022->clk);
+	amba_pclk_enable(adev);
 	load_ssp_default_config(pl022);
-	clk_disable(pl022->clk);
+	amba_pclk_disable(adev);
 	dev_dbg(&adev->dev, "suspended\n");
 	return 0;
 }
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
 	return amba_driver_register(&pl022_driver);
 }
 
-module_init(pl022_init);
+subsys_initcall(pl022_init);
 
 static void __exit pl022_exit(void)
 {
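
Taken together, the pl022 hunks gate both the SSP functional clock and the AMBA bus clock (pclk) around message processing instead of leaving pclk enabled from probe onward. The sketch below shows only that bracketing; the wrapper function name is made up and error handling is omitted.

/* Hedged sketch of the clock bracketing introduced above. */
static void example_process_one_message(struct pl022 *pl022)
{
	amba_pclk_enable(pl022->adev);	/* bus clock needed for register access */
	clk_enable(pl022->clk);		/* SSP functional clock */

	/* ... run the transfer by poll, interrupt or DMA ... */

	clk_disable(pl022->clk);	/* giveback() drops both again when done */
	amba_pclk_disable(pl022->adev);
}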
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604c..56247853c298 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
181 wait_till_not_busy(dws); 181 wait_till_not_busy(dws);
182} 182}
183 183
184static void null_cs_control(u32 command)
185{
186}
187
188static int null_writer(struct dw_spi *dws) 184static int null_writer(struct dw_spi *dws)
189{ 185{
190 u8 n_bytes = dws->n_bytes; 186 u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
 					struct spi_transfer,
 					transfer_list);
 
-	if (!last_transfer->cs_change)
+	if (!last_transfer->cs_change && dws->cs_control)
 		dws->cs_control(MRST_SPI_DEASSERT);
 
 	msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 {
 	struct dw_spi *dws = dev_id;
+	u16 irq_status, irq_mask = 0x3f;
+
+	irq_status = dw_readw(dws, isr) & irq_mask;
+	if (!irq_status)
+		return IRQ_NONE;
 
 	if (!dws->cur_msg) {
 		spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
 	 */
 	if (dws->cs_control) {
 		if (dws->rx && dws->tx)
-			chip->tmode = 0x00;
+			chip->tmode = SPI_TMOD_TR;
 		else if (dws->rx)
-			chip->tmode = 0x02;
+			chip->tmode = SPI_TMOD_RO;
 		else
-			chip->tmode = 0x01;
+			chip->tmode = SPI_TMOD_TO;
 
-		cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+		cr0 &= ~SPI_TMOD_MASK;
 		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
 	}
 
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
 		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
 		if (!chip)
 			return -ENOMEM;
-
-		chip->cs_control = null_cs_control;
-		chip->enable_dma = 0;
 	}
 
 	/*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
 	dws->dma_inited = 0;
 	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
 
-	ret = request_irq(dws->irq, dw_spi_irq, 0,
+	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
 			"dw_spi", dws);
 	if (ret < 0) {
 		dev_err(&master->dev, "can not get IRQ\n");
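
The dw_spi changes above go together: once the IRQ line is requested with IRQF_SHARED, the handler may be invoked for another device's interrupt, so it must read its own masked status first and return IRQ_NONE when nothing is pending. A generic sketch of that shape, with the device type and register helpers being placeholders rather than real dw_spi APIs:

/* Hedged sketch of a shared-line interrupt handler. */
static irqreturn_t example_shared_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;			/* hypothetical type */
	u16 status = example_read_isr(dev) & EXAMPLE_IRQ_MASK;	/* hypothetical helpers */

	if (!status)
		return IRQ_NONE;	/* not ours: let the other handlers on the line run */

	example_handle(dev, status);
	return IRQ_HANDLED;		/* claimed and serviced */
}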
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79ae52a..b5a78a1f4421 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
 	const struct spi_device	*spi = to_spi_device(dev);
 	const struct spi_driver	*sdrv = to_spi_driver(drv);
 
+	/* Attempt an OF style match */
+	if (of_driver_match_device(dev, drv))
+		return 1;
+
 	if (sdrv->id_table)
 		return !!spi_match_id(sdrv->id_table, spi);
 
@@ -554,11 +559,9 @@ done:
 EXPORT_SYMBOL_GPL(spi_register_master);
 
 
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
 {
-	/* note: before about 2.6.14-rc1 this would corrupt memory: */
-	if (dev != master_dev)
-		spi_unregister_device(to_spi_device(dev));
+	spi_unregister_device(to_spi_device(dev));
 	return 0;
 }
 
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
 {
 	int dummy;
 
-	dummy = device_for_each_child(master->dev.parent, &master->dev,
-					__unregister);
+	dummy = device_for_each_child(&master->dev, NULL, __unregister);
 	device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
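
With the of_driver_match_device() call added to spi_match_device() above, an SPI driver can bind purely from a device-tree compatible string instead of an id_table entry. A hedged example of the driver side; the compatible string, driver name and stub callbacks are all made up for illustration, and the of_match_table placement assumes the struct device_driver field of this kernel generation.

/* Hypothetical driver showing what the new OF match path keys on. */
static const struct of_device_id example_spi_of_match[] = {
	{ .compatible = "acme,example-spi-dev" },
	{ }
};

static int example_probe(struct spi_device *spi) { return 0; }
static int example_remove(struct spi_device *spi) { return 0; }

static struct spi_driver example_spi_driver = {
	.driver = {
		.name		= "example-spi-dev",
		.owner		= THIS_MODULE,
		.of_match_table	= example_spi_of_match,	/* consulted by of_driver_match_device() */
	},
	.probe	= example_probe,
	.remove	= example_remove,
};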
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index e24a63498acb..63e51b011d50 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
 	spi_gpio->bitbang.master = spi_master_get(master);
 	spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
 
-	if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) {
+	if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
 		spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
 		spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
 		spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index d31b57f7baaf..1dd86b835cd8 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 
 	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
 
-	out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+	if (mspi->rx_dma == mspi->dma_dummy_rx)
+		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+	else
+		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
 	out_be16(&rx_bd->cbd_datlen, 0);
 	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
 
-	out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+	if (mspi->tx_dma == mspi->dma_dummy_tx)
+		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+	else
+		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
 	out_be16(&tx_bd->cbd_datlen, xfer_len);
 	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
 		 BD_SC_LAST);
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 97365815a729..c3038da2648a 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 		val = readl(regs + S3C64XX_SPI_STATUS);
 	} while (TX_FIFO_LVL(val, sci) && loops--);
 
+	if (loops == 0)
+		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
 	/* Flush RxFIFO*/
 	loops = msecs_to_loops(1);
 	do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 			break;
 	} while (loops--);
 
+	if (loops == 0)
+		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
 	val = readl(regs + S3C64XX_SPI_CH_CFG);
 	val &= ~S3C64XX_SPI_CH_SW_RST;
 	writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 
 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
-	ms += 5;	/* some tolerance */
+	ms += 10;	/* some tolerance */
 
 	if (dma_mode) {
 		val = msecs_to_jiffies(ms) + 10;
 		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
 	} else {
+		u32 status;
 		val = msecs_to_loops(ms);
 		do {
-			val = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+			status = readl(regs + S3C64XX_SPI_STATUS);
+		} while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
 	}
 
 	if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
447 writel(val, regs + S3C64XX_SPI_CLK_CFG); 454 writel(val, regs + S3C64XX_SPI_CLK_CFG);
448} 455}
449 456
450void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
451 int size, enum s3c2410_dma_buffresult res) 458 int size, enum s3c2410_dma_buffresult res)
452{ 459{
453 struct s3c64xx_spi_driver_data *sdd = buf_id; 460 struct s3c64xx_spi_driver_data *sdd = buf_id;
454 unsigned long flags; 461 unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
467 spin_unlock_irqrestore(&sdd->lock, flags); 474 spin_unlock_irqrestore(&sdd->lock, flags);
468} 475}
469 476
470void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, 477static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
471 int size, enum s3c2410_dma_buffresult res) 478 int size, enum s3c2410_dma_buffresult res)
472{ 479{
473 struct s3c64xx_spi_driver_data *sdd = buf_id; 480 struct s3c64xx_spi_driver_data *sdd = buf_id;
474 unsigned long flags; 481 unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
508 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 515 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
509 516
510 if (xfer->tx_buf != NULL) { 517 if (xfer->tx_buf != NULL) {
511 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, 518 xfer->tx_dma = dma_map_single(dev,
512 xfer->len, DMA_TO_DEVICE); 519 (void *)xfer->tx_buf, xfer->len,
520 DMA_TO_DEVICE);
513 if (dma_mapping_error(dev, xfer->tx_dma)) { 521 if (dma_mapping_error(dev, xfer->tx_dma)) {
514 dev_err(dev, "dma_map_single Tx failed\n"); 522 dev_err(dev, "dma_map_single Tx failed\n");
515 xfer->tx_dma = XFER_DMAADDR_INVALID; 523 xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
919 return -ENODEV; 927 return -ENODEV;
920 } 928 }
921 929
930 sci = pdev->dev.platform_data;
931 if (!sci->src_clk_name) {
932 dev_err(&pdev->dev,
933 "Board init must call s3c64xx_spi_set_info()\n");
934 return -EINVAL;
935 }
936
922 /* Check for availability of necessary resource */ 937 /* Check for availability of necessary resource */
923 938
924 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 939 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
946 return -ENOMEM; 961 return -ENOMEM;
947 } 962 }
948 963
949 sci = pdev->dev.platform_data;
950
951 platform_set_drvdata(pdev, master); 964 platform_set_drvdata(pdev, master);
952 965
953 sdd = spi_master_get_devdata(master); 966 sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
1170{ 1183{
1171 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1172} 1185}
1173module_init(s3c64xx_spi_init); 1186subsys_initcall(s3c64xx_spi_init);
1174 1187
1175static void __exit s3c64xx_spi_exit(void) 1188static void __exit s3c64xx_spi_exit(void)
1176{ 1189{
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index baa8b05b9e8d..6e973a79aa25 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
30#include "hash.h" 30#include "hash.h"
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/netfilter_bridge.h>
34 33
35#define MIN(x, y) ((x) < (y) ? (x) : (y)) 34#define MIN(x, y) ((x) < (y) ? (x) : (y))
36 35
@@ -431,11 +430,6 @@ out:
431 return NOTIFY_DONE; 430 return NOTIFY_DONE;
432} 431}
433 432
434static int batman_skb_recv_finish(struct sk_buff *skb)
435{
436 return NF_ACCEPT;
437}
438
439/* receive a packet with the batman ethertype coming on a hard 433/* receive a packet with the batman ethertype coming on a hard
440 * interface */ 434 * interface */
441int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 435int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
456 if (atomic_read(&module_state) != MODULE_ACTIVE) 450 if (atomic_read(&module_state) != MODULE_ACTIVE)
457 goto err_free; 451 goto err_free;
458 452
459 /* if netfilter/ebtables wants to block incoming batman
460 * packets then give them a chance to do so here */
461 ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
462 batman_skb_recv_finish);
463 if (ret != 1)
464 goto err_out;
465
466 /* packet should hold at least type and version */ 453 /* packet should hold at least type and version */
467 if (unlikely(skb_headlen(skb) < 2)) 454 if (unlikely(skb_headlen(skb) < 2))
468 goto err_free; 455 goto err_free;
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee7b4e4..da3c82e47bbd 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
29#include "vis.h" 29#include "vis.h"
30#include "aggregation.h" 30#include "aggregation.h"
31 31
32#include <linux/netfilter_bridge.h>
33 32
34static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
35 34
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
92 91
93 /* dev_queue_xmit() returns a negative result on error. However on 92 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 93 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
95 * (which is > 0). This will not be treated as an error. 94 * (which is > 0). This will not be treated as an error. */
96 * Also, if netfilter/ebtables wants to block outgoing batman
97 * packets then giving them a chance to do so here */
98 95
99 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 96 return dev_queue_xmit(skb);
100 dev_queue_xmit);
101send_skb_err: 97send_skb_err:
102 kfree_skb(skb); 98 kfree_skb(skb);
103 return NET_XMIT_DROP; 99 return NET_XMIT_DROP;
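Note on the hunk above: with the ebtables hook gone, the packet is handed straight to dev_queue_xmit(). Its return value is negative only on hard errors; NET_XMIT_DROP and friends are positive and, as the retained comment says, are not treated as failures. A usage sketch (the warning message is illustrative):

	int rc = dev_queue_xmit(skb);	/* consumes the skb in every case */

	if (rc < 0)
		pr_warn("batman-adv: transmit failed: %d\n", rc);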
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579425b9..1b3060eb2921 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@ struct st_proto_s {
80extern long st_register(struct st_proto_s *); 80extern long st_register(struct st_proto_s *);
81extern long st_unregister(enum proto_type); 81extern long st_unregister(enum proto_type);
82 82
83extern struct platform_device *st_get_plat_device(void);
84#endif /* ST_H */ 83#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1db1ab..b85d8bfdf600 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
38#include "st_ll.h" 38#include "st_ll.h"
39#include "st.h" 39#include "st.h"
40 40
41#define VERBOSE
42/* strings to be used for rfkill entries and by 41/* strings to be used for rfkill entries and by
43 * ST Core to be used for sysfs debug entry 42 * ST Core to be used for sysfs debug entry
44 */ 43 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
581 long err = 0; 580 long err = 0;
582 unsigned long flags = 0; 581 unsigned long flags = 0;
583 582
584 st_kim_ref(&st_gdata); 583 st_kim_ref(&st_gdata, 0);
585 pr_info("%s(%d) ", __func__, new_proto->type); 584 pr_info("%s(%d) ", __func__, new_proto->type);
586 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL 585 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
587 || new_proto->reg_complete_cb == NULL) { 586 || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
713 712
714 pr_debug("%s: %d ", __func__, type); 713 pr_debug("%s: %d ", __func__, type);
715 714
716 st_kim_ref(&st_gdata); 715 st_kim_ref(&st_gdata, 0);
717 if (type < ST_BT || type >= ST_MAX) { 716 if (type < ST_BT || type >= ST_MAX) {
718 pr_err(" protocol %d not supported", type); 717 pr_err(" protocol %d not supported", type);
719 return -EPROTONOSUPPORT; 718 return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
767#endif 766#endif
768 long len; 767 long len;
769 768
770 st_kim_ref(&st_gdata); 769 st_kim_ref(&st_gdata, 0);
771 if (unlikely(skb == NULL || st_gdata == NULL 770 if (unlikely(skb == NULL || st_gdata == NULL
772 || st_gdata->tty == NULL)) { 771 || st_gdata->tty == NULL)) {
773 pr_err("data/tty unavailable to perform write"); 772 pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
818 struct st_data_s *st_gdata; 817 struct st_data_s *st_gdata;
819 pr_info("%s ", __func__); 818 pr_info("%s ", __func__);
820 819
821 st_kim_ref(&st_gdata); 820 st_kim_ref(&st_gdata, 0);
822 st_gdata->tty = tty; 821 st_gdata->tty = tty;
823 tty->disc_data = st_gdata; 822 tty->disc_data = st_gdata;
824 823
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d149f5f..8601320a679e 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
117void st_core_exit(struct st_data_s *); 117void st_core_exit(struct st_data_s *);
118 118
119/* ask for reference from KIM */ 119/* ask for reference from KIM */
120void st_kim_ref(struct st_data_s **); 120void st_kim_ref(struct st_data_s **, int);
121 121
122#define GPS_STUB_TEST 122#define GPS_STUB_TEST
123#ifdef GPS_STUB_TEST 123#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7fdc4e6..9e99463f76e8 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_GPS, "GPS"), 72 PROTO_ENTRY(ST_GPS, "GPS"),
73}; 73};
74 74
75#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
76struct platform_device *st_kim_devices[MAX_ST_DEVICES];
75 77
76/**********************************************************************/ 78/**********************************************************************/
77/* internal functions */ 79/* internal functions */
78 80
79/** 81/**
82 * st_get_plat_device -
83 * function which returns the reference to the platform device
84 * requested by id. As of now only 1 such device exists (id=0)
85 * the context requesting for reference can get the id to be
86 * requested by a. The protocol driver which is registering or
87 * b. the tty device which is opened.
88 */
89static struct platform_device *st_get_plat_device(int id)
90{
91 return st_kim_devices[id];
92}
93
94/**
80 * validate_firmware_response - 95 * validate_firmware_response -
81 * function to return whether the firmware response was proper 96 * function to return whether the firmware response was proper
82 * in case of error don't complete so that waiting for proper 97 * in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
353 struct kim_data_s *kim_gdata; 368 struct kim_data_s *kim_gdata;
354 pr_info(" %s ", __func__); 369 pr_info(" %s ", __func__);
355 370
356 kim_pdev = st_get_plat_device(); 371 kim_pdev = st_get_plat_device(0);
357 kim_gdata = dev_get_drvdata(&kim_pdev->dev); 372 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
358 373
359 if (kim_gdata->gpios[type] == -1) { 374 if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
574 * This would enable multiple such platform devices to exist 589 * This would enable multiple such platform devices to exist
575 * on a given platform 590 * on a given platform
576 */ 591 */
577void st_kim_ref(struct st_data_s **core_data) 592void st_kim_ref(struct st_data_s **core_data, int id)
578{ 593{
579 struct platform_device *pdev; 594 struct platform_device *pdev;
580 struct kim_data_s *kim_gdata; 595 struct kim_data_s *kim_gdata;
581 /* get kim_gdata reference from platform device */ 596 /* get kim_gdata reference from platform device */
582 pdev = st_get_plat_device(); 597 pdev = st_get_plat_device(id);
583 kim_gdata = dev_get_drvdata(&pdev->dev); 598 kim_gdata = dev_get_drvdata(&pdev->dev);
584 *core_data = kim_gdata->core_data; 599 *core_data = kim_gdata->core_data;
585} 600}
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
623 long *gpios = pdev->dev.platform_data; 638 long *gpios = pdev->dev.platform_data;
624 struct kim_data_s *kim_gdata; 639 struct kim_data_s *kim_gdata;
625 640
641 st_kim_devices[pdev->id] = pdev;
626 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); 642 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
627 if (!kim_gdata) { 643 if (!kim_gdata) {
628 pr_err("no mem to allocate"); 644 pr_err("no mem to allocate");
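Note on the ti-st hunks above: st_kim_ref() and the now-static st_get_plat_device() take a platform-device id so a future board with one ST device per UART can ask for the right instance; every caller in this series passes 0 because only one device is registered. A hedged usage sketch (assumes the device has been probed; the error code is illustrative):

	struct st_data_s *st_gdata;

	st_kim_ref(&st_gdata, 0);	/* id 0: the only ST device today */
	if (!st_gdata)
		return -EAGAIN;		/* core not ready yet */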
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338bcafe..4bdb8362de82 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); 766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
767 767
768 768
769 if (param->u.wpa_associate.wpa_ie && 769 if (param->u.wpa_associate.wpa_ie_len) {
770 copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) 770 if (!param->u.wpa_associate.wpa_ie)
771 return -EINVAL; 771 return -EINVAL;
772 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
773 return -EINVAL;
774 if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
775 return -EFAULT;
776 }
772 777
773 if (param->u.wpa_associate.mode == 1) 778 if (param->u.wpa_associate.mode == 1)
774 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; 779 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
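Note on the hunk above: the rewritten block is the usual pattern for pulling a variable-length blob in from user space: reject a missing pointer or an oversized length before touching the destination, and reserve -EFAULT for a copy that actually faults. A generic sketch (kbuf, user_ptr and len are placeholders, not the driver's names):

	if (len) {
		if (!user_ptr || len > sizeof(kbuf))
			return -EINVAL;			/* caller error */
		if (copy_from_user(kbuf, user_ptr, len))
			return -EFAULT;			/* copy faulted */
	}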
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600e..9eed5b52d9de 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB runtime power management (suspend/resume and wakeup)" 94 bool "USB runtime power management (autosuspend) and wakeup"
95 depends on USB && PM_RUNTIME 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/control" file to enable or disable autosuspend for
99 peripherals and to enable or disable autosuspend (see 99 individual USB peripherals (see
100 Documentation/usb/power-management.txt for more details). 100 Documentation/usb/power-management.txt for more details).
101 101
102 Also, USB "remote wakeup" signaling is supported, whereby some 102 Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cdc..1e6ccef2cf0c 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
159int usb_register_dev(struct usb_interface *intf, 159int usb_register_dev(struct usb_interface *intf,
160 struct usb_class_driver *class_driver) 160 struct usb_class_driver *class_driver)
161{ 161{
162 int retval = -EINVAL; 162 int retval;
163 int minor_base = class_driver->minor_base; 163 int minor_base = class_driver->minor_base;
164 int minor = 0; 164 int minor;
165 char name[20]; 165 char name[20];
166 char *temp; 166 char *temp;
167 167
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
173 */ 173 */
174 minor_base = 0; 174 minor_base = 0;
175#endif 175#endif
176 intf->minor = -1;
177
178 dbg ("looking for a minor, starting at %d", minor_base);
179 176
180 if (class_driver->fops == NULL) 177 if (class_driver->fops == NULL)
181 goto exit; 178 return -EINVAL;
179 if (intf->minor >= 0)
180 return -EADDRINUSE;
181
182 retval = init_usb_class();
183 if (retval)
184 return retval;
185
186 dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
182 187
183 down_write(&minor_rwsem); 188 down_write(&minor_rwsem);
184 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { 189 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
186 continue; 191 continue;
187 192
188 usb_minors[minor] = class_driver->fops; 193 usb_minors[minor] = class_driver->fops;
189 194 intf->minor = minor;
190 retval = 0;
191 break; 195 break;
192 } 196 }
193 up_write(&minor_rwsem); 197 up_write(&minor_rwsem);
194 198 if (intf->minor < 0)
195 if (retval) 199 return -EXFULL;
196 goto exit;
197
198 retval = init_usb_class();
199 if (retval)
200 goto exit;
201
202 intf->minor = minor;
203 200
204 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
205 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
213 "%s", temp); 210 "%s", temp);
214 if (IS_ERR(intf->usb_dev)) { 211 if (IS_ERR(intf->usb_dev)) {
215 down_write(&minor_rwsem); 212 down_write(&minor_rwsem);
216 usb_minors[intf->minor] = NULL; 213 usb_minors[minor] = NULL;
214 intf->minor = -1;
217 up_write(&minor_rwsem); 215 up_write(&minor_rwsem);
218 retval = PTR_ERR(intf->usb_dev); 216 retval = PTR_ERR(intf->usb_dev);
219 } 217 }
220exit:
221 return retval; 218 return retval;
222} 219}
223EXPORT_SYMBOL_GPL(usb_register_dev); 220EXPORT_SYMBOL_GPL(usb_register_dev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 844683e50383..9f0ce7de0e36 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1802,6 +1802,7 @@ free_interfaces:
1802 intf->dev.groups = usb_interface_groups; 1802 intf->dev.groups = usb_interface_groups;
1803 intf->dev.dma_mask = dev->dev.dma_mask; 1803 intf->dev.dma_mask = dev->dev.dma_mask;
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1;
1805 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1806 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1807 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1807 dev->bus->busnum, dev->devpath, 1808 dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d741d93..a1e8d273103f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
119 ehci->broken_periodic = 1; 119 ehci->broken_periodic = 1;
120 ehci_info(ehci, "using broken periodic workaround\n"); 120 ehci_info(ehci, "using broken periodic workaround\n");
121 } 121 }
122 if (pdev->device == 0x0806 || pdev->device == 0x0811
123 || pdev->device == 0x0829) {
124 ehci_info(ehci, "disable lpm for langwell/penwell\n");
125 ehci->has_lpm = 0;
126 }
122 break; 127 break;
123 case PCI_VENDOR_ID_TDI: 128 case PCI_VENDOR_ID_TDI:
124 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 129 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b60..5ab5bb89bae3 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
322 index, transmit ? 'T' : 'R', cppi_ch); 322 index, transmit ? 'T' : 'R', cppi_ch);
323 cppi_ch->hw_ep = ep; 323 cppi_ch->hw_ep = ep;
324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; 324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
325 cppi_ch->channel.max_len = 0x7fffffff;
325 326
326 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); 327 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
327 return &cppi_ch->channel; 328 return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e30d437..9e8639d4e862 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
195 195
196static int musb_test_mode_open(struct inode *inode, struct file *file) 196static int musb_test_mode_open(struct inode *inode, struct file *file)
197{ 197{
198 file->private_data = inode->i_private;
199
200 return single_open(file, musb_test_mode_show, inode->i_private); 198 return single_open(file, musb_test_mode_show, inode->i_private);
201} 199}
202 200
203static ssize_t musb_test_mode_write(struct file *file, 201static ssize_t musb_test_mode_write(struct file *file,
204 const char __user *ubuf, size_t count, loff_t *ppos) 202 const char __user *ubuf, size_t count, loff_t *ppos)
205{ 203{
206 struct musb *musb = file->private_data; 204 struct seq_file *s = file->private_data;
205 struct musb *musb = s->private;
207 u8 test = 0; 206 u8 test = 0;
208 char buf[18]; 207 char buf[18];
209 208
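Note on the hunk above: after the change, musb_test_mode_open() relies on single_open() alone. single_open() stores the seq_file in file->private_data and the cookie it was given in seq_file->private, hence the two-step dereference in the write handler. A two-line sketch of that lookup:

	struct seq_file *s = file->private_data;	/* set up by single_open() */
	struct musb *musb = s->private;			/* the inode->i_private cookie */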
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957e..d065e23f123e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
300#ifndef CONFIG_MUSB_PIO_ONLY 300#ifndef CONFIG_MUSB_PIO_ONLY
301 if (is_dma_capable() && musb_ep->dma) { 301 if (is_dma_capable() && musb_ep->dma) {
302 struct dma_controller *c = musb->dma_controller; 302 struct dma_controller *c = musb->dma_controller;
303 size_t request_size;
304
305 /* setup DMA, then program endpoint CSR */
306 request_size = min_t(size_t, request->length - request->actual,
307 musb_ep->dma->max_len);
303 308
304 use_dma = (request->dma != DMA_ADDR_INVALID); 309 use_dma = (request->dma != DMA_ADDR_INVALID);
305 310
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
307 312
308#ifdef CONFIG_USB_INVENTRA_DMA 313#ifdef CONFIG_USB_INVENTRA_DMA
309 { 314 {
310 size_t request_size;
311
312 /* setup DMA, then program endpoint CSR */
313 request_size = min_t(size_t, request->length,
314 musb_ep->dma->max_len);
315 if (request_size < musb_ep->packet_sz) 315 if (request_size < musb_ep->packet_sz)
316 musb_ep->dma->desired_mode = 0; 316 musb_ep->dma->desired_mode = 0;
317 else 317 else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
373 use_dma = use_dma && c->channel_program( 373 use_dma = use_dma && c->channel_program(
374 musb_ep->dma, musb_ep->packet_sz, 374 musb_ep->dma, musb_ep->packet_sz,
375 0, 375 0,
376 request->dma, 376 request->dma + request->actual,
377 request->length); 377 request_size);
378 if (!use_dma) { 378 if (!use_dma) {
379 c->channel_release(musb_ep->dma); 379 c->channel_release(musb_ep->dma);
380 musb_ep->dma = NULL; 380 musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
386 use_dma = use_dma && c->channel_program( 386 use_dma = use_dma && c->channel_program(
387 musb_ep->dma, musb_ep->packet_sz, 387 musb_ep->dma, musb_ep->packet_sz,
388 request->zero, 388 request->zero,
389 request->dma, 389 request->dma + request->actual,
390 request->length); 390 request_size);
391#endif 391#endif
392 } 392 }
393#endif 393#endif
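Note on the txstate() hunks above: the DMA length is now computed once for both the Inventra and CPPI paths: never more than the channel's max_len, and starting at the part of the request that is still outstanding rather than at offset 0. A sketch of that clamping, with names from the hunks:

	size_t request_size = min_t(size_t,
				    request->length - request->actual,
				    musb_ep->dma->max_len);
	dma_addr_t dma_addr = request->dma + request->actual;	/* resume point */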
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
501 request->zero = 0; 501 request->zero = 0;
502 } 502 }
503 503
504 /* ... or if not, then complete it. */ 504 if (request->actual == request->length) {
505 musb_g_giveback(musb_ep, request, 0); 505 musb_g_giveback(musb_ep, request, 0);
506 506 request = musb_ep->desc ? next_request(musb_ep) : NULL;
507 /* 507 if (!request) {
508 * Kickstart next transfer if appropriate; 508 DBG(4, "%s idle now\n",
509 * the packet that just completed might not 509 musb_ep->end_point.name);
510 * be transmitted for hours or days. 510 return;
511 * REVISIT for double buffering... 511 }
512 * FIXME revisit for stalls too...
513 */
514 musb_ep_select(mbase, epnum);
515 csr = musb_readw(epio, MUSB_TXCSR);
516 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
517 return;
518
519 request = musb_ep->desc ? next_request(musb_ep) : NULL;
520 if (!request) {
521 DBG(4, "%s idle now\n",
522 musb_ep->end_point.name);
523 return;
524 } 512 }
525 } 513 }
526 514
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
568{ 556{
569 const u8 epnum = req->epnum; 557 const u8 epnum = req->epnum;
570 struct usb_request *request = &req->request; 558 struct usb_request *request = &req->request;
571 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 559 struct musb_ep *musb_ep;
572 void __iomem *epio = musb->endpoints[epnum].regs; 560 void __iomem *epio = musb->endpoints[epnum].regs;
573 unsigned fifo_count = 0; 561 unsigned fifo_count = 0;
574 u16 len = musb_ep->packet_sz; 562 u16 len;
575 u16 csr = musb_readw(epio, MUSB_RXCSR); 563 u16 csr = musb_readw(epio, MUSB_RXCSR);
564 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
565
566 if (hw_ep->is_shared_fifo)
567 musb_ep = &hw_ep->ep_in;
568 else
569 musb_ep = &hw_ep->ep_out;
570
571 len = musb_ep->packet_sz;
576 572
577 /* We shouldn't get here while DMA is active, but we do... */ 573 /* We shouldn't get here while DMA is active, but we do... */
578 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 574 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
647 */ 643 */
648 644
649 csr |= MUSB_RXCSR_DMAENAB; 645 csr |= MUSB_RXCSR_DMAENAB;
650#ifdef USE_MODE1
651 csr |= MUSB_RXCSR_AUTOCLEAR; 646 csr |= MUSB_RXCSR_AUTOCLEAR;
647#ifdef USE_MODE1
652 /* csr |= MUSB_RXCSR_DMAMODE; */ 648 /* csr |= MUSB_RXCSR_DMAMODE; */
653 649
654 /* this special sequence (enabling and then 650 /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
663 if (request->actual < request->length) { 659 if (request->actual < request->length) {
664 int transfer_size = 0; 660 int transfer_size = 0;
665#ifdef USE_MODE1 661#ifdef USE_MODE1
666 transfer_size = min(request->length, 662 transfer_size = min(request->length - request->actual,
667 channel->max_len); 663 channel->max_len);
668#else 664#else
669 transfer_size = len; 665 transfer_size = min(request->length - request->actual,
666 (unsigned)len);
670#endif 667#endif
671 if (transfer_size <= musb_ep->packet_sz) 668 if (transfer_size <= musb_ep->packet_sz)
672 musb_ep->dma->desired_mode = 0; 669 musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
740 u16 csr; 737 u16 csr;
741 struct usb_request *request; 738 struct usb_request *request;
742 void __iomem *mbase = musb->mregs; 739 void __iomem *mbase = musb->mregs;
743 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 740 struct musb_ep *musb_ep;
744 void __iomem *epio = musb->endpoints[epnum].regs; 741 void __iomem *epio = musb->endpoints[epnum].regs;
745 struct dma_channel *dma; 742 struct dma_channel *dma;
743 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
744
745 if (hw_ep->is_shared_fifo)
746 musb_ep = &hw_ep->ep_in;
747 else
748 musb_ep = &hw_ep->ep_out;
746 749
747 musb_ep_select(mbase, epnum); 750 musb_ep_select(mbase, epnum);
748 751
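Note on the hunk above: rxstate() and musb_g_rx() now pick the gadget endpoint the same way: on hardware endpoints with a shared FIFO the state lives in ep_in even for OUT traffic. A compact sketch of the selection:

	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
	struct musb_ep *musb_ep = hw_ep->is_shared_fifo ? &hw_ep->ep_in
							: &hw_ep->ep_out;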
@@ -1081,7 +1084,7 @@ struct free_record {
1081/* 1084/*
1082 * Context: controller locked, IRQs blocked. 1085 * Context: controller locked, IRQs blocked.
1083 */ 1086 */
1084static void musb_ep_restart(struct musb *musb, struct musb_request *req) 1087void musb_ep_restart(struct musb *musb, struct musb_request *req)
1085{ 1088{
1086 DBG(3, "<== %s request %p len %u on hw_ep%d\n", 1089 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1087 req->tx ? "TX/IN" : "RX/OUT", 1090 req->tx ? "TX/IN" : "RX/OUT",
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d82..572b1da7f2dc 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
105 105
106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); 106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
107 107
108extern void musb_ep_restart(struct musb *, struct musb_request *);
109
108#endif /* __MUSB_GADGET_H */ 110#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f3a358..6dd03f4c5f49 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
261 ctrlrequest->wIndex & 0x0f; 261 ctrlrequest->wIndex & 0x0f;
262 struct musb_ep *musb_ep; 262 struct musb_ep *musb_ep;
263 struct musb_hw_ep *ep; 263 struct musb_hw_ep *ep;
264 struct musb_request *request;
264 void __iomem *regs; 265 void __iomem *regs;
265 int is_in; 266 int is_in;
266 u16 csr; 267 u16 csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
302 musb_writew(regs, MUSB_RXCSR, csr); 303 musb_writew(regs, MUSB_RXCSR, csr);
303 } 304 }
304 305
306 /* Maybe start the first request in the queue */
307 request = to_musb_request(
308 next_request(musb_ep));
309 if (!musb_ep->busy && request) {
310 DBG(3, "restarting the request\n");
311 musb_ep_restart(musb, request);
312 }
313
305 /* select ep0 again */ 314 /* select ep0 again */
306 musb_ep_select(mbase, 0); 315 musb_ep_select(mbase, 0);
307 } break; 316 } break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff9..9e65c47cc98b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
660 660
661 qh->segsize = length; 661 qh->segsize = length;
662 662
663 /*
664 * Ensure the data reaches to main memory before starting
665 * DMA transfer
666 */
667 wmb();
668
663 if (!dma->channel_program(channel, pkt_size, mode, 669 if (!dma->channel_program(channel, pkt_size, mode,
664 urb->transfer_dma + offset, length)) { 670 urb->transfer_dma + offset, length)) {
665 dma->channel_release(channel); 671 dma->channel_release(channel);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 05aaac1c3861..0bc97698af15 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
347 } 347 }
348} 348}
349 349
350static void twl4030_phy_power(struct twl4030_usb *twl, int on) 350static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
351{ 351{
352 u8 pwr; 352 u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
353
354 if (on)
355 pwr &= ~PHY_PWR_PHYPWD;
356 else
357 pwr |= PHY_PWR_PHYPWD;
353 358
354 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); 359 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
360}
361
362static void twl4030_phy_power(struct twl4030_usb *twl, int on)
363{
355 if (on) { 364 if (on) {
356 regulator_enable(twl->usb3v1); 365 regulator_enable(twl->usb3v1);
357 regulator_enable(twl->usb1v8); 366 regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
365 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, 374 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
366 VUSB_DEDICATED2); 375 VUSB_DEDICATED2);
367 regulator_enable(twl->usb1v5); 376 regulator_enable(twl->usb1v5);
368 pwr &= ~PHY_PWR_PHYPWD; 377 __twl4030_phy_power(twl, 1);
369 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
370 twl4030_usb_write(twl, PHY_CLK_CTRL, 378 twl4030_usb_write(twl, PHY_CLK_CTRL,
371 twl4030_usb_read(twl, PHY_CLK_CTRL) | 379 twl4030_usb_read(twl, PHY_CLK_CTRL) |
372 (PHY_CLK_CTRL_CLOCKGATING_EN | 380 (PHY_CLK_CTRL_CLOCKGATING_EN |
373 PHY_CLK_CTRL_CLK32K_EN)); 381 PHY_CLK_CTRL_CLK32K_EN));
374 } else { 382 } else {
375 pwr |= PHY_PWR_PHYPWD; 383 __twl4030_phy_power(twl, 0);
376 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
377 regulator_disable(twl->usb1v5); 384 regulator_disable(twl->usb1v5);
378 regulator_disable(twl->usb1v8); 385 regulator_disable(twl->usb1v8);
379 regulator_disable(twl->usb3v1); 386 regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
387 394
388 twl4030_phy_power(twl, 0); 395 twl4030_phy_power(twl, 0);
389 twl->asleep = 1; 396 twl->asleep = 1;
397 dev_dbg(twl->dev, "%s\n", __func__);
390} 398}
391 399
392static void twl4030_phy_resume(struct twl4030_usb *twl) 400static void __twl4030_phy_resume(struct twl4030_usb *twl)
393{ 401{
394 if (!twl->asleep)
395 return;
396
397 twl4030_phy_power(twl, 1); 402 twl4030_phy_power(twl, 1);
398 twl4030_i2c_access(twl, 1); 403 twl4030_i2c_access(twl, 1);
399 twl4030_usb_set_mode(twl, twl->usb_mode); 404 twl4030_usb_set_mode(twl, twl->usb_mode);
400 if (twl->usb_mode == T2_USB_MODE_ULPI) 405 if (twl->usb_mode == T2_USB_MODE_ULPI)
401 twl4030_i2c_access(twl, 0); 406 twl4030_i2c_access(twl, 0);
407}
408
409static void twl4030_phy_resume(struct twl4030_usb *twl)
410{
411 if (!twl->asleep)
412 return;
413 __twl4030_phy_resume(twl);
402 twl->asleep = 0; 414 twl->asleep = 0;
415 dev_dbg(twl->dev, "%s\n", __func__);
403} 416}
404 417
405static int twl4030_usb_ldo_init(struct twl4030_usb *twl) 418static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
408 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); 421 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
409 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); 422 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
410 423
411 /* put VUSB3V1 LDO in active state */ 424 /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
412 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); 425 /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
413 426
414 /* input to VUSB3V1 LDO is from VBAT, not VBUS */ 427 /* input to VUSB3V1 LDO is from VBAT, not VBUS */
415 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); 428 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
502 return IRQ_HANDLED; 515 return IRQ_HANDLED;
503} 516}
504 517
518static void twl4030_usb_phy_init(struct twl4030_usb *twl)
519{
520 int status;
521
522 status = twl4030_usb_linkstat(twl);
523 if (status >= 0) {
524 if (status == USB_EVENT_NONE) {
525 __twl4030_phy_power(twl, 0);
526 twl->asleep = 1;
527 } else {
528 __twl4030_phy_resume(twl);
529 twl->asleep = 0;
530 }
531
532 blocking_notifier_call_chain(&twl->otg.notifier, status,
533 twl->otg.gadget);
534 }
535 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
536}
537
505static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) 538static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
506{ 539{
507 struct twl4030_usb *twl = xceiv_to_twl(x); 540 struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
550 struct twl4030_usb_data *pdata = pdev->dev.platform_data; 583 struct twl4030_usb_data *pdata = pdev->dev.platform_data;
551 struct twl4030_usb *twl; 584 struct twl4030_usb *twl;
552 int status, err; 585 int status, err;
553 u8 pwr;
554 586
555 if (!pdata) { 587 if (!pdata) {
556 dev_dbg(&pdev->dev, "platform_data not available\n"); 588 dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
569 twl->otg.set_peripheral = twl4030_set_peripheral; 601 twl->otg.set_peripheral = twl4030_set_peripheral;
570 twl->otg.set_suspend = twl4030_set_suspend; 602 twl->otg.set_suspend = twl4030_set_suspend;
571 twl->usb_mode = pdata->usb_mode; 603 twl->usb_mode = pdata->usb_mode;
572 604 twl->asleep = 1;
573 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
574
575 twl->asleep = (pwr & PHY_PWR_PHYPWD);
576 605
577 /* init spinlock for workqueue */ 606 /* init spinlock for workqueue */
578 spin_lock_init(&twl->lock); 607 spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
610 return status; 639 return status;
611 } 640 }
612 641
613 /* The IRQ handler just handles changes from the previous states 642 /* Power down phy or make it work according to
614 * of the ID and VBUS pins ... in probe() we must initialize that 643 * current link state.
615 * previous state. The easy way: fake an IRQ.
616 *
617 * REVISIT: a real IRQ might have happened already, if PREEMPT is
618 * enabled. Else the IRQ may not yet be configured or enabled,
619 * because of scheduling delays.
620 */ 644 */
621 twl4030_usb_irq(twl->irq, twl); 645 twl4030_usb_phy_init(twl);
622 646
623 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); 647 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
624 return 0; 648 return 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7e3347..aa665817a272 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
2024 2024
2025 case TIOCGICOUNT: 2025 case TIOCGICOUNT:
2026 cnow = mos7720_port->icount; 2026 cnow = mos7720_port->icount;
2027
2028 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2029
2027 icount.cts = cnow.cts; 2030 icount.cts = cnow.cts;
2028 icount.dsr = cnow.dsr; 2031 icount.dsr = cnow.dsr;
2029 icount.rng = cnow.rng; 2032 icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 1c9b6e9b2386..1a42bc213799 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2285 case TIOCGICOUNT: 2285 case TIOCGICOUNT:
2286 cnow = mos7840_port->icount; 2286 cnow = mos7840_port->icount;
2287 smp_rmb(); 2287 smp_rmb();
2288
2289 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2290
2288 icount.cts = cnow.cts; 2291 icount.cts = cnow.cts;
2289 icount.dsr = cnow.dsr; 2292 icount.dsr = cnow.dsr;
2290 icount.rng = cnow.rng; 2293 icount.rng = cnow.rng;
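Note on the two TIOCGICOUNT hunks above: they follow the standard rule for structures copied out to user space: zero the whole thing first so padding and any field the handler does not set cannot leak kernel stack contents. A generic sketch (argp is a placeholder for the ioctl argument pointer):

	struct serial_icounter_struct icount;

	memset(&icount, 0, sizeof(icount));
	icount.cts = cnow.cts;			/* fill only what is known */
	if (copy_to_user(argp, &icount, sizeof(icount)))
		return -EFAULT;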
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f842331dfa..7ccc967831f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
3508 softback_buf = 0UL; 3508 softback_buf = 0UL;
3509 3509
3510 for (i = 0; i < FB_MAX; i++) { 3510 for (i = 0; i < FB_MAX; i++) {
3511 int pending; 3511 int pending = 0;
3512 3512
3513 mapped = 0; 3513 mapped = 0;
3514 info = registered_fb[i]; 3514 info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
3516 if (info == NULL) 3516 if (info == NULL)
3517 continue; 3517 continue;
3518 3518
3519 pending = cancel_work_sync(&info->queue); 3519 if (info->queue.func)
3520 pending = cancel_work_sync(&info->queue);
3520 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : 3521 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3521 "no")); 3522 "no"));
3522 3523
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b07933..70477c2e4b61 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/screen_info.h> 14#include <linux/screen_info.h>
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 19static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
39 M_I20, /* 20-Inch iMac */ 39 M_I20, /* 20-Inch iMac */
40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ 40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
41 M_I24, /* 24-Inch iMac */ 41 M_I24, /* 24-Inch iMac */
42 M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
43 M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
44 M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
42 M_MINI, /* Mac Mini */ 45 M_MINI, /* Mac Mini */
46 M_MINI_3_1, /* Mac Mini, 3,1th gen */
47 M_MINI_4_1, /* Mac Mini, 4,1th gen */
43 M_MB, /* MacBook */ 48 M_MB, /* MacBook */
44 M_MB_2, /* MacBook, 2nd rev. */ 49 M_MB_2, /* MacBook, 2nd rev. */
45 M_MB_3, /* MacBook, 3rd rev. */ 50 M_MB_3, /* MacBook, 3rd rev. */
51 M_MB_5_1, /* MacBook, 5th rev. */
52 M_MB_6_1, /* MacBook, 6th rev. */
53 M_MB_7_1, /* MacBook, 7th rev. */
46 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ 54 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
47 M_MBA, /* MacBook Air */ 55 M_MBA, /* MacBook Air */
48 M_MBP, /* MacBook Pro */ 56 M_MBP, /* MacBook Pro */
49 M_MBP_2, /* MacBook Pro 2nd gen */ 57 M_MBP_2, /* MacBook Pro 2nd gen */
58 M_MBP_2_2, /* MacBook Pro 2,2nd gen */
50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */ 59 M_MBP_SR, /* MacBook Pro (Santa Rosa) */
51 M_MBP_4, /* MacBook Pro, 4th gen */ 60 M_MBP_4, /* MacBook Pro, 4th gen */
52 M_MBP_5_1, /* MacBook Pro, 5,1th gen */ 61 M_MBP_5_1, /* MacBook Pro, 5,1th gen */
62 M_MBP_5_2, /* MacBook Pro, 5,2th gen */
63 M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
64 M_MBP_6_1, /* MacBook Pro, 6,1th gen */
65 M_MBP_6_2, /* MacBook Pro, 6,2th gen */
66 M_MBP_7_1, /* MacBook Pro, 7,1th gen */
53 M_UNKNOWN /* placeholder */ 67 M_UNKNOWN /* placeholder */
54}; 68};
55 69
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
64 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ 78 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
65 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, 79 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
66 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ 80 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
81 [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
82 [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
83 [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
67 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, 84 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
85 [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
86 [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
68 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, 87 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
88 [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
89 [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
90 [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
69 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, 91 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
70 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, 92 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
71 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ 93 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
94 [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
72 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, 95 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
73 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, 96 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
74 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, 97 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
98 [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
99 [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
100 [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
101 [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
102 [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
75 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } 103 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
76}; 104};
77 105
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
92 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), 120 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
93 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), 121 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
94 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), 122 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
123 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
124 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
125 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
95 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), 126 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
127 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
128 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
96 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), 129 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
97 /* At least one of these two will be right; maybe both? */ 130 /* At least one of these two will be right; maybe both? */
98 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), 131 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
101 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), 134 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
102 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), 135 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
103 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), 136 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
137 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
138 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
139 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
104 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), 140 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
105 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), 141 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
106 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), 142 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
143 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
107 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), 144 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
108 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), 145 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
109 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), 146 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
110 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), 147 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
111 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), 148 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
149 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
150 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
151 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
152 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
153 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
112 {}, 154 {},
113}; 155};
114 156
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
116{ 158{
117 struct efifb_dmi_info *info = id->driver_data; 159 struct efifb_dmi_info *info = id->driver_data;
118 if (info->base == 0) 160 if (info->base == 0)
119 return -ENODEV; 161 return 0;
120 162
121 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 163 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
122 "(%dx%d, stride %d)\n", id->ident, 164 "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
124 info->stride); 166 info->stride);
125 167
126 /* Trust the bootloader over the DMI tables */ 168 /* Trust the bootloader over the DMI tables */
127 if (screen_info.lfb_base == 0) 169 if (screen_info.lfb_base == 0) {
170#if defined(CONFIG_PCI)
171 struct pci_dev *dev = NULL;
172 int found_bar = 0;
173#endif
128 screen_info.lfb_base = info->base; 174 screen_info.lfb_base = info->base;
129 if (screen_info.lfb_linelength == 0)
130 screen_info.lfb_linelength = info->stride;
131 if (screen_info.lfb_width == 0)
132 screen_info.lfb_width = info->width;
133 if (screen_info.lfb_height == 0)
134 screen_info.lfb_height = info->height;
135 if (screen_info.orig_video_isVGA == 0)
136 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
137 175
138 return 0; 176#if defined(CONFIG_PCI)
177 /* make sure that the address in the table is actually on a
178 * VGA device's PCI BAR */
179
180 for_each_pci_dev(dev) {
181 int i;
182 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
183 continue;
184 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
185 resource_size_t start, end;
186
187 start = pci_resource_start(dev, i);
188 if (start == 0)
189 break;
190 end = pci_resource_end(dev, i);
191 if (screen_info.lfb_base >= start &&
192 screen_info.lfb_base < end) {
193 found_bar = 1;
194 }
195 }
196 }
197 if (!found_bar)
198 screen_info.lfb_base = 0;
199#endif
200 }
201 if (screen_info.lfb_base) {
202 if (screen_info.lfb_linelength == 0)
203 screen_info.lfb_linelength = info->stride;
204 if (screen_info.lfb_width == 0)
205 screen_info.lfb_width = info->width;
206 if (screen_info.lfb_height == 0)
207 screen_info.lfb_height = info->height;
208 if (screen_info.orig_video_isVGA == 0)
209 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
210 } else {
211 screen_info.lfb_linelength = 0;
212 screen_info.lfb_width = 0;
213 screen_info.lfb_height = 0;
214 screen_info.orig_video_isVGA = 0;
215 return 0;
216 }
217 return 1;
139} 218}
140 219
141static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, 220static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
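Note on the enlarged set_system() above: when the bootloader supplied no framebuffer base, the DMI-provided address is kept only if it falls inside a BAR of some VGA-class PCI device, and the screen_info fields are wiped otherwise. The walk could be factored roughly as below; the helper name is illustrative and, unlike the in-line code, it skips the early break on an empty resource:

	static bool efifb_base_on_vga_bar(resource_size_t addr)
	{
		struct pci_dev *dev = NULL;
		bool found = false;
		int i;

		for_each_pci_dev(dev) {		/* runs to the end, so no ref is leaked */
			if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
				continue;
			for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
				resource_size_t start = pci_resource_start(dev, i);
				resource_size_t end = pci_resource_end(dev, i);

				if (start && addr >= start && addr < end)
					found = true;
			}
		}
		return found;
	}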
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f70f7b0..a31a77ff6f3d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
298 * Set bit to enable graphics DMA. 298 * Set bit to enable graphics DMA.
299 */ 299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0; 301 x &= ~CFG_GRA_ENA_MASK;
302 fbi->active = 0; 302 x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
303 303
304 /* 304 /*
305 * If we are in a pseudo-color mode, we need to enable 305 * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
559 .fb_imageblit = cfb_imageblit, 559 .fb_imageblit = cfb_imageblit,
560}; 560};
561 561
562static int __init pxa168fb_init_mode(struct fb_info *info, 562static int __devinit pxa168fb_init_mode(struct fb_info *info,
563 struct pxa168fb_mach_info *mi) 563 struct pxa168fb_mach_info *mi)
564{ 564{
565 struct pxa168fb_info *fbi = info->par; 565 struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
599 return ret; 599 return ret;
600} 600}
601 601
602static int __init pxa168fb_probe(struct platform_device *pdev) 602static int __devinit pxa168fb_probe(struct platform_device *pdev)
603{ 603{
604 struct pxa168fb_mach_info *mi; 604 struct pxa168fb_mach_info *mi;
605 struct fb_info *info = 0; 605 struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
792 .probe = pxa168fb_probe, 792 .probe = pxa168fb_probe,
793}; 793};
794 794
795static int __devinit pxa168fb_init(void) 795static int __init pxa168fb_init(void)
796{ 796{
797 return platform_driver_register(&pxa168fb_driver); 797 return platform_driver_register(&pxa168fb_driver);
798} 798}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf1727a2b..b52f8e4ef1fd 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1701 break; 1701 break;
1702 1702
1703 case FBIOGET_VBLANK: 1703 case FBIOGET_VBLANK:
1704
1705 memset(&sisvbblank, 0, sizeof(struct fb_vblank));
1706
1704 sisvbblank.count = 0; 1707 sisvbblank.count = 0;
1705 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); 1708 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
1706 1709
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c074e32a..4d553d0b8d7a 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
25{ 25{
26 struct viafb_ioctl_info viainfo; 26 struct viafb_ioctl_info viainfo;
27 27
28 memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
29
28 viainfo.viafb_id = VIAID; 30 viainfo.viafb_id = VIAID;
29 viainfo.vendor_id = PCI_VIA_VENDOR_ID; 31 viainfo.vendor_id = PCI_VIA_VENDOR_ID;
30 32
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677df8c4..24efd8ea41bb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. 213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
214 214
215config PNX4008_WATCHDOG 215config PNX4008_WATCHDOG
216 tristate "PNX4008 Watchdog" 216 tristate "PNX4008 and LPC32XX Watchdog"
217 depends on ARCH_PNX4008 217 depends on ARCH_PNX4008 || ARCH_LPC32XX
218 help 218 help
219 Say Y here if to include support for the watchdog timer 219 Say Y here if to include support for the watchdog timer
220 in the PNX4008 processor. 220 in the PNX4008 or LPC32XX processor.
221 This driver can be built as a module by choosing M. The module 221 This driver can be built as a module by choosing M. The module
222 will be called pnx4008_wdt. 222 will be called pnx4008_wdt.
223 223
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa57303..f31493e65b38 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
305 if (ret) { 305 if (ret) {
306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", 306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
307 ident.identity, ret); 307 ident.identity, ret);
308 return ret; 308 goto out;
309 } 309 }
310 310
311 ret = misc_register(&sbwdog_miscdev); 311 ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", 313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
314 ident.identity, 314 ident.identity,
315 timeout / 1000000, (timeout / 100000) % 10); 315 timeout / 1000000, (timeout / 100000) % 10);
316 } else 316 return 0;
317 free_irq(1, (void *)user_dog); 317 }
318 free_irq(1, (void *)user_dog);
319out:
320 unregister_reboot_notifier(&sbwdog_notifier);
321
318 return ret; 322 return ret;
319} 323}
320 324
321static void __exit sbwdog_exit(void) 325static void __exit sbwdog_exit(void)
322{ 326{
323 misc_deregister(&sbwdog_miscdev); 327 misc_deregister(&sbwdog_miscdev);
328 free_irq(1, (void *)user_dog);
329 unregister_reboot_notifier(&sbwdog_notifier);
324} 330}
325 331
326module_init(sbwdog_init); 332module_init(sbwdog_init);
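Note on the sb_wdog hunks above: both the init failure path and the exit path now undo registrations in the reverse order of setup. A generic sketch of that shape (all names here are placeholders, not the driver's):

	static int __init example_wdt_init(void)
	{
		int ret;

		ret = register_reboot_notifier(&example_notifier);
		if (ret)
			return ret;

		ret = request_irq(1, example_isr, 0, "example_wdt", &example_dev);
		if (ret)
			goto out_notifier;

		ret = misc_register(&example_miscdev);
		if (ret)
			goto out_irq;
		return 0;

	out_irq:
		free_irq(1, &example_dev);
	out_notifier:
		unregister_reboot_notifier(&example_notifier);
		return ret;
	}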
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499c1223..18cdeb4c4258 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
449 wdt->pdev = pdev; 449 wdt->pdev = pdev;
450 mutex_init(&wdt->lock); 450 mutex_init(&wdt->lock);
451 451
452 /* make sure that the watchdog is disabled */
453 ts72xx_wdt_stop(wdt);
454
452 error = misc_register(&ts72xx_wdt_miscdev); 455 error = misc_register(&ts72xx_wdt_miscdev);
453 if (error) { 456 if (error) {
454 dev_err(&pdev->dev, "failed to register miscdev\n"); 457 dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 29bac5118877..d409495876f1 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
755{ 755{
756 int ret = 0; 756 int ret = 0;
757 757
758 blocking_notifier_chain_register(&xenstore_chain, nb); 758 if (xenstored_ready > 0)
759 ret = nb->notifier_call(nb, 0, NULL);
760 else
761 blocking_notifier_chain_register(&xenstore_chain, nb);
759 762
760 return ret; 763 return ret;
761} 764}
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
769 772
770void xenbus_probe(struct work_struct *unused) 773void xenbus_probe(struct work_struct *unused)
771{ 774{
772 BUG_ON((xenstored_ready <= 0)); 775 xenstored_ready = 1;
773 776
774 /* Enumerate devices in xenstore and watch for changes. */ 777 /* Enumerate devices in xenstore and watch for changes. */
775 xenbus_probe_devices(&xenbus_frontend); 778 xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
835 xen_store_evtchn = xen_start_info->store_evtchn; 838 xen_store_evtchn = xen_start_info->store_evtchn;
836 xen_store_mfn = xen_start_info->store_mfn; 839 xen_store_mfn = xen_start_info->store_mfn;
837 xen_store_interface = mfn_to_virt(xen_store_mfn); 840 xen_store_interface = mfn_to_virt(xen_store_mfn);
841 xenstored_ready = 1;
838 } 842 }
839 xenstored_ready = 1;
840 } 843 }
841 844
842 /* Initialize the interface to xenstore. */ 845 /* Initialize the interface to xenstore. */
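
The register_xenstore_notifier() change shows a common registration shape: if the event the caller is waiting for has already happened, invoke the callback synchronously instead of queueing it on the notifier chain, so late registrants are not left waiting forever. A hedged sketch with a hypothetical readiness flag and chain (my_chain and my_ready are not Xen names):

        #include <linux/notifier.h>

        static BLOCKING_NOTIFIER_HEAD(my_chain);
        static int my_ready;    /* set once the subsystem is up */

        int my_register_notifier(struct notifier_block *nb)
        {
                int ret = 0;

                if (my_ready > 0)
                        ret = nb->notifier_call(nb, 0, NULL);   /* fire immediately */
                else
                        blocking_notifier_chain_register(&my_chain, nb);

                return ret;
        }

        void my_mark_ready(void)
        {
                my_ready = 1;
                blocking_notifier_call_chain(&my_chain, 0, NULL);
        }

This shape assumes registration and the readiness transition are serialized, or that the caller can tolerate a duplicated or missed notification if they race.
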
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 16c8a2a98c1b..899f168fd19c 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
292 292
293 fid = filp->private_data; 293 fid = filp->private_data;
294 P9_DPRINTK(P9_DEBUG_VFS, 294 P9_DPRINTK(P9_DEBUG_VFS,
295 "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid); 295 "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
296 inode, filp, fid ? fid->fid : -1);
296 filemap_write_and_wait(inode->i_mapping); 297 filemap_write_and_wait(inode->i_mapping);
297 p9_client_clunk(fid); 298 if (fid)
299 p9_client_clunk(fid);
298 return 0; 300 return 0;
299} 301}
300 302
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index c7c23eab9440..9e670d527646 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
730 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 730 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
731 goto error; 731 goto error;
732 } 732 }
733 dentry->d_op = &v9fs_cached_dentry_operations; 733 if (v9ses->cache)
734 dentry->d_op = &v9fs_cached_dentry_operations;
735 else
736 dentry->d_op = &v9fs_dentry_operations;
734 d_instantiate(dentry, inode); 737 d_instantiate(dentry, inode);
735 err = v9fs_fid_add(dentry, fid); 738 err = v9fs_fid_add(dentry, fid);
736 if (err < 0) 739 if (err < 0)
@@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1128 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); 1131 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
1129 generic_fillattr(dentry->d_inode, stat); 1132 generic_fillattr(dentry->d_inode, stat);
1130 1133
1134 p9stat_free(st);
1131 kfree(st); 1135 kfree(st);
1132 return 0; 1136 return 0;
1133} 1137}
@@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
1489 1493
1490 retval = strnlen(buffer, buflen); 1494 retval = strnlen(buffer, buflen);
1491done: 1495done:
1496 p9stat_free(st);
1492 kfree(st); 1497 kfree(st);
1493 return retval; 1498 return retval;
1494} 1499}
@@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
1942 .unlink = v9fs_vfs_unlink, 1947 .unlink = v9fs_vfs_unlink,
1943 .mkdir = v9fs_vfs_mkdir, 1948 .mkdir = v9fs_vfs_mkdir,
1944 .rmdir = v9fs_vfs_rmdir, 1949 .rmdir = v9fs_vfs_rmdir,
1945 .mknod = v9fs_vfs_mknod_dotl, 1950 .mknod = v9fs_vfs_mknod,
1946 .rename = v9fs_vfs_rename, 1951 .rename = v9fs_vfs_rename,
1947 .getattr = v9fs_vfs_getattr, 1952 .getattr = v9fs_vfs_getattr,
1948 .setattr = v9fs_vfs_setattr, 1953 .setattr = v9fs_vfs_setattr,
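
The two vfs_inode.c fixes above pair p9stat_free() with kfree(): the stat structure returned by the 9p client owns heap-allocated strings, so freeing only the container leaks them. A small sketch of that ownership rule against the 9p client API of this era (the surrounding function and its use of the stat are hypothetical; error handling is trimmed):

        #include <linux/err.h>
        #include <linux/kernel.h>
        #include <linux/slab.h>
        #include <net/9p/9p.h>
        #include <net/9p/client.h>

        static int my_show_name(struct p9_fid *fid)
        {
                struct p9_wstat *st;

                st = p9_client_stat(fid);
                if (IS_ERR(st))
                        return PTR_ERR(st);

                pr_info("9p name: %s\n", st->name);

                p9stat_free(st);        /* frees the strings the stat owns ... */
                kfree(st);              /* ... then the structure itself */
                return 0;
        }
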
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f9311077de68..1d12ba0ed3db 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
122 fid = v9fs_session_init(v9ses, dev_name, data); 122 fid = v9fs_session_init(v9ses, dev_name, data);
123 if (IS_ERR(fid)) { 123 if (IS_ERR(fid)) {
124 retval = PTR_ERR(fid); 124 retval = PTR_ERR(fid);
125 /*
126 * we need to call session_close to tear down some
127 * of the data structure setup by session_init
128 */
125 goto close_session; 129 goto close_session;
126 } 130 }
127 131
@@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
144 retval = -ENOMEM; 148 retval = -ENOMEM;
145 goto release_sb; 149 goto release_sb;
146 } 150 }
147
148 sb->s_root = root; 151 sb->s_root = root;
149 152
150 if (v9fs_proto_dotl(v9ses)) { 153 if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
152 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); 155 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
153 if (IS_ERR(st)) { 156 if (IS_ERR(st)) {
154 retval = PTR_ERR(st); 157 retval = PTR_ERR(st);
155 goto clunk_fid; 158 goto release_sb;
156 } 159 }
157 160
158 v9fs_stat2inode_dotl(st, root->d_inode); 161 v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
162 st = p9_client_stat(fid); 165 st = p9_client_stat(fid);
163 if (IS_ERR(st)) { 166 if (IS_ERR(st)) {
164 retval = PTR_ERR(st); 167 retval = PTR_ERR(st);
165 goto clunk_fid; 168 goto release_sb;
166 } 169 }
167 170
168 root->d_inode->i_ino = v9fs_qid2ino(&st->qid); 171 root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
174 177
175 v9fs_fid_add(root, fid); 178 v9fs_fid_add(root, fid);
176 179
177P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); 180 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
178 simple_set_mnt(mnt, sb); 181 simple_set_mnt(mnt, sb);
179 return 0; 182 return 0;
180 183
181clunk_fid: 184clunk_fid:
182 p9_client_clunk(fid); 185 p9_client_clunk(fid);
183
184close_session: 186close_session:
185 v9fs_session_close(v9ses); 187 v9fs_session_close(v9ses);
186 kfree(v9ses); 188 kfree(v9ses);
187 return retval; 189 return retval;
188
189release_sb: 190release_sb:
191 /*
192 * we will do the session_close and root dentry release
193 * in the below call. But we need to clunk fid, because we haven't
194 * attached the fid to dentry so it won't get clunked
195 * automatically.
196 */
197 p9_client_clunk(fid);
190 deactivate_locked_super(sb); 198 deactivate_locked_super(sb);
191 return retval; 199 return retval;
192} 200}
diff --git a/fs/aio.c b/fs/aio.c
index 3006b5bc33d6..250b0a73c8a8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
712 */ 712 */
713 ret = retry(iocb); 713 ret = retry(iocb);
714 714
715 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) 715 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
716 /*
717 * There's no easy way to restart the syscall since other AIO's
718 * may be already running. Just fail this IO with EINTR.
719 */
720 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
721 ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
722 ret = -EINTR;
716 aio_complete(iocb, ret, 0); 723 aio_complete(iocb, ret, 0);
724 }
717out: 725out:
718 spin_lock_irq(&ctx->ctx_lock); 726 spin_lock_irq(&ctx->ctx_lock);
719 727
@@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr,
1659 if (unlikely(nr < 0)) 1667 if (unlikely(nr < 0))
1660 return -EINVAL; 1668 return -EINVAL;
1661 1669
1670 if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
1671 nr = LONG_MAX/sizeof(*iocbpp);
1672
1662 if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) 1673 if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1663 return -EFAULT; 1674 return -EFAULT;
1664 1675
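
The do_io_submit() hunk guards the multiplication nr * sizeof(*iocbpp): with a huge nr the product can wrap, so access_ok() would be asked to validate a tiny (or zero) range while the loop that follows still walks nr entries of unchecked memory. Clamping nr so the product cannot exceed LONG_MAX closes that hole. A condensed sketch of the same guard around a hypothetical array-of-pointers helper (my_submit and its types are illustrative):

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/uaccess.h>

        static long my_submit(long nr, void __user * __user *ptrs)
        {
                if (nr < 0)
                        return -EINVAL;

                /* keep nr * sizeof(*ptrs) from overflowing before it is
                   handed to access_ok()/copy_from_user() */
                if (nr > LONG_MAX / sizeof(*ptrs))
                        nr = LONG_MAX / sizeof(*ptrs);

                if (!access_ok(VERIFY_READ, ptrs, nr * sizeof(*ptrs)))
                        return -EFAULT;

                /* ... copy and process the nr entries ... */
                return nr;
        }
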
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a7528b913936..fd0cc0bf9a40 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
724{ 724{
725 int err = register_filesystem(&bm_fs_type); 725 int err = register_filesystem(&bm_fs_type);
726 if (!err) { 726 if (!err) {
727 err = register_binfmt(&misc_format); 727 err = insert_binfmt(&misc_format);
728 if (err) 728 if (err)
729 unregister_filesystem(&bm_fs_type); 729 unregister_filesystem(&bm_fs_type);
730 } 730 }
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 612a5c38d3c1..4d0ff5ee27b8 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio)
413 413
414 /* Allocate kernel buffer for protection data */ 414 /* Allocate kernel buffer for protection data */
415 len = sectors * blk_integrity_tuple_size(bi); 415 len = sectors * blk_integrity_tuple_size(bi);
416 buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); 416 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
417 if (unlikely(buf == NULL)) { 417 if (unlikely(buf == NULL)) {
418 printk(KERN_ERR "could not allocate integrity buffer\n"); 418 printk(KERN_ERR "could not allocate integrity buffer\n");
419 return -EIO; 419 return -ENOMEM;
420 } 420 }
421 421
422 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 422 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
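
The bio-integrity hunk drops __GFP_NOFAIL, which can block indefinitely under memory pressure and is discouraged for ordinary allocations, and instead handles the failure, returning -ENOMEM rather than the misleading -EIO. A sketch of the resulting shape with a hypothetical buffer allocation:

        #include <linux/kernel.h>
        #include <linux/slab.h>

        static int my_alloc_buffer(unsigned int len, void **out)
        {
                void *buf;

                /* no __GFP_NOFAIL: accept that the allocation may fail
                   and report it honestly */
                buf = kmalloc(len, GFP_NOIO);
                if (unlikely(!buf)) {
                        printk(KERN_ERR "could not allocate buffer\n");
                        return -ENOMEM;
                }

                *out = buf;
                return 0;
        }
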
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index bc87b9c1d27e..0fcd2640c23f 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,6 +3,7 @@ config CEPH_FS
3 depends on INET && EXPERIMENTAL 3 depends on INET && EXPERIMENTAL
4 select LIBCRC32C 4 select LIBCRC32C
5 select CRYPTO_AES 5 select CRYPTO_AES
6 select CRYPTO
6 help 7 help
7 Choose Y or M here to include support for mounting the 8 Choose Y or M here to include support for mounting the
8 experimental Ceph distributed file system. Ceph is an extremely 9 experimental Ceph distributed file system. Ceph is an extremely
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4cfce1ee31fa..efbc604001c8 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -411,8 +411,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
411 if (i_size < page_off + len) 411 if (i_size < page_off + len)
412 len = i_size - page_off; 412 len = i_size - page_off;
413 413
414 dout("writepage %p page %p index %lu on %llu~%u\n", 414 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
415 inode, page, page->index, page_off, len); 415 inode, page, page->index, page_off, len, snapc);
416 416
417 writeback_stat = atomic_long_inc_return(&client->writeback_count); 417 writeback_stat = atomic_long_inc_return(&client->writeback_count);
418 if (writeback_stat > 418 if (writeback_stat >
@@ -766,7 +766,8 @@ get_more_pages:
766 /* ok */ 766 /* ok */
767 if (locked_pages == 0) { 767 if (locked_pages == 0) {
768 /* prepare async write request */ 768 /* prepare async write request */
769 offset = page->index << PAGE_CACHE_SHIFT; 769 offset = (unsigned long long)page->index
770 << PAGE_CACHE_SHIFT;
770 len = wsize; 771 len = wsize;
771 req = ceph_osdc_new_request(&client->osdc, 772 req = ceph_osdc_new_request(&client->osdc,
772 &ci->i_layout, 773 &ci->i_layout,
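
The writepages hunk widens page->index to 64 bits before shifting by PAGE_CACHE_SHIFT. On 32-bit kernels the shift would otherwise be evaluated in 32 bits, so any file offset at or above 4 GiB silently wraps. A two-line illustration (page_to_offset is a hypothetical helper, not a ceph function):

        #include <linux/mm.h>
        #include <linux/types.h>

        static inline u64 page_to_offset(struct page *page)
        {
                /* widen before shifting: an unsigned long index shifted by
                   PAGE_SHIFT truncates offsets >= 4 GiB on 32-bit builds */
                return (u64)page->index << PAGE_SHIFT;
        }
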
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a2069b6680ae..73c153092f72 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -814,7 +814,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
814 used |= CEPH_CAP_PIN; 814 used |= CEPH_CAP_PIN;
815 if (ci->i_rd_ref) 815 if (ci->i_rd_ref)
816 used |= CEPH_CAP_FILE_RD; 816 used |= CEPH_CAP_FILE_RD;
817 if (ci->i_rdcache_ref || ci->i_rdcache_gen) 817 if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
818 used |= CEPH_CAP_FILE_CACHE; 818 used |= CEPH_CAP_FILE_CACHE;
819 if (ci->i_wr_ref) 819 if (ci->i_wr_ref)
820 used |= CEPH_CAP_FILE_WR; 820 used |= CEPH_CAP_FILE_WR;
@@ -1195,10 +1195,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1195 * asynchronously back to the MDS once sync writes complete and dirty 1195 * asynchronously back to the MDS once sync writes complete and dirty
1196 * data is written out. 1196 * data is written out.
1197 * 1197 *
1198 * Unless @again is true, skip cap_snaps that were already sent to
1199 * the MDS (i.e., during this session).
1200 *
1198 * Called under i_lock. Takes s_mutex as needed. 1201 * Called under i_lock. Takes s_mutex as needed.
1199 */ 1202 */
1200void __ceph_flush_snaps(struct ceph_inode_info *ci, 1203void __ceph_flush_snaps(struct ceph_inode_info *ci,
1201 struct ceph_mds_session **psession) 1204 struct ceph_mds_session **psession,
1205 int again)
1202 __releases(ci->vfs_inode->i_lock) 1206 __releases(ci->vfs_inode->i_lock)
1203 __acquires(ci->vfs_inode->i_lock) 1207 __acquires(ci->vfs_inode->i_lock)
1204{ 1208{
@@ -1227,7 +1231,7 @@ retry:
1227 * pages to be written out. 1231 * pages to be written out.
1228 */ 1232 */
1229 if (capsnap->dirty_pages || capsnap->writing) 1233 if (capsnap->dirty_pages || capsnap->writing)
1230 continue; 1234 break;
1231 1235
1232 /* 1236 /*
1233 * if cap writeback already occurred, we should have dropped 1237 * if cap writeback already occurred, we should have dropped
@@ -1240,6 +1244,13 @@ retry:
1240 dout("no auth cap (migrating?), doing nothing\n"); 1244 dout("no auth cap (migrating?), doing nothing\n");
1241 goto out; 1245 goto out;
1242 } 1246 }
1247
1248 /* only flush each capsnap once */
1249 if (!again && !list_empty(&capsnap->flushing_item)) {
1250 dout("already flushed %p, skipping\n", capsnap);
1251 continue;
1252 }
1253
1243 mds = ci->i_auth_cap->session->s_mds; 1254 mds = ci->i_auth_cap->session->s_mds;
1244 mseq = ci->i_auth_cap->mseq; 1255 mseq = ci->i_auth_cap->mseq;
1245 1256
@@ -1276,8 +1287,8 @@ retry:
1276 &session->s_cap_snaps_flushing); 1287 &session->s_cap_snaps_flushing);
1277 spin_unlock(&inode->i_lock); 1288 spin_unlock(&inode->i_lock);
1278 1289
1279 dout("flush_snaps %p cap_snap %p follows %lld size %llu\n", 1290 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1280 inode, capsnap, next_follows, capsnap->size); 1291 inode, capsnap, capsnap->follows, capsnap->flush_tid);
1281 send_cap_msg(session, ceph_vino(inode).ino, 0, 1292 send_cap_msg(session, ceph_vino(inode).ino, 0,
1282 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, 1293 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1283 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, 1294 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1314,7 +1325,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
1314 struct inode *inode = &ci->vfs_inode; 1325 struct inode *inode = &ci->vfs_inode;
1315 1326
1316 spin_lock(&inode->i_lock); 1327 spin_lock(&inode->i_lock);
1317 __ceph_flush_snaps(ci, NULL); 1328 __ceph_flush_snaps(ci, NULL, 0);
1318 spin_unlock(&inode->i_lock); 1329 spin_unlock(&inode->i_lock);
1319} 1330}
1320 1331
@@ -1477,7 +1488,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1477 1488
1478 /* flush snaps first time around only */ 1489 /* flush snaps first time around only */
1479 if (!list_empty(&ci->i_cap_snaps)) 1490 if (!list_empty(&ci->i_cap_snaps))
1480 __ceph_flush_snaps(ci, &session); 1491 __ceph_flush_snaps(ci, &session, 0);
1481 goto retry_locked; 1492 goto retry_locked;
1482retry: 1493retry:
1483 spin_lock(&inode->i_lock); 1494 spin_lock(&inode->i_lock);
@@ -1894,7 +1905,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1894 if (cap && cap->session == session) { 1905 if (cap && cap->session == session) {
1895 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, 1906 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1896 cap, capsnap); 1907 cap, capsnap);
1897 __ceph_flush_snaps(ci, &session); 1908 __ceph_flush_snaps(ci, &session, 1);
1898 } else { 1909 } else {
1899 pr_err("%p auth cap %p not mds%d ???\n", inode, 1910 pr_err("%p auth cap %p not mds%d ???\n", inode,
1900 cap, session->s_mds); 1911 cap, session->s_mds);
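
The "only flush each capsnap once" test above leans on a standard list_head idiom: an element removed with list_del_init() (or freshly set up with INIT_LIST_HEAD()) points at itself whenever it is off-list, so list_empty(&obj->item) doubles as an "is it currently queued?" check. A small sketch of that idiom with a hypothetical work item (caller is assumed to provide locking):

        #include <linux/list.h>

        struct my_item {
                struct list_head link;
                /* ... payload ... */
        };

        static LIST_HEAD(my_queue);

        static void my_queue_once(struct my_item *it)
        {
                /* valid only if 'it' is always removed with list_del_init(),
                   never plain list_del() */
                if (!list_empty(&it->link))
                        return;                 /* already queued, skip */
                list_add_tail(&it->link, &my_queue);
        }

        static void my_dequeue(struct my_item *it)
        {
                list_del_init(&it->link);       /* keeps the membership test valid */
        }
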
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6e4f43ff23ec..a1986eb52045 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1021,11 +1021,15 @@ out_touch:
1021static void ceph_dentry_release(struct dentry *dentry) 1021static void ceph_dentry_release(struct dentry *dentry)
1022{ 1022{
1023 struct ceph_dentry_info *di = ceph_dentry(dentry); 1023 struct ceph_dentry_info *di = ceph_dentry(dentry);
1024 struct inode *parent_inode = dentry->d_parent->d_inode; 1024 struct inode *parent_inode = NULL;
1025 u64 snapid = ceph_snap(parent_inode); 1025 u64 snapid = CEPH_NOSNAP;
1026 1026
1027 if (!IS_ROOT(dentry)) {
1028 parent_inode = dentry->d_parent->d_inode;
1029 if (parent_inode)
1030 snapid = ceph_snap(parent_inode);
1031 }
1027 dout("dentry_release %p parent %p\n", dentry, parent_inode); 1032 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1028
1029 if (parent_inode && snapid != CEPH_SNAPDIR) { 1033 if (parent_inode && snapid != CEPH_SNAPDIR) {
1030 struct ceph_inode_info *ci = ceph_inode(parent_inode); 1034 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1031 1035
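
ceph_dentry_release() can run for the root dentry, whose d_parent points back at itself, so dereferencing the parent unconditionally was the bug. The guard reduces to a small reusable pattern, sketched here with a hypothetical helper:

        #include <linux/dcache.h>
        #include <linux/fs.h>

        static struct inode *my_parent_inode(struct dentry *dentry)
        {
                /* the root dentry is its own parent; treat it as having no
                   parent instead of blindly using d_parent->d_inode */
                if (IS_ROOT(dentry))
                        return NULL;
                return dentry->d_parent->d_inode;       /* may still be NULL */
        }
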
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e7cca414da03..62377ec37edf 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -845,7 +845,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
845 * the caller) if we fail. 845 * the caller) if we fail.
846 */ 846 */
847static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 847static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
848 bool *prehash) 848 bool *prehash, bool set_offset)
849{ 849{
850 struct dentry *realdn; 850 struct dentry *realdn;
851 851
@@ -877,7 +877,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
877 } 877 }
878 if ((!prehash || *prehash) && d_unhashed(dn)) 878 if ((!prehash || *prehash) && d_unhashed(dn))
879 d_rehash(dn); 879 d_rehash(dn);
880 ceph_set_dentry_offset(dn); 880 if (set_offset)
881 ceph_set_dentry_offset(dn);
881out: 882out:
882 return dn; 883 return dn;
883} 884}
@@ -1062,7 +1063,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1062 d_delete(dn); 1063 d_delete(dn);
1063 goto done; 1064 goto done;
1064 } 1065 }
1065 dn = splice_dentry(dn, in, &have_lease); 1066 dn = splice_dentry(dn, in, &have_lease, true);
1066 if (IS_ERR(dn)) { 1067 if (IS_ERR(dn)) {
1067 err = PTR_ERR(dn); 1068 err = PTR_ERR(dn);
1068 goto done; 1069 goto done;
@@ -1105,7 +1106,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1105 goto done; 1106 goto done;
1106 } 1107 }
1107 dout(" linking snapped dir %p to dn %p\n", in, dn); 1108 dout(" linking snapped dir %p to dn %p\n", in, dn);
1108 dn = splice_dentry(dn, in, NULL); 1109 dn = splice_dentry(dn, in, NULL, true);
1109 if (IS_ERR(dn)) { 1110 if (IS_ERR(dn)) {
1110 err = PTR_ERR(dn); 1111 err = PTR_ERR(dn);
1111 goto done; 1112 goto done;
@@ -1237,7 +1238,7 @@ retry_lookup:
1237 err = PTR_ERR(in); 1238 err = PTR_ERR(in);
1238 goto out; 1239 goto out;
1239 } 1240 }
1240 dn = splice_dentry(dn, in, NULL); 1241 dn = splice_dentry(dn, in, NULL, false);
1241 if (IS_ERR(dn)) 1242 if (IS_ERR(dn))
1242 dn = NULL; 1243 dn = NULL;
1243 } 1244 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f091b1351786..fad95f8f2608 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2374,6 +2374,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2374 num_fcntl_locks, 2374 num_fcntl_locks,
2375 num_flock_locks); 2375 num_flock_locks);
2376 unlock_kernel(); 2376 unlock_kernel();
2377 } else {
2378 err = ceph_pagelist_append(pagelist, &rec, reclen);
2377 } 2379 }
2378 2380
2379out_free: 2381out_free:
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
index b6859f47d364..46a368b6dce5 100644
--- a/fs/ceph/pagelist.c
+++ b/fs/ceph/pagelist.c
@@ -5,10 +5,18 @@
5 5
6#include "pagelist.h" 6#include "pagelist.h"
7 7
8static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
9{
10 struct page *page = list_entry(pl->head.prev, struct page,
11 lru);
12 kunmap(page);
13}
14
8int ceph_pagelist_release(struct ceph_pagelist *pl) 15int ceph_pagelist_release(struct ceph_pagelist *pl)
9{ 16{
10 if (pl->mapped_tail) 17 if (pl->mapped_tail)
11 kunmap(pl->mapped_tail); 18 ceph_pagelist_unmap_tail(pl);
19
12 while (!list_empty(&pl->head)) { 20 while (!list_empty(&pl->head)) {
13 struct page *page = list_first_entry(&pl->head, struct page, 21 struct page *page = list_first_entry(&pl->head, struct page,
14 lru); 22 lru);
@@ -26,7 +34,7 @@ static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
26 pl->room += PAGE_SIZE; 34 pl->room += PAGE_SIZE;
27 list_add_tail(&page->lru, &pl->head); 35 list_add_tail(&page->lru, &pl->head);
28 if (pl->mapped_tail) 36 if (pl->mapped_tail)
29 kunmap(pl->mapped_tail); 37 ceph_pagelist_unmap_tail(pl);
30 pl->mapped_tail = kmap(page); 38 pl->mapped_tail = kmap(page);
31 return 0; 39 return 0;
32} 40}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4868b9dcac5a..190b6c4a6f2b 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -119,6 +119,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
119 INIT_LIST_HEAD(&realm->children); 119 INIT_LIST_HEAD(&realm->children);
120 INIT_LIST_HEAD(&realm->child_item); 120 INIT_LIST_HEAD(&realm->child_item);
121 INIT_LIST_HEAD(&realm->empty_item); 121 INIT_LIST_HEAD(&realm->empty_item);
122 INIT_LIST_HEAD(&realm->dirty_item);
122 INIT_LIST_HEAD(&realm->inodes_with_caps); 123 INIT_LIST_HEAD(&realm->inodes_with_caps);
123 spin_lock_init(&realm->inodes_with_caps_lock); 124 spin_lock_init(&realm->inodes_with_caps_lock);
124 __insert_snap_realm(&mdsc->snap_realms, realm); 125 __insert_snap_realm(&mdsc->snap_realms, realm);
@@ -467,7 +468,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
467 INIT_LIST_HEAD(&capsnap->ci_item); 468 INIT_LIST_HEAD(&capsnap->ci_item);
468 INIT_LIST_HEAD(&capsnap->flushing_item); 469 INIT_LIST_HEAD(&capsnap->flushing_item);
469 470
470 capsnap->follows = snapc->seq - 1; 471 capsnap->follows = snapc->seq;
471 capsnap->issued = __ceph_caps_issued(ci, NULL); 472 capsnap->issued = __ceph_caps_issued(ci, NULL);
472 capsnap->dirty = dirty; 473 capsnap->dirty = dirty;
473 474
@@ -604,6 +605,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
604 struct ceph_snap_realm *realm; 605 struct ceph_snap_realm *realm;
605 int invalidate = 0; 606 int invalidate = 0;
606 int err = -ENOMEM; 607 int err = -ENOMEM;
608 LIST_HEAD(dirty_realms);
607 609
608 dout("update_snap_trace deletion=%d\n", deletion); 610 dout("update_snap_trace deletion=%d\n", deletion);
609more: 611more:
@@ -626,24 +628,6 @@ more:
626 } 628 }
627 } 629 }
628 630
629 if (le64_to_cpu(ri->seq) > realm->seq) {
630 dout("update_snap_trace updating %llx %p %lld -> %lld\n",
631 realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
632 /*
633 * if the realm seq has changed, queue a cap_snap for every
634 * inode with open caps. we do this _before_ we update
635 * the realm info so that we prepare for writeback under the
636 * _previous_ snap context.
637 *
638 * ...unless it's a snap deletion!
639 */
640 if (!deletion)
641 queue_realm_cap_snaps(realm);
642 } else {
643 dout("update_snap_trace %llx %p seq %lld unchanged\n",
644 realm->ino, realm, realm->seq);
645 }
646
647 /* ensure the parent is correct */ 631 /* ensure the parent is correct */
648 err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); 632 err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
649 if (err < 0) 633 if (err < 0)
@@ -651,6 +635,8 @@ more:
651 invalidate += err; 635 invalidate += err;
652 636
653 if (le64_to_cpu(ri->seq) > realm->seq) { 637 if (le64_to_cpu(ri->seq) > realm->seq) {
638 dout("update_snap_trace updating %llx %p %lld -> %lld\n",
639 realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
654 /* update realm parameters, snap lists */ 640 /* update realm parameters, snap lists */
655 realm->seq = le64_to_cpu(ri->seq); 641 realm->seq = le64_to_cpu(ri->seq);
656 realm->created = le64_to_cpu(ri->created); 642 realm->created = le64_to_cpu(ri->created);
@@ -668,9 +654,17 @@ more:
668 if (err < 0) 654 if (err < 0)
669 goto fail; 655 goto fail;
670 656
657 /* queue realm for cap_snap creation */
658 list_add(&realm->dirty_item, &dirty_realms);
659
671 invalidate = 1; 660 invalidate = 1;
672 } else if (!realm->cached_context) { 661 } else if (!realm->cached_context) {
662 dout("update_snap_trace %llx %p seq %lld new\n",
663 realm->ino, realm, realm->seq);
673 invalidate = 1; 664 invalidate = 1;
665 } else {
666 dout("update_snap_trace %llx %p seq %lld unchanged\n",
667 realm->ino, realm, realm->seq);
674 } 668 }
675 669
676 dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, 670 dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -683,6 +677,14 @@ more:
683 if (invalidate) 677 if (invalidate)
684 rebuild_snap_realms(realm); 678 rebuild_snap_realms(realm);
685 679
680 /*
681 * queue cap snaps _after_ we've built the new snap contexts,
682 * so that i_head_snapc can be set appropriately.
683 */
684 list_for_each_entry(realm, &dirty_realms, dirty_item) {
685 queue_realm_cap_snaps(realm);
686 }
687
686 __cleanup_empty_realms(mdsc); 688 __cleanup_empty_realms(mdsc);
687 return 0; 689 return 0;
688 690
@@ -715,7 +717,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
715 igrab(inode); 717 igrab(inode);
716 spin_unlock(&mdsc->snap_flush_lock); 718 spin_unlock(&mdsc->snap_flush_lock);
717 spin_lock(&inode->i_lock); 719 spin_lock(&inode->i_lock);
718 __ceph_flush_snaps(ci, &session); 720 __ceph_flush_snaps(ci, &session, 0);
719 spin_unlock(&inode->i_lock); 721 spin_unlock(&inode->i_lock);
720 iput(inode); 722 iput(inode);
721 spin_lock(&mdsc->snap_flush_lock); 723 spin_lock(&mdsc->snap_flush_lock);
@@ -816,6 +818,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
816 }; 818 };
817 struct inode *inode = ceph_find_inode(sb, vino); 819 struct inode *inode = ceph_find_inode(sb, vino);
818 struct ceph_inode_info *ci; 820 struct ceph_inode_info *ci;
821 struct ceph_snap_realm *oldrealm;
819 822
820 if (!inode) 823 if (!inode)
821 continue; 824 continue;
@@ -841,18 +844,19 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
841 dout(" will move %p to split realm %llx %p\n", 844 dout(" will move %p to split realm %llx %p\n",
842 inode, realm->ino, realm); 845 inode, realm->ino, realm);
843 /* 846 /*
844 * Remove the inode from the realm's inode 847 * Move the inode to the new realm
845 * list, but don't add it to the new realm
846 * yet. We don't want the cap_snap to be
847 * queued (again) by ceph_update_snap_trace()
848 * below. Queue it _now_, under the old context.
849 */ 848 */
850 spin_lock(&realm->inodes_with_caps_lock); 849 spin_lock(&realm->inodes_with_caps_lock);
851 list_del_init(&ci->i_snap_realm_item); 850 list_del_init(&ci->i_snap_realm_item);
851 list_add(&ci->i_snap_realm_item,
852 &realm->inodes_with_caps);
853 oldrealm = ci->i_snap_realm;
854 ci->i_snap_realm = realm;
852 spin_unlock(&realm->inodes_with_caps_lock); 855 spin_unlock(&realm->inodes_with_caps_lock);
853 spin_unlock(&inode->i_lock); 856 spin_unlock(&inode->i_lock);
854 857
855 ceph_queue_cap_snap(ci); 858 ceph_get_snap_realm(mdsc, realm);
859 ceph_put_snap_realm(mdsc, oldrealm);
856 860
857 iput(inode); 861 iput(inode);
858 continue; 862 continue;
@@ -880,43 +884,9 @@ skip_inode:
880 ceph_update_snap_trace(mdsc, p, e, 884 ceph_update_snap_trace(mdsc, p, e,
881 op == CEPH_SNAP_OP_DESTROY); 885 op == CEPH_SNAP_OP_DESTROY);
882 886
883 if (op == CEPH_SNAP_OP_SPLIT) { 887 if (op == CEPH_SNAP_OP_SPLIT)
884 /*
885 * ok, _now_ add the inodes into the new realm.
886 */
887 for (i = 0; i < num_split_inos; i++) {
888 struct ceph_vino vino = {
889 .ino = le64_to_cpu(split_inos[i]),
890 .snap = CEPH_NOSNAP,
891 };
892 struct inode *inode = ceph_find_inode(sb, vino);
893 struct ceph_inode_info *ci;
894
895 if (!inode)
896 continue;
897 ci = ceph_inode(inode);
898 spin_lock(&inode->i_lock);
899 if (list_empty(&ci->i_snap_realm_item)) {
900 struct ceph_snap_realm *oldrealm =
901 ci->i_snap_realm;
902
903 dout(" moving %p to split realm %llx %p\n",
904 inode, realm->ino, realm);
905 spin_lock(&realm->inodes_with_caps_lock);
906 list_add(&ci->i_snap_realm_item,
907 &realm->inodes_with_caps);
908 ci->i_snap_realm = realm;
909 spin_unlock(&realm->inodes_with_caps_lock);
910 ceph_get_snap_realm(mdsc, realm);
911 ceph_put_snap_realm(mdsc, oldrealm);
912 }
913 spin_unlock(&inode->i_lock);
914 iput(inode);
915 }
916
917 /* we took a reference when we created the realm, above */ 888 /* we took a reference when we created the realm, above */
918 ceph_put_snap_realm(mdsc, realm); 889 ceph_put_snap_realm(mdsc, realm);
919 }
920 890
921 __cleanup_empty_realms(mdsc); 891 __cleanup_empty_realms(mdsc);
922 892
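
The snap.c rework is an instance of the collect-then-process pattern: while walking the snap trace it only links each updated realm onto a local list (LIST_HEAD(dirty_realms)), then queues the cap_snaps in a second pass once every snap context has been rebuilt, so the second pass sees fully consistent state. A generic sketch of the pattern (the my_* names are placeholders):

        #include <linux/list.h>

        struct my_realm {
                struct list_head item;          /* membership in the global list */
                struct list_head dirty_item;    /* membership in the local batch */
                /* ... */
        };

        static void my_update_all(struct list_head *realms)
        {
                struct my_realm *r;
                LIST_HEAD(dirty);               /* on-stack batch of follow-up work */

                /* pass 1: update state; just remember which items need follow-up */
                list_for_each_entry(r, realms, item) {
                        /* ... update r ... */
                        list_add(&r->dirty_item, &dirty);
                }

                /* pass 2: act on the batch once every item is in its final state */
                list_for_each_entry(r, &dirty, dirty_item) {
                        /* ... queue work for r ... */
                }
        }
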
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c33897ae5725..b87638e84c4b 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -690,6 +690,8 @@ struct ceph_snap_realm {
690 690
691 struct list_head empty_item; /* if i have ref==0 */ 691 struct list_head empty_item; /* if i have ref==0 */
692 692
693 struct list_head dirty_item; /* if realm needs new context */
694
693 /* the current set of snaps for this realm */ 695 /* the current set of snaps for this realm */
694 struct ceph_snap_context *cached_context; 696 struct ceph_snap_context *cached_context;
695 697
@@ -826,7 +828,8 @@ extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
826extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 828extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
827 struct ceph_snap_context *snapc); 829 struct ceph_snap_context *snapc);
828extern void __ceph_flush_snaps(struct ceph_inode_info *ci, 830extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
829 struct ceph_mds_session **psession); 831 struct ceph_mds_session **psession,
832 int again);
830extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, 833extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
831 struct ceph_mds_session *session); 834 struct ceph_mds_session *session);
832extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); 835extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index f80a4f25123c..143d393881cb 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = {
40#endif 40#endif
41 /* permit direct mmap, for read, write or exec */ 41 /* permit direct mmap, for read, write or exec */
42 BDI_CAP_MAP_DIRECT | 42 BDI_CAP_MAP_DIRECT |
43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), 43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
44 /* no writeback happens */
45 BDI_CAP_NO_ACCT_AND_WRITEBACK),
44}; 46};
45 47
46static struct kobj_map *cdev_map; 48static struct kobj_map *cdev_map;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0da1debd499d..917b7d449bb2 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,8 +2,6 @@ config CIFS
2 tristate "CIFS support (advanced network filesystem, SMBFS successor)" 2 tristate "CIFS support (advanced network filesystem, SMBFS successor)"
3 depends on INET 3 depends on INET
4 select NLS 4 select NLS
5 select CRYPTO_MD5
6 select CRYPTO_ARC4
7 help 5 help
8 This is the client VFS module for the Common Internet File System 6 This is the client VFS module for the Common Internet File System
9 (CIFS) protocol which is the successor to the Server Message Block 7 (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 21f0fbd86989..cfd1ce34e0bc 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
597 if (compare_oid(oid, oidlen, MSKRB5_OID, 597 if (compare_oid(oid, oidlen, MSKRB5_OID,
598 MSKRB5_OID_LEN)) 598 MSKRB5_OID_LEN))
599 server->sec_mskerberos = true; 599 server->sec_mskerberos = true;
600 if (compare_oid(oid, oidlen, KRB5U2U_OID, 600 else if (compare_oid(oid, oidlen, KRB5U2U_OID,
601 KRB5U2U_OID_LEN)) 601 KRB5U2U_OID_LEN))
602 server->sec_kerberosu2u = true; 602 server->sec_kerberosu2u = true;
603 if (compare_oid(oid, oidlen, KRB5_OID, 603 else if (compare_oid(oid, oidlen, KRB5_OID,
604 KRB5_OID_LEN)) 604 KRB5_OID_LEN))
605 server->sec_kerberos = true; 605 server->sec_kerberos = true;
606 if (compare_oid(oid, oidlen, NTLMSSP_OID, 606 else if (compare_oid(oid, oidlen, NTLMSSP_OID,
607 NTLMSSP_OID_LEN)) 607 NTLMSSP_OID_LEN))
608 server->sec_ntlmssp = true; 608 server->sec_ntlmssp = true;
609 609
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 709f2296bdb4..35042d8f7338 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,7 +27,6 @@
27#include "md5.h" 27#include "md5.h"
28#include "cifs_unicode.h" 28#include "cifs_unicode.h"
29#include "cifsproto.h" 29#include "cifsproto.h"
30#include "ntlmssp.h"
31#include <linux/ctype.h> 30#include <linux/ctype.h>
32#include <linux/random.h> 31#include <linux/random.h>
33 32
@@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
43 unsigned char *p24); 42 unsigned char *p24);
44 43
45static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, 44static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
46 struct TCP_Server_Info *server, char *signature) 45 const struct mac_key *key, char *signature)
47{ 46{
48 int rc; 47 struct MD5Context context;
49 48
50 if (cifs_pdu == NULL || server == NULL || signature == NULL) 49 if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
51 return -EINVAL; 50 return -EINVAL;
52 51
53 if (!server->ntlmssp.sdescmd5) { 52 cifs_MD5_init(&context);
54 cERROR(1, 53 cifs_MD5_update(&context, (char *)&key->data, key->len);
55 "cifs_calculate_signature: can't generate signature\n"); 54 cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
56 return -1;
57 }
58
59 rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
60 if (rc) {
61 cERROR(1, "cifs_calculate_signature: oould not init md5\n");
62 return rc;
63 }
64
65 if (server->secType == RawNTLMSSP)
66 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
67 server->session_key.data.ntlmv2.key,
68 CIFS_NTLMV2_SESSKEY_SIZE);
69 else
70 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
71 (char *)&server->session_key.data,
72 server->session_key.len);
73
74 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
75 cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
76 55
77 rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); 56 cifs_MD5_final(signature, &context);
78 57 return 0;
79 return rc;
80} 58}
81 59
82
83int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, 60int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
84 __u32 *pexpected_response_sequence_number) 61 __u32 *pexpected_response_sequence_number)
85{ 62{
@@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
101 server->sequence_number++; 78 server->sequence_number++;
102 spin_unlock(&GlobalMid_Lock); 79 spin_unlock(&GlobalMid_Lock);
103 80
104 rc = cifs_calculate_signature(cifs_pdu, server, smb_signature); 81 rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
82 smb_signature);
105 if (rc) 83 if (rc)
106 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 84 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
107 else 85 else
@@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
111} 89}
112 90
113static int cifs_calc_signature2(const struct kvec *iov, int n_vec, 91static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
114 struct TCP_Server_Info *server, char *signature) 92 const struct mac_key *key, char *signature)
115{ 93{
94 struct MD5Context context;
116 int i; 95 int i;
117 int rc;
118 96
119 if (iov == NULL || server == NULL || signature == NULL) 97 if ((iov == NULL) || (signature == NULL) || (key == NULL))
120 return -EINVAL; 98 return -EINVAL;
121 99
122 if (!server->ntlmssp.sdescmd5) { 100 cifs_MD5_init(&context);
123 cERROR(1, "cifs_calc_signature2: can't generate signature\n"); 101 cifs_MD5_update(&context, (char *)&key->data, key->len);
124 return -1;
125 }
126
127 rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
128 if (rc) {
129 cERROR(1, "cifs_calc_signature2: oould not init md5\n");
130 return rc;
131 }
132
133 if (server->secType == RawNTLMSSP)
134 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
135 server->session_key.data.ntlmv2.key,
136 CIFS_NTLMV2_SESSKEY_SIZE);
137 else
138 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
139 (char *)&server->session_key.data,
140 server->session_key.len);
141
142 for (i = 0; i < n_vec; i++) { 102 for (i = 0; i < n_vec; i++) {
143 if (iov[i].iov_len == 0) 103 if (iov[i].iov_len == 0)
144 continue; 104 continue;
145 if (iov[i].iov_base == NULL) { 105 if (iov[i].iov_base == NULL) {
146 cERROR(1, "cifs_calc_signature2: null iovec entry"); 106 cERROR(1, "null iovec entry");
147 return -EIO; 107 return -EIO;
148 } 108 }
149 /* The first entry includes a length field (which does not get 109 /* The first entry includes a length field (which does not get
@@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
151 if (i == 0) { 111 if (i == 0) {
152 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ 112 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
153 break; /* nothing to sign or corrupt header */ 113 break; /* nothing to sign or corrupt header */
154 crypto_shash_update(&server->ntlmssp.sdescmd5->shash, 114 cifs_MD5_update(&context, iov[0].iov_base+4,
155 iov[i].iov_base + 4, iov[i].iov_len - 4); 115 iov[0].iov_len-4);
156 } else 116 } else
157 crypto_shash_update(&server->ntlmssp.sdescmd5->shash, 117 cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
158 iov[i].iov_base, iov[i].iov_len);
159 } 118 }
160 119
161 rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); 120 cifs_MD5_final(signature, &context);
162 121
163 return rc; 122 return 0;
164} 123}
165 124
125
166int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, 126int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
167 __u32 *pexpected_response_sequence_number) 127 __u32 *pexpected_response_sequence_number)
168{ 128{
@@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
185 server->sequence_number++; 145 server->sequence_number++;
186 spin_unlock(&GlobalMid_Lock); 146 spin_unlock(&GlobalMid_Lock);
187 147
188 rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); 148 rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
149 smb_signature);
189 if (rc) 150 if (rc)
190 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 151 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
191 else 152 else
@@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
195} 156}
196 157
197int cifs_verify_signature(struct smb_hdr *cifs_pdu, 158int cifs_verify_signature(struct smb_hdr *cifs_pdu,
198 struct TCP_Server_Info *server, 159 const struct mac_key *mac_key,
199 __u32 expected_sequence_number) 160 __u32 expected_sequence_number)
200{ 161{
201 int rc; 162 unsigned int rc;
202 char server_response_sig[8]; 163 char server_response_sig[8];
203 char what_we_think_sig_should_be[20]; 164 char what_we_think_sig_should_be[20];
204 165
205 if (cifs_pdu == NULL || server == NULL) 166 if ((cifs_pdu == NULL) || (mac_key == NULL))
206 return -EINVAL; 167 return -EINVAL;
207 168
208 if (cifs_pdu->Command == SMB_COM_NEGOTIATE) 169 if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
231 cpu_to_le32(expected_sequence_number); 192 cpu_to_le32(expected_sequence_number);
232 cifs_pdu->Signature.Sequence.Reserved = 0; 193 cifs_pdu->Signature.Sequence.Reserved = 0;
233 194
234 rc = cifs_calculate_signature(cifs_pdu, server, 195 rc = cifs_calculate_signature(cifs_pdu, mac_key,
235 what_we_think_sig_should_be); 196 what_we_think_sig_should_be);
236 197
237 if (rc) 198 if (rc)
@@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
248} 209}
249 210
250/* We fill in key by putting in 40 byte array which was allocated by caller */ 211/* We fill in key by putting in 40 byte array which was allocated by caller */
251int cifs_calculate_session_key(struct session_key *key, const char *rn, 212int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
252 const char *password) 213 const char *password)
253{ 214{
254 char temp_key[16]; 215 char temp_key[16];
@@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
306{ 267{
307 int rc = 0; 268 int rc = 0;
308 int len; 269 int len;
309 char nt_hash[CIFS_NTHASH_SIZE]; 270 char nt_hash[16];
271 struct HMACMD5Context *pctxt;
310 wchar_t *user; 272 wchar_t *user;
311 wchar_t *domain; 273 wchar_t *domain;
312 wchar_t *server;
313 274
314 if (!ses->server->ntlmssp.sdeschmacmd5) { 275 pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
315 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); 276
316 return -1; 277 if (pctxt == NULL)
317 } 278 return -ENOMEM;
318 279
319 /* calculate md4 hash of password */ 280 /* calculate md4 hash of password */
320 E_md4hash(ses->password, nt_hash); 281 E_md4hash(ses->password, nt_hash);
321 282
322 crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash, 283 /* convert Domainname to unicode and uppercase */
323 CIFS_NTHASH_SIZE); 284 hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
324
325 rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
326 if (rc) {
327 cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
328 return rc;
329 }
330 285
331 /* convert ses->userName to unicode and uppercase */ 286 /* convert ses->userName to unicode and uppercase */
332 len = strlen(ses->userName); 287 len = strlen(ses->userName);
333 user = kmalloc(2 + (len * 2), GFP_KERNEL); 288 user = kmalloc(2 + (len * 2), GFP_KERNEL);
334 if (user == NULL) { 289 if (user == NULL)
335 cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
336 rc = -ENOMEM;
337 goto calc_exit_2; 290 goto calc_exit_2;
338 }
339 len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); 291 len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
340 UniStrupr(user); 292 UniStrupr(user);
341 293 hmac_md5_update((char *)user, 2*len, pctxt);
342 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
343 (char *)user, 2 * len);
344 294
345 /* convert ses->domainName to unicode and uppercase */ 295 /* convert ses->domainName to unicode and uppercase */
346 if (ses->domainName) { 296 if (ses->domainName) {
347 len = strlen(ses->domainName); 297 len = strlen(ses->domainName);
348 298
349 domain = kmalloc(2 + (len * 2), GFP_KERNEL); 299 domain = kmalloc(2 + (len * 2), GFP_KERNEL);
350 if (domain == NULL) { 300 if (domain == NULL)
351 cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
352 rc = -ENOMEM;
353 goto calc_exit_1; 301 goto calc_exit_1;
354 }
355 len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, 302 len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
356 nls_cp); 303 nls_cp);
357 /* the following line was removed since it didn't work well 304 /* the following line was removed since it didn't work well
@@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
359 Maybe converting the domain name earlier makes sense */ 306 Maybe converting the domain name earlier makes sense */
360 /* UniStrupr(domain); */ 307 /* UniStrupr(domain); */
361 308
362 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, 309 hmac_md5_update((char *)domain, 2*len, pctxt);
363 (char *)domain, 2 * len);
364 310
365 kfree(domain); 311 kfree(domain);
366 } else if (ses->serverName) {
367 len = strlen(ses->serverName);
368
369 server = kmalloc(2 + (len * 2), GFP_KERNEL);
370 if (server == NULL) {
371 cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
372 rc = -ENOMEM;
373 goto calc_exit_1;
374 }
375 len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
376 nls_cp);
377 /* the following line was removed since it didn't work well
378 with lower cased domain name that passed as an option.
379 Maybe converting the domain name earlier makes sense */
380 /* UniStrupr(domain); */
381
382 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
383 (char *)server, 2 * len);
384
385 kfree(server);
386 } 312 }
387
388 rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
389 ses->server->ntlmv2_hash);
390
391calc_exit_1: 313calc_exit_1:
392 kfree(user); 314 kfree(user);
393calc_exit_2: 315calc_exit_2:
394 /* BB FIXME what about bytes 24 through 40 of the signing key? 316 /* BB FIXME what about bytes 24 through 40 of the signing key?
395 compare with the NTLM example */ 317 compare with the NTLM example */
318 hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
396 319
320 kfree(pctxt);
397 return rc; 321 return rc;
398} 322}
399 323
400static int 324void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
401find_domain_name(struct cifsSesInfo *ses)
402{
403 int rc = 0;
404 unsigned int attrsize;
405 unsigned int type;
406 unsigned char *blobptr;
407 struct ntlmssp2_name *attrptr;
408
409 if (ses->server->tiblob) {
410 blobptr = ses->server->tiblob;
411 attrptr = (struct ntlmssp2_name *) blobptr;
412
413 while ((type = attrptr->type) != 0) {
414 blobptr += 2; /* advance attr type */
415 attrsize = attrptr->length;
416 blobptr += 2; /* advance attr size */
417 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
418 if (!ses->domainName) {
419 ses->domainName =
420 kmalloc(attrptr->length + 1,
421 GFP_KERNEL);
422 if (!ses->domainName)
423 return -ENOMEM;
424 cifs_from_ucs2(ses->domainName,
425 (__le16 *)blobptr,
426 attrptr->length,
427 attrptr->length,
428 load_nls_default(), false);
429 }
430 }
431 blobptr += attrsize; /* advance attr value */
432 attrptr = (struct ntlmssp2_name *) blobptr;
433 }
434 } else {
435 ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
436 ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
437 if (!ses->server->tiblob) {
438 ses->server->tilen = 0;
439 cERROR(1, "Challenge target info allocation failure");
440 return -ENOMEM;
441 }
442 memset(ses->server->tiblob, 0x0, ses->server->tilen);
443 attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
444 attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
445 }
446
447 return rc;
448}
449
450static int
451CalcNTLMv2_response(const struct TCP_Server_Info *server,
452 char *v2_session_response)
453{
454 int rc;
455
456 if (!server->ntlmssp.sdeschmacmd5) {
457 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
458 return -1;
459 }
460
461 crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
462 CIFS_HMAC_MD5_HASH_SIZE);
463
464 rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
465 if (rc) {
466 cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
467 return rc;
468 }
469
470 memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
471 server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
472 crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
473 v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
474 sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
475
476 if (server->tilen)
477 crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
478 server->tiblob, server->tilen);
479
480 rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
481 v2_session_response);
482
483 return rc;
484}
485
486int
487setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
488 const struct nls_table *nls_cp) 325 const struct nls_table *nls_cp)
489{ 326{
490 int rc = 0; 327 int rc;
491 struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf; 328 struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
329 struct HMACMD5Context context;
492 330
493 buf->blob_signature = cpu_to_le32(0x00000101); 331 buf->blob_signature = cpu_to_le32(0x00000101);
494 buf->reserved = 0; 332 buf->reserved = 0;
495 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 333 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
496 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); 334 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
497 buf->reserved2 = 0; 335 buf->reserved2 = 0;
498 336 buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
499 if (!ses->domainName) { 337 buf->names[0].length = 0;
500 rc = find_domain_name(ses); 338 buf->names[1].type = 0;
501 if (rc) { 339 buf->names[1].length = 0;
502 cERROR(1, "could not get domain/server name rc %d", rc);
503 return rc;
504 }
505 }
506 340
507 /* calculate buf->ntlmv2_hash */ 341 /* calculate buf->ntlmv2_hash */
508 rc = calc_ntlmv2_hash(ses, nls_cp); 342 rc = calc_ntlmv2_hash(ses, nls_cp);
509 if (rc) { 343 if (rc)
510 cERROR(1, "could not get v2 hash rc %d", rc);
511 return rc;
512 }
513 rc = CalcNTLMv2_response(ses->server, resp_buf);
514 if (rc) {
515 cERROR(1, "could not get v2 hash rc %d", rc); 344 cERROR(1, "could not get v2 hash rc %d", rc);
516 return rc; 345 CalcNTLMv2_response(ses, resp_buf);
517 }
518
519 if (!ses->server->ntlmssp.sdeschmacmd5) {
520 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
521 return -1;
522 }
523
524 crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
525 ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
526 346
527 rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); 347 /* now calculate the MAC key for NTLMv2 */
528 if (rc) { 348 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
529 cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n"); 349 hmac_md5_update(resp_buf, 16, &context);
530 return rc; 350 hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
531 }
532 351
533 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, 352 memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
534 resp_buf, CIFS_HMAC_MD5_HASH_SIZE); 353 sizeof(struct ntlmv2_resp));
535 354 ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
536 rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
537 ses->server->session_key.data.ntlmv2.key);
538
539 memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
540 sizeof(struct ntlmv2_resp));
541 ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
542
543 return rc;
544} 355}
545 356
546int 357void CalcNTLMv2_response(const struct cifsSesInfo *ses,
547calc_seckey(struct TCP_Server_Info *server) 358 char *v2_session_response)
548{
549 int rc;
550 unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
551 struct crypto_blkcipher *tfm_arc4;
552 struct scatterlist sgin, sgout;
553 struct blkcipher_desc desc;
554
555 get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
556
557 tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
558 0, CRYPTO_ALG_ASYNC);
559 if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
560 cERROR(1, "could not allocate " "master crypto API arc4\n");
561 return 1;
562 }
563
564 desc.tfm = tfm_arc4;
565
566 crypto_blkcipher_setkey(tfm_arc4,
567 server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
568 sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
569 sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
570 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
571
572 if (!rc)
573 memcpy(server->session_key.data.ntlmv2.key,
574 sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
575
576 crypto_free_blkcipher(tfm_arc4);
577
578 return 0;
579}
580
581void
582cifs_crypto_shash_release(struct TCP_Server_Info *server)
583{
584 if (server->ntlmssp.md5)
585 crypto_free_shash(server->ntlmssp.md5);
586
587 if (server->ntlmssp.hmacmd5)
588 crypto_free_shash(server->ntlmssp.hmacmd5);
589
590 kfree(server->ntlmssp.sdeschmacmd5);
591
592 kfree(server->ntlmssp.sdescmd5);
593}
594
595int
596cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
597{ 359{
598 int rc; 360 struct HMACMD5Context context;
599 unsigned int size; 361 /* rest of v2 struct already generated */
600 362 memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
601 server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); 363 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
602 if (!server->ntlmssp.hmacmd5 ||
603 IS_ERR(server->ntlmssp.hmacmd5)) {
604 cERROR(1, "could not allocate crypto hmacmd5\n");
605 return 1;
606 }
607
608 server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
609 if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
610 cERROR(1, "could not allocate crypto md5\n");
611 rc = 1;
612 goto cifs_crypto_shash_allocate_ret1;
613 }
614
615 size = sizeof(struct shash_desc) +
616 crypto_shash_descsize(server->ntlmssp.hmacmd5);
617 server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
618 if (!server->ntlmssp.sdeschmacmd5) {
619 cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
620 rc = -ENOMEM;
621 goto cifs_crypto_shash_allocate_ret2;
622 }
623 server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
624 server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
625 364
365 hmac_md5_update(v2_session_response+8,
366 sizeof(struct ntlmv2_resp) - 8, &context);
626 367
627 size = sizeof(struct shash_desc) + 368 hmac_md5_final(v2_session_response, &context);
628 crypto_shash_descsize(server->ntlmssp.md5); 369/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
629 server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
630 if (!server->ntlmssp.sdescmd5) {
631 cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
632 rc = -ENOMEM;
633 goto cifs_crypto_shash_allocate_ret3;
634 }
635 server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
636 server->ntlmssp.sdescmd5->shash.flags = 0x0;
637
638 return 0;
639
640cifs_crypto_shash_allocate_ret3:
641 kfree(server->ntlmssp.sdeschmacmd5);
642
643cifs_crypto_shash_allocate_ret2:
644 crypto_free_shash(server->ntlmssp.md5);
645
646cifs_crypto_shash_allocate_ret1:
647 crypto_free_shash(server->ntlmssp.hmacmd5);
648
649 return rc;
650} 370}
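
This hunk reverts the cifs signing code from the kernel crypto_shash interface back to the filesystem's private cifs_MD5_*/hmac_md5_* helpers; in either form the SMB signature computed here is an MD5 digest over the signing key followed by the PDU bytes. For reference, a hedged sketch of that digest using the generic crypto_shash API rather than the cifs helpers (it assumes the "md5" shash is available; my_smb_signature is an illustrative name, not a cifs function):

        #include <crypto/hash.h>
        #include <linux/err.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        static int my_smb_signature(const u8 *key, unsigned int keylen,
                                    const u8 *pdu, unsigned int pdulen,
                                    u8 *sig /* 16 bytes */)
        {
                struct crypto_shash *md5;
                struct shash_desc *desc;
                int rc;

                md5 = crypto_alloc_shash("md5", 0, 0);
                if (IS_ERR(md5))
                        return PTR_ERR(md5);

                desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(md5),
                               GFP_KERNEL);
                if (!desc) {
                        crypto_free_shash(md5);
                        return -ENOMEM;
                }
                desc->tfm = md5;

                /* signature = MD5(signing key || PDU) */
                rc = crypto_shash_init(desc);
                if (!rc)
                        rc = crypto_shash_update(desc, key, keylen);
                if (!rc)
                        rc = crypto_shash_update(desc, pdu, pdulen);
                if (!rc)
                        rc = crypto_shash_final(desc, sig);

                kfree(desc);
                crypto_free_shash(md5);
                return rc;
        }
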
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c9d0cfc086eb..0cdfb8c32ac6 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -25,9 +25,6 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include "cifs_fs_sb.h" 26#include "cifs_fs_sb.h"
27#include "cifsacl.h" 27#include "cifsacl.h"
28#include <crypto/internal/hash.h>
29#include <linux/scatterlist.h>
30
31/* 28/*
32 * The sizes of various internal tables and strings 29 * The sizes of various internal tables and strings
33 */ 30 */
@@ -100,7 +97,7 @@ enum protocolEnum {
100 /* Netbios frames protocol not supported at this time */ 97 /* Netbios frames protocol not supported at this time */
101}; 98};
102 99
103struct session_key { 100struct mac_key {
104 unsigned int len; 101 unsigned int len;
105 union { 102 union {
106 char ntlm[CIFS_SESS_KEY_SIZE + 16]; 103 char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -123,21 +120,6 @@ struct cifs_cred {
123 struct cifs_ace *aces; 120 struct cifs_ace *aces;
124}; 121};
125 122
126struct sdesc {
127 struct shash_desc shash;
128 char ctx[];
129};
130
131struct ntlmssp_auth {
132 __u32 client_flags;
133 __u32 server_flags;
134 unsigned char ciphertext[CIFS_CPHTXT_SIZE];
135 struct crypto_shash *hmacmd5;
136 struct crypto_shash *md5;
137 struct sdesc *sdeschmacmd5;
138 struct sdesc *sdescmd5;
139};
140
141/* 123/*
142 ***************************************************************** 124 *****************************************************************
143 * Except the CIFS PDUs themselves all the 125 * Except the CIFS PDUs themselves all the
@@ -200,14 +182,11 @@ struct TCP_Server_Info {
200 /* 16th byte of RFC1001 workstation name is always null */ 182 /* 16th byte of RFC1001 workstation name is always null */
201 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 183 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
202 __u32 sequence_number; /* needed for CIFS PDU signature */ 184 __u32 sequence_number; /* needed for CIFS PDU signature */
203 struct session_key session_key; 185 struct mac_key mac_signing_key;
204 char ntlmv2_hash[16]; 186 char ntlmv2_hash[16];
205 unsigned long lstrp; /* when we got last response from this server */ 187 unsigned long lstrp; /* when we got last response from this server */
206 u16 dialect; /* dialect index that server chose */ 188 u16 dialect; /* dialect index that server chose */
207 /* extended security flavors that server supports */ 189 /* extended security flavors that server supports */
208 unsigned int tilen; /* length of the target info blob */
209 unsigned char *tiblob; /* target info blob in challenge response */
210 struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
211 bool sec_kerberos; /* supports plain Kerberos */ 190 bool sec_kerberos; /* supports plain Kerberos */
212 bool sec_mskerberos; /* supports legacy MS Kerberos */ 191 bool sec_mskerberos; /* supports legacy MS Kerberos */
213 bool sec_kerberosu2u; /* supports U2U Kerberos */ 192 bool sec_kerberosu2u; /* supports U2U Kerberos */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 320e0fd0ba7b..14d036d8db11 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -134,12 +134,6 @@
134 * Size of the session key (crypto key encrypted with the password 134 * Size of the session key (crypto key encrypted with the password
135 */ 135 */
136#define CIFS_SESS_KEY_SIZE (24) 136#define CIFS_SESS_KEY_SIZE (24)
137#define CIFS_CLIENT_CHALLENGE_SIZE (8)
138#define CIFS_SERVER_CHALLENGE_SIZE (8)
139#define CIFS_HMAC_MD5_HASH_SIZE (16)
140#define CIFS_CPHTXT_SIZE (16)
141#define CIFS_NTLMV2_SESSKEY_SIZE (16)
142#define CIFS_NTHASH_SIZE (16)
143 137
144/* 138/*
145 * Maximum user name length 139 * Maximum user name length
@@ -669,6 +663,7 @@ struct ntlmv2_resp {
669 __le64 time; 663 __le64 time;
670 __u64 client_chal; /* random */ 664 __u64 client_chal; /* random */
671 __u32 reserved2; 665 __u32 reserved2;
666 struct ntlmssp2_name names[2];
672 /* array of name entries could follow ending in minimum 4 byte struct */ 667 /* array of name entries could follow ending in minimum 4 byte struct */
673} __attribute__((packed)); 668} __attribute__((packed));
674 669
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1378d9133844..1d60c655e3e0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
87extern int decode_negTokenInit(unsigned char *security_blob, int length, 87extern int decode_negTokenInit(unsigned char *security_blob, int length,
88 struct TCP_Server_Info *server); 88 struct TCP_Server_Info *server);
89extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); 89extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
90extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
90extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, 91extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
91 unsigned short int port); 92 const unsigned short int port);
92extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); 93extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
93extern void header_assemble(struct smb_hdr *, char /* command */ , 94extern void header_assemble(struct smb_hdr *, char /* command */ ,
94 const struct cifsTconInfo *, int /* length of 95 const struct cifsTconInfo *, int /* length of
@@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
361extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 362extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
362 __u32 *); 363 __u32 *);
363extern int cifs_verify_signature(struct smb_hdr *, 364extern int cifs_verify_signature(struct smb_hdr *,
364 struct TCP_Server_Info *server, 365 const struct mac_key *mac_key,
365 __u32 expected_sequence_number); 366 __u32 expected_sequence_number);
366extern int cifs_calculate_session_key(struct session_key *key, const char *rn, 367extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
367 const char *pass); 368 const char *pass);
368extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
369extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
370extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
369 const struct nls_table *); 371 const struct nls_table *);
370extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
371extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
372extern int calc_seckey(struct TCP_Server_Info *);
373#ifdef CONFIG_CIFS_WEAK_PW_HASH 372#ifdef CONFIG_CIFS_WEAK_PW_HASH
374extern void calc_lanman_hash(const char *password, const char *cryptkey, 373extern void calc_lanman_hash(const char *password, const char *cryptkey,
375 bool encrypt, char *lnm_session_key); 374 bool encrypt, char *lnm_session_key);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4bda920d1f75..7e83b356cc9e 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -232,7 +232,7 @@ static int
232small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 232small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
233 void **request_buf) 233 void **request_buf)
234{ 234{
235 int rc = 0; 235 int rc;
236 236
237 rc = cifs_reconnect_tcon(tcon, smb_command); 237 rc = cifs_reconnect_tcon(tcon, smb_command);
238 if (rc) 238 if (rc)
@@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
250 if (tcon != NULL) 250 if (tcon != NULL)
251 cifs_stats_inc(&tcon->num_smbs_sent); 251 cifs_stats_inc(&tcon->num_smbs_sent);
252 252
253 return rc; 253 return 0;
254} 254}
255 255
256int 256int
@@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct,
281 281
282/* If the return code is zero, this function must fill in request_buf pointer */ 282/* If the return code is zero, this function must fill in request_buf pointer */
283static int 283static int
284smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
285 void **request_buf /* returned */ ,
286 void **response_buf /* returned */ )
284__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
285 void **request_buf, void **response_buf)
287{ 286{
288 int rc = 0;
289
290 rc = cifs_reconnect_tcon(tcon, smb_command);
291 if (rc)
292 return rc;
293
294 *request_buf = cifs_buf_get(); 287 *request_buf = cifs_buf_get();
295 if (*request_buf == NULL) { 288 if (*request_buf == NULL) {
296 /* BB should we add a retry in here if not a writepage? */ 289 /* BB should we add a retry in here if not a writepage? */
@@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
309 if (tcon != NULL) 302 if (tcon != NULL)
310 cifs_stats_inc(&tcon->num_smbs_sent); 303 cifs_stats_inc(&tcon->num_smbs_sent);
311 304
312 return rc; 305 return 0;
306}
307
308/* If the return code is zero, this function must fill in request_buf pointer */
309static int
310smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
311 void **request_buf, void **response_buf)
312{
313 int rc;
314
315 rc = cifs_reconnect_tcon(tcon, smb_command);
316 if (rc)
317 return rc;
318
319 return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
320}
321
322static int
323smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
324 void **request_buf, void **response_buf)
325{
326 if (tcon->ses->need_reconnect || tcon->need_reconnect)
327 return -EHOSTDOWN;
328
329 return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
313} 330}
314 331
315static int validate_t2(struct smb_t2_rsp *pSMB) 332static int validate_t2(struct smb_t2_rsp *pSMB)
@@ -604,14 +621,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
604 else 621 else
605 rc = -EINVAL; 622 rc = -EINVAL;
606 623
607 if (server->secType == Kerberos) {
608 if (!server->sec_kerberos &&
609 !server->sec_mskerberos)
610 rc = -EOPNOTSUPP;
611 } else if (server->secType == RawNTLMSSP) {
612 if (!server->sec_ntlmssp)
613 rc = -EOPNOTSUPP;
614 } else
624 if (server->sec_kerberos || server->sec_mskerberos)
625 server->secType = Kerberos;
626 else if (server->sec_ntlmssp)
627 server->secType = RawNTLMSSP;
628 else
615 rc = -EOPNOTSUPP; 629 rc = -EOPNOTSUPP;
616 } 630 }
617 } else 631 } else
@@ -4537,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
4537 4551
4538 cFYI(1, "In QFSUnixInfo"); 4552 cFYI(1, "In QFSUnixInfo");
4539QFSUnixRetry: 4553QFSUnixRetry:
4540 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4554 rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
4541 (void **) &pSMBr); 4555 (void **) &pSMB, (void **) &pSMBr);
4542 if (rc) 4556 if (rc)
4543 return rc; 4557 return rc;
4544 4558
@@ -4607,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
4607 cFYI(1, "In SETFSUnixInfo"); 4621 cFYI(1, "In SETFSUnixInfo");
4608SETFSUnixRetry: 4622SETFSUnixRetry:
4609 /* BB switch to small buf init to save memory */ 4623 /* BB switch to small buf init to save memory */
4610 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4624 rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
4611 (void **) &pSMBr); 4625 (void **) &pSMB, (void **) &pSMBr);
4612 if (rc) 4626 if (rc)
4613 return rc; 4627 return rc;
4614 4628
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ec0ea4a43bdb..88c84a38bccb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -400,7 +400,9 @@ incomplete_rcv:
400 cFYI(1, "call to reconnect done"); 400 cFYI(1, "call to reconnect done");
401 csocket = server->ssocket; 401 csocket = server->ssocket;
402 continue; 402 continue;
403 } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
403 } else if (length == -ERESTARTSYS ||
404 length == -EAGAIN ||
405 length == -EINTR) {
404 msleep(1); /* minimum sleep to prevent looping 406 msleep(1); /* minimum sleep to prevent looping
405 allowing socket to clear and app threads to set 407 allowing socket to clear and app threads to set
406 tcpStatus CifsNeedReconnect if server hung */ 408 tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@ incomplete_rcv:
414 } else 416 } else
415 continue; 417 continue;
416 } else if (length <= 0) { 418 } else if (length <= 0) {
417 if (server->tcpStatus == CifsNew) {
418 cFYI(1, "tcp session abend after SMBnegprot");
419 /* some servers kill the TCP session rather than
420 returning an SMB negprot error, in which
421 case reconnecting here is not going to help,
422 and so simply return error to mount */
423 break;
424 }
425 if (!try_to_freeze() && (length == -EINTR)) {
426 cFYI(1, "cifsd thread killed");
427 break;
428 }
429 cFYI(1, "Reconnect after unexpected peek error %d", 419 cFYI(1, "Reconnect after unexpected peek error %d",
430 length); 420 length);
431 cifs_reconnect(server); 421 cifs_reconnect(server);
@@ -466,27 +456,19 @@ incomplete_rcv:
466 an error on SMB negprot response */ 456 an error on SMB negprot response */
467 cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", 457 cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
468 pdu_length); 458 pdu_length);
469 if (server->tcpStatus == CifsNew) {
470 /* if nack on negprot (rather than
471 ret of smb negprot error) reconnecting
472 not going to help, ret error to mount */
473 break;
474 } else {
475 /* give server a second to
476 clean up before reconnect attempt */
477 msleep(1000);
478 /* always try 445 first on reconnect
479 since we get NACK on some if we ever
480 connected to port 139 (the NACK is
481 since we do not begin with RFC1001
482 session initialize frame) */
483 server->addr.sockAddr.sin_port =
484 htons(CIFS_PORT);
485 cifs_reconnect(server);
486 csocket = server->ssocket;
487 wake_up(&server->response_q);
488 continue;
489 }
459 /* give server a second to clean up */
460 msleep(1000);
461 /* always try 445 first on reconnect since we get NACK
462 * on some if we ever connected to port 139 (the NACK
463 * is since we do not begin with RFC1001 session
464 * initialize frame)
465 */
466 cifs_set_port((struct sockaddr *)
467 &server->addr.sockAddr, CIFS_PORT);
468 cifs_reconnect(server);
469 csocket = server->ssocket;
470 wake_up(&server->response_q);
471 continue;
490 } else if (temp != (char) 0) { 472 } else if (temp != (char) 0) {
491 cERROR(1, "Unknown RFC 1002 frame"); 473 cERROR(1, "Unknown RFC 1002 frame");
492 cifs_dump_mem(" Received Data: ", (char *)smb_buffer, 474 cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@ incomplete_rcv:
522 total_read += length) { 504 total_read += length) {
523 length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, 505 length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
524 pdu_length - total_read, 0); 506 pdu_length - total_read, 0);
525 if ((server->tcpStatus == CifsExiting) ||
526 (length == -EINTR)) {
507 if (server->tcpStatus == CifsExiting) {
527 /* then will exit */ 508 /* then will exit */
528 reconnect = 2; 509 reconnect = 2;
529 break; 510 break;
@@ -534,8 +515,9 @@ incomplete_rcv:
534 /* Now we will reread sock */ 515 /* Now we will reread sock */
535 reconnect = 1; 516 reconnect = 1;
536 break; 517 break;
537 } else if ((length == -ERESTARTSYS) ||
538 (length == -EAGAIN)) {
518 } else if (length == -ERESTARTSYS ||
519 length == -EAGAIN ||
520 length == -EINTR) {
539 msleep(1); /* minimum sleep to prevent looping, 521 msleep(1); /* minimum sleep to prevent looping,
540 allowing socket to clear and app 522 allowing socket to clear and app
541 threads to set tcpStatus 523 threads to set tcpStatus
@@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
1708 CIFSSMBLogoff(xid, ses); 1690 CIFSSMBLogoff(xid, ses);
1709 _FreeXid(xid); 1691 _FreeXid(xid);
1710 } 1692 }
1711 cifs_crypto_shash_release(server);
1712 sesInfoFree(ses); 1693 sesInfoFree(ses);
1713 cifs_put_tcp_session(server); 1694 cifs_put_tcp_session(server);
1714} 1695}
@@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1725 if (ses) { 1706 if (ses) {
1726 cFYI(1, "Existing smb sess found (status=%d)", ses->status); 1707 cFYI(1, "Existing smb sess found (status=%d)", ses->status);
1727 1708
1728 /* existing SMB ses has a server reference already */
1729 cifs_put_tcp_session(server);
1730
1731 mutex_lock(&ses->session_mutex); 1709 mutex_lock(&ses->session_mutex);
1732 rc = cifs_negotiate_protocol(xid, ses); 1710 rc = cifs_negotiate_protocol(xid, ses);
1733 if (rc) { 1711 if (rc) {
@@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1750 } 1728 }
1751 } 1729 }
1752 mutex_unlock(&ses->session_mutex); 1730 mutex_unlock(&ses->session_mutex);
1731
1732 /* existing SMB ses has a server reference already */
1733 cifs_put_tcp_session(server);
1753 FreeXid(xid); 1734 FreeXid(xid);
1754 return ses; 1735 return ses;
1755 } 1736 }
@@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1788 ses->linux_uid = volume_info->linux_uid; 1769 ses->linux_uid = volume_info->linux_uid;
1789 ses->overrideSecFlg = volume_info->secFlg; 1770 ses->overrideSecFlg = volume_info->secFlg;
1790 1771
1791 rc = cifs_crypto_shash_allocate(server);
1792 if (rc) {
1793 cERROR(1, "could not setup hash structures rc %d", rc);
1794 goto get_ses_fail;
1795 }
1796 server->tilen = 0;
1797 server->tiblob = NULL;
1798
1799 mutex_lock(&ses->session_mutex); 1772 mutex_lock(&ses->session_mutex);
1800 rc = cifs_negotiate_protocol(xid, ses); 1773 rc = cifs_negotiate_protocol(xid, ses);
1801 if (!rc) 1774 if (!rc)
1802 rc = cifs_setup_session(xid, ses, volume_info->local_nls); 1775 rc = cifs_setup_session(xid, ses, volume_info->local_nls);
1803 mutex_unlock(&ses->session_mutex); 1776 mutex_unlock(&ses->session_mutex);
1804 if (rc) { 1777 if (rc)
1805 cifs_crypto_shash_release(ses->server);
1806 goto get_ses_fail; 1778 goto get_ses_fail;
1807 }
1808 1779
1809 /* success, put it on the list */ 1780 /* success, put it on the list */
1810 write_lock(&cifs_tcp_ses_lock); 1781 write_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 86a164f08a74..53cce8cc2224 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -801,6 +801,8 @@ retry_iget5_locked:
801 inode->i_flags |= S_NOATIME | S_NOCMTIME; 801 inode->i_flags |= S_NOATIME | S_NOCMTIME;
802 if (inode->i_state & I_NEW) { 802 if (inode->i_state & I_NEW) {
803 inode->i_ino = hash; 803 inode->i_ino = hash;
804 if (S_ISREG(inode->i_mode))
805 inode->i_data.backing_dev_info = sb->s_bdi;
804#ifdef CONFIG_CIFS_FSCACHE 806#ifdef CONFIG_CIFS_FSCACHE
805 /* initialize per-inode cache cookie pointer */ 807 /* initialize per-inode cache cookie pointer */
806 CIFS_I(inode)->fscache = NULL; 808 CIFS_I(inode)->fscache = NULL;
@@ -1462,29 +1464,18 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1462{ 1464{
1463 char *fromName = NULL; 1465 char *fromName = NULL;
1464 char *toName = NULL; 1466 char *toName = NULL;
1465 struct cifs_sb_info *cifs_sb_source;
1466 struct cifs_sb_info *cifs_sb_target;
1467 struct cifs_sb_info *cifs_sb;
1467 struct cifsTconInfo *tcon; 1468 struct cifsTconInfo *tcon;
1468 FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 1469 FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
1469 FILE_UNIX_BASIC_INFO *info_buf_target; 1470 FILE_UNIX_BASIC_INFO *info_buf_target;
1470 int xid, rc, tmprc; 1471 int xid, rc, tmprc;
1471 1472
1472 cifs_sb_target = CIFS_SB(target_dir->i_sb);
1473 cifs_sb_source = CIFS_SB(source_dir->i_sb);
1474 tcon = cifs_sb_source->tcon;
1473 cifs_sb = CIFS_SB(source_dir->i_sb);
1474 tcon = cifs_sb->tcon;
1475 1475
1476 xid = GetXid(); 1476 xid = GetXid();
1477 1477
1478 /* 1478 /*
1479 * BB: this might be allowed if same server, but different share.
1480 * Consider adding support for this
1481 */
1482 if (tcon != cifs_sb_target->tcon) {
1483 rc = -EXDEV;
1484 goto cifs_rename_exit;
1485 }
1486
1487 /*
1488 * we already have the rename sem so we do not need to 1479 * we already have the rename sem so we do not need to
1489 * grab it again here to protect the path integrity 1480 * grab it again here to protect the path integrity
1490 */ 1481 */
@@ -1519,17 +1510,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1519 info_buf_target = info_buf_source + 1; 1510 info_buf_target = info_buf_source + 1;
1520 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName, 1511 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
1521 info_buf_source, 1512 info_buf_source,
1522 cifs_sb_source->local_nls, 1513 cifs_sb->local_nls,
1523 cifs_sb_source->mnt_cifs_flags & 1514 cifs_sb->mnt_cifs_flags &
1524 CIFS_MOUNT_MAP_SPECIAL_CHR); 1515 CIFS_MOUNT_MAP_SPECIAL_CHR);
1525 if (tmprc != 0) 1516 if (tmprc != 0)
1526 goto unlink_target; 1517 goto unlink_target;
1527 1518
1528 tmprc = CIFSSMBUnixQPathInfo(xid, tcon,
1529 toName, info_buf_target,
1530 cifs_sb_target->local_nls,
1531 /* remap based on source sb */
1532 cifs_sb_source->mnt_cifs_flags &
1519 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
1520 info_buf_target,
1521 cifs_sb->local_nls,
1522 cifs_sb->mnt_cifs_flags &
1533 CIFS_MOUNT_MAP_SPECIAL_CHR); 1523 CIFS_MOUNT_MAP_SPECIAL_CHR);
1534 1524
1535 if (tmprc == 0 && (info_buf_source->UniqueId == 1525 if (tmprc == 0 && (info_buf_source->UniqueId ==
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index f97851119e6c..9aad47a2d62f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
206} 206}
207 207
208int 208int
209cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
210 const unsigned short int port)
209cifs_set_port(struct sockaddr *addr, const unsigned short int port)
211{ 210{
212 if (!cifs_convert_address(dst, src, len))
213 return 0;
214
215 switch (dst->sa_family) {
211 switch (addr->sa_family) {
216 case AF_INET: 212 case AF_INET:
217 ((struct sockaddr_in *)dst)->sin_port = htons(port); 213 ((struct sockaddr_in *)addr)->sin_port = htons(port);
218 break; 214 break;
219 case AF_INET6: 215 case AF_INET6:
220 ((struct sockaddr_in6 *)dst)->sin6_port = htons(port); 216 ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
221 break; 217 break;
222 default: 218 default:
223 return 0; 219 return 0;
224 } 220 }
225
226 return 1; 221 return 1;
227} 222}
228 223
224int
225cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
226 const unsigned short int port)
227{
228 if (!cifs_convert_address(dst, src, len))
229 return 0;
230 return cifs_set_port(dst, port);
231}
232
229/***************************************************************************** 233/*****************************************************************************
230convert a NT status code to a dos class/code 234convert a NT status code to a dos class/code
231 *****************************************************************************/ 235 *****************************************************************************/
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 1db0f0746a5b..49c9a4e75319 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,19 +61,6 @@
61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
62#define NTLMSSP_NEGOTIATE_56 0x80000000 62#define NTLMSSP_NEGOTIATE_56 0x80000000
63 63
64/* Define AV Pair Field IDs */
65#define NTLMSSP_AV_EOL 0
66#define NTLMSSP_AV_NB_COMPUTER_NAME 1
67#define NTLMSSP_AV_NB_DOMAIN_NAME 2
68#define NTLMSSP_AV_DNS_COMPUTER_NAME 3
69#define NTLMSSP_AV_DNS_DOMAIN_NAME 4
70#define NTLMSSP_AV_DNS_TREE_NAME 5
71#define NTLMSSP_AV_FLAGS 6
72#define NTLMSSP_AV_TIMESTAMP 7
73#define NTLMSSP_AV_RESTRICTION 8
74#define NTLMSSP_AV_TARGET_NAME 9
75#define NTLMSSP_AV_CHANNEL_BINDINGS 10
76
77/* Although typedefs are not commonly used for structure definitions */ 64/* Although typedefs are not commonly used for structure definitions */
78/* in the Linux kernel, in this particular case they are useful */ 65/* in the Linux kernel, in this particular case they are useful */
79/* to more closely match the standards document for NTLMSSP from */ 66/* to more closely match the standards document for NTLMSSP from */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 795095f4eac6..0a57cb7db5dd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
384 struct cifsSesInfo *ses) 384 struct cifsSesInfo *ses)
385{ 385{
386 unsigned int tioffset; /* challeng message target info area */
387 unsigned int tilen; /* challeng message target info area length */
388
389 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; 386 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
390 387
391 if (blob_len < sizeof(CHALLENGE_MESSAGE)) { 388 if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
408 /* BB spec says that if AvId field of MsvAvTimestamp is populated then 405 /* BB spec says that if AvId field of MsvAvTimestamp is populated then
409 we must set the MIC field of the AUTHENTICATE_MESSAGE */ 406 we must set the MIC field of the AUTHENTICATE_MESSAGE */
410 407
411 ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
412
413 tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
414 tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
415 ses->server->tilen = tilen;
416 if (tilen) {
417 ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
418 if (!ses->server->tiblob) {
419 cERROR(1, "Challenge target info allocation failure");
420 return -ENOMEM;
421 }
422 memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen);
423 }
424
425 return 0; 408 return 0;
426} 409}
427 410
@@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
442 /* BB is NTLMV2 session security format easier to use here? */ 425 /* BB is NTLMV2 session security format easier to use here? */
443 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 426 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
444 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 427 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
445 NTLMSSP_NEGOTIATE_NTLM; 428 NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
446 if (ses->server->secMode & 429 if (ses->server->secMode &
447 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 430 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
448 flags |= NTLMSSP_NEGOTIATE_SIGN |
449 NTLMSSP_NEGOTIATE_KEY_XCH |
450 NTLMSSP_NEGOTIATE_EXTENDED_SEC;
451 }
431 flags |= NTLMSSP_NEGOTIATE_SIGN;
432 if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
433 flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
452 434
453 sec_blob->NegotiateFlags |= cpu_to_le32(flags); 435 sec_blob->NegotiateFlags |= cpu_to_le32(flags);
454 436
@@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
469 struct cifsSesInfo *ses, 451 struct cifsSesInfo *ses,
470 const struct nls_table *nls_cp, bool first) 452 const struct nls_table *nls_cp, bool first)
471{ 453{
472 int rc;
473 unsigned int size;
474 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; 454 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
475 __u32 flags; 455 __u32 flags;
476 unsigned char *tmp; 456 unsigned char *tmp;
477 struct ntlmv2_resp ntlmv2_response = {}; 457 char ntlm_session_key[CIFS_SESS_KEY_SIZE];
478 458
479 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 459 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
480 sec_blob->MessageType = NtLmAuthenticate; 460 sec_blob->MessageType = NtLmAuthenticate;
@@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
497 sec_blob->LmChallengeResponse.Length = 0; 477 sec_blob->LmChallengeResponse.Length = 0;
498 sec_blob->LmChallengeResponse.MaximumLength = 0; 478 sec_blob->LmChallengeResponse.MaximumLength = 0;
499 479
500 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
501 rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
502 if (rc) {
503 cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
504 goto setup_ntlmv2_ret;
505 }
506 size = sizeof(struct ntlmv2_resp);
507 memcpy(tmp, (char *)&ntlmv2_response, size);
508 tmp += size;
509 if (ses->server->tilen > 0) {
510 memcpy(tmp, ses->server->tiblob, ses->server->tilen);
511 tmp += ses->server->tilen;
512 } else
513 ses->server->tilen = 0;
480 /* calculate session key, BB what about adding similar ntlmv2 path? */
481 SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
482 if (first)
483 cifs_calculate_mac_key(&ses->server->mac_signing_key,
484 ntlm_session_key, ses->password);
514 485
515 sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
516 ses->server->tilen);
486 memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
487 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
488 sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
517 sec_blob->NtChallengeResponse.MaximumLength = 489 sec_blob->NtChallengeResponse.MaximumLength =
518 cpu_to_le16(size + ses->server->tilen); 490 cpu_to_le16(CIFS_SESS_KEY_SIZE);
491
492 tmp += CIFS_SESS_KEY_SIZE;
519 493
520 if (ses->domainName == NULL) { 494 if (ses->domainName == NULL) {
521 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 495 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
527 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, 501 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
528 MAX_USERNAME_SIZE, nls_cp); 502 MAX_USERNAME_SIZE, nls_cp);
529 len *= 2; /* unicode is 2 bytes each */ 503 len *= 2; /* unicode is 2 bytes each */
504 len += 2; /* trailing null */
530 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 505 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
531 sec_blob->DomainName.Length = cpu_to_le16(len); 506 sec_blob->DomainName.Length = cpu_to_le16(len);
532 sec_blob->DomainName.MaximumLength = cpu_to_le16(len); 507 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
543 len = cifs_strtoUCS((__le16 *)tmp, ses->userName, 518 len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
544 MAX_USERNAME_SIZE, nls_cp); 519 MAX_USERNAME_SIZE, nls_cp);
545 len *= 2; /* unicode is 2 bytes each */ 520 len *= 2; /* unicode is 2 bytes each */
521 len += 2; /* trailing null */
546 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 522 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
547 sec_blob->UserName.Length = cpu_to_le16(len); 523 sec_blob->UserName.Length = cpu_to_le16(len);
548 sec_blob->UserName.MaximumLength = cpu_to_le16(len); 524 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
554 sec_blob->WorkstationName.MaximumLength = 0; 530 sec_blob->WorkstationName.MaximumLength = 0;
555 tmp += 2; 531 tmp += 2;
556 532
557 if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
558 !calc_seckey(ses->server)) {
559 memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
560 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
561 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
562 sec_blob->SessionKey.MaximumLength =
563 cpu_to_le16(CIFS_CPHTXT_SIZE);
564 tmp += CIFS_CPHTXT_SIZE;
565 } else {
566 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
567 sec_blob->SessionKey.Length = 0;
568 sec_blob->SessionKey.MaximumLength = 0;
569 }
570
571 ses->server->sequence_number = 0;
572
573setup_ntlmv2_ret:
574 if (ses->server->tilen > 0)
575 kfree(ses->server->tiblob);
576
533 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
534 sec_blob->SessionKey.Length = 0;
535 sec_blob->SessionKey.MaximumLength = 0;
577 return tmp - pbuffer; 536 return tmp - pbuffer;
578} 537}
579 538
@@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
587 return; 546 return;
588} 547}
589 548
590static int setup_ntlmssp_auth_req(char *ntlmsspblob, 549static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
591 struct cifsSesInfo *ses, 550 struct cifsSesInfo *ses,
592 const struct nls_table *nls, bool first_time) 551 const struct nls_table *nls, bool first_time)
593{ 552{
594 int bloblen; 553 int bloblen;
595 554
596 bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls, 555 bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
597 first_time); 556 first_time);
557 pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
598 558
599 return bloblen; 559 return bloblen;
600} 560}
@@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate:
730 690
731 if (first_time) /* should this be moved into common code 691 if (first_time) /* should this be moved into common code
732 with similar ntlmv2 path? */ 692 with similar ntlmv2 path? */
733 cifs_calculate_session_key(&ses->server->session_key, 693 cifs_calculate_mac_key(&ses->server->mac_signing_key,
734 ntlm_session_key, ses->password); 694 ntlm_session_key, ses->password);
735 /* copy session key */ 695 /* copy session key */
736 696
@@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate:
769 cpu_to_le16(sizeof(struct ntlmv2_resp)); 729 cpu_to_le16(sizeof(struct ntlmv2_resp));
770 730
771 /* calculate session key */ 731 /* calculate session key */
772 rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
773 if (rc) {
774 kfree(v2_sess_key);
775 goto ssetup_exit;
776 }
732 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
777 /* FIXME: calculate MAC key */ 733 /* FIXME: calculate MAC key */
778 memcpy(bcc_ptr, (char *)v2_sess_key, 734 memcpy(bcc_ptr, (char *)v2_sess_key,
779 sizeof(struct ntlmv2_resp)); 735 sizeof(struct ntlmv2_resp));
780 bcc_ptr += sizeof(struct ntlmv2_resp); 736 bcc_ptr += sizeof(struct ntlmv2_resp);
781 kfree(v2_sess_key); 737 kfree(v2_sess_key);
782 if (ses->server->tilen > 0) {
783 memcpy(bcc_ptr, ses->server->tiblob,
784 ses->server->tilen);
785 bcc_ptr += ses->server->tilen;
786 }
787 if (ses->capabilities & CAP_UNICODE) { 738 if (ses->capabilities & CAP_UNICODE) {
788 if (iov[0].iov_len % 2) { 739 if (iov[0].iov_len % 2) {
789 *bcc_ptr = 0; 740 *bcc_ptr = 0;
@@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate:
814 } 765 }
815 /* bail out if key is too long */ 766 /* bail out if key is too long */
816 if (msg->sesskey_len > 767 if (msg->sesskey_len >
817 sizeof(ses->server->session_key.data.krb5)) { 768 sizeof(ses->server->mac_signing_key.data.krb5)) {
818 cERROR(1, "Kerberos signing key too long (%u bytes)", 769 cERROR(1, "Kerberos signing key too long (%u bytes)",
819 msg->sesskey_len); 770 msg->sesskey_len);
820 rc = -EOVERFLOW; 771 rc = -EOVERFLOW;
821 goto ssetup_exit; 772 goto ssetup_exit;
822 } 773 }
823 if (first_time) { 774 if (first_time) {
824 ses->server->session_key.len = msg->sesskey_len; 775 ses->server->mac_signing_key.len = msg->sesskey_len;
825 memcpy(ses->server->session_key.data.krb5, 776 memcpy(ses->server->mac_signing_key.data.krb5,
826 msg->data, msg->sesskey_len); 777 msg->data, msg->sesskey_len);
827 } 778 }
828 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; 779 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate:
864 if (phase == NtLmNegotiate) { 815 if (phase == NtLmNegotiate) {
865 setup_ntlmssp_neg_req(pSMB, ses); 816 setup_ntlmssp_neg_req(pSMB, ses);
866 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); 817 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
867 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
868 } else if (phase == NtLmAuthenticate) { 818 } else if (phase == NtLmAuthenticate) {
869 int blob_len; 819 int blob_len;
870 char *ntlmsspblob;
871
872 ntlmsspblob = kmalloc(5 *
873 sizeof(struct _AUTHENTICATE_MESSAGE),
874 GFP_KERNEL);
875 if (!ntlmsspblob) {
876 cERROR(1, "Can't allocate NTLMSSP");
877 rc = -ENOMEM;
878 goto ssetup_exit;
879 }
880
881 blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
882 ses,
883 nls_cp,
884 first_time);
820 blob_len = setup_ntlmssp_auth_req(pSMB, ses,
821 nls_cp,
822 first_time);
885 iov[1].iov_len = blob_len; 823 iov[1].iov_len = blob_len;
886 iov[1].iov_base = ntlmsspblob;
887 pSMB->req.SecurityBlobLength =
888 cpu_to_le16(blob_len);
889 /* Make sure that we tell the server that we 824 /* Make sure that we tell the server that we
890 are using the uid that it just gave us back 825 are using the uid that it just gave us back
891 on the response (challenge) */ 826 on the response (challenge) */
@@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate:
895 rc = -ENOSYS; 830 rc = -ENOSYS;
896 goto ssetup_exit; 831 goto ssetup_exit;
897 } 832 }
833 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
898 /* unicode strings must be word aligned */ 834 /* unicode strings must be word aligned */
899 if ((iov[0].iov_len + iov[1].iov_len) % 2) { 835 if ((iov[0].iov_len + iov[1].iov_len) % 2) {
900 *bcc_ptr = 0; 836 *bcc_ptr = 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e0588cdf4cc5..82f78c4d6978 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
544 SECMODE_SIGN_ENABLED))) { 544 SECMODE_SIGN_ENABLED))) {
545 rc = cifs_verify_signature(midQ->resp_buf, 545 rc = cifs_verify_signature(midQ->resp_buf,
546 ses->server, 546 &ses->server->mac_signing_key,
547 midQ->sequence_number+1); 547 midQ->sequence_number+1);
548 if (rc) { 548 if (rc) {
549 cERROR(1, "Unexpected SMB signature"); 549 cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
732 SECMODE_SIGN_ENABLED))) { 732 SECMODE_SIGN_ENABLED))) {
733 rc = cifs_verify_signature(out_buf, 733 rc = cifs_verify_signature(out_buf,
734 ses->server, 734 &ses->server->mac_signing_key,
735 midQ->sequence_number+1); 735 midQ->sequence_number+1);
736 if (rc) { 736 if (rc) {
737 cERROR(1, "Unexpected SMB signature"); 737 cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
982 SECMODE_SIGN_ENABLED))) { 982 SECMODE_SIGN_ENABLED))) {
983 rc = cifs_verify_signature(out_buf, 983 rc = cifs_verify_signature(out_buf,
984 ses->server, 984 &ses->server->mac_signing_key,
985 midQ->sequence_number+1); 985 midQ->sequence_number+1);
986 if (rc) { 986 if (rc) {
987 cERROR(1, "Unexpected SMB signature"); 987 cERROR(1, "Unexpected SMB signature");
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index de89645777c7..116af7546cf0 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -184,8 +184,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
184 } 184 }
185 185
186 /* adjust outsize. is this useful ?? */ 186 /* adjust outsize. is this useful ?? */
187 req->uc_outSize = nbytes; 187 req->uc_outSize = nbytes;
188 req->uc_flags |= REQ_WRITE; 188 req->uc_flags |= CODA_REQ_WRITE;
189 count = nbytes; 189 count = nbytes;
190 190
191 /* Convert filedescriptor into a file handle */ 191 /* Convert filedescriptor into a file handle */
diff --git a/fs/compat.c b/fs/compat.c
index 718c7062aec1..0644a154672b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1153{ 1153{
1154 compat_ssize_t tot_len; 1154 compat_ssize_t tot_len;
1155 struct iovec iovstack[UIO_FASTIOV]; 1155 struct iovec iovstack[UIO_FASTIOV];
1156 struct iovec *iov; 1156 struct iovec *iov = iovstack;
1157 ssize_t ret; 1157 ssize_t ret;
1158 io_fn_t fn; 1158 io_fn_t fn;
1159 iov_fn_t fnv; 1159 iov_fn_t fnv;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 51f270b479b6..48d74c7391d1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
634 int ret = 0; 634 int ret = 0;
635 635
636 if (dio->bio) { 636 if (dio->bio) {
637 loff_t cur_offset = dio->block_in_file << dio->blkbits; 637 loff_t cur_offset = dio->cur_page_fs_offset;
638 loff_t bio_next_offset = dio->logical_offset_in_bio + 638 loff_t bio_next_offset = dio->logical_offset_in_bio +
639 dio->bio->bi_size; 639 dio->bio->bi_size;
640 640
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
659 * Submit now if the underlying fs is about to perform a 659 * Submit now if the underlying fs is about to perform a
660 * metadata read 660 * metadata read
661 */ 661 */
662 if (dio->boundary) 662 else if (dio->boundary)
663 dio_bio_submit(dio); 663 dio_bio_submit(dio);
664 } 664 }
665 665
diff --git a/fs/exec.c b/fs/exec.c
index 2d9455282744..828dd2461d6b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max)
376 argv++; 376 argv++;
377 if (i++ >= max) 377 if (i++ >= max)
378 return -E2BIG; 378 return -E2BIG;
379
380 if (fatal_signal_pending(current))
381 return -ERESTARTNOHAND;
379 cond_resched(); 382 cond_resched();
380 } 383 }
381 } 384 }
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv,
419 while (len > 0) { 422 while (len > 0) {
420 int offset, bytes_to_copy; 423 int offset, bytes_to_copy;
421 424
425 if (fatal_signal_pending(current)) {
426 ret = -ERESTARTNOHAND;
427 goto out;
428 }
429 cond_resched();
430
422 offset = pos % PAGE_SIZE; 431 offset = pos % PAGE_SIZE;
423 if (offset == 0) 432 if (offset == 0)
424 offset = PAGE_SIZE; 433 offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
594#else 603#else
595 stack_top = arch_align_stack(stack_top); 604 stack_top = arch_align_stack(stack_top);
596 stack_top = PAGE_ALIGN(stack_top); 605 stack_top = PAGE_ALIGN(stack_top);
606
607 if (unlikely(stack_top < mmap_min_addr) ||
608 unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
609 return -ENOMEM;
610
597 stack_shift = vma->vm_end - stack_top; 611 stack_shift = vma->vm_end - stack_top;
598 612
599 bprm->p -= stack_shift; 613 bprm->p -= stack_shift;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6769fd0f35b8..f8cc34f542c3 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
769 769
770static int __init fcntl_init(void) 770static int __init fcntl_init(void)
771{ 771{
772 /* please add new bits here to ensure allocation uniqueness */
773 BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
772 /*
773 * Please add new bits here to ensure allocation uniqueness.
774 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
775 * is defined as O_NONBLOCK on some platforms and not on others.
776 */
777 BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
774 O_RDONLY | O_WRONLY | O_RDWR | 778 O_RDONLY | O_WRONLY | O_RDWR |
775 O_CREAT | O_EXCL | O_NOCTTY | 779 O_CREAT | O_EXCL | O_NOCTTY |
776 O_TRUNC | O_APPEND | O_NONBLOCK | 780 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
777 __O_SYNC | O_DSYNC | FASYNC | 781 __O_SYNC | O_DSYNC | FASYNC |
778 O_DIRECT | O_LARGEFILE | O_DIRECTORY | 782 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
779 O_NOFOLLOW | O_NOATIME | O_CLOEXEC | 783 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7d9d06ba184b..ab38fef1c9a1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,8 +52,6 @@ struct wb_writeback_work {
52#define CREATE_TRACE_POINTS 52#define CREATE_TRACE_POINTS
53#include <trace/events/writeback.h> 53#include <trace/events/writeback.h>
54 54
55#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
56
57/* 55/*
58 * We don't actually have pdflush, but this one is exported though /proc... 56 * We don't actually have pdflush, but this one is exported though /proc...
59 */ 57 */
@@ -71,6 +69,16 @@ int writeback_in_progress(struct backing_dev_info *bdi)
71 return test_bit(BDI_writeback_running, &bdi->state); 69 return test_bit(BDI_writeback_running, &bdi->state);
72} 70}
73 71
72static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
73{
74 struct super_block *sb = inode->i_sb;
75
76 if (strcmp(sb->s_type->name, "bdev") == 0)
77 return inode->i_mapping->backing_dev_info;
78
79 return sb->s_bdi;
80}
81
74static void bdi_queue_work(struct backing_dev_info *bdi, 82static void bdi_queue_work(struct backing_dev_info *bdi,
75 struct wb_writeback_work *work) 83 struct wb_writeback_work *work)
76{ 84{
@@ -808,7 +816,7 @@ int bdi_writeback_thread(void *data)
808 wb->last_active = jiffies; 816 wb->last_active = jiffies;
809 817
810 set_current_state(TASK_INTERRUPTIBLE); 818 set_current_state(TASK_INTERRUPTIBLE);
811 if (!list_empty(&bdi->work_list)) { 819 if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
812 __set_current_state(TASK_RUNNING); 820 __set_current_state(TASK_RUNNING);
813 continue; 821 continue;
814 } 822 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69ad053ffd78..cde755cca564 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
276 * Called with fc->lock, unlocks it 276 * Called with fc->lock, unlocks it
277 */ 277 */
278static void request_end(struct fuse_conn *fc, struct fuse_req *req) 278static void request_end(struct fuse_conn *fc, struct fuse_req *req)
279__releases(&fc->lock) 279__releases(fc->lock)
280{ 280{
281 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; 281 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
282 req->end = NULL; 282 req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
306 306
307static void wait_answer_interruptible(struct fuse_conn *fc, 307static void wait_answer_interruptible(struct fuse_conn *fc,
308 struct fuse_req *req) 308 struct fuse_req *req)
309__releases(&fc->lock) 309__releases(fc->lock)
310__acquires(&fc->lock) 310__acquires(fc->lock)
311{ 311{
312 if (signal_pending(current)) 312 if (signal_pending(current))
313 return; 313 return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
325} 325}
326 326
327static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) 327static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
328__releases(&fc->lock) 328__releases(fc->lock)
329__acquires(&fc->lock) 329__acquires(fc->lock)
330{ 330{
331 if (!fc->no_interrupt) { 331 if (!fc->no_interrupt) {
332 /* Any signal may interrupt this */ 332 /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
905 905
906/* Wait until a request is available on the pending list */ 906/* Wait until a request is available on the pending list */
907static void request_wait(struct fuse_conn *fc) 907static void request_wait(struct fuse_conn *fc)
908__releases(&fc->lock) 908__releases(fc->lock)
909__acquires(&fc->lock) 909__acquires(fc->lock)
910{ 910{
911 DECLARE_WAITQUEUE(wait, current); 911 DECLARE_WAITQUEUE(wait, current);
912 912
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
934 */ 934 */
935static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, 935static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
936 size_t nbytes, struct fuse_req *req) 936 size_t nbytes, struct fuse_req *req)
937__releases(&fc->lock) 937__releases(fc->lock)
938{ 938{
939 struct fuse_in_header ih; 939 struct fuse_in_header ih;
940 struct fuse_interrupt_in arg; 940 struct fuse_interrupt_in arg;
@@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1354 loff_t file_size; 1354 loff_t file_size;
1355 unsigned int num; 1355 unsigned int num;
1356 unsigned int offset; 1356 unsigned int offset;
1357 size_t total_len; 1357 size_t total_len = 0;
1358 1358
1359 req = fuse_get_req(fc); 1359 req = fuse_get_req(fc);
1360 if (IS_ERR(req)) 1360 if (IS_ERR(req))
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1720 * This function releases and reacquires fc->lock 1720 * This function releases and reacquires fc->lock
1721 */ 1721 */
1722static void end_requests(struct fuse_conn *fc, struct list_head *head) 1722static void end_requests(struct fuse_conn *fc, struct list_head *head)
1723__releases(&fc->lock) 1723__releases(fc->lock)
1724__acquires(&fc->lock) 1724__acquires(fc->lock)
1725{ 1725{
1726 while (!list_empty(head)) { 1726 while (!list_empty(head)) {
1727 struct fuse_req *req; 1727 struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
1744 * locked). 1744 * locked).
1745 */ 1745 */
1746static void end_io_requests(struct fuse_conn *fc) 1746static void end_io_requests(struct fuse_conn *fc)
1747__releases(&fc->lock) 1747__releases(fc->lock)
1748__acquires(&fc->lock) 1748__acquires(fc->lock)
1749{ 1749{
1750 while (!list_empty(&fc->io)) { 1750 while (!list_empty(&fc->io)) {
1751 struct fuse_req *req = 1751 struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
1769 } 1769 }
1770} 1770}
1771 1771
1772static void end_queued_requests(struct fuse_conn *fc)
1773__releases(fc->lock)
1774__acquires(fc->lock)
1775{
1776 fc->max_background = UINT_MAX;
1777 flush_bg_queue(fc);
1778 end_requests(fc, &fc->pending);
1779 end_requests(fc, &fc->processing);
1780}
1781
1772/* 1782/*
1773 * Abort all requests. 1783 * Abort all requests.
1774 * 1784 *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
1795 fc->connected = 0; 1805 fc->connected = 0;
1796 fc->blocked = 0; 1806 fc->blocked = 0;
1797 end_io_requests(fc); 1807 end_io_requests(fc);
1798 end_requests(fc, &fc->pending);
1799 end_requests(fc, &fc->processing);
1808 end_queued_requests(fc);
1800 wake_up_all(&fc->waitq); 1809 wake_up_all(&fc->waitq);
1801 wake_up_all(&fc->blocked_waitq); 1810 wake_up_all(&fc->blocked_waitq);
1802 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 1811 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
1811 if (fc) { 1820 if (fc) {
1812 spin_lock(&fc->lock); 1821 spin_lock(&fc->lock);
1813 fc->connected = 0; 1822 fc->connected = 0;
1814 end_requests(fc, &fc->pending);
1815 end_requests(fc, &fc->processing);
1823 fc->blocked = 0;
1824 end_queued_requests(fc);
1825 wake_up_all(&fc->blocked_waitq);
1816 spin_unlock(&fc->lock); 1826 spin_unlock(&fc->lock);
1817 fuse_conn_put(fc); 1827 fuse_conn_put(fc);
1818 } 1828 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 147c1f71bdb9..c8224587123f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
1144 1144
1145/* Called under fc->lock, may release and reacquire it */ 1145/* Called under fc->lock, may release and reacquire it */
1146static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) 1146static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
1147__releases(&fc->lock) 1147__releases(fc->lock)
1148__acquires(&fc->lock) 1148__acquires(fc->lock)
1149{ 1149{
1150 struct fuse_inode *fi = get_fuse_inode(req->inode); 1150 struct fuse_inode *fi = get_fuse_inode(req->inode);
1151 loff_t size = i_size_read(req->inode); 1151 loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
1183 * Called with fc->lock 1183 * Called with fc->lock
1184 */ 1184 */
1185void fuse_flush_writepages(struct inode *inode) 1185void fuse_flush_writepages(struct inode *inode)
1186__releases(&fc->lock) 1186__releases(fc->lock)
1187__acquires(&fc->lock) 1187__acquires(fc->lock)
1188{ 1188{
1189 struct fuse_conn *fc = get_fuse_conn(inode); 1189 struct fuse_conn *fc = get_fuse_conn(inode);
1190 struct fuse_inode *fi = get_fuse_inode(inode); 1190 struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index cde1248a6225..ac750bd31a6f 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -932,7 +932,7 @@ int gfs2_logd(void *data)
932 932
933 do { 933 do {
934 prepare_to_wait(&sdp->sd_logd_waitq, &wait, 934 prepare_to_wait(&sdp->sd_logd_waitq, &wait,
935 TASK_UNINTERRUPTIBLE); 935 TASK_INTERRUPTIBLE);
936 if (!gfs2_ail_flush_reqd(sdp) && 936 if (!gfs2_ail_flush_reqd(sdp) &&
937 !gfs2_jrnl_flush_reqd(sdp) && 937 !gfs2_jrnl_flush_reqd(sdp) &&
938 !kthread_should_stop()) 938 !kthread_should_stop())
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index e20ee85955d1..f3f3578393a4 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
115 115
116 inode_inc_link_count(dir); 116 inode_inc_link_count(dir);
117 117
118 inode = minix_new_inode(dir, mode, &err); 118 inode = minix_new_inode(dir, S_IFDIR | mode, &err);
119 if (!inode) 119 if (!inode)
120 goto out_dir; 120 goto out_dir;
121 121
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 6c2aad49d731..f7e13db613cb 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,6 +63,7 @@ config NFS_V3_ACL
63config NFS_V4 63config NFS_V4
64 bool "NFS client support for NFS version 4" 64 bool "NFS client support for NFS version 4"
65 depends on NFS_FS 65 depends on NFS_FS
66 select SUNRPC_GSS
66 help 67 help
67 This option enables support for version 4 of the NFS protocol 68 This option enables support for version 4 of the NFS protocol
68 (RFC 3530) in the kernel's NFS client. 69 (RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4e7df2adb212..e7340729af89 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
275 sin1->sin6_scope_id != sin2->sin6_scope_id) 275 sin1->sin6_scope_id != sin2->sin6_scope_id)
276 return 0; 276 return 0;
277 277
278 return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); 278 return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
279} 279}
280#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ 280#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
281static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, 281static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eb51bd6201da..05bf3c0dc751 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
723 default: 723 default:
724 BUG(); 724 BUG();
725 } 725 }
726 if (res < 0)
727 dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
728 " - error %d!\n",
729 __func__, res);
730 return res; 726 return res;
731} 727}
732 728
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ec3966e4706b..f4cbf0c306c6 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
431 goto out_err; 431 goto out_err;
432 432
433 error = server->nfs_client->rpc_ops->statfs(server, fh, &res); 433 error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
434 if (unlikely(error == -ESTALE)) {
435 struct dentry *pd_dentry;
434 436
437 pd_dentry = dget_parent(dentry);
438 if (pd_dentry != NULL) {
439 nfs_zap_caches(pd_dentry->d_inode);
440 dput(pd_dentry);
441 }
442 }
435 nfs_free_fattr(res.fattr); 443 nfs_free_fattr(res.fattr);
436 if (error < 0) 444 if (error < 0)
437 goto out_err; 445 goto out_err;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 95932f523aef..4264377552e2 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,6 +69,7 @@ config NFSD_V4
69 depends on NFSD && PROC_FS && EXPERIMENTAL 69 depends on NFSD && PROC_FS && EXPERIMENTAL
70 select NFSD_V3 70 select NFSD_V3
71 select FS_POSIX_ACL 71 select FS_POSIX_ACL
72 select SUNRPC_GSS
72 help 73 help
73 This option enables support in your system's NFS server for 74 This option enables support in your system's NFS server for
74 version 4 of the NFS protocol (RFC 3530). 75 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3dfef0623968..cf0d2ffb3c84 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
440 440
441static int nfs4_access_to_omode(u32 access) 441static int nfs4_access_to_omode(u32 access)
442{ 442{
443 switch (access) { 443 switch (access & NFS4_SHARE_ACCESS_BOTH) {
444 case NFS4_SHARE_ACCESS_READ: 444 case NFS4_SHARE_ACCESS_READ:
445 return O_RDONLY; 445 return O_RDONLY;
446 case NFS4_SHARE_ACCESS_WRITE: 446 case NFS4_SHARE_ACCESS_WRITE:
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index a76e0aa5cd3f..391915093fe1 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
209 } 209 }
210 210
211 inode->i_mode = new_mode; 211 inode->i_mode = new_mode;
212 inode->i_ctime = CURRENT_TIME;
212 di->i_mode = cpu_to_le16(inode->i_mode); 213 di->i_mode = cpu_to_le16(inode->i_mode);
214 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
215 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
213 216
214 ocfs2_journal_dirty(handle, di_bh); 217 ocfs2_journal_dirty(handle, di_bh);
215 218
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 215e12ce1d85..592fae5007d1 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6672 last_page_bytes = PAGE_ALIGN(end); 6672 last_page_bytes = PAGE_ALIGN(end);
6673 index = start >> PAGE_CACHE_SHIFT; 6673 index = start >> PAGE_CACHE_SHIFT;
6674 do { 6674 do {
6675 pages[numpages] = grab_cache_page(mapping, index); 6675 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
6676 if (!pages[numpages]) { 6676 if (!pages[numpages]) {
6677 ret = -ENOMEM; 6677 ret = -ENOMEM;
6678 mlog_errno(ret); 6678 mlog_errno(ret);
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index ec6d12339593..c7ee03c22226 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
439 439
440 ocfs2_blockcheck_inc_failure(stats); 440 ocfs2_blockcheck_inc_failure(stats);
441 mlog(ML_ERROR, 441 mlog(ML_ERROR,
442 "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", 442 "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
443 (unsigned int)check.bc_crc32e, (unsigned int)crc); 443 (unsigned int)check.bc_crc32e, (unsigned int)crc);
444 444
445 /* Ok, try ECC fixups */ 445 /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
453 goto out; 453 goto out;
454 } 454 }
455 455
456 mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", 456 mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
457 (unsigned int)check.bc_crc32e, (unsigned int)crc); 457 (unsigned int)check.bc_crc32e, (unsigned int)crc);
458 458
459 rc = -EIO; 459 rc = -EIO;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1361997cf205..cbe2f057cc28 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, 977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
978 size_t caller_veclen, u8 target_node, int *status) 978 size_t caller_veclen, u8 target_node, int *status)
979{ 979{
980 int ret; 980 int ret = 0;
981 struct o2net_msg *msg = NULL; 981 struct o2net_msg *msg = NULL;
982 size_t veclen, caller_bytes = 0; 982 size_t veclen, caller_bytes = 0;
983 struct kvec *vec = NULL; 983 struct kvec *vec = NULL;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f04ebcfffc4a..c49f6de0e7ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3931 goto out_commit; 3931 goto out_commit;
3932 } 3932 }
3933 3933
3934 cpos = split_hash;
3935 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3936 data_ac, meta_ac, new_dx_leaves,
3937 num_dx_leaves);
3938 if (ret) {
3939 mlog_errno(ret);
3940 goto out_commit;
3941 }
3942
3934 for (i = 0; i < num_dx_leaves; i++) { 3943 for (i = 0; i < num_dx_leaves; i++) {
3935 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), 3944 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3936 orig_dx_leaves[i], 3945 orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3939 mlog_errno(ret); 3948 mlog_errno(ret);
3940 goto out_commit; 3949 goto out_commit;
3941 } 3950 }
3942 }
3943 3951
3944 cpos = split_hash; 3952 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3945 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, 3953 new_dx_leaves[i],
3946 data_ac, meta_ac, new_dx_leaves, 3954 OCFS2_JOURNAL_ACCESS_WRITE);
3947 num_dx_leaves); 3955 if (ret) {
3948 if (ret) { 3956 mlog_errno(ret);
3949 mlog_errno(ret); 3957 goto out_commit;
3950 goto out_commit; 3958 }
3951 } 3959 }
3952 3960
3953 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, 3961 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c13b47..765298908f1d 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
1030 struct dlm_lock_resource *res); 1030 struct dlm_lock_resource *res);
1031void dlm_clean_master_list(struct dlm_ctxt *dlm, 1031void dlm_clean_master_list(struct dlm_ctxt *dlm,
1032 u8 dead_node); 1032 u8 dead_node);
1033void dlm_force_free_mles(struct dlm_ctxt *dlm);
1033int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); 1034int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
1034int __dlm_lockres_has_locks(struct dlm_lock_resource *res); 1035int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
1035int __dlm_lockres_unused(struct dlm_lock_resource *res); 1036int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 5efdd37dfe48..901ca52bf86b 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
636 spin_lock(&dlm->track_lock); 636 spin_lock(&dlm->track_lock);
637 if (oldres) 637 if (oldres)
638 track_list = &oldres->tracking; 638 track_list = &oldres->tracking;
639 else 639 else {
640 track_list = &dlm->tracking_list; 640 track_list = &dlm->tracking_list;
641 if (list_empty(track_list)) {
642 dl = NULL;
643 spin_unlock(&dlm->track_lock);
644 goto bail;
645 }
646 }
641 647
642 list_for_each_entry(res, track_list, tracking) { 648 list_for_each_entry(res, track_list, tracking) {
643 if (&res->tracking == &dlm->tracking_list) 649 if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
660 } else 666 } else
661 dl = NULL; 667 dl = NULL;
662 668
669bail:
663 /* passed to seq_show */ 670 /* passed to seq_show */
664 return dl; 671 return dl;
665} 672}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5abef0..11a5c87fd7f7 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
693 693
694 dlm_mark_domain_leaving(dlm); 694 dlm_mark_domain_leaving(dlm);
695 dlm_leave_domain(dlm); 695 dlm_leave_domain(dlm);
696 dlm_force_free_mles(dlm);
696 dlm_complete_dlm_shutdown(dlm); 697 dlm_complete_dlm_shutdown(dlm);
697 } 698 }
698 dlm_put(dlm); 699 dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68dafa4..f564b0e5f80d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3433 wake_up(&res->wq); 3433 wake_up(&res->wq);
3434 wake_up(&dlm->migration_wq); 3434 wake_up(&dlm->migration_wq);
3435} 3435}
3436
3437void dlm_force_free_mles(struct dlm_ctxt *dlm)
3438{
3439 int i;
3440 struct hlist_head *bucket;
3441 struct dlm_master_list_entry *mle;
3442 struct hlist_node *tmp, *list;
3443
3444 /*
3445 * We notified all other nodes that we are exiting the domain and
 3446	 * set the dlm state to DLM_CTXT_LEAVING. If any mles are still
 3447	 * around, we force free them and wake any processes that are waiting
 3448	 * on the mles.
3449 */
3450 spin_lock(&dlm->spinlock);
3451 spin_lock(&dlm->master_lock);
3452
3453 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3454 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3455
3456 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3457 bucket = dlm_master_hash(dlm, i);
3458 hlist_for_each_safe(list, tmp, bucket) {
3459 mle = hlist_entry(list, struct dlm_master_list_entry,
3460 master_hash_node);
3461 if (mle->type != DLM_MLE_BLOCK) {
3462 mlog(ML_ERROR, "bad mle: %p\n", mle);
3463 dlm_print_one_mle(mle);
3464 }
3465 atomic_set(&mle->woken, 1);
3466 wake_up(&mle->wq);
3467
3468 __dlm_unlink_mle(dlm, mle);
3469 __dlm_mle_detach_hb_events(dlm, mle);
3470 __dlm_put_mle(mle);
3471 }
3472 }
3473 spin_unlock(&dlm->master_lock);
3474 spin_unlock(&dlm->spinlock);
3475}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d1ce48e1b3d6..1d596d8c4a4a 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -84,6 +84,7 @@ enum {
84 OI_LS_PARENT, 84 OI_LS_PARENT,
85 OI_LS_RENAME1, 85 OI_LS_RENAME1,
86 OI_LS_RENAME2, 86 OI_LS_RENAME2,
87 OI_LS_REFLINK_TARGET,
87}; 88};
88 89
89int ocfs2_dlm_init(struct ocfs2_super *osb); 90int ocfs2_dlm_init(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 81296b4e3646..9a03c151b5ce 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -36,6 +36,7 @@
36#include <linux/writeback.h> 36#include <linux/writeback.h>
37#include <linux/falloc.h> 37#include <linux/falloc.h>
38#include <linux/quotaops.h> 38#include <linux/quotaops.h>
39#include <linux/blkdev.h>
39 40
40#define MLOG_MASK_PREFIX ML_INODE 41#define MLOG_MASK_PREFIX ML_INODE
41#include <cluster/masklog.h> 42#include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
190 if (err) 191 if (err)
191 goto bail; 192 goto bail;
192 193
193 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 194 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
195 /*
 196	 * We still have to flush the drive's caches to get the data to
 197	 * the platter.
198 */
199 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
200 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
201 NULL, BLKDEV_IFL_WAIT);
194 goto bail; 202 goto bail;
203 }
195 204
196 journal = osb->journal->j_journal; 205 journal = osb->journal->j_journal;
197 err = jbd2_journal_force_commit(journal); 206 err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
774 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); 783 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
775 BUG_ON(abs_from & (inode->i_blkbits - 1)); 784 BUG_ON(abs_from & (inode->i_blkbits - 1));
776 785
777 page = grab_cache_page(mapping, index); 786 page = find_or_create_page(mapping, index, GFP_NOFS);
778 if (!page) { 787 if (!page) {
779 ret = -ENOMEM; 788 ret = -ENOMEM;
780 mlog_errno(ret); 789 mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
2329 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); 2338 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2330 2339
2331 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || 2340 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2332 ((file->f_flags & O_DIRECT) && has_refcount)) { 2341 ((file->f_flags & O_DIRECT) && !direct_io)) {
2333 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2342 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2334 pos + count - 1); 2343 pos + count - 1);
2335 if (ret < 0) 2344 if (ret < 0)
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0492464916b1..eece3e05d9d0 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
488 OCFS2_BH_IGNORE_CACHE); 488 OCFS2_BH_IGNORE_CACHE);
489 } else { 489 } else {
490 status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); 490 status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
491 if (!status) 491 /*
 492	 * If the buffer is in jbd, its checksum may not have been
 493	 * computed yet.
494 */
495 if (!status && !buffer_jbd(bh))
492 status = ocfs2_validate_inode_block(osb->sb, bh); 496 status = ocfs2_validate_inode_block(osb->sb, bh);
493 } 497 }
494 if (status < 0) { 498 if (status < 0) {
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index af2b8fe1f139..4c18f4ad93b4 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
74 /* 74 /*
75 * Another node might have truncated while we were waiting on 75 * Another node might have truncated while we were waiting on
76 * cluster locks. 76 * cluster locks.
77 * We don't check size == 0 before the shift. This is borrowed
78 * from do_generic_file_read.
77 */ 79 */
78 last_index = size >> PAGE_CACHE_SHIFT; 80 last_index = (size - 1) >> PAGE_CACHE_SHIFT;
79 if (page->index > last_index) { 81 if (unlikely(!size || page->index > last_index)) {
80 ret = -EINVAL; 82 ret = -EINVAL;
81 goto out; 83 goto out;
82 } 84 }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
107 * because the "write" would invalidate their data. 109 * because the "write" would invalidate their data.
108 */ 110 */
109 if (page->index == last_index) 111 if (page->index == last_index)
110 len = size & ~PAGE_CACHE_MASK; 112 len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
111 113
112 ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, 114 ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
113 &fsdata, di_bh, page); 115 &fsdata, di_bh, page);
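
The __ocfs2_page_mkwrite() hunk above replaces size >> PAGE_CACHE_SHIFT with (size - 1) >> PAGE_CACHE_SHIFT (plus an explicit zero-size check) and computes the final page's length as ((size - 1) & ~PAGE_CACHE_MASK) + 1. The standalone sketch below works the arithmetic for a few sizes, assuming a 4096-byte page; for sizes that are exact page multiples, the old formulas named a page past EOF and a zero length for the last page.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 4095, 4096, 4097, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];

		/* Old formulas, as removed by the hunk. */
		unsigned long old_last = size >> PAGE_SHIFT;
		unsigned long old_len  = size & ~PAGE_MASK;
		/* New formulas, as added by the hunk (size > 0 assumed). */
		unsigned long new_last = (size - 1) >> PAGE_SHIFT;
		unsigned long new_len  = ((size - 1) & ~PAGE_MASK) + 1;

		printf("size %5lu: last page %lu -> %lu, last-page len %4lu -> %4lu\n",
		       size, old_last, new_last, old_len, new_len);
	}
	return 0;
}
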
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f171b51a74f7..a00dda2e4f16 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -472,32 +472,23 @@ leave:
472 return status; 472 return status;
473} 473}
474 474
475static int ocfs2_mknod_locked(struct ocfs2_super *osb, 475static int __ocfs2_mknod_locked(struct inode *dir,
476 struct inode *dir, 476 struct inode *inode,
477 struct inode *inode, 477 dev_t dev,
478 dev_t dev, 478 struct buffer_head **new_fe_bh,
479 struct buffer_head **new_fe_bh, 479 struct buffer_head *parent_fe_bh,
480 struct buffer_head *parent_fe_bh, 480 handle_t *handle,
481 handle_t *handle, 481 struct ocfs2_alloc_context *inode_ac,
482 struct ocfs2_alloc_context *inode_ac) 482 u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
483{ 483{
484 int status = 0; 484 int status = 0;
485 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
485 struct ocfs2_dinode *fe = NULL; 486 struct ocfs2_dinode *fe = NULL;
486 struct ocfs2_extent_list *fel; 487 struct ocfs2_extent_list *fel;
487 u64 suballoc_loc, fe_blkno = 0;
488 u16 suballoc_bit;
489 u16 feat; 488 u16 feat;
490 489
491 *new_fe_bh = NULL; 490 *new_fe_bh = NULL;
492 491
493 status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
494 inode_ac, &suballoc_loc,
495 &suballoc_bit, &fe_blkno);
496 if (status < 0) {
497 mlog_errno(status);
498 goto leave;
499 }
500
501 /* populate as many fields early on as possible - many of 492 /* populate as many fields early on as possible - many of
502 * these are used by the support functions here and in 493 * these are used by the support functions here and in
503 * callers. */ 494 * callers. */
@@ -591,6 +582,34 @@ leave:
591 return status; 582 return status;
592} 583}
593 584
585static int ocfs2_mknod_locked(struct ocfs2_super *osb,
586 struct inode *dir,
587 struct inode *inode,
588 dev_t dev,
589 struct buffer_head **new_fe_bh,
590 struct buffer_head *parent_fe_bh,
591 handle_t *handle,
592 struct ocfs2_alloc_context *inode_ac)
593{
594 int status = 0;
595 u64 suballoc_loc, fe_blkno = 0;
596 u16 suballoc_bit;
597
598 *new_fe_bh = NULL;
599
600 status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
601 inode_ac, &suballoc_loc,
602 &suballoc_bit, &fe_blkno);
603 if (status < 0) {
604 mlog_errno(status);
605 return status;
606 }
607
608 return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
609 parent_fe_bh, handle, inode_ac,
610 fe_blkno, suballoc_loc, suballoc_bit);
611}
612
594static int ocfs2_mkdir(struct inode *dir, 613static int ocfs2_mkdir(struct inode *dir,
595 struct dentry *dentry, 614 struct dentry *dentry,
596 int mode) 615 int mode)
@@ -1852,61 +1871,117 @@ bail:
1852 return status; 1871 return status;
1853} 1872}
1854 1873
1855static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, 1874static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
1856 struct inode **ret_orphan_dir, 1875 struct inode **ret_orphan_dir,
1857 u64 blkno, 1876 struct buffer_head **ret_orphan_dir_bh)
1858 char *name,
1859 struct ocfs2_dir_lookup_result *lookup)
1860{ 1877{
1861 struct inode *orphan_dir_inode; 1878 struct inode *orphan_dir_inode;
1862 struct buffer_head *orphan_dir_bh = NULL; 1879 struct buffer_head *orphan_dir_bh = NULL;
1863 int status = 0; 1880 int ret = 0;
1864
1865 status = ocfs2_blkno_stringify(blkno, name);
1866 if (status < 0) {
1867 mlog_errno(status);
1868 return status;
1869 }
1870 1881
1871 orphan_dir_inode = ocfs2_get_system_file_inode(osb, 1882 orphan_dir_inode = ocfs2_get_system_file_inode(osb,
1872 ORPHAN_DIR_SYSTEM_INODE, 1883 ORPHAN_DIR_SYSTEM_INODE,
1873 osb->slot_num); 1884 osb->slot_num);
1874 if (!orphan_dir_inode) { 1885 if (!orphan_dir_inode) {
1875 status = -ENOENT; 1886 ret = -ENOENT;
1876 mlog_errno(status); 1887 mlog_errno(ret);
1877 return status; 1888 return ret;
1878 } 1889 }
1879 1890
1880 mutex_lock(&orphan_dir_inode->i_mutex); 1891 mutex_lock(&orphan_dir_inode->i_mutex);
1881 1892
1882 status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); 1893 ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
1883 if (status < 0) { 1894 if (ret < 0) {
1884 mlog_errno(status); 1895 mutex_unlock(&orphan_dir_inode->i_mutex);
1885 goto leave; 1896 iput(orphan_dir_inode);
1897
1898 mlog_errno(ret);
1899 return ret;
1886 } 1900 }
1887 1901
1888 status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, 1902 *ret_orphan_dir = orphan_dir_inode;
1889 orphan_dir_bh, name, 1903 *ret_orphan_dir_bh = orphan_dir_bh;
1890 OCFS2_ORPHAN_NAMELEN, lookup);
1891 if (status < 0) {
1892 ocfs2_inode_unlock(orphan_dir_inode, 1);
1893 1904
1894 mlog_errno(status); 1905 return 0;
1895 goto leave; 1906}
1907
1908static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
1909 struct buffer_head *orphan_dir_bh,
1910 u64 blkno,
1911 char *name,
1912 struct ocfs2_dir_lookup_result *lookup)
1913{
1914 int ret;
1915 struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
1916
1917 ret = ocfs2_blkno_stringify(blkno, name);
1918 if (ret < 0) {
1919 mlog_errno(ret);
1920 return ret;
1921 }
1922
1923 ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
1924 orphan_dir_bh, name,
1925 OCFS2_ORPHAN_NAMELEN, lookup);
1926 if (ret < 0) {
1927 mlog_errno(ret);
1928 return ret;
1929 }
1930
1931 return 0;
1932}
1933
1934/**
1935 * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
1936 * insertion of an orphan.
1937 * @osb: ocfs2 file system
1938 * @ret_orphan_dir: Orphan dir inode - returned locked!
1939 * @blkno: Actual block number of the inode to be inserted into orphan dir.
1940 * @lookup: dir lookup result, to be passed back into functions like
1941 * ocfs2_orphan_add
1942 *
 1943 * Returns zero on success; the ret_orphan_dir, name and lookup
 1944 * fields will be populated.
1945 *
1946 * Returns non-zero on failure.
1947 */
1948static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
1949 struct inode **ret_orphan_dir,
1950 u64 blkno,
1951 char *name,
1952 struct ocfs2_dir_lookup_result *lookup)
1953{
1954 struct inode *orphan_dir_inode = NULL;
1955 struct buffer_head *orphan_dir_bh = NULL;
1956 int ret = 0;
1957
1958 ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
1959 &orphan_dir_bh);
1960 if (ret < 0) {
1961 mlog_errno(ret);
1962 return ret;
1963 }
1964
1965 ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
1966 blkno, name, lookup);
1967 if (ret < 0) {
1968 mlog_errno(ret);
1969 goto out;
1896 } 1970 }
1897 1971
1898 *ret_orphan_dir = orphan_dir_inode; 1972 *ret_orphan_dir = orphan_dir_inode;
1899 1973
1900leave: 1974out:
1901 if (status) { 1975 brelse(orphan_dir_bh);
1976
1977 if (ret) {
1978 ocfs2_inode_unlock(orphan_dir_inode, 1);
1902 mutex_unlock(&orphan_dir_inode->i_mutex); 1979 mutex_unlock(&orphan_dir_inode->i_mutex);
1903 iput(orphan_dir_inode); 1980 iput(orphan_dir_inode);
1904 } 1981 }
1905 1982
1906 brelse(orphan_dir_bh); 1983 mlog_exit(ret);
1907 1984 return ret;
1908 mlog_exit(status);
1909 return status;
1910} 1985}
1911 1986
1912static int ocfs2_orphan_add(struct ocfs2_super *osb, 1987static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
2053 return status; 2128 return status;
2054} 2129}
2055 2130
2131/**
 2132 * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
2133 * allocated file. This is different from the typical 'add to orphan dir'
2134 * operation in that the inode does not yet exist. This is a problem because
 2135 * the orphan dir stringifies the inode block number to come up with its
 2136 * dirent. Obviously, if the inode does not yet exist we have a chicken-and-egg
 2137 * problem. This function works around it by calling deeper into the orphan
 2138 * and suballoc code than other callers. Use this only when necessary.
2139 * @dir: The directory which this inode will ultimately wind up under - not the
2140 * orphan dir!
 2141 * @dir_bh: buffer_head of the @dir inode block
 2142 * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
2143 * with the string to be used for orphan dirent. Pass back to the orphan dir
2144 * code.
2145 * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
2146 * dir code.
2147 * @ret_di_blkno: block number where the new inode will be allocated.
2148 * @orphan_insert: Dir insert context to be passed back into orphan dir code.
2149 * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
2150 *
 2151 * Returns zero on success; the ret_orphan_dir, name and lookup
 2152 * fields will be populated.
2153 *
2154 * Returns non-zero on failure.
2155 */
2156static int ocfs2_prep_new_orphaned_file(struct inode *dir,
2157 struct buffer_head *dir_bh,
2158 char *orphan_name,
2159 struct inode **ret_orphan_dir,
2160 u64 *ret_di_blkno,
2161 struct ocfs2_dir_lookup_result *orphan_insert,
2162 struct ocfs2_alloc_context **ret_inode_ac)
2163{
2164 int ret;
2165 u64 di_blkno;
2166 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2167 struct inode *orphan_dir = NULL;
2168 struct buffer_head *orphan_dir_bh = NULL;
2169 struct ocfs2_alloc_context *inode_ac = NULL;
2170
2171 ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
2172 if (ret < 0) {
2173 mlog_errno(ret);
2174 return ret;
2175 }
2176
2177 /* reserve an inode spot */
2178 ret = ocfs2_reserve_new_inode(osb, &inode_ac);
2179 if (ret < 0) {
2180 if (ret != -ENOSPC)
2181 mlog_errno(ret);
2182 goto out;
2183 }
2184
2185 ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
2186 &di_blkno);
2187 if (ret) {
2188 mlog_errno(ret);
2189 goto out;
2190 }
2191
2192 ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
2193 di_blkno, orphan_name, orphan_insert);
2194 if (ret < 0) {
2195 mlog_errno(ret);
2196 goto out;
2197 }
2198
2199out:
2200 if (ret == 0) {
2201 *ret_orphan_dir = orphan_dir;
2202 *ret_di_blkno = di_blkno;
2203 *ret_inode_ac = inode_ac;
2204 /*
2205 * orphan_name and orphan_insert are already up to
2206 * date via prepare_orphan_dir
2207 */
2208 } else {
2209 /* Unroll reserve_new_inode* */
2210 if (inode_ac)
2211 ocfs2_free_alloc_context(inode_ac);
2212
2213 /* Unroll orphan dir locking */
2214 mutex_unlock(&orphan_dir->i_mutex);
2215 ocfs2_inode_unlock(orphan_dir, 1);
2216 iput(orphan_dir);
2217 }
2218
2219 brelse(orphan_dir_bh);
2220
2221 return 0;
2222}
2223
2056int ocfs2_create_inode_in_orphan(struct inode *dir, 2224int ocfs2_create_inode_in_orphan(struct inode *dir,
2057 int mode, 2225 int mode,
2058 struct inode **new_inode) 2226 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2068 struct buffer_head *new_di_bh = NULL; 2236 struct buffer_head *new_di_bh = NULL;
2069 struct ocfs2_alloc_context *inode_ac = NULL; 2237 struct ocfs2_alloc_context *inode_ac = NULL;
2070 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 2238 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
2239 u64 uninitialized_var(di_blkno), suballoc_loc;
2240 u16 suballoc_bit;
2071 2241
2072 status = ocfs2_inode_lock(dir, &parent_di_bh, 1); 2242 status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
2073 if (status < 0) { 2243 if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2076 return status; 2246 return status;
2077 } 2247 }
2078 2248
2079 /* 2249 status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
2080 * We give the orphan dir the root blkno to fake an orphan name, 2250 orphan_name, &orphan_dir,
2081 * and allocate enough space for our insertion. 2251 &di_blkno, &orphan_insert, &inode_ac);
2082 */
2083 status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
2084 osb->root_blkno,
2085 orphan_name, &orphan_insert);
2086 if (status < 0) {
2087 mlog_errno(status);
2088 goto leave;
2089 }
2090
2091 /* reserve an inode spot */
2092 status = ocfs2_reserve_new_inode(osb, &inode_ac);
2093 if (status < 0) { 2252 if (status < 0) {
2094 if (status != -ENOSPC) 2253 if (status != -ENOSPC)
2095 mlog_errno(status); 2254 mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2116 goto leave; 2275 goto leave;
2117 did_quota_inode = 1; 2276 did_quota_inode = 1;
2118 2277
2119 inode->i_nlink = 0; 2278 status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
2120 /* do the real work now. */ 2279 &suballoc_loc,
2121 status = ocfs2_mknod_locked(osb, dir, inode, 2280 &suballoc_bit, di_blkno);
2122 0, &new_di_bh, parent_di_bh, handle,
2123 inode_ac);
2124 if (status < 0) { 2281 if (status < 0) {
2125 mlog_errno(status); 2282 mlog_errno(status);
2126 goto leave; 2283 goto leave;
2127 } 2284 }
2128 2285
2129 status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); 2286 inode->i_nlink = 0;
2287 /* do the real work now. */
2288 status = __ocfs2_mknod_locked(dir, inode,
2289 0, &new_di_bh, parent_di_bh, handle,
2290 inode_ac, di_blkno, suballoc_loc,
2291 suballoc_bit);
2130 if (status < 0) { 2292 if (status < 0) {
2131 mlog_errno(status); 2293 mlog_errno(status);
2132 goto leave; 2294 goto leave;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 33f1c9a8258d..fa31d05e41b7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -235,18 +235,31 @@
235#define OCFS2_HAS_REFCOUNT_FL (0x0010) 235#define OCFS2_HAS_REFCOUNT_FL (0x0010)
236 236
237/* Inode attributes, keep in sync with EXT2 */ 237/* Inode attributes, keep in sync with EXT2 */
238#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ 238#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
239#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ 239#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
240#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ 240#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
241#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ 241#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
242#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ 242#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
243#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ 243#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
244#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ 244#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
245#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ 245#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
246#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ 246/* Reserved for compression usage... */
247 247#define OCFS2_DIRTY_FL FS_DIRTY_FL
248#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ 248#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
249#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ 249#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
250#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
251/* End compression flags --- maybe not all used */
252#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
253#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
254#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
255#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
256#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
257#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
258#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
259#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
260
261#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
262#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
250 263
251/* 264/*
252 * Extent record flags (e_node.leaf.flags) 265 * Extent record flags (e_node.leaf.flags)
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 2d3420af1a83..5d241505690b 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -23,10 +23,10 @@
23/* 23/*
24 * ioctl commands 24 * ioctl commands
25 */ 25 */
26#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) 26#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
27#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) 27#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
28#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) 28#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
29#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) 29#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
30 30
31/* 31/*
32 * Space reservation / allocation / free ioctls and argument structure 32 * Space reservation / allocation / free ioctls and argument structure
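
With the redefinitions above, ocfs2's GETFLAGS/SETFLAGS ioctls share the generic FS_IOC_* numbers from linux/fs.h, so ordinary callers (and tools such as lsattr/chattr) need no ocfs2-specific header. The userspace sketch below, offered only as an illustration, queries a file's attribute flags through the generic ioctl; the file path comes from argv and the two flag names printed are just examples.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, flags = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Generic attribute-flags ioctl; the kernel fills in an int. */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}

	printf("flags: 0x%x (immutable=%d, append-only=%d)\n",
	       flags, !!(flags & FS_IMMUTABLE_FL), !!(flags & FS_APPEND_FL));
	close(fd);
	return 0;
}
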
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 73a11ccfd4c2..efdd75607406 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2960 if (map_end & (PAGE_CACHE_SIZE - 1)) 2960 if (map_end & (PAGE_CACHE_SIZE - 1))
2961 to = map_end & (PAGE_CACHE_SIZE - 1); 2961 to = map_end & (PAGE_CACHE_SIZE - 1);
2962 2962
2963 page = grab_cache_page(mapping, page_index); 2963 page = find_or_create_page(mapping, page_index, GFP_NOFS);
2964 2964
2965 /* 2965 /*
2966 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page 2966 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
3179 if (map_end > end) 3179 if (map_end > end)
3180 map_end = end; 3180 map_end = end;
3181 3181
3182 page = grab_cache_page(context->inode->i_mapping, page_index); 3182 page = find_or_create_page(context->inode->i_mapping,
3183 page_index, GFP_NOFS);
3183 BUG_ON(!page); 3184 BUG_ON(!page);
3184 3185
3185 wait_on_page_writeback(page); 3186 wait_on_page_writeback(page);
@@ -4200,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4200 goto out; 4201 goto out;
4201 } 4202 }
4202 4203
4203 mutex_lock(&new_inode->i_mutex); 4204 mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
4204 ret = ocfs2_inode_lock(new_inode, &new_bh, 1); 4205 ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4206 OI_LS_REFLINK_TARGET);
4205 if (ret) { 4207 if (ret) {
4206 mlog_errno(ret); 4208 mlog_errno(ret);
4207 goto out_unlock; 4209 goto out_unlock;
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index d8b6e4259b80..3e78db361bc7 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
732 struct ocfs2_alloc_reservation *resv, 732 struct ocfs2_alloc_reservation *resv,
733 int *cstart, int *clen) 733 int *cstart, int *clen)
734{ 734{
735 unsigned int wanted = *clen;
736
737 if (resv == NULL || ocfs2_resmap_disabled(resmap)) 735 if (resv == NULL || ocfs2_resmap_disabled(resmap))
738 return -ENOSPC; 736 return -ENOSPC;
739 737
740 spin_lock(&resv_lock); 738 spin_lock(&resv_lock);
741 739
742 /*
743 * We don't want to over-allocate for temporary
744 * windows. Otherwise, we run the risk of fragmenting the
745 * allocation space.
746 */
747 wanted = ocfs2_resv_window_bits(resmap, resv);
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 if (ocfs2_resv_empty(resv)) { 740 if (ocfs2_resv_empty(resv)) {
752 mlog(0, "empty reservation, find new window\n"); 741 /*
742 * We don't want to over-allocate for temporary
743 * windows. Otherwise, we run the risk of fragmenting the
744 * allocation space.
745 */
746 unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
753 747
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 mlog(0, "empty reservation, find new window\n");
754 /* 752 /*
755 * Try to get a window here. If it works, we must fall 753 * Try to get a window here. If it works, we must fall
756 * through and test the bitmap . This avoids some 754 * through and test the bitmap . This avoids some
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index a8e6a95a353f..849c2f0e0a0e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
57 u64 sr_bg_blkno; /* The bg we allocated from. Set 57 u64 sr_bg_blkno; /* The bg we allocated from. Set
58 to 0 when a block group is 58 to 0 when a block group is
59 contiguous. */ 59 contiguous. */
60 u64 sr_bg_stable_blkno; /*
61 * Doesn't change, always
62 * set to target block
63 * group descriptor
64 * block.
65 */
60 u64 sr_blkno; /* The first allocated block */ 66 u64 sr_blkno; /* The first allocated block */
61 unsigned int sr_bit_offset; /* The bit in the bg */ 67 unsigned int sr_bit_offset; /* The bit in the bg */
62 unsigned int sr_bits; /* How many bits we claimed */ 68 unsigned int sr_bits; /* How many bits we claimed */
63}; 69};
64 70
71static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
72{
73 if (res->sr_blkno == 0)
74 return 0;
75
76 if (res->sr_bg_blkno)
77 return res->sr_bg_blkno;
78
79 return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
80}
81
65static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); 82static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
66static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); 83static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
67static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); 84static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
138 brelse(ac->ac_bh); 155 brelse(ac->ac_bh);
139 ac->ac_bh = NULL; 156 ac->ac_bh = NULL;
140 ac->ac_resv = NULL; 157 ac->ac_resv = NULL;
158 if (ac->ac_find_loc_priv) {
159 kfree(ac->ac_find_loc_priv);
160 ac->ac_find_loc_priv = NULL;
161 }
141} 162}
142 163
143void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) 164void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@ out:
336static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, 357static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
337 struct ocfs2_group_desc *bg, 358 struct ocfs2_group_desc *bg,
338 struct ocfs2_chain_list *cl, 359 struct ocfs2_chain_list *cl,
339 u64 p_blkno, u32 clusters) 360 u64 p_blkno, unsigned int clusters)
340{ 361{
341 struct ocfs2_extent_list *el = &bg->bg_list; 362 struct ocfs2_extent_list *el = &bg->bg_list;
342 struct ocfs2_extent_rec *rec; 363 struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
348 rec->e_blkno = cpu_to_le64(p_blkno); 369 rec->e_blkno = cpu_to_le64(p_blkno);
349 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / 370 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
350 le16_to_cpu(cl->cl_bpc)); 371 le16_to_cpu(cl->cl_bpc));
351 rec->e_leaf_clusters = cpu_to_le32(clusters); 372 rec->e_leaf_clusters = cpu_to_le16(clusters);
352 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); 373 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
353 le16_add_cpu(&bg->bg_free_bits_count, 374 le16_add_cpu(&bg->bg_free_bits_count,
354 clusters * le16_to_cpu(cl->cl_bpc)); 375 clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1678 if (!ret) 1699 if (!ret)
1679 ocfs2_bg_discontig_fix_result(ac, gd, res); 1700 ocfs2_bg_discontig_fix_result(ac, gd, res);
1680 1701
1702 /*
1703 * sr_bg_blkno might have been changed by
1704 * ocfs2_bg_discontig_fix_result
1705 */
1706 res->sr_bg_stable_blkno = group_bh->b_blocknr;
1707
1708 if (ac->ac_find_loc_only)
1709 goto out_loc_only;
1710
1681 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, 1711 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1682 res->sr_bits, 1712 res->sr_bits,
1683 le16_to_cpu(gd->bg_chain)); 1713 le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1691 if (ret < 0) 1721 if (ret < 0)
1692 mlog_errno(ret); 1722 mlog_errno(ret);
1693 1723
1724out_loc_only:
1694 *bits_left = le16_to_cpu(gd->bg_free_bits_count); 1725 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
1695 1726
1696out: 1727out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1708{ 1739{
1709 int status; 1740 int status;
1710 u16 chain; 1741 u16 chain;
1711 u32 tmp_used;
1712 u64 next_group; 1742 u64 next_group;
1713 struct inode *alloc_inode = ac->ac_inode; 1743 struct inode *alloc_inode = ac->ac_inode;
1714 struct buffer_head *group_bh = NULL; 1744 struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1770 if (!status) 1800 if (!status)
1771 ocfs2_bg_discontig_fix_result(ac, bg, res); 1801 ocfs2_bg_discontig_fix_result(ac, bg, res);
1772 1802
1803 /*
1804 * sr_bg_blkno might have been changed by
1805 * ocfs2_bg_discontig_fix_result
1806 */
1807 res->sr_bg_stable_blkno = group_bh->b_blocknr;
1773 1808
1774 /* 1809 /*
1775 * Keep track of previous block descriptor read. When 1810 * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1796 } 1831 }
1797 } 1832 }
1798 1833
1799 /* Ok, claim our bits now: set the info on dinode, chainlist 1834 if (ac->ac_find_loc_only)
1800 * and then the group */ 1835 goto out_loc_only;
1801 status = ocfs2_journal_access_di(handle, 1836
1802 INODE_CACHE(alloc_inode), 1837 status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
1803 ac->ac_bh, 1838 ac->ac_bh, res->sr_bits,
1804 OCFS2_JOURNAL_ACCESS_WRITE); 1839 chain);
1805 if (status < 0) { 1840 if (status) {
1806 mlog_errno(status); 1841 mlog_errno(status);
1807 goto bail; 1842 goto bail;
1808 } 1843 }
1809 1844
1810 tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
1811 fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
1812 le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
1813 ocfs2_journal_dirty(handle, ac->ac_bh);
1814
1815 status = ocfs2_block_group_set_bits(handle, 1845 status = ocfs2_block_group_set_bits(handle,
1816 alloc_inode, 1846 alloc_inode,
1817 bg, 1847 bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1826 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 1856 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
1827 (unsigned long long)le64_to_cpu(fe->i_blkno)); 1857 (unsigned long long)le64_to_cpu(fe->i_blkno));
1828 1858
1859out_loc_only:
1829 *bits_left = le16_to_cpu(bg->bg_free_bits_count); 1860 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1830bail: 1861bail:
1831 brelse(group_bh); 1862 brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1845 int status; 1876 int status;
1846 u16 victim, i; 1877 u16 victim, i;
1847 u16 bits_left = 0; 1878 u16 bits_left = 0;
1879 u64 hint = ac->ac_last_group;
1848 struct ocfs2_chain_list *cl; 1880 struct ocfs2_chain_list *cl;
1849 struct ocfs2_dinode *fe; 1881 struct ocfs2_dinode *fe;
1850 1882
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1872 goto bail; 1904 goto bail;
1873 } 1905 }
1874 1906
1875 res->sr_bg_blkno = ac->ac_last_group; 1907 res->sr_bg_blkno = hint;
1876 if (res->sr_bg_blkno) { 1908 if (res->sr_bg_blkno) {
1877 /* Attempt to short-circuit the usual search mechanism 1909 /* Attempt to short-circuit the usual search mechanism
1878 * by jumping straight to the most recently used 1910 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1896 1928
1897 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, 1929 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1898 res, &bits_left); 1930 res, &bits_left);
1899 if (!status) 1931 if (!status) {
1932 hint = ocfs2_group_from_res(res);
1900 goto set_hint; 1933 goto set_hint;
1934 }
1901 if (status < 0 && status != -ENOSPC) { 1935 if (status < 0 && status != -ENOSPC) {
1902 mlog_errno(status); 1936 mlog_errno(status);
1903 goto bail; 1937 goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1920 ac->ac_chain = i; 1954 ac->ac_chain = i;
1921 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, 1955 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1922 res, &bits_left); 1956 res, &bits_left);
1923 if (!status) 1957 if (!status) {
1958 hint = ocfs2_group_from_res(res);
1924 break; 1959 break;
1960 }
1925 if (status < 0 && status != -ENOSPC) { 1961 if (status < 0 && status != -ENOSPC) {
1926 mlog_errno(status); 1962 mlog_errno(status);
1927 goto bail; 1963 goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
1936 if (bits_left < min_bits) 1972 if (bits_left < min_bits)
1937 ac->ac_last_group = 0; 1973 ac->ac_last_group = 0;
1938 else 1974 else
1939 ac->ac_last_group = res->sr_bg_blkno; 1975 ac->ac_last_group = hint;
1940 } 1976 }
1941 1977
1942bail: 1978bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
2016 OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; 2052 OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
2017} 2053}
2018 2054
2055int ocfs2_find_new_inode_loc(struct inode *dir,
2056 struct buffer_head *parent_fe_bh,
2057 struct ocfs2_alloc_context *ac,
2058 u64 *fe_blkno)
2059{
2060 int ret;
2061 handle_t *handle = NULL;
2062 struct ocfs2_suballoc_result *res;
2063
2064 BUG_ON(!ac);
2065 BUG_ON(ac->ac_bits_given != 0);
2066 BUG_ON(ac->ac_bits_wanted != 1);
2067 BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
2068
2069 res = kzalloc(sizeof(*res), GFP_NOFS);
2070 if (res == NULL) {
2071 ret = -ENOMEM;
2072 mlog_errno(ret);
2073 goto out;
2074 }
2075
2076 ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
2077
2078 /*
2079 * The handle started here is for chain relink. Alternatively,
2080 * we could just disable relink for these calls.
2081 */
2082 handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
2083 if (IS_ERR(handle)) {
2084 ret = PTR_ERR(handle);
2085 handle = NULL;
2086 mlog_errno(ret);
2087 goto out;
2088 }
2089
2090 /*
2091 * This will instruct ocfs2_claim_suballoc_bits and
2092 * ocfs2_search_one_group to search but save actual allocation
2093 * for later.
2094 */
2095 ac->ac_find_loc_only = 1;
2096
2097 ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
2098 if (ret < 0) {
2099 mlog_errno(ret);
2100 goto out;
2101 }
2102
2103 ac->ac_find_loc_priv = res;
2104 *fe_blkno = res->sr_blkno;
2105
2106out:
2107 if (handle)
2108 ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
2109
2110 if (ret)
2111 kfree(res);
2112
2113 return ret;
2114}
2115
2116int ocfs2_claim_new_inode_at_loc(handle_t *handle,
2117 struct inode *dir,
2118 struct ocfs2_alloc_context *ac,
2119 u64 *suballoc_loc,
2120 u16 *suballoc_bit,
2121 u64 di_blkno)
2122{
2123 int ret;
2124 u16 chain;
2125 struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
2126 struct buffer_head *bg_bh = NULL;
2127 struct ocfs2_group_desc *bg;
2128 struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
2129
2130 /*
2131 * Since di_blkno is being passed back in, we check for any
2132 * inconsistencies which may have happened between
2133 * calls. These are code bugs as di_blkno is not expected to
2134 * change once returned from ocfs2_find_new_inode_loc()
2135 */
2136 BUG_ON(res->sr_blkno != di_blkno);
2137
2138 ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
2139 res->sr_bg_stable_blkno, &bg_bh);
2140 if (ret) {
2141 mlog_errno(ret);
2142 goto out;
2143 }
2144
2145 bg = (struct ocfs2_group_desc *) bg_bh->b_data;
2146 chain = le16_to_cpu(bg->bg_chain);
2147
2148 ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
2149 ac->ac_bh, res->sr_bits,
2150 chain);
2151 if (ret) {
2152 mlog_errno(ret);
2153 goto out;
2154 }
2155
2156 ret = ocfs2_block_group_set_bits(handle,
2157 ac->ac_inode,
2158 bg,
2159 bg_bh,
2160 res->sr_bit_offset,
2161 res->sr_bits);
2162 if (ret < 0) {
2163 mlog_errno(ret);
2164 goto out;
2165 }
2166
2167 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
2168 (unsigned long long)di_blkno);
2169
2170 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
2171
2172 BUG_ON(res->sr_bits != 1);
2173
2174 *suballoc_loc = res->sr_bg_blkno;
2175 *suballoc_bit = res->sr_bit_offset;
2176 ac->ac_bits_given++;
2177 ocfs2_save_inode_ac_group(dir, ac);
2178
2179out:
2180 brelse(bg_bh);
2181
2182 return ret;
2183}
2184
2019int ocfs2_claim_new_inode(handle_t *handle, 2185int ocfs2_claim_new_inode(handle_t *handle,
2020 struct inode *dir, 2186 struct inode *dir,
2021 struct buffer_head *parent_fe_bh, 2187 struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
2567 * suballoc_bit. 2733 * suballoc_bit.
2568 */ 2734 */
2569static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, 2735static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2570 u16 *suballoc_slot, u16 *suballoc_bit) 2736 u16 *suballoc_slot, u64 *group_blkno,
2737 u16 *suballoc_bit)
2571{ 2738{
2572 int status; 2739 int status;
2573 struct buffer_head *inode_bh = NULL; 2740 struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2604 *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); 2771 *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
2605 if (suballoc_bit) 2772 if (suballoc_bit)
2606 *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); 2773 *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
2774 if (group_blkno)
2775 *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
2607 2776
2608bail: 2777bail:
2609 brelse(inode_bh); 2778 brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
2621 */ 2790 */
2622static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, 2791static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2623 struct inode *suballoc, 2792 struct inode *suballoc,
2624 struct buffer_head *alloc_bh, u64 blkno, 2793 struct buffer_head *alloc_bh,
2794 u64 group_blkno, u64 blkno,
2625 u16 bit, int *res) 2795 u16 bit, int *res)
2626{ 2796{
2627 struct ocfs2_dinode *alloc_di; 2797 struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2642 goto bail; 2812 goto bail;
2643 } 2813 }
2644 2814
2645 if (alloc_di->i_suballoc_loc) 2815 bg_blkno = group_blkno ? group_blkno :
2646 bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc); 2816 ocfs2_which_suballoc_group(blkno, bit);
2647 else
2648 bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
2649 status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, 2817 status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
2650 &group_bh); 2818 &group_bh);
2651 if (status < 0) { 2819 if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
2680int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) 2848int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2681{ 2849{
2682 int status; 2850 int status;
2851 u64 group_blkno = 0;
2683 u16 suballoc_bit = 0, suballoc_slot = 0; 2852 u16 suballoc_bit = 0, suballoc_slot = 0;
2684 struct inode *inode_alloc_inode; 2853 struct inode *inode_alloc_inode;
2685 struct buffer_head *alloc_bh = NULL; 2854 struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2687 mlog_entry("blkno: %llu", (unsigned long long)blkno); 2856 mlog_entry("blkno: %llu", (unsigned long long)blkno);
2688 2857
2689 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, 2858 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
2690 &suballoc_bit); 2859 &group_blkno, &suballoc_bit);
2691 if (status < 0) { 2860 if (status < 0) {
2692 mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); 2861 mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
2693 goto bail; 2862 goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2715 } 2884 }
2716 2885
2717 status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, 2886 status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
2718 blkno, suballoc_bit, res); 2887 group_blkno, blkno, suballoc_bit, res);
2719 if (status < 0) 2888 if (status < 0)
2720 mlog(ML_ERROR, "test suballoc bit failed %d\n", status); 2889 mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
2721 2890
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a017dd3ee7d9..b8afabfeede4 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
56 u64 ac_max_block; /* Highest block number to allocate. 0 is 56 u64 ac_max_block; /* Highest block number to allocate. 0 is
57 is the same as ~0 - unlimited */ 57 is the same as ~0 - unlimited */
58 58
59 int ac_find_loc_only; /* hack for reflink operation ordering */
60 struct ocfs2_suballoc_result *ac_find_loc_priv; /* */
61
59 struct ocfs2_alloc_reservation *ac_resv; 62 struct ocfs2_alloc_reservation *ac_resv;
60}; 63};
61 64
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
197 struct ocfs2_alloc_context **meta_ac); 200 struct ocfs2_alloc_context **meta_ac);
198 201
199int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); 202int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
203
204
205
206/*
207 * The following two interfaces are for ocfs2_create_inode_in_orphan().
208 */
209int ocfs2_find_new_inode_loc(struct inode *dir,
210 struct buffer_head *parent_fe_bh,
211 struct ocfs2_alloc_context *ac,
212 u64 *fe_blkno);
213
214int ocfs2_claim_new_inode_at_loc(handle_t *handle,
215 struct inode *dir,
216 struct ocfs2_alloc_context *ac,
217 u64 *suballoc_loc,
218 u16 *suballoc_bit,
219 u64 di_blkno);
220
200#endif /* _CHAINALLOC_H_ */ 221#endif /* _CHAINALLOC_H_ */
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 32499d213fc4..9975457c981f 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
128 } 128 }
129 129
130 /* Fast symlinks can't be large */ 130 /* Fast symlinks can't be large */
131 len = strlen(target); 131 len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
132 link = kzalloc(len + 1, GFP_NOFS); 132 link = kzalloc(len + 1, GFP_NOFS);
133 if (!link) { 133 if (!link) {
134 status = -ENOMEM; 134 status = -ENOMEM;
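
The symlink.c change above bounds the length calculation with strnlen() against ocfs2_fast_symlink_chars(), since a fast-symlink target stored in a fixed-size on-disk area is not guaranteed to be NUL-terminated. The standalone sketch below illustrates the bounded-length idea only; FIELD_SIZE is a stand-in for the on-disk limit, not the real ocfs2 value.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

#define FIELD_SIZE 8	/* hypothetical fixed-size on-disk field */

int main(void)
{
	/* A field completely filled with characters: no terminating NUL. */
	char field[FIELD_SIZE] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };

	/* strnlen() stops at FIELD_SIZE even without a terminator;
	 * plain strlen() would read past the end of the field. */
	printf("len = %zu\n", strnlen(field, sizeof(field)));
	return 0;
}
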
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d03469f61801..06fa5e77c40e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
1286 xis.inode_bh = xbs.inode_bh = di_bh; 1286 xis.inode_bh = xbs.inode_bh = di_bh;
1287 di = (struct ocfs2_dinode *)di_bh->b_data; 1287 di = (struct ocfs2_dinode *)di_bh->b_data;
1288 1288
1289 down_read(&oi->ip_xattr_sem);
1290 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, 1289 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
1291 buffer_size, &xis); 1290 buffer_size, &xis);
1292 if (ret == -ENODATA && di->i_xattr_loc) 1291 if (ret == -ENODATA && di->i_xattr_loc)
1293 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, 1292 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
1294 buffer_size, &xbs); 1293 buffer_size, &xbs);
1295 up_read(&oi->ip_xattr_sem);
1296 1294
1297 return ret; 1295 return ret;
1298} 1296}
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
1316 mlog_errno(ret); 1314 mlog_errno(ret);
1317 return ret; 1315 return ret;
1318 } 1316 }
1317 down_read(&OCFS2_I(inode)->ip_xattr_sem);
1319 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1318 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
1320 name, buffer, buffer_size); 1319 name, buffer, buffer_size);
1320 up_read(&OCFS2_I(inode)->ip_xattr_sem);
1321 1321
1322 ocfs2_inode_unlock(inode, 0); 1322 ocfs2_inode_unlock(inode, 0);
1323 1323
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a1c43e7c8a7b..8e4addaa5424 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2675 INF("auxv", S_IRUSR, proc_pid_auxv), 2675 INF("auxv", S_IRUSR, proc_pid_auxv),
2676 ONE("status", S_IRUGO, proc_pid_status), 2676 ONE("status", S_IRUGO, proc_pid_status),
2677 ONE("personality", S_IRUSR, proc_pid_personality), 2677 ONE("personality", S_IRUSR, proc_pid_personality),
2678 INF("limits", S_IRUSR, proc_pid_limits), 2678 INF("limits", S_IRUGO, proc_pid_limits),
2679#ifdef CONFIG_SCHED_DEBUG 2679#ifdef CONFIG_SCHED_DEBUG
2680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
2681#endif 2681#endif
@@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = {
3011 INF("auxv", S_IRUSR, proc_pid_auxv), 3011 INF("auxv", S_IRUSR, proc_pid_auxv),
3012 ONE("status", S_IRUGO, proc_pid_status), 3012 ONE("status", S_IRUGO, proc_pid_status),
3013 ONE("personality", S_IRUSR, proc_pid_personality), 3013 ONE("personality", S_IRUSR, proc_pid_personality),
3014 INF("limits", S_IRUSR, proc_pid_limits), 3014 INF("limits", S_IRUGO, proc_pid_limits),
3015#ifdef CONFIG_SCHED_DEBUG 3015#ifdef CONFIG_SCHED_DEBUG
3016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 3016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
3017#endif 3017#endif
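
The two proc/base.c hunks above change /proc/<pid>/limits from owner-only (S_IRUSR) to world-readable (S_IRUGO), so unprivileged users can inspect another process's resource limits. A small userspace reader is sketched below purely as an illustration; the PID comes from argv.

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/%ld/limits",
		 strtol(argv[1], NULL, 10));
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}

	/* Each line looks like "Max open files  1024  4096  files". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}
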
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 180cf5a0bd67..3b8b45660331 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
146 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); 146 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
147#endif 147#endif
148 148
149#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 149#ifdef CONFIG_ARCH_USES_PG_UNCACHED
150 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); 150 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
151#endif 151#endif
152 152
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 439fc1f1c1c4..1dbca4e8cc16 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
224 /* We don't show the stack guard page in /proc/maps */ 224 /* We don't show the stack guard page in /proc/maps */
225 start = vma->vm_start; 225 start = vma->vm_start;
226 if (vma->vm_flags & VM_GROWSDOWN) 226 if (vma->vm_flags & VM_GROWSDOWN)
227 start += PAGE_SIZE; 227 if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
228 start += PAGE_SIZE;
228 229
229 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 230 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
230 start, 231 start,
@@ -362,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
362 mss->referenced += PAGE_SIZE; 363 mss->referenced += PAGE_SIZE;
363 mapcount = page_mapcount(page); 364 mapcount = page_mapcount(page);
364 if (mapcount >= 2) { 365 if (mapcount >= 2) {
365 if (pte_dirty(ptent)) 366 if (pte_dirty(ptent) || PageDirty(page))
366 mss->shared_dirty += PAGE_SIZE; 367 mss->shared_dirty += PAGE_SIZE;
367 else 368 else
368 mss->shared_clean += PAGE_SIZE; 369 mss->shared_clean += PAGE_SIZE;
369 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; 370 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
370 } else { 371 } else {
371 if (pte_dirty(ptent)) 372 if (pte_dirty(ptent) || PageDirty(page))
372 mss->private_dirty += PAGE_SIZE; 373 mss->private_dirty += PAGE_SIZE;
373 else 374 else
374 mss->private_clean += PAGE_SIZE; 375 mss->private_clean += PAGE_SIZE;
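
The task_mmu.c hunk above counts a mapped page as dirty in smaps when either the PTE is dirty or the struct page itself carries PG_dirty, so pages dirtied through another mapping (or already queued for writeback) still show under Shared_Dirty/Private_Dirty. The sketch below, an illustration only, dirties a MAP_SHARED file page and then prints the dirty-accounting lines from /proc/self/smaps; the temp-file template is a placeholder.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char tmpl[] = "/tmp/smaps-demo-XXXXXX";
	char line[256];
	int fd = mkstemp(tmpl);
	char *map;
	FILE *f;

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("tmpfile");
		return 1;
	}

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	map[0] = 'x';	/* dirty the page through the shared mapping */

	f = fopen("/proc/self/smaps", "r");
	if (!f) {
		perror("/proc/self/smaps");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "Shared_Dirty") || strstr(line, "Private_Dirty"))
			fputs(line, stdout);
	fclose(f);

	munmap(map, 4096);
	close(fd);
	unlink(tmpl);
	return 0;
}
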
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91c817ff02c3..2367fb3f70bc 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
163 163
164static const struct file_operations proc_vmcore_operations = { 164static const struct file_operations proc_vmcore_operations = {
165 .read = read_vmcore, 165 .read = read_vmcore,
166 .llseek = generic_file_llseek, 166 .llseek = default_llseek,
167}; 167};
168 168
169static struct vmcore* __init get_new_element(void) 169static struct vmcore* __init get_new_element(void)
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index f53505de0712..5cbb81e134ac 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
170int reiserfs_unpack(struct inode *inode, struct file *filp) 170int reiserfs_unpack(struct inode *inode, struct file *filp)
171{ 171{
172 int retval = 0; 172 int retval = 0;
173 int depth;
173 int index; 174 int index;
174 struct page *page; 175 struct page *page;
175 struct address_space *mapping; 176 struct address_space *mapping;
@@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
188 /* we need to make sure nobody is changing the file size beneath 189 /* we need to make sure nobody is changing the file size beneath
189 ** us 190 ** us
190 */ 191 */
191 mutex_lock(&inode->i_mutex); 192 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
192 reiserfs_write_lock(inode->i_sb); 193 depth = reiserfs_write_lock_once(inode->i_sb);
193 194
194 write_from = inode->i_size & (blocksize - 1); 195 write_from = inode->i_size & (blocksize - 1);
195 /* if we are on a block boundary, we are already unpacked. */ 196 /* if we are on a block boundary, we are already unpacked. */
@@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
224 225
225 out: 226 out:
226 mutex_unlock(&inode->i_mutex); 227 mutex_unlock(&inode->i_mutex);
227 reiserfs_write_unlock(inode->i_sb); 228 reiserfs_write_unlock_once(inode->i_sb, depth);
228 return retval; 229 return retval;
229} 230}
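
The reiserfs change above swaps the plain lock calls for the re-entrancy-aware helpers: reiserfs_mutex_lock_safe() avoids holding the per-superblock write lock across the i_mutex acquisition, and reiserfs_write_lock_once()/reiserfs_write_unlock_once() record whether the write lock was already held by the current task so that the matching unlock only releases it when this call actually acquired it. A hedged sketch of the calling pattern, using only the helpers named in the hunk (the surrounding function is illustrative):

/* Sketch only: the "lock once" pattern, not actual reiserfs code. */
static int example_locked_op(struct inode *inode)
{
	int depth;

	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
	depth = reiserfs_write_lock_once(inode->i_sb);

	/* ... work that may re-enter code taking the write lock ... */

	mutex_unlock(&inode->i_mutex);
	reiserfs_write_unlock_once(inode->i_sb, depth);
	return 0;
}
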
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d72cf2bb054a..286e36e21dae 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1932,7 +1932,8 @@ xfs_buf_init(void)
1932 if (!xfs_buf_zone) 1932 if (!xfs_buf_zone)
1933 goto out; 1933 goto out;
1934 1934
1935 xfslogd_workqueue = create_workqueue("xfslogd"); 1935 xfslogd_workqueue = alloc_workqueue("xfslogd",
1936 WQ_RESCUER | WQ_HIGHPRI, 1);
1936 if (!xfslogd_workqueue) 1937 if (!xfslogd_workqueue)
1937 goto out_free_buf_zone; 1938 goto out_free_buf_zone;
1938 1939
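
create_workqueue() is replaced here with alloc_workqueue() so the log workqueue can be created with an attached rescuer thread (forward progress under memory pressure) and high priority, with at most one work item in flight. A hedged sketch of the same allocation pattern in a hypothetical module (names other than the flags are assumptions):

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_wq_setup(void)
{
	/* Same flag combination as the hunk above: a dedicated rescuer for
	 * reliability under memory reclaim, high priority, max_active = 1. */
	example_wq = alloc_workqueue("example_wq", WQ_RESCUER | WQ_HIGHPRI, 1);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_wq_teardown(void)
{
	destroy_workqueue(example_wq);
}
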
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4fec427b83ef..3b9e626f7cd1 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr(
785{ 785{
786 struct fsxattr fa; 786 struct fsxattr fa;
787 787
788 memset(&fa, 0, sizeof(struct fsxattr));
789
788 xfs_ilock(ip, XFS_ILOCK_SHARED); 790 xfs_ilock(ip, XFS_ILOCK_SHARED);
789 fa.fsx_xflags = xfs_ip2xflags(ip); 791 fa.fsx_xflags = xfs_ip2xflags(ip);
790 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; 792 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
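
The added memset() is an information-leak fix: struct fsxattr has members and padding that are not all written before the structure is copied out to user space, so without zeroing it first, stale kernel stack bytes could reach the caller. The general defensive pattern, sketched with a hypothetical structure:

#include <string.h>

struct example_attr {
	unsigned int flags;
	unsigned int extsize;
	unsigned int reserved[8];	/* never written explicitly */
};

void example_fill_attr(struct example_attr *out)
{
	/* Zero everything first so unwritten fields and padding cannot
	 * carry uninitialized stack contents back to user space. */
	memset(out, 0, sizeof(*out));
	out->flags = 0x1;
	out->extsize = 4096;
}
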
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index ed575fb4b495..7e206fc1fa36 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -405,9 +405,15 @@ xlog_cil_push(
405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
406 new_ctx->ticket = xlog_cil_ticket_alloc(log); 406 new_ctx->ticket = xlog_cil_ticket_alloc(log);
407 407
408 /* lock out transaction commit, but don't block on background push */ 408 /*
409 * Lock out transaction commit, but don't block for background pushes
410 * unless we are well over the CIL space limit. See the definition of
411 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
412 * used here.
413 */
409 if (!down_write_trylock(&cil->xc_ctx_lock)) { 414 if (!down_write_trylock(&cil->xc_ctx_lock)) {
410 if (!push_seq) 415 if (!push_seq &&
416 cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
411 goto out_free_ticket; 417 goto out_free_ticket;
412 down_write(&cil->xc_ctx_lock); 418 down_write(&cil->xc_ctx_lock);
413 } 419 }
@@ -422,7 +428,7 @@ xlog_cil_push(
422 goto out_skip; 428 goto out_skip;
423 429
 424 /* check for a previously pushed sequence */ 430 /* check for a previously pushed sequence */
425 if (push_seq < cil->xc_ctx->sequence) 431 if (push_seq && push_seq < cil->xc_ctx->sequence)
426 goto out_skip; 432 goto out_skip;
427 433
428 /* 434 /*
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ced52b98b322..edcdfe01617f 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -426,13 +426,13 @@ struct xfs_cil {
426}; 426};
427 427
428/* 428/*
429 * The amount of log space we should the CIL to aggregate is difficult to size. 429 * The amount of log space we allow the CIL to aggregate is difficult to size.
430 * Whatever we chose we have to make we can get a reservation for the log space 430 * Whatever we choose, we have to make sure we can get a reservation for the
431 * effectively, that it is large enough to capture sufficient relogging to 431 * log space effectively, that it is large enough to capture sufficient
432 * reduce log buffer IO significantly, but it is not too large for the log or 432 * relogging to reduce log buffer IO significantly, but it is not too large for
433 * induces too much latency when writing out through the iclogs. We track both 433 * the log or induces too much latency when writing out through the iclogs. We
434 * space consumed and the number of vectors in the checkpoint context, so we 434 * track both space consumed and the number of vectors in the checkpoint
435 * need to decide which to use for limiting. 435 * context, so we need to decide which to use for limiting.
436 * 436 *
437 * Every log buffer we write out during a push needs a header reserved, which 437 * Every log buffer we write out during a push needs a header reserved, which
438 * is at least one sector and more for v2 logs. Hence we need a reservation of 438 * is at least one sector and more for v2 logs. Hence we need a reservation of
@@ -459,16 +459,21 @@ struct xfs_cil {
459 * checkpoint transaction ticket is specific to the checkpoint context, rather 459 * checkpoint transaction ticket is specific to the checkpoint context, rather
460 * than the CIL itself. 460 * than the CIL itself.
461 * 461 *
462 * With dynamic reservations, we can basically make up arbitrary limits for the 462 * With dynamic reservations, we can effectively make up arbitrary limits for
463 * checkpoint size so long as they don't violate any other size rules. Hence 463 * the checkpoint size so long as they don't violate any other size rules.
464 * the initial maximum size for the checkpoint transaction will be set to a 464 * Recovery imposes a rule that no transaction exceed half the log, so we are
465 * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit 465 * limited by that. Furthermore, the log transaction reservation subsystem
466 * right now based on the latency of writing out a large amount of data through 466 * tries to keep 25% of the log free, so we need to keep below that limit or we
467 * the circular iclog buffers. 467 * risk running out of free log space to start any new transactions.
 468 *
 469 * In order to keep the background CIL push efficient, we set a lower
 470 * threshold at which background pushing is attempted without blocking current
 471 * transaction commits. A separate, higher bound defines when CIL pushes are
 472 * enforced to ensure we stay within our maximum checkpoint size bounds. That
 473 * hard limit sits above the background threshold, yet gives us plenty of space for aggregation on large logs.
468 */ 474 */
469 475#define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3)
470#define XLOG_CIL_SPACE_LIMIT(log) \ 476#define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4))
471 (min((log->l_logsize >> 2), (8 * 1024 * 1024)))
472 477
473/* 478/*
474 * The reservation head lsn is not made up of a cycle number and block number. 479 * The reservation head lsn is not made up of a cycle number and block number.
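
The replacement macros trade the old "quarter of the log or 8MB" cap for two thresholds derived purely from the log size: background pushes start at 1/8 of the log, and pushes become blocking at the hard limit of 3/16. Worked numbers for a hypothetical 128MB log, as a small standalone sketch:

#include <stdio.h>

/* Same shifts as the macros above: soft limit = logsize/8, hard = 3*logsize/16. */
static unsigned long cil_space_limit(unsigned long logsize)
{
	return logsize >> 3;
}

static unsigned long cil_hard_space_limit(unsigned long logsize)
{
	return 3 * (logsize >> 4);
}

int main(void)
{
	unsigned long logsize = 128UL << 20;	/* assume a 128MB log */

	printf("background push threshold: %lu MB\n", cil_space_limit(logsize) >> 20);		/* 16 */
	printf("blocking (hard) limit:     %lu MB\n", cil_hard_space_limit(logsize) >> 20);	/* 24 */
	return 0;
}
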
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c0786d446a00..984cdc62e30b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -55,7 +55,7 @@
55extern u8 acpi_gbl_permanent_mmap; 55extern u8 acpi_gbl_permanent_mmap;
56 56
57/* 57/*
58 * Globals that are publically available, allowing for 58 * Globals that are publicly available, allowing for
59 * run time configuration 59 * run time configuration
60 */ 60 */
61extern u32 acpi_dbg_level; 61extern u32 acpi_dbg_level;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c7376bf80b06..8ca18e26d7e3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,15 +16,27 @@
16 * While the GPIO programming interface defines valid GPIO numbers 16 * While the GPIO programming interface defines valid GPIO numbers
17 * to be in the range 0..MAX_INT, this library restricts them to the 17 * to be in the range 0..MAX_INT, this library restricts them to the
18 * smaller range 0..ARCH_NR_GPIOS-1. 18 * smaller range 0..ARCH_NR_GPIOS-1.
19 *
20 * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
21 * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
22 * actually an estimate of a board-specific value.
19 */ 23 */
20 24
21#ifndef ARCH_NR_GPIOS 25#ifndef ARCH_NR_GPIOS
22#define ARCH_NR_GPIOS 256 26#define ARCH_NR_GPIOS 256
23#endif 27#endif
24 28
29/*
30 * "valid" GPIO numbers are nonnegative and may be passed to
 31 * setup routines like gpio_request(). Only some valid numbers
32 * can successfully be requested and used.
33 *
34 * Invalid GPIO numbers are useful for indicating no-such-GPIO in
35 * platform data and other tables.
36 */
37
25static inline int gpio_is_valid(int number) 38static inline int gpio_is_valid(int number)
26{ 39{
27 /* only some non-negative numbers are valid */
28 return ((unsigned)number) < ARCH_NR_GPIOS; 40 return ((unsigned)number) < ARCH_NR_GPIOS;
29} 41}
30 42
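
gpio_is_valid() only says whether a number falls inside the 0..ARCH_NR_GPIOS-1 range; whether that GPIO actually exists and is free is decided later by gpio_request(). A hedged usage sketch (the surrounding function and label are illustrative):

#include <linux/gpio.h>

static int example_claim_gpio(int gpio)
{
	int ret;

	/* Negative numbers are the conventional "no such GPIO" marker in
	 * platform data, so silently skip them. */
	if (!gpio_is_valid(gpio))
		return 0;

	ret = gpio_request(gpio, "example");
	if (ret)
		return ret;	/* in range, but not actually available */

	return gpio_direction_output(gpio, 0);
}
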
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 7809d230adee..4c9461a4f9e6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -612,7 +612,7 @@ struct drm_gem_object {
612 struct kref refcount; 612 struct kref refcount;
613 613
614 /** Handle count of this object. Each handle also holds a reference */ 614 /** Handle count of this object. Each handle also holds a reference */
615 struct kref handlecount; 615 atomic_t handle_count; /* number of handles on this object */
616 616
617 /** Related drm device */ 617 /** Related drm device */
618 struct drm_device *dev; 618 struct drm_device *dev;
@@ -808,7 +808,6 @@ struct drm_driver {
808 */ 808 */
809 int (*gem_init_object) (struct drm_gem_object *obj); 809 int (*gem_init_object) (struct drm_gem_object *obj);
810 void (*gem_free_object) (struct drm_gem_object *obj); 810 void (*gem_free_object) (struct drm_gem_object *obj);
811 void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
812 811
813 /* vga arb irq handler */ 812 /* vga arb irq handler */
814 void (*vgaarb_irq)(struct drm_device *dev, bool state); 813 void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
1175extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1174extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
1176extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1175extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
1177extern void drm_vm_open_locked(struct vm_area_struct *vma); 1176extern void drm_vm_open_locked(struct vm_area_struct *vma);
1177extern void drm_vm_close_locked(struct vm_area_struct *vma);
1178extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); 1178extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
1179extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); 1179extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
1180extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 1180extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
1455void drm_gem_destroy(struct drm_device *dev); 1455void drm_gem_destroy(struct drm_device *dev);
1456void drm_gem_object_release(struct drm_gem_object *obj); 1456void drm_gem_object_release(struct drm_gem_object *obj);
1457void drm_gem_object_free(struct kref *kref); 1457void drm_gem_object_free(struct kref *kref);
1458void drm_gem_object_free_unlocked(struct kref *kref);
1459struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1458struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1460 size_t size); 1459 size_t size);
1461int drm_gem_object_init(struct drm_device *dev, 1460int drm_gem_object_init(struct drm_device *dev,
1462 struct drm_gem_object *obj, size_t size); 1461 struct drm_gem_object *obj, size_t size);
1463void drm_gem_object_handle_free(struct kref *kref); 1462void drm_gem_object_handle_free(struct drm_gem_object *obj);
1464void drm_gem_vm_open(struct vm_area_struct *vma); 1463void drm_gem_vm_open(struct vm_area_struct *vma);
1465void drm_gem_vm_close(struct vm_area_struct *vma); 1464void drm_gem_vm_close(struct vm_area_struct *vma);
1466int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 1465int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
1483static inline void 1482static inline void
1484drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1483drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1485{ 1484{
1486 if (obj != NULL) 1485 if (obj != NULL) {
1487 kref_put(&obj->refcount, drm_gem_object_free_unlocked); 1486 struct drm_device *dev = obj->dev;
1487 mutex_lock(&dev->struct_mutex);
1488 kref_put(&obj->refcount, drm_gem_object_free);
1489 mutex_unlock(&dev->struct_mutex);
1490 }
1488} 1491}
1489 1492
1490int drm_gem_handle_create(struct drm_file *file_priv, 1493int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1495,7 +1498,7 @@ static inline void
1495drm_gem_object_handle_reference(struct drm_gem_object *obj) 1498drm_gem_object_handle_reference(struct drm_gem_object *obj)
1496{ 1499{
1497 drm_gem_object_reference(obj); 1500 drm_gem_object_reference(obj);
1498 kref_get(&obj->handlecount); 1501 atomic_inc(&obj->handle_count);
1499} 1502}
1500 1503
1501static inline void 1504static inline void
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1504 if (obj == NULL) 1507 if (obj == NULL)
1505 return; 1508 return;
1506 1509
1510 if (atomic_read(&obj->handle_count) == 0)
1511 return;
1507 /* 1512 /*
1508 * Must bump handle count first as this may be the last 1513 * Must bump handle count first as this may be the last
1509 * ref, in which case the object would disappear before we 1514 * ref, in which case the object would disappear before we
1510 * checked for a name 1515 * checked for a name
1511 */ 1516 */
1512 kref_put(&obj->handlecount, drm_gem_object_handle_free); 1517 if (atomic_dec_and_test(&obj->handle_count))
1518 drm_gem_object_handle_free(obj);
1513 drm_gem_object_unreference(obj); 1519 drm_gem_object_unreference(obj);
1514} 1520}
1515 1521
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
1519 if (obj == NULL) 1525 if (obj == NULL)
1520 return; 1526 return;
1521 1527
1528 if (atomic_read(&obj->handle_count) == 0)
1529 return;
1530
1522 /* 1531 /*
1523 * Must bump handle count first as this may be the last 1532 * Must bump handle count first as this may be the last
1524 * ref, in which case the object would disappear before we 1533 * ref, in which case the object would disappear before we
1525 * checked for a name 1534 * checked for a name
1526 */ 1535 */
1527 kref_put(&obj->handlecount, drm_gem_object_handle_free); 1536
1537 if (atomic_dec_and_test(&obj->handle_count))
1538 drm_gem_object_handle_free(obj);
1528 drm_gem_object_unreference_unlocked(obj); 1539 drm_gem_object_unreference_unlocked(obj);
1529} 1540}
1530 1541
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c9f3cc5949a8..3e5a51af757c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -386,7 +386,15 @@ struct drm_connector_funcs {
386 void (*dpms)(struct drm_connector *connector, int mode); 386 void (*dpms)(struct drm_connector *connector, int mode);
387 void (*save)(struct drm_connector *connector); 387 void (*save)(struct drm_connector *connector);
388 void (*restore)(struct drm_connector *connector); 388 void (*restore)(struct drm_connector *connector);
389 enum drm_connector_status (*detect)(struct drm_connector *connector); 389
390 /* Check to see if anything is attached to the connector.
391 * @force is set to false whilst polling, true when checking the
392 * connector due to user request. @force can be used by the driver
393 * to avoid expensive, destructive operations during automated
394 * probing.
395 */
396 enum drm_connector_status (*detect)(struct drm_connector *connector,
397 bool force);
390 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); 398 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
391 int (*set_property)(struct drm_connector *connector, struct drm_property *property, 399 int (*set_property)(struct drm_connector *connector, struct drm_property *property,
392 uint64_t val); 400 uint64_t val);
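
Every drm_connector_funcs.detect() hook now receives a force flag: false during periodic output polling, true when userspace explicitly asks for a probe. A hypothetical driver implementation honouring that contract might look like the sketch below (the example_* helpers are assumptions; only the callback signature and status values come from the header):

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	/* Cheap, non-destructive check first, e.g. a hotplug-detect bit. */
	if (example_hpd_asserted(connector))
		return connector_status_connected;

	/* Expensive or destructive probing (load detection, long EDID
	 * retries) is reserved for explicit user-requested checks. */
	if (!force)
		return connector_status_unknown;

	return example_load_detect(connector) ?
		connector_status_connected : connector_status_disconnected;
}
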
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3a9940ef728b..883c1d439899 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -85,7 +85,6 @@
85 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
87 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 87 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
88 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
89 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 88 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
90 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 89 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
91 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 90 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
@@ -103,6 +102,7 @@
103 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 102 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
104 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 103 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
105 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 104 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
105 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
106 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ 106 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
107 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ 107 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
108 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 108 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5a53d8f039a2..0c991023ee47 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -579,6 +579,7 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
579int cgroup_scan_tasks(struct cgroup_scanner *scan); 579int cgroup_scan_tasks(struct cgroup_scanner *scan);
580int cgroup_attach_task(struct cgroup *, struct task_struct *); 580int cgroup_attach_task(struct cgroup *, struct task_struct *);
581int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 581int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
582
582static inline int cgroup_attach_task_current_cg(struct task_struct *tsk) 583static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
583{ 584{
584 return cgroup_attach_task_all(current, tsk); 585 return cgroup_attach_task_all(current, tsk);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9ddc8780e8db..5778b559d59c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs, 360 const struct compat_iovec __user *uvector, unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer, 361 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer); 362 struct iovec **ret_pointer);
363
364extern void __user *compat_alloc_user_space(unsigned long len);
365
363#endif /* CONFIG_COMPAT */ 366#endif /* CONFIG_COMPAT */
364#endif /* _LINUX_COMPAT_H */ 367#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 36ca9721a0c2..1be416bbbb82 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,6 +53,7 @@ struct cpuidle_state {
53#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ 53#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
54#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ 54#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
55#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ 55#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
56#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
56 57
57#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 58#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
58 59
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ce29b8151198..ba8319ae5fcc 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
102 return DMA_BIT_MASK(32); 102 return DMA_BIT_MASK(32);
103} 103}
104 104
105#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
106int dma_set_coherent_mask(struct device *dev, u64 mask);
107#else
105static inline int dma_set_coherent_mask(struct device *dev, u64 mask) 108static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
106{ 109{
107 if (!dma_supported(dev, mask)) 110 if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
109 dev->coherent_dma_mask = mask; 112 dev->coherent_dma_mask = mask;
110 return 0; 113 return 0;
111} 114}
115#endif
112 116
113extern u64 dma_get_required_mask(struct device *dev); 117extern u64 dma_get_required_mask(struct device *dev);
114 118
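
The new #ifdef lets an architecture that defines ARCH_HAS_DMA_SET_COHERENT_MASK provide its own out-of-line dma_set_coherent_mask() while everyone else keeps the generic inline. Driver-side usage is identical either way; a hedged sketch:

#include <linux/dma-mapping.h>

static int example_setup_dma(struct device *dev)
{
	/* Request 32-bit coherent allocations; the arch override, if any,
	 * decides whether the platform can actually honour the mask. */
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	return 0;
}
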
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c61d4ca27bcc..e2106495cc11 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
549} 549}
550 550
551static unsigned short dma_dev_to_maxpq(struct dma_device *dma) 551static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
552{ 552{
553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
554} 554}
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c958f4fce1e..926b50322a46 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
136 136
137extern int elevator_init(struct request_queue *, char *); 137extern int elevator_init(struct request_queue *, char *);
138extern void elevator_exit(struct elevator_queue *); 138extern void elevator_exit(struct elevator_queue *);
139extern int elevator_change(struct request_queue *, const char *);
139extern int elv_rq_merge_ok(struct request *, struct bio *); 140extern int elv_rq_merge_ok(struct request *, struct bio *);
140 141
141/* 142/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76041b614758..63d069bd80b7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1093,6 +1093,10 @@ struct file_lock {
1093 1093
1094#include <linux/fcntl.h> 1094#include <linux/fcntl.h>
1095 1095
1096/* temporary stubs for BKL removal */
1097#define lock_flocks() lock_kernel()
1098#define unlock_flocks() unlock_kernel()
1099
1096extern void send_sigio(struct fown_struct *fown, int fd, int band); 1100extern void send_sigio(struct fown_struct *fown, int fd, int band);
1097 1101
1098#ifdef CONFIG_FILE_LOCKING 1102#ifdef CONFIG_FILE_LOCKING
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 03f616b78cfa..e41f7dd1ae67 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14 14
15struct device; 15struct device;
16struct gpio_chip;
16 17
17/* 18/*
18 * Some platforms don't support the GPIO programming interface. 19 * Some platforms don't support the GPIO programming interface.
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049cb9ba5..52baa79d69a7 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
63 * IRQ lines will appear. Similarly to gpio_base, the expander 63 * IRQ lines will appear. Similarly to gpio_base, the expander
64 * will create a block of irqs beginning at this number. 64 * will create a block of irqs beginning at this number.
65 * This value is ignored if irq_summary is < 0. 65 * This value is ignored if irq_summary is < 0.
66 * @reset_during_probe: If set to true, the driver will trigger a full
67 * reset of the chip at the beginning of the probe
68 * in order to place it in a known state.
66 */ 69 */
67struct sx150x_platform_data { 70struct sx150x_platform_data {
68 unsigned gpio_base; 71 unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
73 u16 io_polarity; 76 u16 io_polarity;
74 int irq_summary; 77 int irq_summary;
75 unsigned irq_base; 78 unsigned irq_base;
79 bool reset_during_probe;
76}; 80};
77 81
78#endif /* __LINUX_I2C_SX150X_H */ 82#endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5c490c..7fb592793738 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
79} 79}
80 80
81/* Atomic map/unmap */ 81/* Atomic map/unmap */
82static inline void * 82static inline void __iomem *
83io_mapping_map_atomic_wc(struct io_mapping *mapping, 83io_mapping_map_atomic_wc(struct io_mapping *mapping,
84 unsigned long offset, 84 unsigned long offset,
85 int slot) 85 int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
94} 94}
95 95
96static inline void 96static inline void
97io_mapping_unmap_atomic(void *vaddr, int slot) 97io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
98{ 98{
99 iounmap_atomic(vaddr, slot); 99 iounmap_atomic(vaddr, slot);
100} 100}
101 101
102static inline void * 102static inline void __iomem *
103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
104{ 104{
105 resource_size_t phys_addr; 105 resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
111} 111}
112 112
113static inline void 113static inline void
114io_mapping_unmap(void *vaddr) 114io_mapping_unmap(void __iomem *vaddr)
115{ 115{
116 iounmap(vaddr); 116 iounmap(vaddr);
117} 117}
@@ -125,38 +125,38 @@ struct io_mapping;
125static inline struct io_mapping * 125static inline struct io_mapping *
126io_mapping_create_wc(resource_size_t base, unsigned long size) 126io_mapping_create_wc(resource_size_t base, unsigned long size)
127{ 127{
128 return (struct io_mapping *) ioremap_wc(base, size); 128 return (struct io_mapping __force *) ioremap_wc(base, size);
129} 129}
130 130
131static inline void 131static inline void
132io_mapping_free(struct io_mapping *mapping) 132io_mapping_free(struct io_mapping *mapping)
133{ 133{
134 iounmap(mapping); 134 iounmap((void __force __iomem *) mapping);
135} 135}
136 136
137/* Atomic map/unmap */ 137/* Atomic map/unmap */
138static inline void * 138static inline void __iomem *
139io_mapping_map_atomic_wc(struct io_mapping *mapping, 139io_mapping_map_atomic_wc(struct io_mapping *mapping,
140 unsigned long offset, 140 unsigned long offset,
141 int slot) 141 int slot)
142{ 142{
143 return ((char *) mapping) + offset; 143 return ((char __force __iomem *) mapping) + offset;
144} 144}
145 145
146static inline void 146static inline void
147io_mapping_unmap_atomic(void *vaddr, int slot) 147io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
148{ 148{
149} 149}
150 150
151/* Non-atomic map/unmap */ 151/* Non-atomic map/unmap */
152static inline void * 152static inline void __iomem *
153io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 153io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
154{ 154{
155 return ((char *) mapping) + offset; 155 return ((char __force __iomem *) mapping) + offset;
156} 156}
157 157
158static inline void 158static inline void
159io_mapping_unmap(void *vaddr) 159io_mapping_unmap(void __iomem *vaddr)
160{ 160{
161} 161}
162 162
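
The io-mapping changes are annotation-only: the returned cookies are typed void __iomem * so sparse can catch plain dereferences, and the internal casts gain __force to mark the address-space conversion as intentional. The convention the annotations enforce is the usual MMIO one, sketched generically below (register layout and names assumed):

#include <linux/io.h>

static u32 example_read_reg(void __iomem *regs, unsigned int offset)
{
	/* __iomem pointers are opaque cookies: go through the MMIO
	 * accessors, never a plain C dereference. */
	return readl(regs + offset);
}

static void example_write_reg(void __iomem *regs, unsigned int offset, u32 val)
{
	writel(val, regs + offset);
}
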
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f203f3e..62dbee554f60 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
214 */ 214 */
215#define kfifo_reset(fifo) \ 215#define kfifo_reset(fifo) \
216(void)({ \ 216(void)({ \
217 typeof(fifo + 1) __tmp = (fifo); \ 217 typeof((fifo) + 1) __tmp = (fifo); \
218 __tmp->kfifo.in = __tmp->kfifo.out = 0; \ 218 __tmp->kfifo.in = __tmp->kfifo.out = 0; \
219}) 219})
220 220
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
228 */ 228 */
229#define kfifo_reset_out(fifo) \ 229#define kfifo_reset_out(fifo) \
230(void)({ \ 230(void)({ \
231 typeof(fifo + 1) __tmp = (fifo); \ 231 typeof((fifo) + 1) __tmp = (fifo); \
232 __tmp->kfifo.out = __tmp->kfifo.in; \ 232 __tmp->kfifo.out = __tmp->kfifo.in; \
233}) 233})
234 234
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
238 */ 238 */
239#define kfifo_len(fifo) \ 239#define kfifo_len(fifo) \
240({ \ 240({ \
241 typeof(fifo + 1) __tmpl = (fifo); \ 241 typeof((fifo) + 1) __tmpl = (fifo); \
242 __tmpl->kfifo.in - __tmpl->kfifo.out; \ 242 __tmpl->kfifo.in - __tmpl->kfifo.out; \
243}) 243})
244 244
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
248 */ 248 */
249#define kfifo_is_empty(fifo) \ 249#define kfifo_is_empty(fifo) \
250({ \ 250({ \
251 typeof(fifo + 1) __tmpq = (fifo); \ 251 typeof((fifo) + 1) __tmpq = (fifo); \
252 __tmpq->kfifo.in == __tmpq->kfifo.out; \ 252 __tmpq->kfifo.in == __tmpq->kfifo.out; \
253}) 253})
254 254
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
258 */ 258 */
259#define kfifo_is_full(fifo) \ 259#define kfifo_is_full(fifo) \
260({ \ 260({ \
261 typeof(fifo + 1) __tmpq = (fifo); \ 261 typeof((fifo) + 1) __tmpq = (fifo); \
262 kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ 262 kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
263}) 263})
264 264
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
269#define kfifo_avail(fifo) \ 269#define kfifo_avail(fifo) \
270__kfifo_must_check_helper( \ 270__kfifo_must_check_helper( \
271({ \ 271({ \
272 typeof(fifo + 1) __tmpq = (fifo); \ 272 typeof((fifo) + 1) __tmpq = (fifo); \
273 const size_t __recsize = sizeof(*__tmpq->rectype); \ 273 const size_t __recsize = sizeof(*__tmpq->rectype); \
274 unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ 274 unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
275 (__recsize) ? ((__avail <= __recsize) ? 0 : \ 275 (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
284 */ 284 */
285#define kfifo_skip(fifo) \ 285#define kfifo_skip(fifo) \
286(void)({ \ 286(void)({ \
287 typeof(fifo + 1) __tmp = (fifo); \ 287 typeof((fifo) + 1) __tmp = (fifo); \
288 const size_t __recsize = sizeof(*__tmp->rectype); \ 288 const size_t __recsize = sizeof(*__tmp->rectype); \
289 struct __kfifo *__kfifo = &__tmp->kfifo; \ 289 struct __kfifo *__kfifo = &__tmp->kfifo; \
290 if (__recsize) \ 290 if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
302#define kfifo_peek_len(fifo) \ 302#define kfifo_peek_len(fifo) \
303__kfifo_must_check_helper( \ 303__kfifo_must_check_helper( \
304({ \ 304({ \
305 typeof(fifo + 1) __tmp = (fifo); \ 305 typeof((fifo) + 1) __tmp = (fifo); \
306 const size_t __recsize = sizeof(*__tmp->rectype); \ 306 const size_t __recsize = sizeof(*__tmp->rectype); \
307 struct __kfifo *__kfifo = &__tmp->kfifo; \ 307 struct __kfifo *__kfifo = &__tmp->kfifo; \
308 (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ 308 (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
325#define kfifo_alloc(fifo, size, gfp_mask) \ 325#define kfifo_alloc(fifo, size, gfp_mask) \
326__kfifo_must_check_helper( \ 326__kfifo_must_check_helper( \
327({ \ 327({ \
328 typeof(fifo + 1) __tmp = (fifo); \ 328 typeof((fifo) + 1) __tmp = (fifo); \
329 struct __kfifo *__kfifo = &__tmp->kfifo; \ 329 struct __kfifo *__kfifo = &__tmp->kfifo; \
330 __is_kfifo_ptr(__tmp) ? \ 330 __is_kfifo_ptr(__tmp) ? \
331 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ 331 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
339 */ 339 */
340#define kfifo_free(fifo) \ 340#define kfifo_free(fifo) \
341({ \ 341({ \
342 typeof(fifo + 1) __tmp = (fifo); \ 342 typeof((fifo) + 1) __tmp = (fifo); \
343 struct __kfifo *__kfifo = &__tmp->kfifo; \ 343 struct __kfifo *__kfifo = &__tmp->kfifo; \
344 if (__is_kfifo_ptr(__tmp)) \ 344 if (__is_kfifo_ptr(__tmp)) \
345 __kfifo_free(__kfifo); \ 345 __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
358 */ 358 */
359#define kfifo_init(fifo, buffer, size) \ 359#define kfifo_init(fifo, buffer, size) \
360({ \ 360({ \
361 typeof(fifo + 1) __tmp = (fifo); \ 361 typeof((fifo) + 1) __tmp = (fifo); \
362 struct __kfifo *__kfifo = &__tmp->kfifo; \ 362 struct __kfifo *__kfifo = &__tmp->kfifo; \
363 __is_kfifo_ptr(__tmp) ? \ 363 __is_kfifo_ptr(__tmp) ? \
364 __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ 364 __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
379 */ 379 */
380#define kfifo_put(fifo, val) \ 380#define kfifo_put(fifo, val) \
381({ \ 381({ \
382 typeof(fifo + 1) __tmp = (fifo); \ 382 typeof((fifo) + 1) __tmp = (fifo); \
383 typeof(val + 1) __val = (val); \ 383 typeof((val) + 1) __val = (val); \
384 unsigned int __ret; \ 384 unsigned int __ret; \
385 const size_t __recsize = sizeof(*__tmp->rectype); \ 385 const size_t __recsize = sizeof(*__tmp->rectype); \
386 struct __kfifo *__kfifo = &__tmp->kfifo; \ 386 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
421#define kfifo_get(fifo, val) \ 421#define kfifo_get(fifo, val) \
422__kfifo_must_check_helper( \ 422__kfifo_must_check_helper( \
423({ \ 423({ \
424 typeof(fifo + 1) __tmp = (fifo); \ 424 typeof((fifo) + 1) __tmp = (fifo); \
425 typeof(val + 1) __val = (val); \ 425 typeof((val) + 1) __val = (val); \
426 unsigned int __ret; \ 426 unsigned int __ret; \
427 const size_t __recsize = sizeof(*__tmp->rectype); \ 427 const size_t __recsize = sizeof(*__tmp->rectype); \
428 struct __kfifo *__kfifo = &__tmp->kfifo; \ 428 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
462#define kfifo_peek(fifo, val) \ 462#define kfifo_peek(fifo, val) \
463__kfifo_must_check_helper( \ 463__kfifo_must_check_helper( \
464({ \ 464({ \
465 typeof(fifo + 1) __tmp = (fifo); \ 465 typeof((fifo) + 1) __tmp = (fifo); \
466 typeof(val + 1) __val = (val); \ 466 typeof((val) + 1) __val = (val); \
467 unsigned int __ret; \ 467 unsigned int __ret; \
468 const size_t __recsize = sizeof(*__tmp->rectype); \ 468 const size_t __recsize = sizeof(*__tmp->rectype); \
469 struct __kfifo *__kfifo = &__tmp->kfifo; \ 469 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
501 */ 501 */
502#define kfifo_in(fifo, buf, n) \ 502#define kfifo_in(fifo, buf, n) \
503({ \ 503({ \
504 typeof(fifo + 1) __tmp = (fifo); \ 504 typeof((fifo) + 1) __tmp = (fifo); \
505 typeof(buf + 1) __buf = (buf); \ 505 typeof((buf) + 1) __buf = (buf); \
506 unsigned long __n = (n); \ 506 unsigned long __n = (n); \
507 const size_t __recsize = sizeof(*__tmp->rectype); \ 507 const size_t __recsize = sizeof(*__tmp->rectype); \
508 struct __kfifo *__kfifo = &__tmp->kfifo; \ 508 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
554#define kfifo_out(fifo, buf, n) \ 554#define kfifo_out(fifo, buf, n) \
555__kfifo_must_check_helper( \ 555__kfifo_must_check_helper( \
556({ \ 556({ \
557 typeof(fifo + 1) __tmp = (fifo); \ 557 typeof((fifo) + 1) __tmp = (fifo); \
558 typeof(buf + 1) __buf = (buf); \ 558 typeof((buf) + 1) __buf = (buf); \
559 unsigned long __n = (n); \ 559 unsigned long __n = (n); \
560 const size_t __recsize = sizeof(*__tmp->rectype); \ 560 const size_t __recsize = sizeof(*__tmp->rectype); \
561 struct __kfifo *__kfifo = &__tmp->kfifo; \ 561 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
611#define kfifo_from_user(fifo, from, len, copied) \ 611#define kfifo_from_user(fifo, from, len, copied) \
612__kfifo_must_check_helper( \ 612__kfifo_must_check_helper( \
613({ \ 613({ \
614 typeof(fifo + 1) __tmp = (fifo); \ 614 typeof((fifo) + 1) __tmp = (fifo); \
615 const void __user *__from = (from); \ 615 const void __user *__from = (from); \
616 unsigned int __len = (len); \ 616 unsigned int __len = (len); \
617 unsigned int *__copied = (copied); \ 617 unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
639#define kfifo_to_user(fifo, to, len, copied) \ 639#define kfifo_to_user(fifo, to, len, copied) \
640__kfifo_must_check_helper( \ 640__kfifo_must_check_helper( \
641({ \ 641({ \
642 typeof(fifo + 1) __tmp = (fifo); \ 642 typeof((fifo) + 1) __tmp = (fifo); \
643 void __user *__to = (to); \ 643 void __user *__to = (to); \
644 unsigned int __len = (len); \ 644 unsigned int __len = (len); \
645 unsigned int *__copied = (copied); \ 645 unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
666 */ 666 */
667#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ 667#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
668({ \ 668({ \
669 typeof(fifo + 1) __tmp = (fifo); \ 669 typeof((fifo) + 1) __tmp = (fifo); \
670 struct scatterlist *__sgl = (sgl); \ 670 struct scatterlist *__sgl = (sgl); \
671 int __nents = (nents); \ 671 int __nents = (nents); \
672 unsigned int __len = (len); \ 672 unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
690 */ 690 */
691#define kfifo_dma_in_finish(fifo, len) \ 691#define kfifo_dma_in_finish(fifo, len) \
692(void)({ \ 692(void)({ \
693 typeof(fifo + 1) __tmp = (fifo); \ 693 typeof((fifo) + 1) __tmp = (fifo); \
694 unsigned int __len = (len); \ 694 unsigned int __len = (len); \
695 const size_t __recsize = sizeof(*__tmp->rectype); \ 695 const size_t __recsize = sizeof(*__tmp->rectype); \
696 struct __kfifo *__kfifo = &__tmp->kfifo; \ 696 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
717 */ 717 */
718#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ 718#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
719({ \ 719({ \
720 typeof(fifo + 1) __tmp = (fifo); \ 720 typeof((fifo) + 1) __tmp = (fifo); \
721 struct scatterlist *__sgl = (sgl); \ 721 struct scatterlist *__sgl = (sgl); \
722 int __nents = (nents); \ 722 int __nents = (nents); \
723 unsigned int __len = (len); \ 723 unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
741 */ 741 */
742#define kfifo_dma_out_finish(fifo, len) \ 742#define kfifo_dma_out_finish(fifo, len) \
743(void)({ \ 743(void)({ \
744 typeof(fifo + 1) __tmp = (fifo); \ 744 typeof((fifo) + 1) __tmp = (fifo); \
745 unsigned int __len = (len); \ 745 unsigned int __len = (len); \
746 const size_t __recsize = sizeof(*__tmp->rectype); \ 746 const size_t __recsize = sizeof(*__tmp->rectype); \
747 struct __kfifo *__kfifo = &__tmp->kfifo; \ 747 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
766#define kfifo_out_peek(fifo, buf, n) \ 766#define kfifo_out_peek(fifo, buf, n) \
767__kfifo_must_check_helper( \ 767__kfifo_must_check_helper( \
768({ \ 768({ \
769 typeof(fifo + 1) __tmp = (fifo); \ 769 typeof((fifo) + 1) __tmp = (fifo); \
770 typeof(buf + 1) __buf = (buf); \ 770 typeof((buf) + 1) __buf = (buf); \
771 unsigned long __n = (n); \ 771 unsigned long __n = (n); \
772 const size_t __recsize = sizeof(*__tmp->rectype); \ 772 const size_t __recsize = sizeof(*__tmp->rectype); \
773 struct __kfifo *__kfifo = &__tmp->kfifo; \ 773 struct __kfifo *__kfifo = &__tmp->kfifo; \
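
The kfifo edits are pure macro hygiene: every use of a macro parameter gains its own parentheses so that callers can pass arbitrary expressions without the expansion being re-parsed by neighbouring operators. A standalone illustration of the failure class being guarded against (a simplified macro, not the actual kfifo expansion):

#include <stdio.h>

#define DOUBLE_BAD(x)	(x + x)		/* unparenthesized parameter */
#define DOUBLE_GOOD(x)	((x) + (x))

int main(void)
{
	/* DOUBLE_BAD(1 << 2) expands to (1 << 2 + 1 << 2), and since '+'
	 * binds tighter than '<<' that is (1 << 3) << 2 == 32.
	 * The parenthesized form yields the intended 4 + 4 == 8. */
	printf("bad:  %d\n", DOUBLE_BAD(1 << 2));
	printf("good: %d\n", DOUBLE_GOOD(1 << 2));
	return 0;
}
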
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691ee9121..3319a6967626 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
16struct stable_node; 16struct stable_node;
17struct mem_cgroup; 17struct mem_cgroup;
18 18
19struct page *ksm_does_need_to_copy(struct page *page,
20 struct vm_area_struct *vma, unsigned long address);
21
19#ifdef CONFIG_KSM 22#ifdef CONFIG_KSM
20int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 23int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end, int advice, unsigned long *vm_flags); 24 unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
70 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, 73 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
71 * but what if the vma was unmerged while the page was swapped out? 74 * but what if the vma was unmerged while the page was swapped out?
72 */ 75 */
73struct page *ksm_does_need_to_copy(struct page *page, 76static inline int ksm_might_need_to_copy(struct page *page,
74 struct vm_area_struct *vma, unsigned long address);
75static inline struct page *ksm_might_need_to_copy(struct page *page,
76 struct vm_area_struct *vma, unsigned long address) 77 struct vm_area_struct *vma, unsigned long address)
77{ 78{
78 struct anon_vma *anon_vma = page_anon_vma(page); 79 struct anon_vma *anon_vma = page_anon_vma(page);
79 80
80 if (!anon_vma || 81 return anon_vma &&
81 (anon_vma->root == vma->anon_vma->root && 82 (anon_vma->root != vma->anon_vma->root ||
82 page->index == linear_page_index(vma, address))) 83 page->index != linear_page_index(vma, address));
83 return page;
84
85 return ksm_does_need_to_copy(page, vma, address);
86} 84}
87 85
88int page_referenced_ksm(struct page *page, 86int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
115 return 0; 113 return 0;
116} 114}
117 115
118static inline struct page *ksm_might_need_to_copy(struct page *page, 116static inline int ksm_might_need_to_copy(struct page *page,
119 struct vm_area_struct *vma, unsigned long address) 117 struct vm_area_struct *vma, unsigned long address)
120{ 118{
121 return page; 119 return 0;
122} 120}
123 121
124static inline int page_referenced_ksm(struct page *page, 122static inline int page_referenced_ksm(struct page *page,
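
ksm_might_need_to_copy() is reduced to a cheap inline predicate returning int, with the actual copying left to the out-of-line ksm_does_need_to_copy(), now declared unconditionally. A hedged sketch of how a swap-in path would be expected to use the split (control flow illustrative only, not the real mm/memory.c code):

/* Sketch only: predicate first, copy only when required. */
static struct page *example_swapin_fixup(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	if (ksm_might_need_to_copy(page, vma, address)) {
		/* The swapcache page may hold KSM data belonging to another
		 * address; take a private copy before mapping it here. */
		struct page *copy = ksm_does_need_to_copy(page, vma, address);

		if (copy)
			return copy;
	}
	return page;
}
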
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb713b90..f549056fb20b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
150 int i; \ 150 int i; \
151 preempt_disable(); \ 151 preempt_disable(); \
152 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ 152 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
153 for_each_online_cpu(i) { \ 153 for_each_possible_cpu(i) { \
154 arch_spinlock_t *lock; \ 154 arch_spinlock_t *lock; \
155 lock = &per_cpu(name##_lock, i); \ 155 lock = &per_cpu(name##_lock, i); \
156 arch_spin_lock(lock); \ 156 arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
161 void name##_global_unlock(void) { \ 161 void name##_global_unlock(void) { \
162 int i; \ 162 int i; \
163 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ 163 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
164 for_each_online_cpu(i) { \ 164 for_each_possible_cpu(i) { \
165 arch_spinlock_t *lock; \ 165 arch_spinlock_t *lock; \
166 lock = &per_cpu(name##_lock, i); \ 166 lock = &per_cpu(name##_lock, i); \
167 arch_spin_unlock(lock); \ 167 arch_spin_unlock(lock); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18a0f86..45fb2967b66d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@ enum {
335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
337 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 337 ATA_EHI_QUIET = (1 << 3), /* be quiet */
338 ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
338 339
339 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ 340 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
340 ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ 341 ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
723 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ 724 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
724 u8 ctl; /* cache of ATA control register */ 725 u8 ctl; /* cache of ATA control register */
725 u8 last_ctl; /* Cache last written value */ 726 u8 last_ctl; /* Cache last written value */
727 struct ata_link* sff_pio_task_link; /* link currently used */
726 struct delayed_work sff_pio_task; 728 struct delayed_work sff_pio_task;
727#ifdef CONFIG_ATA_BMDMA 729#ifdef CONFIG_ATA_BMDMA
728 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ 730 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
1594extern void ata_sff_irq_clear(struct ata_port *ap); 1596extern void ata_sff_irq_clear(struct ata_port *ap);
1595extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1597extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1596 u8 status, int in_wq); 1598 u8 status, int in_wq);
1597extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); 1599extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
1598extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); 1600extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
1599extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); 1601extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
1600extern unsigned int ata_sff_port_intr(struct ata_port *ap, 1602extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6b1210772ce..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
864int set_page_dirty_lock(struct page *page); 864int set_page_dirty_lock(struct page *page);
865int clear_page_dirty_for_io(struct page *page); 865int clear_page_dirty_for_io(struct page *page);
866 866
867/* Is the vma a continuation of the stack vma above it? */
868static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
869{
870 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
871}
872
867extern unsigned long move_page_tables(struct vm_area_struct *vma, 873extern unsigned long move_page_tables(struct vm_area_struct *vma,
868 unsigned long old_addr, struct vm_area_struct *new_vma, 874 unsigned long old_addr, struct vm_area_struct *new_vma,
869 unsigned long new_addr, unsigned long len); 875 unsigned long new_addr, unsigned long len);
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8faa6e37..245cdacee544 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
38 * [8:0] Byte/block count 38 * [8:0] Byte/block count
39 */ 39 */
40 40
41#define R4_MEMORY_PRESENT (1 << 27)
42
41/* 43/*
42 SDIO status in R5 44 SDIO status in R5
43 Type 45 Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e62648a4d..3984c4eb41fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@ struct zone {
284 unsigned long watermark[NR_WMARK]; 284 unsigned long watermark[NR_WMARK];
285 285
286 /* 286 /*
287 * When free pages are below this point, additional steps are taken
288 * when reading the number of free pages to avoid per-cpu counter
289 * drift allowing watermarks to be breached
290 */
291 unsigned long percpu_drift_mark;
292
293 /*
287 * We don't know if the memory that we're going to allocate will be freeable 294 * We don't know if the memory that we're going to allocate will be freeable
288 * or/and it will be released eventually, so to avoid totally wasting several 295 * or/and it will be released eventually, so to avoid totally wasting several
289 * GB of ram we must reserve some of the lower zone memory (otherwise we risk 296 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
441 return test_bit(ZONE_OOM_LOCKED, &zone->flags); 448 return test_bit(ZONE_OOM_LOCKED, &zone->flags);
442} 449}
443 450
451#ifdef CONFIG_SMP
452unsigned long zone_nr_free_pages(struct zone *zone);
453#else
454#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
455#endif /* CONFIG_SMP */
456
444/* 457/*
445 * The "priority" of VM scanning is how much of the queues we will scan in one 458 * The "priority" of VM scanning is how much of the queues we will scan in one
446 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the 459 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
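
zone_page_state() reads a counter that can lag the per-cpu deltas, so near the watermarks that drift can make a zone look healthier than it is. percpu_drift_mark and the SMP-only zone_nr_free_pages() exist so hot paths keep the cheap read and only pay for an accurate sum when free memory is low. A hedged sketch of that decision, assuming the zone_page_state_snapshot() helper declared in vmstat.h further below:

/* Sketch: cheap counter first, precise per-cpu snapshot only when the
 * cheap value is low enough that drift could breach a watermark. */
static unsigned long example_zone_free_pages(struct zone *zone)
{
	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	if (free < zone->percpu_drift_mark)
		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);

	return free;
}
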
diff --git a/include/linux/module.h b/include/linux/module.h
index 8a6b9fdc7ffa..aace066bad8f 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -686,17 +686,16 @@ extern int module_sysfs_initialized;
686 686
687 687
688#ifdef CONFIG_GENERIC_BUG 688#ifdef CONFIG_GENERIC_BUG
689int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, 689void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
690 struct module *); 690 struct module *);
691void module_bug_cleanup(struct module *); 691void module_bug_cleanup(struct module *);
692 692
693#else /* !CONFIG_GENERIC_BUG */ 693#else /* !CONFIG_GENERIC_BUG */
694 694
695static inline int module_bug_finalize(const Elf_Ehdr *hdr, 695static inline void module_bug_finalize(const Elf_Ehdr *hdr,
696 const Elf_Shdr *sechdrs, 696 const Elf_Shdr *sechdrs,
697 struct module *mod) 697 struct module *mod)
698{ 698{
699 return 0;
700} 699}
701static inline void module_bug_cleanup(struct module *mod) {} 700static inline void module_bug_cleanup(struct module *mod) {}
702#endif /* CONFIG_GENERIC_BUG */ 701#endif /* CONFIG_GENERIC_BUG */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4f5fcc..f363bc8fdc74 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
78# include <linux/mutex-debug.h> 78# include <linux/mutex-debug.h>
79#else 79#else
80# define __DEBUG_MUTEX_INITIALIZER(lockname) 80# define __DEBUG_MUTEX_INITIALIZER(lockname)
81/**
82 * mutex_init - initialize the mutex
83 * @mutex: the mutex to be initialized
84 *
85 * Initialize the mutex to unlocked state.
86 *
87 * It is not allowed to initialize an already locked mutex.
88 */
81# define mutex_init(mutex) \ 89# define mutex_init(mutex) \
82do { \ 90do { \
83 static struct lock_class_key __key; \ 91 static struct lock_class_key __key; \
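
The new kernel-doc spells out the mutex_init() contract: it puts the mutex into the unlocked state and must never be applied to a mutex that is currently locked. Typical usage, sketched with an assumed embedding structure:

#include <linux/mutex.h>

struct example_device {
	struct mutex lock;
	int counter;
};

static void example_device_setup(struct example_device *dev)
{
	/* Initialize exactly once, before the mutex can be contended. */
	mutex_init(&dev->lock);

	mutex_lock(&dev->lock);
	dev->counter = 0;
	mutex_unlock(&dev->lock);
}
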
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d33309e9a6..570fddeb0388 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
393#define PCI_DEVICE_ID_VLSI_82C147 0x0105 393#define PCI_DEVICE_ID_VLSI_82C147 0x0105
394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
395 395
396/* AMD RD890 Chipset */
397#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
398
396#define PCI_VENDOR_ID_ADL 0x1005 399#define PCI_VENDOR_ID_ADL 0x1005
397#define PCI_DEVICE_ID_ADL_2301 0x2301 400#define PCI_DEVICE_ID_ADL_2301 0x2301
398 401
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d50ba858cfe0..d1a9193960f1 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
274 int ret; 274 int ret;
275 275
276 ret = dquot_alloc_space_nodirty(inode, nr); 276 ret = dquot_alloc_space_nodirty(inode, nr);
277 if (!ret) 277 if (!ret) {
278 mark_inode_dirty_sync(inode); 278 /*
279 * Mark inode fully dirty. Since we are allocating blocks, inode
280 * would become fully dirty soon anyway and it reportedly
281 * reduces inode_lock contention.
282 */
283 mark_inode_dirty(inode);
284 }
279 return ret; 285 return ret;
280} 286}
281 287
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9fbc54a2585d..83af1f8d8b74 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
454 * Makes rcu_dereference_check() do the dirty work. 454 * Makes rcu_dereference_check() do the dirty work.
455 */ 455 */
456#define rcu_dereference_bh(p) \ 456#define rcu_dereference_bh(p) \
457 rcu_dereference_check(p, rcu_read_lock_bh_held()) 457 rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled())
458 458
459/** 459/**
460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched 460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839ac890..5310d27abd2a 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@ struct semaphore {
26 .wait_list = LIST_HEAD_INIT((name).wait_list), \ 26 .wait_list = LIST_HEAD_INIT((name).wait_list), \
27} 27}
28 28
29#define DEFINE_SEMAPHORE(name) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
31
29#define DECLARE_MUTEX(name) \ 32#define DECLARE_MUTEX(name) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) 33 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
31 34
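
DEFINE_SEMAPHORE() is added as the successor spelling of DECLARE_MUTEX(): both expand to a semaphore initialized with a count of one, but the new name makes clear the object is a counting semaphore rather than a struct mutex. A hedged usage sketch:

#include <linux/semaphore.h>

/* A binary semaphore protecting a shared resource (count starts at 1). */
static DEFINE_SEMAPHORE(example_sem);

static int example_use_resource(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;	/* interrupted while waiting */

	/* ... exclusive access to the resource ... */

	up(&example_sem);
	return 0;
}
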
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index cc813f95a2f2..c91302f3a257 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -14,7 +14,9 @@
14#define SPI_MODE_OFFSET 6 14#define SPI_MODE_OFFSET 6
15#define SPI_SCPH_OFFSET 6 15#define SPI_SCPH_OFFSET 6
16#define SPI_SCOL_OFFSET 7 16#define SPI_SCOL_OFFSET 7
17
17#define SPI_TMOD_OFFSET 8 18#define SPI_TMOD_OFFSET 8
19#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
18#define SPI_TMOD_TR 0x0 /* xmit & recv */ 20#define SPI_TMOD_TR 0x0 /* xmit & recv */
19#define SPI_TMOD_TO 0x1 /* xmit only */ 21#define SPI_TMOD_TO 0x1 /* xmit only */
20#define SPI_TMOD_RO 0x2 /* recv only */ 22#define SPI_TMOD_RO 0x2 /* recv only */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 569dc722a600..85f38a63f098 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,7 @@ struct rpc_inode;
30 * The high-level client handle 30 * The high-level client handle
31 */ 31 */
32struct rpc_clnt { 32struct rpc_clnt {
33 struct kref cl_kref; /* Number of references */ 33 atomic_t cl_count; /* Number of references */
34 struct list_head cl_clients; /* Global list of clients */ 34 struct list_head cl_clients; /* Global list of clients */
35 struct list_head cl_tasks; /* List of tasks */ 35 struct list_head cl_tasks; /* List of tasks */
36 spinlock_t cl_lock; /* spinlock */ 36 spinlock_t cl_lock; /* spinlock */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a11b73..7cdd63366f88 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@ struct bio;
19#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ 19#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
20#define SWAP_FLAG_PRIO_MASK 0x7fff 20#define SWAP_FLAG_PRIO_MASK 0x7fff
21#define SWAP_FLAG_PRIO_SHIFT 0 21#define SWAP_FLAG_PRIO_SHIFT 0
22#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
22 23
23static inline int current_is_kswapd(void) 24static inline int current_is_kswapd(void)
24{ 25{
@@ -142,7 +143,7 @@ struct swap_extent {
142enum { 143enum {
143 SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ 144 SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
144 SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ 145 SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ 146 SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ 147 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ 148 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
148 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ 149 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
315extern long total_swap_pages; 316extern long total_swap_pages;
316extern void si_swapinfo(struct sysinfo *); 317extern void si_swapinfo(struct sysinfo *);
317extern swp_entry_t get_swap_page(void); 318extern swp_entry_t get_swap_page(void);
319extern swp_entry_t get_swap_page_of_type(int);
318extern int valid_swaphandles(swp_entry_t, unsigned long *); 320extern int valid_swaphandles(swp_entry_t, unsigned long *);
319extern int add_swap_count_continuation(swp_entry_t, gfp_t); 321extern int add_swap_count_continuation(swp_entry_t, gfp_t);
320extern void swap_shmem_alloc(swp_entry_t); 322extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
331extern int try_to_free_swap(struct page *); 333extern int try_to_free_swap(struct page *);
332struct backing_dev_info; 334struct backing_dev_info;
333 335
334#ifdef CONFIG_HIBERNATION
335void hibernation_freeze_swap(void);
336void hibernation_thaw_swap(void);
337swp_entry_t get_swap_for_hibernation(int type);
338void swap_free_for_hibernation(swp_entry_t val);
339#endif
340
341/* linux/mm/thrash.c */ 336/* linux/mm/thrash.c */
342extern struct mm_struct *swap_token_mm; 337extern struct mm_struct *swap_token_mm;
343extern void grab_swap_token(struct mm_struct *); 338extern void grab_swap_token(struct mm_struct *);
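
get_swap_page_of_type(), re-exported above, is what the hibernation code (see kernel/power/swap.c later in this diff) uses to allocate slots from one particular swap device, and SWAP_FLAG_DISCARD is the swapon() flag that, together with device support, enables SWP_DISCARDABLE. A sketch of the allocate/free pairing, assuming the caller already holds a valid swap type index:

    #include <linux/errno.h>
    #include <linux/swap.h>
    #include <linux/swapops.h>

    static int try_one_swap_slot(int type)
    {
            swp_entry_t entry = get_swap_page_of_type(type);

            if (!swp_offset(entry))
                    return -ENOSPC;         /* device full or not active */

            /* ... record swp_offset(entry) somewhere useful ... */
            swap_free(entry);               /* give the slot back */
            return 0;
    }
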
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccdc1d38..eaaea37b3b75 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
170 return x; 170 return x;
171} 171}
172 172
173/*
174 * More accurate version that also considers the currently pending
175 * deltas. For that we need to loop over all cpus to find the current
176 * deltas. There is no synchronization so the result cannot be
177 * exactly accurate either.
178 */
179static inline unsigned long zone_page_state_snapshot(struct zone *zone,
180 enum zone_stat_item item)
181{
182 long x = atomic_long_read(&zone->vm_stat[item]);
183
184#ifdef CONFIG_SMP
185 int cpu;
186 for_each_online_cpu(cpu)
187 x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
188
189 if (x < 0)
190 x = 0;
191#endif
192 return x;
193}
194
173extern unsigned long global_reclaimable_pages(void); 195extern unsigned long global_reclaimable_pages(void);
174extern unsigned long zone_reclaimable_pages(struct zone *zone); 196extern unsigned long zone_reclaimable_pages(struct zone *zone);
175 197
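
zone_page_state_snapshot() above folds the per-cpu vm_stat_diff deltas into the global counter, trading a for_each_online_cpu() walk for better accuracy; it is intended for slow paths such as watermark checks. An illustrative comparison; "zone_free_pages" is a made-up helper:

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    static unsigned long zone_free_pages(struct zone *zone, bool precise)
    {
            if (precise)
                    return zone_page_state_snapshot(zone, NR_FREE_PAGES);
            return zone_page_state(zone, NR_FREE_PAGES);    /* cheap, may lag */
    }
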
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0836ccc57121..3efc9f3f43a0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
614 (wait)->private = current; \ 614 (wait)->private = current; \
615 (wait)->func = autoremove_wake_function; \ 615 (wait)->func = autoremove_wake_function; \
616 INIT_LIST_HEAD(&(wait)->task_list); \ 616 INIT_LIST_HEAD(&(wait)->task_list); \
617 (wait)->flags = 0; \
617 } while (0) 618 } while (0)
618 619
619/** 620/**
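
The extra assignment above makes init_wait() clear wait->flags as well, so an on-stack wait entry never starts out with a stale WQ_FLAG_EXCLUSIVE bit picked up from stack garbage. The usual open-coded wait loop that relies on it, with placeholder names:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static void wait_for_flag(wait_queue_head_t *wq, int *done)
    {
            wait_queue_t wait;

            init_wait(&wait);               /* now also zeroes wait.flags */
            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (*done)
                            break;
                    schedule();
            }
            finish_wait(wq, &wait);
    }
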
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f11100f96482..25e02c941bac 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -235,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
235#define work_clear_pending(work) \ 235#define work_clear_pending(work) \
236 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 236 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
237 237
238/*
239 * Workqueue flags and constants. For details, please refer to
240 * Documentation/workqueue.txt.
241 */
238enum { 242enum {
239 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
240 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
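
The flags documented above are the ones passed to alloc_workqueue(); for example, a sketch with a placeholder queue name:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;

    static int __init my_wq_init(void)
    {
            /* unbound, non-reentrant queue; 0 selects the default max_active */
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_NON_REENTRANT, 0);
            if (!my_wq)
                    return -ENOMEM;
            return 0;
    }
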
diff --git a/ipc/sem.c b/ipc/sem.c
index 40a8f462a822..0e0d49bbb867 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
743 { 743 {
744 struct semid_ds out; 744 struct semid_ds out;
745 745
746 memset(&out, 0, sizeof(out));
747
746 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); 748 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
747 749
748 out.sem_otime = in->sem_otime; 750 out.sem_otime = in->sem_otime;
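
The memset() above closes a kernel stack information leak: struct semid_ds contains padding and unused fields that copy_semid_to_user() would otherwise copy out uninitialized. The general pattern, shown with an illustrative structure:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct my_info {
            int     id;
            long    value;          /* the compiler may insert padding here */
    };

    static long copy_my_info_to_user(void __user *buf, int id, long value)
    {
            struct my_info out;

            memset(&out, 0, sizeof(out));   /* zero padding and unused bytes */
            out.id = id;
            out.value = value;

            return copy_to_user(buf, &out, sizeof(out)) ? -EFAULT : 0;
    }
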
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ed19afd9e3fe..c9483d8f6140 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1798,13 +1798,13 @@ out:
1798int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) 1798int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1799{ 1799{
1800 struct cgroupfs_root *root; 1800 struct cgroupfs_root *root;
1801 struct cgroup *cur_cg;
1802 int retval = 0; 1801 int retval = 0;
1803 1802
1804 cgroup_lock(); 1803 cgroup_lock();
1805 for_each_active_root(root) { 1804 for_each_active_root(root) {
1806 cur_cg = task_cgroup_from_root(from, root); 1805 struct cgroup *from_cg = task_cgroup_from_root(from, root);
1807 retval = cgroup_attach_task(cur_cg, tsk); 1806
1807 retval = cgroup_attach_task(from_cg, tsk);
1808 if (retval) 1808 if (retval)
1809 break; 1809 break;
1810 } 1810 }
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
1126 1126
1127 return 0; 1127 return 0;
1128} 1128}
1129
1130/*
1131 * Allocate user-space memory for the duration of a single system call,
1132 * in order to marshall parameters inside a compat thunk.
1133 */
1134void __user *compat_alloc_user_space(unsigned long len)
1135{
1136 void __user *ptr;
1137
1138 /* If len would occupy more than half of the entire compat space... */
1139 if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
1140 return NULL;
1141
1142 ptr = arch_compat_alloc_user_space(len);
1143
1144 if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
1145 return NULL;
1146
1147 return ptr;
1148}
1149EXPORT_SYMBOL_GPL(compat_alloc_user_space);
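
compat_alloc_user_space() above becomes a common-code wrapper that rejects oversized lengths and verifies the pointer with access_ok(), returning NULL on failure, so callers should check the result. A sketch of a typical compat thunk; every "my_"/"compat_my_" name is hypothetical:

    #include <linux/compat.h>
    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct my_args {                        /* native layout */
            unsigned long len;
    };

    struct compat_my_args {                 /* 32-bit layout */
            compat_ulong_t len;
    };

    static long compat_my_call(struct compat_my_args __user *uargs32)
    {
            struct my_args __user *uargs;
            compat_ulong_t len;

            if (get_user(len, &uargs32->len))
                    return -EFAULT;

            uargs = compat_alloc_user_space(sizeof(*uargs));
            if (!uargs)                     /* bogus length or unusable pointer */
                    return -EFAULT;
            if (put_user((unsigned long)len, &uargs->len))
                    return -EFAULT;

            /* ... hand uargs to the native syscall implementation ... */
            return 0;
    }
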
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
274 int i, bpno; 274 int i, bpno;
275 kdb_bp_t *bp, *bp_check; 275 kdb_bp_t *bp, *bp_check;
276 int diag; 276 int diag;
277 int free;
278 char *symname = NULL; 277 char *symname = NULL;
279 long offset = 0ul; 278 long offset = 0ul;
280 int nextarg; 279 int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
305 /* 304 /*
306 * Find an empty bp structure to allocate 305 * Find an empty bp structure to allocate
307 */ 306 */
308 free = KDB_MAXBPT;
309 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { 307 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
310 if (bp->bp_free) 308 if (bp->bp_free)
311 break; 309 break;
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675d..c445f8cc408d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
356 if (IS_ERR(pol)) 356 if (IS_ERR(pol))
357 goto fail_nomem_policy; 357 goto fail_nomem_policy;
358 vma_set_policy(tmp, pol); 358 vma_set_policy(tmp, pol);
359 tmp->vm_mm = mm;
359 if (anon_vma_fork(tmp, mpnt)) 360 if (anon_vma_fork(tmp, mpnt))
360 goto fail_nomem_anon_vma_fork; 361 goto fail_nomem_anon_vma_fork;
361 tmp->vm_flags &= ~VM_LOCKED; 362 tmp->vm_flags &= ~VM_LOCKED;
362 tmp->vm_mm = mm;
363 tmp->vm_next = tmp->vm_prev = NULL; 363 tmp->vm_next = tmp->vm_prev = NULL;
364 file = tmp->vm_file; 364 file = tmp->vm_file;
365 if (file) { 365 if (file) {
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a3..f83972b16564 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
33 * @children: child nodes 33 * @children: child nodes
34 * @all: list head for list of all nodes 34 * @all: list head for list of all nodes
35 * @parent: parent node 35 * @parent: parent node
36 * @info: associated profiling data structure if not a directory 36 * @loaded_info: array of pointers to profiling data sets for loaded object
37 * @ghost: when an object file containing profiling data is unloaded we keep a 37 * files.
38 * copy of the profiling data here to allow collecting coverage data 38 * @num_loaded: number of profiling data sets for loaded object files.
39 * for cleanup code. Such a node is called a "ghost". 39 * @unloaded_info: accumulated copy of profiling data sets for unloaded
40 * object files. Used only when gcov_persist=1.
40 * @dentry: main debugfs entry, either a directory or data file 41 * @dentry: main debugfs entry, either a directory or data file
41 * @links: associated symbolic links 42 * @links: associated symbolic links
42 * @name: data file basename 43 * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
51 struct list_head children; 52 struct list_head children;
52 struct list_head all; 53 struct list_head all;
53 struct gcov_node *parent; 54 struct gcov_node *parent;
54 struct gcov_info *info; 55 struct gcov_info **loaded_info;
55 struct gcov_info *ghost; 56 struct gcov_info *unloaded_info;
56 struct dentry *dentry; 57 struct dentry *dentry;
57 struct dentry **links; 58 struct dentry **links;
59 int num_loaded;
58 char name[0]; 60 char name[0];
59}; 61};
60 62
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
136}; 138};
137 139
138/* 140/*
139 * Return the profiling data set for a given node. This can either be the 141 * Return a profiling data set associated with the given node. This is
140 * original profiling data structure or a duplicate (also called "ghost") 142 * either a data set for a loaded object file or a data set copy in case
141 * in case the associated object file has been unloaded. 143 * all associated object files have been unloaded.
142 */ 144 */
143static struct gcov_info *get_node_info(struct gcov_node *node) 145static struct gcov_info *get_node_info(struct gcov_node *node)
144{ 146{
145 if (node->info) 147 if (node->num_loaded > 0)
146 return node->info; 148 return node->loaded_info[0];
147 149
148 return node->ghost; 150 return node->unloaded_info;
151}
152
153/*
154 * Return a newly allocated profiling data set which contains the sum of
155 * all profiling data associated with the given node.
156 */
157static struct gcov_info *get_accumulated_info(struct gcov_node *node)
158{
159 struct gcov_info *info;
160 int i = 0;
161
162 if (node->unloaded_info)
163 info = gcov_info_dup(node->unloaded_info);
164 else
165 info = gcov_info_dup(node->loaded_info[i++]);
166 if (!info)
167 return NULL;
168 for (; i < node->num_loaded; i++)
169 gcov_info_add(info, node->loaded_info[i]);
170
171 return info;
149} 172}
150 173
151/* 174/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
163 mutex_lock(&node_lock); 186 mutex_lock(&node_lock);
164 /* 187 /*
165 * Read from a profiling data copy to minimize reference tracking 188 * Read from a profiling data copy to minimize reference tracking
166 * complexity and concurrent access. 189 * complexity and concurrent access and to keep accumulating multiple
190 * profiling data sets associated with one node simple.
167 */ 191 */
168 info = gcov_info_dup(get_node_info(node)); 192 info = get_accumulated_info(node);
169 if (!info) 193 if (!info)
170 goto out_unlock; 194 goto out_unlock;
171 iter = gcov_iter_new(info); 195 iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
225 return NULL; 249 return NULL;
226} 250}
227 251
252/*
253 * Reset all profiling data associated with the specified node.
254 */
255static void reset_node(struct gcov_node *node)
256{
257 int i;
258
259 if (node->unloaded_info)
260 gcov_info_reset(node->unloaded_info);
261 for (i = 0; i < node->num_loaded; i++)
262 gcov_info_reset(node->loaded_info[i]);
263}
264
228static void remove_node(struct gcov_node *node); 265static void remove_node(struct gcov_node *node);
229 266
230/* 267/*
231 * write() implementation for gcov data files. Reset profiling data for the 268 * write() implementation for gcov data files. Reset profiling data for the
232 * associated file. If the object file has been unloaded (i.e. this is 269 * corresponding file. If all associated object files have been unloaded,
233 * a "ghost" node), remove the debug fs node as well. 270 * remove the debug fs node as well.
234 */ 271 */
235static ssize_t gcov_seq_write(struct file *file, const char __user *addr, 272static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
236 size_t len, loff_t *pos) 273 size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
245 node = get_node_by_name(info->filename); 282 node = get_node_by_name(info->filename);
246 if (node) { 283 if (node) {
247 /* Reset counts or remove node for unloaded modules. */ 284 /* Reset counts or remove node for unloaded modules. */
248 if (node->ghost) 285 if (node->num_loaded == 0)
249 remove_node(node); 286 remove_node(node);
250 else 287 else
251 gcov_info_reset(node->info); 288 reset_node(node);
252 } 289 }
253 /* Reset counts for open file. */ 290 /* Reset counts for open file. */
254 gcov_info_reset(info); 291 gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
378 INIT_LIST_HEAD(&node->list); 415 INIT_LIST_HEAD(&node->list);
379 INIT_LIST_HEAD(&node->children); 416 INIT_LIST_HEAD(&node->children);
380 INIT_LIST_HEAD(&node->all); 417 INIT_LIST_HEAD(&node->all);
381 node->info = info; 418 if (node->loaded_info) {
419 node->loaded_info[0] = info;
420 node->num_loaded = 1;
421 }
382 node->parent = parent; 422 node->parent = parent;
383 if (name) 423 if (name)
384 strcpy(node->name, name); 424 strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
394 struct gcov_node *node; 434 struct gcov_node *node;
395 435
396 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); 436 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
397 if (!node) { 437 if (!node)
398 pr_warning("out of memory\n"); 438 goto err_nomem;
399 return NULL; 439 if (info) {
440 node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
441 GFP_KERNEL);
442 if (!node->loaded_info)
443 goto err_nomem;
400 } 444 }
401 init_node(node, info, name, parent); 445 init_node(node, info, name, parent);
402 /* Differentiate between gcov data file nodes and directory nodes. */ 446 /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
416 list_add(&node->all, &all_head); 460 list_add(&node->all, &all_head);
417 461
418 return node; 462 return node;
463
464err_nomem:
465 kfree(node);
466 pr_warning("out of memory\n");
467 return NULL;
419} 468}
420 469
421/* Remove symbolic links associated with node. */ 470/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
441 list_del(&node->all); 490 list_del(&node->all);
442 debugfs_remove(node->dentry); 491 debugfs_remove(node->dentry);
443 remove_links(node); 492 remove_links(node);
444 if (node->ghost) 493 kfree(node->loaded_info);
445 gcov_info_free(node->ghost); 494 if (node->unloaded_info)
495 gcov_info_free(node->unloaded_info);
446 kfree(node); 496 kfree(node);
447} 497}
448 498
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
477 527
478/* 528/*
479 * write() implementation for reset file. Reset all profiling data to zero 529 * write() implementation for reset file. Reset all profiling data to zero
480 * and remove ghost nodes. 530 * and remove nodes for which all associated object files are unloaded.
481 */ 531 */
482static ssize_t reset_write(struct file *file, const char __user *addr, 532static ssize_t reset_write(struct file *file, const char __user *addr,
483 size_t len, loff_t *pos) 533 size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
487 mutex_lock(&node_lock); 537 mutex_lock(&node_lock);
488restart: 538restart:
489 list_for_each_entry(node, &all_head, all) { 539 list_for_each_entry(node, &all_head, all) {
490 if (node->info) 540 if (node->num_loaded > 0)
491 gcov_info_reset(node->info); 541 reset_node(node);
492 else if (list_empty(&node->children)) { 542 else if (list_empty(&node->children)) {
493 remove_node(node); 543 remove_node(node);
494 /* Several nodes may have gone - restart loop. */ 544 /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
564} 614}
565 615
566/* 616/*
567 * The profiling data set associated with this node is being unloaded. Store a 617 * Associate a profiling data set with an existing node. Needs to be called
568 * copy of the profiling data and turn this node into a "ghost". 618 * with node_lock held.
569 */ 619 */
570static int ghost_node(struct gcov_node *node) 620static void add_info(struct gcov_node *node, struct gcov_info *info)
571{ 621{
572 node->ghost = gcov_info_dup(node->info); 622 struct gcov_info **loaded_info;
573 if (!node->ghost) { 623 int num = node->num_loaded;
574 pr_warning("could not save data for '%s' (out of memory)\n", 624
575 node->info->filename); 625 /*
576 return -ENOMEM; 626 * Prepare new array. This is done first to simplify cleanup in
627 * case the new data set is incompatible, the node contains only
628 * unloaded data sets, and there is not enough memory for the array.
629 */
630 loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
631 if (!loaded_info) {
632 pr_warning("could not add '%s' (out of memory)\n",
633 info->filename);
634 return;
635 }
636 memcpy(loaded_info, node->loaded_info,
637 num * sizeof(struct gcov_info *));
638 loaded_info[num] = info;
639 /* Check if the new data set is compatible. */
640 if (num == 0) {
641 /*
642 * A module was unloaded, modified and reloaded. The new
643 * data set replaces the copy of the last one.
644 */
645 if (!gcov_info_is_compatible(node->unloaded_info, info)) {
646 pr_warning("discarding saved data for %s "
647 "(incompatible version)\n", info->filename);
648 gcov_info_free(node->unloaded_info);
649 node->unloaded_info = NULL;
650 }
651 } else {
652 /*
653 * Two different versions of the same object file are loaded.
654 * The initial one takes precedence.
655 */
656 if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
657 pr_warning("could not add '%s' (incompatible "
658 "version)\n", info->filename);
659 kfree(loaded_info);
660 return;
661 }
577 } 662 }
578 node->info = NULL; 663 /* Overwrite previous array. */
664 kfree(node->loaded_info);
665 node->loaded_info = loaded_info;
666 node->num_loaded = num + 1;
667}
579 668
580 return 0; 669/*
670 * Return the index of a profiling data set associated with a node.
671 */
672static int get_info_index(struct gcov_node *node, struct gcov_info *info)
673{
674 int i;
675
676 for (i = 0; i < node->num_loaded; i++) {
677 if (node->loaded_info[i] == info)
678 return i;
679 }
680 return -ENOENT;
581} 681}
582 682
583/* 683/*
584 * Profiling data for this node has been loaded again. Add profiling data 684 * Save the data of a profiling data set which is being unloaded.
585 * from previous instantiation and turn this node into a regular node.
586 */ 685 */
587static void revive_node(struct gcov_node *node, struct gcov_info *info) 686static void save_info(struct gcov_node *node, struct gcov_info *info)
588{ 687{
589 if (gcov_info_is_compatible(node->ghost, info)) 688 if (node->unloaded_info)
590 gcov_info_add(info, node->ghost); 689 gcov_info_add(node->unloaded_info, info);
591 else { 690 else {
592 pr_warning("discarding saved data for '%s' (version changed)\n", 691 node->unloaded_info = gcov_info_dup(info);
692 if (!node->unloaded_info) {
693 pr_warning("could not save data for '%s' "
694 "(out of memory)\n", info->filename);
695 }
696 }
697}
698
699/*
700 * Disassociate a profiling data set from a node. Needs to be called with
701 * node_lock held.
702 */
703static void remove_info(struct gcov_node *node, struct gcov_info *info)
704{
705 int i;
706
707 i = get_info_index(node, info);
708 if (i < 0) {
709 pr_warning("could not remove '%s' (not found)\n",
593 info->filename); 710 info->filename);
711 return;
594 } 712 }
595 gcov_info_free(node->ghost); 713 if (gcov_persist)
596 node->ghost = NULL; 714 save_info(node, info);
597 node->info = info; 715 /* Shrink array. */
716 node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
717 node->num_loaded--;
718 if (node->num_loaded > 0)
719 return;
720 /* Last loaded data set was removed. */
721 kfree(node->loaded_info);
722 node->loaded_info = NULL;
723 node->num_loaded = 0;
724 if (!node->unloaded_info)
725 remove_node(node);
598} 726}
599 727
600/* 728/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
609 node = get_node_by_name(info->filename); 737 node = get_node_by_name(info->filename);
610 switch (action) { 738 switch (action) {
611 case GCOV_ADD: 739 case GCOV_ADD:
612 /* Add new node or revive ghost. */ 740 if (node)
613 if (!node) { 741 add_info(node, info);
742 else
614 add_node(info); 743 add_node(info);
615 break;
616 }
617 if (gcov_persist)
618 revive_node(node, info);
619 else {
620 pr_warning("could not add '%s' (already exists)\n",
621 info->filename);
622 }
623 break; 744 break;
624 case GCOV_REMOVE: 745 case GCOV_REMOVE:
625 /* Remove node or turn into ghost. */ 746 if (node)
626 if (!node) { 747 remove_info(node, info);
748 else {
627 pr_warning("could not remove '%s' (not found)\n", 749 pr_warning("could not remove '%s' (not found)\n",
628 info->filename); 750 info->filename);
629 break;
630 } 751 }
631 if (gcov_persist) {
632 if (!ghost_node(node))
633 break;
634 }
635 remove_node(node);
636 break; 752 break;
637 } 753 }
638 mutex_unlock(&node_lock); 754 mutex_unlock(&node_lock);
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c9492..253dc0f35cf4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
143 right = group_info->ngroups; 143 right = group_info->ngroups;
144 while (left < right) { 144 while (left < right) {
145 unsigned int mid = (left+right)/2; 145 unsigned int mid = (left+right)/2;
146 int cmp = grp - GROUP_AT(group_info, mid); 146 if (grp > GROUP_AT(group_info, mid))
147 if (cmp > 0)
148 left = mid + 1; 147 left = mid + 1;
149 else if (cmp < 0) 148 else if (grp < GROUP_AT(group_info, mid))
150 right = mid; 149 right = mid;
151 else 150 else
152 return 1; 151 return 1;
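
The rewrite above avoids computing "grp - GROUP_AT(group_info, mid)": gid_t is unsigned, so the subtraction can wrap (1 - 0xffffffff truncates to 2, which looks positive) and send the binary search the wrong way for gids above 0x7fffffff. Comparing the values directly is safe, the same idea as this illustrative helper:

    #include <linux/types.h>

    static int gid_cmp(gid_t a, gid_t b)
    {
            if (a > b)
                    return 1;
            if (a < b)
                    return -1;
            return 0;
    }
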
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
1091 */ 1091 */
1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1093{ 1093{
1094 struct hrtimer_clock_base *base;
1095 unsigned long flags; 1094 unsigned long flags;
1096 ktime_t rem; 1095 ktime_t rem;
1097 1096
1098 base = lock_hrtimer_base(timer, &flags); 1097 lock_hrtimer_base(timer, &flags);
1099 rem = hrtimer_expires_remaining(timer); 1098 rem = hrtimer_expires_remaining(timer);
1100 unlock_hrtimer_base(timer, &flags); 1099 unlock_hrtimer_base(timer, &flags);
1101 1100
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2bf..c7c2aed9e2dc 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
433 perf_overflow_handler_t triggered, 433 perf_overflow_handler_t triggered,
434 struct task_struct *tsk) 434 struct task_struct *tsk)
435{ 435{
436 return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); 436 return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
437 triggered);
437} 438}
438EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); 439EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
439 440
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 6b5580c57644..01a0700e873f 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
365 n = setup_sgl_buf(sgl, fifo->data + off, nents, l); 365 n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
366 n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); 366 n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
367 367
368 if (n)
369 sg_mark_end(sgl + n - 1);
370 return n; 368 return n;
371} 369}
372 370
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8db11b4..ccd641991842 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod)
1537{ 1537{
1538 struct module *mod = _mod; 1538 struct module *mod = _mod;
1539 list_del(&mod->list); 1539 list_del(&mod->list);
1540 module_bug_cleanup(mod);
1540 return 0; 1541 return 0;
1541} 1542}
1542 1543
@@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod,
2625 if (err < 0) 2626 if (err < 0)
2626 goto ddebug; 2627 goto ddebug;
2627 2628
2629 module_bug_finalize(info.hdr, info.sechdrs, mod);
2628 list_add_rcu(&mod->list, &modules); 2630 list_add_rcu(&mod->list, &modules);
2629 mutex_unlock(&module_mutex); 2631 mutex_unlock(&module_mutex);
2630 2632
@@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod,
2650 mutex_lock(&module_mutex); 2652 mutex_lock(&module_mutex);
2651 /* Unlink carefully: kallsyms could be walking list. */ 2653 /* Unlink carefully: kallsyms could be walking list. */
2652 list_del_rcu(&mod->list); 2654 list_del_rcu(&mod->list);
2655 module_bug_cleanup(mod);
2656
2653 ddebug: 2657 ddebug:
2654 if (!mod->taints) 2658 if (!mod->taints)
2655 dynamic_debug_remove(info.debug); 2659 dynamic_debug_remove(info.debug);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
36# include <asm/mutex.h> 36# include <asm/mutex.h>
37#endif 37#endif
38 38
39/***
40 * mutex_init - initialize the mutex
41 * @lock: the mutex to be initialized
42 * @key: the lock_class_key for the class; used by mutex lock debugging
43 *
44 * Initialize the mutex to unlocked state.
45 *
46 * It is not allowed to initialize an already locked mutex.
47 */
48void 39void
49__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 40__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
50{ 41{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
68static __used noinline void __sched 59static __used noinline void __sched
69__mutex_lock_slowpath(atomic_t *lock_count); 60__mutex_lock_slowpath(atomic_t *lock_count);
70 61
71/*** 62/**
72 * mutex_lock - acquire the mutex 63 * mutex_lock - acquire the mutex
73 * @lock: the mutex to be acquired 64 * @lock: the mutex to be acquired
74 * 65 *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
105 96
106static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); 97static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
107 98
108/*** 99/**
109 * mutex_unlock - release the mutex 100 * mutex_unlock - release the mutex
110 * @lock: the mutex to be released 101 * @lock: the mutex to be released
111 * 102 *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
364static noinline int __sched 355static noinline int __sched
365__mutex_lock_interruptible_slowpath(atomic_t *lock_count); 356__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
366 357
367/*** 358/**
368 * mutex_lock_interruptible - acquire the mutex, interruptable 359 * mutex_lock_interruptible - acquire the mutex, interruptible
369 * @lock: the mutex to be acquired 360 * @lock: the mutex to be acquired
370 * 361 *
371 * Lock the mutex like mutex_lock(), and return 0 if the mutex has 362 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
456 return prev == 1; 447 return prev == 1;
457} 448}
458 449
459/*** 450/**
460 * mutex_trylock - try acquire the mutex, without waiting 451 * mutex_trylock - try to acquire the mutex, without waiting
461 * @lock: the mutex to be acquired 452 * @lock: the mutex to be acquired
462 * 453 *
463 * Try to acquire the mutex atomically. Returns 1 if the mutex 454 * Try to acquire the mutex atomically. Returns 1 if the mutex
464 * has been acquired successfully, and 0 on contention. 455 * has been acquired successfully, and 0 on contention.
465 * 456 *
466 * NOTE: this function follows the spin_trylock() convention, so 457 * NOTE: this function follows the spin_trylock() convention, so
467 * it is negated to the down_trylock() return values! Be careful 458 * it is negated from the down_trylock() return values! Be careful
468 * about this when converting semaphore users to mutexes. 459 * about this when converting semaphore users to mutexes.
469 * 460 *
470 * This function must not be used in interrupt context. The 461 * This function must not be used in interrupt context. The
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..db5b56064687 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
402 } 402 }
403} 403}
404 404
405static inline int
406event_filter_match(struct perf_event *event)
407{
408 return event->cpu == -1 || event->cpu == smp_processor_id();
409}
410
405static void 411static void
406event_sched_out(struct perf_event *event, 412event_sched_out(struct perf_event *event,
407 struct perf_cpu_context *cpuctx, 413 struct perf_cpu_context *cpuctx,
408 struct perf_event_context *ctx) 414 struct perf_event_context *ctx)
409{ 415{
416 u64 delta;
417 /*
418 * An event which could not be activated because of
419 * filter mismatch still needs to have its timings
420 * maintained, otherwise bogus information is returned
421 * via read() for time_enabled, time_running:
422 */
423 if (event->state == PERF_EVENT_STATE_INACTIVE
424 && !event_filter_match(event)) {
425 delta = ctx->time - event->tstamp_stopped;
426 event->tstamp_running += delta;
427 event->tstamp_stopped = ctx->time;
428 }
429
410 if (event->state != PERF_EVENT_STATE_ACTIVE) 430 if (event->state != PERF_EVENT_STATE_ACTIVE)
411 return; 431 return;
412 432
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
432 struct perf_event_context *ctx) 452 struct perf_event_context *ctx)
433{ 453{
434 struct perf_event *event; 454 struct perf_event *event;
435 455 int state = group_event->state;
436 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
437 return;
438 456
439 event_sched_out(group_event, cpuctx, ctx); 457 event_sched_out(group_event, cpuctx, ctx);
440 458
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
444 list_for_each_entry(event, &group_event->sibling_list, group_entry) 462 list_for_each_entry(event, &group_event->sibling_list, group_entry)
445 event_sched_out(event, cpuctx, ctx); 463 event_sched_out(event, cpuctx, ctx);
446 464
447 if (group_event->attr.exclusive) 465 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
448 cpuctx->exclusive = 0; 466 cpuctx->exclusive = 0;
449} 467}
450 468
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5743{ 5761{
5744 unsigned int cpu = (long)hcpu; 5762 unsigned int cpu = (long)hcpu;
5745 5763
5746 switch (action) { 5764 switch (action & ~CPU_TASKS_FROZEN) {
5747 5765
5748 case CPU_UP_PREPARE: 5766 case CPU_UP_PREPARE:
5749 case CPU_UP_PREPARE_FROZEN: 5767 case CPU_DOWN_FAILED:
5750 perf_event_init_cpu(cpu); 5768 perf_event_init_cpu(cpu);
5751 break; 5769 break;
5752 5770
5771 case CPU_UP_CANCELED:
5753 case CPU_DOWN_PREPARE: 5772 case CPU_DOWN_PREPARE:
5754 case CPU_DOWN_PREPARE_FROZEN:
5755 perf_event_exit_cpu(cpu); 5773 perf_event_exit_cpu(cpu);
5756 break; 5774 break;
5757 5775
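
Masking out CPU_TASKS_FROZEN above lets one switch statement handle both normal hotplug and the _FROZEN variants used during suspend/resume, while pairing CPU_UP_PREPARE with CPU_DOWN_FAILED (and CPU_DOWN_PREPARE with CPU_UP_CANCELED) keeps the per-cpu state balanced when a transition is aborted. The generic shape of such a callback; "my_cpu_callback" is illustrative:

    #include <linux/cpu.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            pr_debug("cpu %u: hotplug action %lu\n", cpu, action);

            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_UP_PREPARE:
            case CPU_DOWN_FAILED:
                    /* set up per-cpu state for "cpu" */
                    break;
            case CPU_UP_CANCELED:
            case CPU_DOWN_PREPARE:
                    /* tear down per-cpu state for "cpu" */
                    break;
            }
            return NOTIFY_OK;
    }
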
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index b7e4c362361b..645e541a45f6 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
389 } else if (count == 11) { /* len('0x12345678/0') */ 389 } else if (count == 11) { /* len('0x12345678/0') */
390 if (copy_from_user(ascii_value, buf, 11)) 390 if (copy_from_user(ascii_value, buf, 11))
391 return -EFAULT; 391 return -EFAULT;
392 if (strlen(ascii_value) != 10)
393 return -EINVAL;
392 x = sscanf(ascii_value, "%x", &value); 394 x = sscanf(ascii_value, "%x", &value);
393 if (x != 1) 395 if (x != 1)
394 return -EINVAL; 396 return -EINVAL;
395 pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); 397 pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
396 } else 398 } else
397 return -EINVAL; 399 return -EINVAL;
398 400
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
338 goto Close; 338 goto Close;
339 339
340 suspend_console(); 340 suspend_console();
341 hibernation_freeze_swap();
342 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 341 saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
343 error = dpm_suspend_start(PMSG_FREEZE); 342 error = dpm_suspend_start(PMSG_FREEZE);
344 if (error) 343 if (error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..d3f795f01bbc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
1086 buffer = NULL; 1086 buffer = NULL;
1087 alloc_normal = 0; 1087 alloc_normal = 0;
1088 alloc_highmem = 0; 1088 alloc_highmem = 0;
1089 hibernation_thaw_swap();
1090} 1089}
1091 1090
1092/* Helper functions used for the shrinking of memory. */ 1091/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1122 return nr_alloc; 1121 return nr_alloc;
1123} 1122}
1124 1123
1125static unsigned long preallocate_image_memory(unsigned long nr_pages) 1124static unsigned long preallocate_image_memory(unsigned long nr_pages,
1125 unsigned long avail_normal)
1126{ 1126{
1127 return preallocate_image_pages(nr_pages, GFP_IMAGE); 1127 unsigned long alloc;
1128
1129 if (avail_normal <= alloc_normal)
1130 return 0;
1131
1132 alloc = avail_normal - alloc_normal;
1133 if (nr_pages < alloc)
1134 alloc = nr_pages;
1135
1136 return preallocate_image_pages(alloc, GFP_IMAGE);
1128} 1137}
1129 1138
1130#ifdef CONFIG_HIGHMEM 1139#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1170 */ 1179 */
1171static void free_unnecessary_pages(void) 1180static void free_unnecessary_pages(void)
1172{ 1181{
1173 unsigned long save_highmem, to_free_normal, to_free_highmem; 1182 unsigned long save, to_free_normal, to_free_highmem;
1174 1183
1175 to_free_normal = alloc_normal - count_data_pages(); 1184 save = count_data_pages();
1176 save_highmem = count_highmem_pages(); 1185 if (alloc_normal >= save) {
1177 if (alloc_highmem > save_highmem) { 1186 to_free_normal = alloc_normal - save;
1178 to_free_highmem = alloc_highmem - save_highmem; 1187 save = 0;
1188 } else {
1189 to_free_normal = 0;
1190 save -= alloc_normal;
1191 }
1192 save += count_highmem_pages();
1193 if (alloc_highmem >= save) {
1194 to_free_highmem = alloc_highmem - save;
1179 } else { 1195 } else {
1180 to_free_highmem = 0; 1196 to_free_highmem = 0;
1181 to_free_normal -= save_highmem - alloc_highmem; 1197 to_free_normal -= save - alloc_highmem;
1182 } 1198 }
1183 1199
1184 memory_bm_position_reset(&copy_bm); 1200 memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
1259{ 1275{
1260 struct zone *zone; 1276 struct zone *zone;
1261 unsigned long saveable, size, max_size, count, highmem, pages = 0; 1277 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1262 unsigned long alloc, save_highmem, pages_highmem; 1278 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1263 struct timeval start, stop; 1279 struct timeval start, stop;
1264 int error; 1280 int error;
1265 1281
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
1296 else 1312 else
1297 count += zone_page_state(zone, NR_FREE_PAGES); 1313 count += zone_page_state(zone, NR_FREE_PAGES);
1298 } 1314 }
1315 avail_normal = count;
1299 count += highmem; 1316 count += highmem;
1300 count -= totalreserve_pages; 1317 count -= totalreserve_pages;
1301 1318
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
1310 */ 1327 */
1311 if (size >= saveable) { 1328 if (size >= saveable) {
1312 pages = preallocate_image_highmem(save_highmem); 1329 pages = preallocate_image_highmem(save_highmem);
1313 pages += preallocate_image_memory(saveable - pages); 1330 pages += preallocate_image_memory(saveable - pages, avail_normal);
1314 goto out; 1331 goto out;
1315 } 1332 }
1316 1333
1317 /* Estimate the minimum size of the image. */ 1334 /* Estimate the minimum size of the image. */
1318 pages = minimum_image_size(saveable); 1335 pages = minimum_image_size(saveable);
1336 /*
1337 * To avoid excessive pressure on the normal zone, leave room in it to
1338 * accommodate an image of the minimum size (unless it's already too
1339 * small, in which case don't preallocate pages from it at all).
1340 */
1341 if (avail_normal > pages)
1342 avail_normal -= pages;
1343 else
1344 avail_normal = 0;
1319 if (size < pages) 1345 if (size < pages)
1320 size = min_t(unsigned long, pages, max_size); 1346 size = min_t(unsigned long, pages, max_size);
1321 1347
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
1336 */ 1362 */
1337 pages_highmem = preallocate_image_highmem(highmem / 2); 1363 pages_highmem = preallocate_image_highmem(highmem / 2);
1338 alloc = (count - max_size) - pages_highmem; 1364 alloc = (count - max_size) - pages_highmem;
1339 pages = preallocate_image_memory(alloc); 1365 pages = preallocate_image_memory(alloc, avail_normal);
1340 if (pages < alloc) 1366 if (pages < alloc) {
1341 goto err_out; 1367 /* We have exhausted non-highmem pages, try highmem. */
1342 size = max_size - size; 1368 alloc -= pages;
1343 alloc = size; 1369 pages += pages_highmem;
1344 size = preallocate_highmem_fraction(size, highmem, count); 1370 pages_highmem = preallocate_image_highmem(alloc);
1345 pages_highmem += size; 1371 if (pages_highmem < alloc)
1346 alloc -= size; 1372 goto err_out;
1347 pages += preallocate_image_memory(alloc); 1373 pages += pages_highmem;
1348 pages += pages_highmem; 1374 /*
1375 * size is the desired number of saveable pages to leave in
1376 * memory, so try to preallocate (all memory - size) pages.
1377 */
1378 alloc = (count - pages) - size;
1379 pages += preallocate_image_highmem(alloc);
1380 } else {
1381 /*
1382 * There are approximately max_size saveable pages at this point
1383 * and we want to reduce this number down to size.
1384 */
1385 alloc = max_size - size;
1386 size = preallocate_highmem_fraction(alloc, highmem, count);
1387 pages_highmem += size;
1388 alloc -= size;
1389 size = preallocate_image_memory(alloc, avail_normal);
1390 pages_highmem += preallocate_image_highmem(alloc - size);
1391 pages += pages_highmem + size;
1392 }
1349 1393
1350 /* 1394 /*
1351 * We only need as many page frames for the image as there are saveable 1395 * We only need as many page frames for the image as there are saveable
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
136{ 136{
137 unsigned long offset; 137 unsigned long offset;
138 138
139 offset = swp_offset(get_swap_for_hibernation(swap)); 139 offset = swp_offset(get_swap_page_of_type(swap));
140 if (offset) { 140 if (offset) {
141 if (swsusp_extents_insert(offset)) 141 if (swsusp_extents_insert(offset))
142 swap_free_for_hibernation(swp_entry(swap, offset)); 142 swap_free(swp_entry(swap, offset));
143 else 143 else
144 return swapdev_block(swap, offset); 144 return swapdev_block(swap, offset);
145 } 145 }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
163 ext = container_of(node, struct swsusp_extent, node); 163 ext = container_of(node, struct swsusp_extent, node);
164 rb_erase(node, &swsusp_extents); 164 rb_erase(node, &swsusp_extents);
165 for (offset = ext->start; offset <= ext->end; offset++) 165 for (offset = ext->start; offset <= ext->end; offset++)
166 swap_free_for_hibernation(swp_entry(swap, offset)); 166 swap_free(swp_entry(swap, offset));
167 167
168 kfree(ext); 168 kfree(ext);
169 } 169 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 09b574e7f4df..dc85ceb90832 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) 1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1295{ 1295{
1296} 1296}
1297
1298static void sched_avg_update(struct rq *rq)
1299{
1300}
1297#endif /* CONFIG_SMP */ 1301#endif /* CONFIG_SMP */
1298 1302
1299#if BITS_PER_LONG == 32 1303#if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
3182 3186
3183 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 3187 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3184 } 3188 }
3189
3190 sched_avg_update(this_rq);
3185} 3191}
3186 3192
3187static void update_cpu_load_active(struct rq *this_rq) 3193static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3507 rtime = nsecs_to_cputime(p->se.sum_exec_runtime); 3513 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3508 3514
3509 if (total) { 3515 if (total) {
3510 u64 temp; 3516 u64 temp = rtime;
3511 3517
3512 temp = (u64)(rtime * utime); 3518 temp *= utime;
3513 do_div(temp, total); 3519 do_div(temp, total);
3514 utime = (cputime_t)temp; 3520 utime = (cputime_t)temp;
3515 } else 3521 } else
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3540 rtime = nsecs_to_cputime(cputime.sum_exec_runtime); 3546 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3541 3547
3542 if (total) { 3548 if (total) {
3543 u64 temp; 3549 u64 temp = rtime;
3544 3550
3545 temp = (u64)(rtime * cputime.utime); 3551 temp *= cputime.utime;
3546 do_div(temp, total); 3552 do_div(temp, total);
3547 utime = (cputime_t)temp; 3553 utime = (cputime_t)temp;
3548 } else 3554 } else
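
The rewritten scaling above matters on 32-bit kernels, where cputime_t is typically 32 bits wide: "(u64)(rtime * utime)" multiplies in 32 bits first and only then widens, so the product can silently wrap. Assigning rtime to a u64 before multiplying keeps the whole computation in 64 bits. A standalone sketch of the safe form; "scale_utime" is not a kernel function:

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Returns rtime * utime / total, computed entirely in 64 bits.
     * The caller must guarantee total != 0, as task_times() does.
     */
    static u64 scale_utime(u32 utime, u32 rtime, u32 total)
    {
            u64 temp = rtime;       /* widen *before* multiplying */

            temp *= utime;          /* 64-bit product, cannot wrap for 32-bit inputs */
            do_div(temp, total);
            return temp;
    }
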
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc4895..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
54 * Minimal preemption granularity for CPU-bound tasks: 54 * Minimal preemption granularity for CPU-bound tasks:
55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) 55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
56 */ 56 */
57unsigned int sysctl_sched_min_granularity = 2000000ULL; 57unsigned int sysctl_sched_min_granularity = 750000ULL;
58unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; 58unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
59 59
60/* 60/*
61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity 61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
62 */ 62 */
63static unsigned int sched_nr_latency = 3; 63static unsigned int sched_nr_latency = 8;
64 64
65/* 65/*
66 * After fork, child runs first. If set to 0 (default) then 66 * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
1313find_idlest_group(struct sched_domain *sd, struct task_struct *p, 1313find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1314 int this_cpu, int load_idx) 1314 int this_cpu, int load_idx)
1315{ 1315{
1316 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; 1316 struct sched_group *idlest = NULL, *group = sd->groups;
1317 unsigned long min_load = ULONG_MAX, this_load = 0; 1317 unsigned long min_load = ULONG_MAX, this_load = 0;
1318 int imbalance = 100 + (sd->imbalance_pct-100)/2; 1318 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1319 1319
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1348 1348
1349 if (local_group) { 1349 if (local_group) {
1350 this_load = avg_load; 1350 this_load = avg_load;
1351 this = group;
1352 } else if (avg_load < min_load) { 1351 } else if (avg_load < min_load) {
1353 min_load = avg_load; 1352 min_load = avg_load;
1354 idlest = group; 1353 idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
2268 struct rq *rq = cpu_rq(cpu); 2267 struct rq *rq = cpu_rq(cpu);
2269 u64 total, available; 2268 u64 total, available;
2270 2269
2271 sched_avg_update(rq);
2272
2273 total = sched_avg_period() + (rq->clock - rq->age_stamp); 2270 total = sched_avg_period() + (rq->clock - rq->age_stamp);
2274 available = total - rq->rt_avg; 2271 available = total - rq->rt_avg;
2275 2272
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
3633 if (time_before(now, nohz.next_balance)) 3630 if (time_before(now, nohz.next_balance))
3634 return 0; 3631 return 0;
3635 3632
3636 if (!rq->nr_running) 3633 if (rq->idle_at_tick)
3637 return 0; 3634 return 0;
3638 3635
3639 first_pick_cpu = atomic_read(&nohz.first_pick_cpu); 3636 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c970c715d3..ed6aacfcb7ef 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -365,9 +365,10 @@ call:
365EXPORT_SYMBOL_GPL(smp_call_function_any); 365EXPORT_SYMBOL_GPL(smp_call_function_any);
366 366
367/** 367/**
368 * __smp_call_function_single(): Run a function on another CPU 368 * __smp_call_function_single(): Run a function on a specific CPU
369 * @cpu: The CPU to run on. 369 * @cpu: The CPU to run on.
370 * @data: Pre-allocated and setup data structure 370 * @data: Pre-allocated and setup data structure
371 * @wait: If true, wait until function has completed on specified CPU.
371 * 372 *
372 * Like smp_call_function_single(), but allow caller to pass in a 373 * Like smp_call_function_single(), but allow caller to pass in a
373 * pre-allocated data structure. Useful for embedding @data inside 374 * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
376void __smp_call_function_single(int cpu, struct call_single_data *data, 377void __smp_call_function_single(int cpu, struct call_single_data *data,
377 int wait) 378 int wait)
378{ 379{
379 csd_lock(data); 380 unsigned int this_cpu;
381 unsigned long flags;
380 382
383 this_cpu = get_cpu();
381 /* 384 /*
382 * Can deadlock when called with interrupts disabled. 385 * Can deadlock when called with interrupts disabled.
383 * We allow cpu's that are not yet online though, as no one else can 386 * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
387 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() 390 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
388 && !oops_in_progress); 391 && !oops_in_progress);
389 392
390 generic_exec_single(cpu, data, wait); 393 if (cpu == this_cpu) {
394 local_irq_save(flags);
395 data->func(data->info);
396 local_irq_restore(flags);
397 } else {
398 csd_lock(data);
399 generic_exec_single(cpu, data, wait);
400 }
401 put_cpu();
391} 402}
392 403
393/** 404/**
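
With the change above, __smp_call_function_single() handles the case where the target is the local CPU by invoking the function directly with interrupts disabled instead of going through the IPI path. A caller sketch with an embedded call_single_data; the names are placeholders:

    #include <linux/smp.h>

    static void my_ipi_func(void *info)
    {
            /* runs on the target CPU, or locally with interrupts off */
    }

    static struct call_single_data my_csd = {
            .func = my_ipi_func,
            .info = NULL,
    };

    static void my_kick_cpu(int cpu)
    {
            __smp_call_function_single(cpu, &my_csd, 0);    /* don't wait */
    }
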
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
931 pgid = pid; 931 pgid = pid;
932 if (pgid < 0) 932 if (pgid < 0)
933 return -EINVAL; 933 return -EINVAL;
934 rcu_read_lock();
934 935
935 /* From this point forward we keep holding onto the tasklist lock 936 /* From this point forward we keep holding onto the tasklist lock
936 * so that our parent does not change from under us. -DaveM 937 * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
984out: 985out:
985 /* All paths lead to here, thus we are safe. -DaveM */ 986 /* All paths lead to here, thus we are safe. -DaveM */
986 write_unlock_irq(&tasklist_lock); 987 write_unlock_irq(&tasklist_lock);
988 rcu_read_unlock();
987 return err; 989 return err;
988} 990}
989 991
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
1713{ 1713{
1714 sysctl_set_parent(NULL, root_table); 1714 sysctl_set_parent(NULL, root_table);
1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1716 { 1716 sysctl_check_table(current->nsproxy, root_table);
1717 int err;
1718 err = sysctl_check_table(current->nsproxy, root_table);
1719 }
1720#endif 1717#endif
1721 return 0; 1718 return 0;
1722} 1719}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..fa7ece649fe1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
381{ 381{
382 struct ftrace_profile *rec = v; 382 struct ftrace_profile *rec = v;
383 char str[KSYM_SYMBOL_LEN]; 383 char str[KSYM_SYMBOL_LEN];
384 int ret = 0;
384#ifdef CONFIG_FUNCTION_GRAPH_TRACER 385#ifdef CONFIG_FUNCTION_GRAPH_TRACER
385 static DEFINE_MUTEX(mutex);
386 static struct trace_seq s; 386 static struct trace_seq s;
387 unsigned long long avg; 387 unsigned long long avg;
388 unsigned long long stddev; 388 unsigned long long stddev;
389#endif 389#endif
390 mutex_lock(&ftrace_profile_lock);
391
392 /* we raced with function_profile_reset() */
393 if (unlikely(rec->counter == 0)) {
394 ret = -EBUSY;
395 goto out;
396 }
390 397
391 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 398 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
392 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 399 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
408 do_div(stddev, (rec->counter - 1) * 1000); 415 do_div(stddev, (rec->counter - 1) * 1000);
409 } 416 }
410 417
411 mutex_lock(&mutex);
412 trace_seq_init(&s); 418 trace_seq_init(&s);
413 trace_print_graph_duration(rec->time, &s); 419 trace_print_graph_duration(rec->time, &s);
414 trace_seq_puts(&s, " "); 420 trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
416 trace_seq_puts(&s, " "); 422 trace_seq_puts(&s, " ");
417 trace_print_graph_duration(stddev, &s); 423 trace_print_graph_duration(stddev, &s);
418 trace_print_seq(m, &s); 424 trace_print_seq(m, &s);
419 mutex_unlock(&mutex);
420#endif 425#endif
421 seq_putc(m, '\n'); 426 seq_putc(m, '\n');
427out:
428 mutex_unlock(&ftrace_profile_lock);
422 429
423 return 0; 430 return ret;
424} 431}
425 432
426static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 433static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
1503 if (*pos > 0) 1510 if (*pos > 0)
1504 return t_hash_start(m, pos); 1511 return t_hash_start(m, pos);
1505 iter->flags |= FTRACE_ITER_PRINTALL; 1512 iter->flags |= FTRACE_ITER_PRINTALL;
1513 /* reset in case of seek/pread */
1514 iter->flags &= ~FTRACE_ITER_HASH;
1506 return iter; 1515 return iter;
1507 } 1516 }
1508 1517
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = {
2409 .open = ftrace_filter_open, 2418 .open = ftrace_filter_open,
2410 .read = seq_read, 2419 .read = seq_read,
2411 .write = ftrace_filter_write, 2420 .write = ftrace_filter_write,
2412 .llseek = ftrace_regex_lseek, 2421 .llseek = no_llseek,
2413 .release = ftrace_filter_release, 2422 .release = ftrace_filter_release,
2414}; 2423};
2415 2424
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3c3028..492197e2f86c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2985 2985
2986static void rb_advance_iter(struct ring_buffer_iter *iter) 2986static void rb_advance_iter(struct ring_buffer_iter *iter)
2987{ 2987{
2988 struct ring_buffer *buffer;
2989 struct ring_buffer_per_cpu *cpu_buffer; 2988 struct ring_buffer_per_cpu *cpu_buffer;
2990 struct ring_buffer_event *event; 2989 struct ring_buffer_event *event;
2991 unsigned length; 2990 unsigned length;
2992 2991
2993 cpu_buffer = iter->cpu_buffer; 2992 cpu_buffer = iter->cpu_buffer;
2994 buffer = cpu_buffer->buffer;
2995 2993
2996 /* 2994 /*
2997 * Check if we are at the end of the buffer. 2995 * Check if we are at the end of the buffer.
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..31cc4cb0dbf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
91 tp_event->class && tp_event->class->reg && 91 tp_event->class && tp_event->class->reg &&
92 try_module_get(tp_event->mod)) { 92 try_module_get(tp_event->mod)) {
93 ret = perf_trace_event_init(tp_event, p_event); 93 ret = perf_trace_event_init(tp_event, p_event);
94 if (ret)
95 module_put(tp_event->mod);
94 break; 96 break;
95 } 97 }
96 } 98 }
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event)
146 } 148 }
147 } 149 }
148out: 150out:
151 module_put(tp_event->mod);
149 mutex_unlock(&event_mutex); 152 mutex_unlock(&event_mutex);
150} 153}
151 154
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8b27c9849b42..544301d29dee 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
514static int kretprobe_dispatcher(struct kretprobe_instance *ri, 514static int kretprobe_dispatcher(struct kretprobe_instance *ri,
515 struct pt_regs *regs); 515 struct pt_regs *regs);
516 516
517/* Check the name is good for event/group */ 517/* Check the name is good for event/group/fields */
518static int check_event_name(const char *name) 518static int is_good_name(const char *name)
519{ 519{
520 if (!isalpha(*name) && *name != '_') 520 if (!isalpha(*name) && *name != '_')
521 return 0; 521 return 0;
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
557 else 557 else
558 tp->rp.kp.pre_handler = kprobe_dispatcher; 558 tp->rp.kp.pre_handler = kprobe_dispatcher;
559 559
560 if (!event || !check_event_name(event)) { 560 if (!event || !is_good_name(event)) {
561 ret = -EINVAL; 561 ret = -EINVAL;
562 goto error; 562 goto error;
563 } 563 }
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
567 if (!tp->call.name) 567 if (!tp->call.name)
568 goto error; 568 goto error;
569 569
570 if (!group || !check_event_name(group)) { 570 if (!group || !is_good_name(group)) {
571 ret = -EINVAL; 571 ret = -EINVAL;
572 goto error; 572 goto error;
573 } 573 }
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
883 int i, ret = 0; 883 int i, ret = 0;
884 int is_return = 0, is_delete = 0; 884 int is_return = 0, is_delete = 0;
885 char *symbol = NULL, *event = NULL, *group = NULL; 885 char *symbol = NULL, *event = NULL, *group = NULL;
886 char *arg, *tmp; 886 char *arg;
887 unsigned long offset = 0; 887 unsigned long offset = 0;
888 void *addr = NULL; 888 void *addr = NULL;
889 char buf[MAX_EVENT_NAME_LEN]; 889 char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
992 /* parse arguments */ 992 /* parse arguments */
993 ret = 0; 993 ret = 0;
994 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 994 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
995 /* Increment count for freeing args in error case */
996 tp->nr_args++;
997
995 /* Parse argument name */ 998 /* Parse argument name */
996 arg = strchr(argv[i], '='); 999 arg = strchr(argv[i], '=');
997 if (arg) 1000 if (arg) {
998 *arg++ = '\0'; 1001 *arg++ = '\0';
999 else 1002 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
1003 } else {
1000 arg = argv[i]; 1004 arg = argv[i];
1005 /* If argument name is omitted, set "argN" */
1006 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
1007 tp->args[i].name = kstrdup(buf, GFP_KERNEL);
1008 }
1001 1009
1002 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
1003 if (!tp->args[i].name) { 1010 if (!tp->args[i].name) {
1004 pr_info("Failed to allocate argument%d name '%s'.\n", 1011 pr_info("Failed to allocate argument[%d] name.\n", i);
1005 i, argv[i]);
1006 ret = -ENOMEM; 1012 ret = -ENOMEM;
1007 goto error; 1013 goto error;
1008 } 1014 }
1009 tmp = strchr(tp->args[i].name, ':'); 1015
1010 if (tmp) 1016 if (!is_good_name(tp->args[i].name)) {
1011 *tmp = '_'; /* convert : to _ */ 1017 pr_info("Invalid argument[%d] name: %s\n",
1018 i, tp->args[i].name);
1019 ret = -EINVAL;
1020 goto error;
1021 }
1012 1022
1013 if (conflict_field_name(tp->args[i].name, tp->args, i)) { 1023 if (conflict_field_name(tp->args[i].name, tp->args, i)) {
1014 pr_info("Argument%d name '%s' conflicts with " 1024 pr_info("Argument[%d] name '%s' conflicts with "
1015 "another field.\n", i, argv[i]); 1025 "another field.\n", i, argv[i]);
1016 ret = -EINVAL; 1026 ret = -EINVAL;
1017 goto error; 1027 goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
1020 /* Parse fetch argument */ 1030 /* Parse fetch argument */
1021 ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); 1031 ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
1022 if (ret) { 1032 if (ret) {
1023 pr_info("Parse error at argument%d. (%d)\n", i, ret); 1033 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
1024 kfree(tp->args[i].name);
1025 goto error; 1034 goto error;
1026 } 1035 }
1027
1028 tp->nr_args++;
1029 } 1036 }
1030 1037
1031 ret = register_trace_probe(tp); 1038 ret = register_trace_probe(tp);
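
The trace_kprobe.c hunks above rename check_event_name() to is_good_name(), apply the same check to argument names, and fall back to an "argN" name when none is given, incrementing nr_args up front so a partially parsed argument is still freed on error. A small userspace sketch of that name parsing and validation, assuming nothing beyond the C library (parse_probe_arg_name() and its buffers are invented for the example, not kernel APIs):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same rule the patch applies to events, groups and argument names:
 * must start with a letter or '_', then only alphanumerics or '_'. */
static int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name != '\0') {
		if (!isalnum((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}

/* Split "name=fetcharg" into a validated name and the fetch expression;
 * if the name is omitted, synthesize "argN" as the patch does. */
static int parse_probe_arg_name(char *token, int idx,
				char *name, size_t name_len,
				const char **fetch)
{
	char *eq = strchr(token, '=');

	if (eq) {
		*eq = '\0';
		snprintf(name, name_len, "%s", token);
		*fetch = eq + 1;
	} else {
		snprintf(name, name_len, "arg%d", idx + 1);
		*fetch = token;
	}
	return is_good_name(name) ? 0 : -1;
}

int main(void)
{
	char tok1[] = "myfield=%ax", tok2[] = "+0(%sp)", tok3[] = "bad:name=%bx";
	char name[32];
	const char *fetch;

	if (parse_probe_arg_name(tok1, 0, name, sizeof(name), &fetch) == 0)
		printf("arg0: name=%s fetch=%s\n", name, fetch);
	if (parse_probe_arg_name(tok2, 1, name, sizeof(name), &fetch) == 0)
		printf("arg1: name=%s fetch=%s\n", name, fetch);
	if (parse_probe_arg_name(tok3, 2, name, sizeof(name), &fetch) != 0)
		printf("arg2: rejected invalid name\n");
	return 0;
}
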
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0d53c8e853b1..7f9c3c52ecc1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
122 122
123void touch_softlockup_watchdog(void) 123void touch_softlockup_watchdog(void)
124{ 124{
125 __get_cpu_var(watchdog_touch_ts) = 0; 125 __raw_get_cpu_var(watchdog_touch_ts) = 0;
126} 126}
127EXPORT_SYMBOL(touch_softlockup_watchdog); 127EXPORT_SYMBOL(touch_softlockup_watchdog);
128 128
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
142#ifdef CONFIG_HARDLOCKUP_DETECTOR 142#ifdef CONFIG_HARDLOCKUP_DETECTOR
143void touch_nmi_watchdog(void) 143void touch_nmi_watchdog(void)
144{ 144{
145 __get_cpu_var(watchdog_nmi_touch) = true; 145 if (watchdog_enabled) {
146 unsigned cpu;
147
148 for_each_present_cpu(cpu) {
149 if (per_cpu(watchdog_nmi_touch, cpu) != true)
150 per_cpu(watchdog_nmi_touch, cpu) = true;
151 }
152 }
146 touch_softlockup_watchdog(); 153 touch_softlockup_watchdog();
147} 154}
148EXPORT_SYMBOL(touch_nmi_watchdog); 155EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
433 wake_up_process(p); 440 wake_up_process(p);
434 } 441 }
435 442
443 /* if any cpu succeeds, watchdog is considered enabled for the system */
444 watchdog_enabled = 1;
445
436 return 0; 446 return 0;
437} 447}
438 448
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
455 per_cpu(softlockup_watchdog, cpu) = NULL; 465 per_cpu(softlockup_watchdog, cpu) = NULL;
456 kthread_stop(p); 466 kthread_stop(p);
457 } 467 }
458
459 /* if any cpu succeeds, watchdog is considered enabled for the system */
460 watchdog_enabled = 1;
461} 468}
462 469
463static void watchdog_enable_all_cpus(void) 470static void watchdog_enable_all_cpus(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 727f24e563ae..f77afd939229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
1/* 1/*
2 * linux/kernel/workqueue.c 2 * kernel/workqueue.c - generic async execution with shared worker pool
3 * 3 *
4 * Generic mechanism for defining kernel helper threads for running 4 * Copyright (C) 2002 Ingo Molnar
5 * arbitrary tasks in process context.
6 * 5 *
7 * Started by Ingo Molnar, Copyright (C) 2002 6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
8 * 11 *
9 * Derived from the taskqueue/keventd code by: 12 * Made to use alloc_percpu by Christoph Lameter.
10 * 13 *
11 * David Woodhouse <dwmw2@infradead.org> 14 * Copyright (C) 2010 SUSE Linux Products GmbH
12 * Andrew Morton 15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 * 16 *
16 * Made to use alloc_percpu by Christoph Lameter. 17 * This is the generic async execution mechanism. Work items as are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
22 *
23 * Please read Documentation/workqueue.txt for details.
17 */ 24 */
18 25
19#include <linux/module.h> 26#include <linux/module.h>
diff --git a/lib/bug.c b/lib/bug.c
index 7cdfad88128f..19552096d16b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
72 return NULL; 72 return NULL;
73} 73}
74 74
75int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 75void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
76 struct module *mod) 76 struct module *mod)
77{ 77{
78 char *secstrings; 78 char *secstrings;
79 unsigned int i; 79 unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
97 * could potentially lead to deadlock and thus be counter-productive. 97 * could potentially lead to deadlock and thus be counter-productive.
98 */ 98 */
99 list_add(&mod->bug_list, &module_bug_list); 99 list_add(&mod->bug_list, &module_bug_list);
100
101 return 0;
102} 100}
103 101
104void module_bug_cleanup(struct module *mod) 102void module_bug_cleanup(struct module *mod)
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38b..a7616fa3162e 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
70 * element comparison is needed, so the client's cmp() 70 * element comparison is needed, so the client's cmp()
71 * routine can invoke cond_resched() periodically. 71 * routine can invoke cond_resched() periodically.
72 */ 72 */
73 (*cmp)(priv, tail, tail); 73 (*cmp)(priv, tail->next, tail->next);
74 74
75 tail->next->prev = tail; 75 tail->next->prev = tail;
76 tail = tail->next; 76 tail = tail->next;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec42868f99..4ceb05d772ae 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
248 left -= sg_size; 248 left -= sg_size;
249 249
250 sg = alloc_fn(alloc_size, gfp_mask); 250 sg = alloc_fn(alloc_size, gfp_mask);
251 if (unlikely(!sg)) 251 if (unlikely(!sg)) {
252 return -ENOMEM; 252 /*
253 * Adjust entry count to reflect that the last
254 * entry of the previous table won't be used for
255 * linkage. Without this, sg_kfree() may get
256 * confused.
257 */
258 if (prv)
259 table->nents = ++table->orig_nents;
260
261 return -ENOMEM;
262 }
253 263
254 sg_init_table(sg, alloc_size); 264 sg_init_table(sg, alloc_size);
255 table->nents = table->orig_nents += sg_size; 265 table->nents = table->orig_nents += sg_size;
diff --git a/mm/Kconfig b/mm/Kconfig
index f4e516e9c37c..f0fb9124e410 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@ config COMPACTION
189config MIGRATION 189config MIGRATION
190 bool "Page migration" 190 bool "Page migration"
191 def_bool y 191 def_bool y
192 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE 192 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
193 help 193 help
194 Allows the migration of the physical location of pages of processes 194 Allows the migration of the physical location of pages of processes
195 while the virtual addresses are not changed. This is useful in 195 while the virtual addresses are not changed. This is useful in
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index eaa4a5bbe063..65d420499a61 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
30 30
31struct backing_dev_info noop_backing_dev_info = { 31struct backing_dev_info noop_backing_dev_info = {
32 .name = "noop", 32 .name = "noop",
33 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
33}; 34};
34EXPORT_SYMBOL_GPL(noop_backing_dev_info); 35EXPORT_SYMBOL_GPL(noop_backing_dev_info);
35 36
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void)
243 err = bdi_init(&default_backing_dev_info); 244 err = bdi_init(&default_backing_dev_info);
244 if (!err) 245 if (!err)
245 bdi_register(&default_backing_dev_info, NULL, "default"); 246 bdi_register(&default_backing_dev_info, NULL, "default");
247 err = bdi_init(&noop_backing_dev_info);
246 248
247 return err; 249 return err;
248} 250}
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr)
445 switch (action) { 447 switch (action) {
446 case FORK_THREAD: 448 case FORK_THREAD:
447 __set_current_state(TASK_RUNNING); 449 __set_current_state(TASK_RUNNING);
448 task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", 450 task = kthread_create(bdi_writeback_thread, &bdi->wb,
449 dev_name(bdi->dev)); 451 "flush-%s", dev_name(bdi->dev));
450 if (IS_ERR(task)) { 452 if (IS_ERR(task)) {
451 /* 453 /*
452 * If thread creation fails, force writeout of 454 * If thread creation fails, force writeout of
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr)
457 /* 459 /*
458 * The spinlock makes sure we do not lose 460 * The spinlock makes sure we do not lose
459 * wake-ups when racing with 'bdi_queue_work()'. 461 * wake-ups when racing with 'bdi_queue_work()'.
462 * And as soon as the bdi thread is visible, we
463 * can start it.
460 */ 464 */
461 spin_lock_bh(&bdi->wb_lock); 465 spin_lock_bh(&bdi->wb_lock);
462 bdi->wb.task = task; 466 bdi->wb.task = task;
463 spin_unlock_bh(&bdi->wb_lock); 467 spin_unlock_bh(&bdi->wb_lock);
468 wake_up_process(task);
464 } 469 }
465 break; 470 break;
466 471
diff --git a/mm/bounce.c b/mm/bounce.c
index 13b6dad1eed2..1481de68184b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
116 */ 116 */
117 vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 117 vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
118 118
119 flush_dcache_page(tovec->bv_page);
120 bounce_copy_vec(tovec, vfrom); 119 bounce_copy_vec(tovec, vfrom);
120 flush_dcache_page(tovec->bv_page);
121 } 121 }
122} 122}
123 123
diff --git a/mm/compaction.c b/mm/compaction.c
index 94cce51b0b35..4d709ee59013 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
214/* Similar to reclaim, but different enough that they don't share logic */ 214/* Similar to reclaim, but different enough that they don't share logic */
215static bool too_many_isolated(struct zone *zone) 215static bool too_many_isolated(struct zone *zone)
216{ 216{
217 217 unsigned long active, inactive, isolated;
218 unsigned long inactive, isolated;
219 218
220 inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 219 inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
221 zone_page_state(zone, NR_INACTIVE_ANON); 220 zone_page_state(zone, NR_INACTIVE_ANON);
221 active = zone_page_state(zone, NR_ACTIVE_FILE) +
222 zone_page_state(zone, NR_ACTIVE_ANON);
222 isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 223 isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
223 zone_page_state(zone, NR_ISOLATED_ANON); 224 zone_page_state(zone, NR_ISOLATED_ANON);
224 225
225 return isolated > inactive; 226 return isolated > (inactive + active) / 2;
226} 227}
227 228
228/* 229/*
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dacf90a2..ec520c7b28df 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
125{ 125{
126 struct mm_struct *mm = current->mm; 126 struct mm_struct *mm = current->mm;
127 struct address_space *mapping; 127 struct address_space *mapping;
128 unsigned long end = start + size;
129 struct vm_area_struct *vma; 128 struct vm_area_struct *vma;
130 int err = -EINVAL; 129 int err = -EINVAL;
131 int has_write_lock = 0; 130 int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
142 if (start + size <= start) 141 if (start + size <= start)
143 return err; 142 return err;
144 143
144 /* Does pgoff wrap? */
145 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
146 return err;
147
145 /* Can we represent this offset inside this architecture's pte's? */ 148 /* Can we represent this offset inside this architecture's pte's? */
146#if PTE_FILE_MAX_BITS < BITS_PER_LONG 149#if PTE_FILE_MAX_BITS < BITS_PER_LONG
147 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) 150 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
168 if (!(vma->vm_flags & VM_CAN_NONLINEAR)) 171 if (!(vma->vm_flags & VM_CAN_NONLINEAR))
169 goto out; 172 goto out;
170 173
171 if (end <= start || start < vma->vm_start || end > vma->vm_end) 174 if (start < vma->vm_start || start + size > vma->vm_end)
172 goto out; 175 goto out;
173 176
174 /* Must set VM_NONLINEAR before any pages are populated. */ 177 /* Must set VM_NONLINEAR before any pages are populated. */
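
The remap_file_pages() fix adds an explicit guard against pgoff + (size >> PAGE_SHIFT) wrapping around, and rewrites the range check without the removed end variable. A minimal standalone sketch of the same unsigned-wraparound guard (the PAGE_SHIFT value and function name here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Reject a (pgoff, size) pair whose page count overflows the offset,
 * mirroring the "Does pgoff wrap?" test added to remap_file_pages(). */
static bool pgoff_range_ok(unsigned long pgoff, unsigned long size)
{
	unsigned long pages = size >> PAGE_SHIFT;

	/* Unsigned addition wraps modulo 2^BITS, so a sum smaller than
	 * either operand means the range overflowed. */
	if (pgoff + pages < pgoff)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", pgoff_range_ok(100, 1UL << 20));        /* 1: fine  */
	printf("%d\n", pgoff_range_ok(~0UL - 10, 1UL << 20));  /* 0: wraps */
	return 0;
}
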
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be788a39f..c03273807182 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
2324 * and just make the page writable */ 2324 * and just make the page writable */
2325 avoidcopy = (page_mapcount(old_page) == 1); 2325 avoidcopy = (page_mapcount(old_page) == 1);
2326 if (avoidcopy) { 2326 if (avoidcopy) {
2327 if (!trylock_page(old_page)) { 2327 if (PageAnon(old_page))
2328 if (PageAnon(old_page)) 2328 page_move_anon_rmap(old_page, vma, address);
2329 page_move_anon_rmap(old_page, vma, address);
2330 } else
2331 unlock_page(old_page);
2332 set_huge_ptep_writable(vma, address, ptep); 2329 set_huge_ptep_writable(vma, address, ptep);
2333 return 0; 2330 return 0;
2334 } 2331 }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
2404 set_huge_pte_at(mm, address, ptep, 2401 set_huge_pte_at(mm, address, ptep,
2405 make_huge_pte(vma, new_page, 1)); 2402 make_huge_pte(vma, new_page, 1));
2406 page_remove_rmap(old_page); 2403 page_remove_rmap(old_page);
2407 hugepage_add_anon_rmap(new_page, vma, address); 2404 hugepage_add_new_anon_rmap(new_page, vma, address);
2408 /* Make the old page be freed below */ 2405 /* Make the old page be freed below */
2409 new_page = old_page; 2406 new_page = old_page;
2410 mmu_notifier_invalidate_range_end(mm, 2407 mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2631 vma, address); 2628 vma, address);
2632 } 2629 }
2633 2630
2634 if (!pagecache_page) { 2631 /*
2635 page = pte_page(entry); 2632 * hugetlb_cow() requires page locks of pte_page(entry) and
2633 * pagecache_page, so here we need to take the former one
2634 * when page != pagecache_page or !pagecache_page.
2635 * Note that locking order is always pagecache_page -> page,
2636 * so no worry about deadlock.
2637 */
2638 page = pte_page(entry);
2639 if (page != pagecache_page)
2636 lock_page(page); 2640 lock_page(page);
2637 }
2638 2641
2639 spin_lock(&mm->page_table_lock); 2642 spin_lock(&mm->page_table_lock);
2640 /* Check for a racing update before calling hugetlb_cow */ 2643 /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
2661 if (pagecache_page) { 2664 if (pagecache_page) {
2662 unlock_page(pagecache_page); 2665 unlock_page(pagecache_page);
2663 put_page(pagecache_page); 2666 put_page(pagecache_page);
2664 } else {
2665 unlock_page(page);
2666 } 2667 }
2668 unlock_page(page);
2667 2669
2668out_mutex: 2670out_mutex:
2669 mutex_unlock(&hugetlb_instantiation_mutex); 2671 mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/ksm.c b/mm/ksm.c
index e2ae00458320..65ab5c7067d9 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
712 if (!ptep) 712 if (!ptep)
713 goto out; 713 goto out;
714 714
715 if (pte_write(*ptep)) { 715 if (pte_write(*ptep) || pte_dirty(*ptep)) {
716 pte_t entry; 716 pte_t entry;
717 717
718 swapped = PageSwapCache(page); 718 swapped = PageSwapCache(page);
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
735 set_pte_at(mm, addr, ptep, entry); 735 set_pte_at(mm, addr, ptep, entry);
736 goto out_unlock; 736 goto out_unlock;
737 } 737 }
738 entry = pte_wrprotect(entry); 738 if (pte_dirty(entry))
739 set_page_dirty(page);
740 entry = pte_mkclean(pte_wrprotect(entry));
739 set_pte_at_notify(mm, addr, ptep, entry); 741 set_pte_at_notify(mm, addr, ptep, entry);
740 } 742 }
741 *orig_pte = *ptep; 743 *orig_pte = *ptep;
@@ -1504,8 +1506,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
1504{ 1506{
1505 struct page *new_page; 1507 struct page *new_page;
1506 1508
1507 unlock_page(page); /* any racers will COW it, not modify it */
1508
1509 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 1509 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1510 if (new_page) { 1510 if (new_page) {
1511 copy_user_highpage(new_page, page, address, vma); 1511 copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1521,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
1521 add_page_to_unevictable_list(new_page); 1521 add_page_to_unevictable_list(new_page);
1522 } 1522 }
1523 1523
1524 page_cache_release(page);
1525 return new_page; 1524 return new_page;
1526} 1525}
1527 1526
diff --git a/mm/memory.c b/mm/memory.c
index 6b2ab1051851..0e18b4d649ec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2623 unsigned int flags, pte_t orig_pte) 2623 unsigned int flags, pte_t orig_pte)
2624{ 2624{
2625 spinlock_t *ptl; 2625 spinlock_t *ptl;
2626 struct page *page; 2626 struct page *page, *swapcache = NULL;
2627 swp_entry_t entry; 2627 swp_entry_t entry;
2628 pte_t pte; 2628 pte_t pte;
2629 struct mem_cgroup *ptr = NULL; 2629 struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2679 lock_page(page); 2679 lock_page(page);
2680 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2680 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2681 2681
2682 page = ksm_might_need_to_copy(page, vma, address); 2682 /*
2683 if (!page) { 2683 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2684 ret = VM_FAULT_OOM; 2684 * release the swapcache from under us. The page pin, and pte_same
2685 goto out; 2685 * test below, are not enough to exclude that. Even if it is still
2686 * swapcache, we need to check that the page's swap has not changed.
2687 */
2688 if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2689 goto out_page;
2690
2691 if (ksm_might_need_to_copy(page, vma, address)) {
2692 swapcache = page;
2693 page = ksm_does_need_to_copy(page, vma, address);
2694
2695 if (unlikely(!page)) {
2696 ret = VM_FAULT_OOM;
2697 page = swapcache;
2698 swapcache = NULL;
2699 goto out_page;
2700 }
2686 } 2701 }
2687 2702
2688 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { 2703 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2735 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2750 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2736 try_to_free_swap(page); 2751 try_to_free_swap(page);
2737 unlock_page(page); 2752 unlock_page(page);
2753 if (swapcache) {
2754 /*
2755 * Hold the lock to avoid the swap entry to be reused
2756 * until we take the PT lock for the pte_same() check
2757 * (to avoid false positives from pte_same). For
2758 * further safety release the lock after the swap_free
2759 * so that the swap count won't change under a
2760 * parallel locked swapcache.
2761 */
2762 unlock_page(swapcache);
2763 page_cache_release(swapcache);
2764 }
2738 2765
2739 if (flags & FAULT_FLAG_WRITE) { 2766 if (flags & FAULT_FLAG_WRITE) {
2740 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2767 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2783,10 @@ out_page:
2756 unlock_page(page); 2783 unlock_page(page);
2757out_release: 2784out_release:
2758 page_cache_release(page); 2785 page_cache_release(page);
2786 if (swapcache) {
2787 unlock_page(swapcache);
2788 page_cache_release(swapcache);
2789 }
2759 return ret; 2790 return ret;
2760} 2791}
2761 2792
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a4cfcdc00455..dd186c1a5d53 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
584/* Return the start of the next active pageblock after a given page */ 584/* Return the start of the next active pageblock after a given page */
585static struct page *next_active_pageblock(struct page *page) 585static struct page *next_active_pageblock(struct page *page)
586{ 586{
587 int pageblocks_stride;
588
589 /* Ensure the starting page is pageblock-aligned */ 587 /* Ensure the starting page is pageblock-aligned */
590 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 588 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
591 589
592 /* Move forward by at least 1 * pageblock_nr_pages */
593 pageblocks_stride = 1;
594
595 /* If the entire pageblock is free, move to the end of free page */ 590 /* If the entire pageblock is free, move to the end of free page */
596 if (pageblock_free(page)) 591 if (pageblock_free(page)) {
597 pageblocks_stride += page_order(page) - pageblock_order; 592 int order;
593 /* be careful. we don't have locks, page_order can be changed.*/
594 order = page_order(page);
595 if ((order < MAX_ORDER) && (order >= pageblock_order))
596 return page + (1 << order);
597 }
598 598
599 return page + (pageblocks_stride * pageblock_nr_pages); 599 return page + pageblock_nr_pages;
600} 600}
601 601
602/* Checks if this range of memory is likely to be hot-removable. */ 602/* Checks if this range of memory is likely to be hot-removable. */
diff --git a/mm/mlock.c b/mm/mlock.c
index cbae7c5b9568..b70919ce4f72 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
135 } 135 }
136} 136}
137 137
138/* Is the vma a continuation of the stack vma above it? */
139static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
140{
141 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
142}
143
144static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) 138static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
145{ 139{
146 return (vma->vm_flags & VM_GROWSDOWN) && 140 return (vma->vm_flags & VM_GROWSDOWN) &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 6128dc8e5ede..00161a48a451 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
2009 removed_exe_file_vma(mm); 2009 removed_exe_file_vma(mm);
2010 fput(new->vm_file); 2010 fput(new->vm_file);
2011 } 2011 }
2012 unlink_anon_vmas(new);
2012 out_free_mpol: 2013 out_free_mpol:
2013 mpol_put(pol); 2014 mpol_put(pol);
2014 out_free_vma: 2015 out_free_vma:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f5b7d1760213..e35bfb82c855 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
87 return 1; 87 return 1;
88} 88}
89#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ 89#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
90
91#ifdef CONFIG_SMP
92/* Called when a more accurate view of NR_FREE_PAGES is needed */
93unsigned long zone_nr_free_pages(struct zone *zone)
94{
95 unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
96
97 /*
98 * While kswapd is awake, it is considered the zone is under some
99 * memory pressure. Under pressure, there is a risk that
100 * per-cpu-counter-drift will allow the min watermark to be breached
101 * potentially causing a live-lock. While kswapd is awake and
102 * free pages are low, get a better estimate for free pages
103 */
104 if (nr_free_pages < zone->percpu_drift_mark &&
105 !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
106 return zone_page_state_snapshot(zone, NR_FREE_PAGES);
107
108 return nr_free_pages;
109}
110#endif /* CONFIG_SMP */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index fc81cb22869e..4029583a1024 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
121} 121}
122 122
123/* return true if the task is not adequate as candidate victim task. */ 123/* return true if the task is not adequate as candidate victim task. */
124static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem, 124static bool oom_unkillable_task(struct task_struct *p,
125 const nodemask_t *nodemask) 125 const struct mem_cgroup *mem, const nodemask_t *nodemask)
126{ 126{
127 if (is_global_init(p)) 127 if (is_global_init(p))
128 return true; 128 return true;
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
208 */ 208 */
209 points += p->signal->oom_score_adj; 209 points += p->signal->oom_score_adj;
210 210
211 if (points < 0) 211 /*
212 return 0; 212 * Never return 0 for an eligible task that may be killed since it's
213 * possible that no single user task uses more than 0.1% of memory and
214 * no single admin task uses more than 3.0%.
215 */
216 if (points <= 0)
217 return 1;
213 return (points < 1000) ? points : 1000; 218 return (points < 1000) ? points : 1000;
214} 219}
215 220
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
339/** 344/**
340 * dump_tasks - dump current memory state of all system tasks 345 * dump_tasks - dump current memory state of all system tasks
341 * @mem: current's memory controller, if constrained 346 * @mem: current's memory controller, if constrained
347 * @nodemask: nodemask passed to page allocator for mempolicy ooms
342 * 348 *
343 * Dumps the current memory state of all system tasks, excluding kernel threads. 349 * Dumps the current memory state of all eligible tasks. Tasks not in the same
350 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
351 * are not shown.
344 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj 352 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
345 * value, oom_score_adj value, and name. 353 * value, oom_score_adj value, and name.
346 * 354 *
347 * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
348 * shown.
349 *
350 * Call with tasklist_lock read-locked. 355 * Call with tasklist_lock read-locked.
351 */ 356 */
352static void dump_tasks(const struct mem_cgroup *mem) 357static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
353{ 358{
354 struct task_struct *p; 359 struct task_struct *p;
355 struct task_struct *task; 360 struct task_struct *task;
356 361
357 pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); 362 pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
358 for_each_process(p) { 363 for_each_process(p) {
359 if (p->flags & PF_KTHREAD) 364 if (oom_unkillable_task(p, mem, nodemask))
360 continue;
361 if (mem && !task_in_mem_cgroup(p, mem))
362 continue; 365 continue;
363 366
364 task = find_lock_task_mm(p); 367 task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
381} 384}
382 385
383static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, 386static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
384 struct mem_cgroup *mem) 387 struct mem_cgroup *mem, const nodemask_t *nodemask)
385{ 388{
386 task_lock(current); 389 task_lock(current);
387 pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " 390 pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
394 mem_cgroup_print_oom_info(mem, p); 397 mem_cgroup_print_oom_info(mem, p);
395 show_mem(); 398 show_mem();
396 if (sysctl_oom_dump_tasks) 399 if (sysctl_oom_dump_tasks)
397 dump_tasks(mem); 400 dump_tasks(mem, nodemask);
398} 401}
399 402
400#define K(x) ((x) << (PAGE_SHIFT-10)) 403#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
436 unsigned int victim_points = 0; 439 unsigned int victim_points = 0;
437 440
438 if (printk_ratelimit()) 441 if (printk_ratelimit())
439 dump_header(p, gfp_mask, order, mem); 442 dump_header(p, gfp_mask, order, mem, nodemask);
440 443
441 /* 444 /*
442 * If the task is already exiting, don't alarm the sysadmin or kill 445 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
482 * Determines whether the kernel must panic because of the panic_on_oom sysctl. 485 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
483 */ 486 */
484static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 487static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
485 int order) 488 int order, const nodemask_t *nodemask)
486{ 489{
487 if (likely(!sysctl_panic_on_oom)) 490 if (likely(!sysctl_panic_on_oom))
488 return; 491 return;
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
496 return; 499 return;
497 } 500 }
498 read_lock(&tasklist_lock); 501 read_lock(&tasklist_lock);
499 dump_header(NULL, gfp_mask, order, NULL); 502 dump_header(NULL, gfp_mask, order, NULL, nodemask);
500 read_unlock(&tasklist_lock); 503 read_unlock(&tasklist_lock);
501 panic("Out of memory: %s panic_on_oom is enabled\n", 504 panic("Out of memory: %s panic_on_oom is enabled\n",
502 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); 505 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
509 unsigned int points = 0; 512 unsigned int points = 0;
510 struct task_struct *p; 513 struct task_struct *p;
511 514
512 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0); 515 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
513 limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; 516 limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
514 read_lock(&tasklist_lock); 517 read_lock(&tasklist_lock);
515retry: 518retry:
@@ -641,6 +644,7 @@ static void clear_system_oom(void)
641void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 644void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
642 int order, nodemask_t *nodemask) 645 int order, nodemask_t *nodemask)
643{ 646{
647 const nodemask_t *mpol_mask;
644 struct task_struct *p; 648 struct task_struct *p;
645 unsigned long totalpages; 649 unsigned long totalpages;
646 unsigned long freed = 0; 650 unsigned long freed = 0;
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
670 */ 674 */
671 constraint = constrained_alloc(zonelist, gfp_mask, nodemask, 675 constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
672 &totalpages); 676 &totalpages);
673 check_panic_on_oom(constraint, gfp_mask, order); 677 mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
678 check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
674 679
675 read_lock(&tasklist_lock); 680 read_lock(&tasklist_lock);
676 if (sysctl_oom_kill_allocating_task && 681 if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
688 } 693 }
689 694
690retry: 695retry:
691 p = select_bad_process(&points, totalpages, NULL, 696 p = select_bad_process(&points, totalpages, NULL, mpol_mask);
692 constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
693 NULL);
694 if (PTR_ERR(p) == -1UL) 697 if (PTR_ERR(p) == -1UL)
695 goto out; 698 goto out;
696 699
697 /* Found nothing?!?! Either we hang forever, or we panic. */ 700 /* Found nothing?!?! Either we hang forever, or we panic. */
698 if (!p) { 701 if (!p) {
699 dump_header(NULL, gfp_mask, order, NULL); 702 dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
700 read_unlock(&tasklist_lock); 703 read_unlock(&tasklist_lock);
701 panic("Out of memory and no killable processes...\n"); 704 panic("Out of memory and no killable processes...\n");
702 } 705 }
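
In the oom_kill.c changes, oom_badness() now returns 1 rather than 0 when an eligible task's adjusted score comes out non-positive, so a large negative oom_score_adj can no longer hide a task from the OOM killer entirely. A tiny sketch of just that clamping step, with the real heuristic that produces the points value left out:

#include <stdio.h>

/* Clamp an OOM "badness" score into [1, 1000] for an eligible task,
 * as the patched oom_badness() does; the real function derives points
 * from RSS, swap usage and oom_score_adj. */
static unsigned int clamp_badness(long points)
{
	if (points <= 0)
		return 1;   /* still killable, just least preferred */
	return points < 1000 ? (unsigned int)points : 1000;
}

int main(void)
{
	printf("%u %u %u\n", clamp_badness(-500), clamp_badness(42),
	       clamp_badness(5000));   /* 1 42 1000 */
	return 0;
}
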
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9649f4b261e..a8cfa9cc6e86 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
588{ 588{
589 int migratetype = 0; 589 int migratetype = 0;
590 int batch_free = 0; 590 int batch_free = 0;
591 int to_free = count;
591 592
592 spin_lock(&zone->lock); 593 spin_lock(&zone->lock);
593 zone->all_unreclaimable = 0; 594 zone->all_unreclaimable = 0;
594 zone->pages_scanned = 0; 595 zone->pages_scanned = 0;
595 596
596 __mod_zone_page_state(zone, NR_FREE_PAGES, count); 597 while (to_free) {
597 while (count) {
598 struct page *page; 598 struct page *page;
599 struct list_head *list; 599 struct list_head *list;
600 600
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
619 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 619 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
620 __free_one_page(page, zone, 0, page_private(page)); 620 __free_one_page(page, zone, 0, page_private(page));
621 trace_mm_page_pcpu_drain(page, 0, page_private(page)); 621 trace_mm_page_pcpu_drain(page, 0, page_private(page));
622 } while (--count && --batch_free && !list_empty(list)); 622 } while (--to_free && --batch_free && !list_empty(list));
623 } 623 }
624 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
624 spin_unlock(&zone->lock); 625 spin_unlock(&zone->lock);
625} 626}
626 627
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
631 zone->all_unreclaimable = 0; 632 zone->all_unreclaimable = 0;
632 zone->pages_scanned = 0; 633 zone->pages_scanned = 0;
633 634
634 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
635 __free_one_page(page, zone, order, migratetype); 635 __free_one_page(page, zone, order, migratetype);
636 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
636 spin_unlock(&zone->lock); 637 spin_unlock(&zone->lock);
637} 638}
638 639
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1461{ 1462{
1462 /* free_pages my go negative - that's OK */ 1463 /* free_pages my go negative - that's OK */
1463 long min = mark; 1464 long min = mark;
1464 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 1465 long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
1465 int o; 1466 int o;
1466 1467
1467 if (alloc_flags & ALLOC_HIGH) 1468 if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1846 struct page *page = NULL; 1847 struct page *page = NULL;
1847 struct reclaim_state reclaim_state; 1848 struct reclaim_state reclaim_state;
1848 struct task_struct *p = current; 1849 struct task_struct *p = current;
1850 bool drained = false;
1849 1851
1850 cond_resched(); 1852 cond_resched();
1851 1853
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1864 1866
1865 cond_resched(); 1867 cond_resched();
1866 1868
1867 if (order != 0) 1869 if (unlikely(!(*did_some_progress)))
1868 drain_all_pages(); 1870 return NULL;
1869 1871
1870 if (likely(*did_some_progress)) 1872retry:
1871 page = get_page_from_freelist(gfp_mask, nodemask, order, 1873 page = get_page_from_freelist(gfp_mask, nodemask, order,
1872 zonelist, high_zoneidx, 1874 zonelist, high_zoneidx,
1873 alloc_flags, preferred_zone, 1875 alloc_flags, preferred_zone,
1874 migratetype); 1876 migratetype);
1877
1878 /*
1879 * If an allocation failed after direct reclaim, it could be because
1880 * pages are pinned on the per-cpu lists. Drain them and try again
1881 */
1882 if (!page && !drained) {
1883 drain_all_pages();
1884 drained = true;
1885 goto retry;
1886 }
1887
1875 return page; 1888 return page;
1876} 1889}
1877 1890
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
2423 " all_unreclaimable? %s" 2436 " all_unreclaimable? %s"
2424 "\n", 2437 "\n",
2425 zone->name, 2438 zone->name,
2426 K(zone_page_state(zone, NR_FREE_PAGES)), 2439 K(zone_nr_free_pages(zone)),
2427 K(min_wmark_pages(zone)), 2440 K(min_wmark_pages(zone)),
2428 K(low_wmark_pages(zone)), 2441 K(low_wmark_pages(zone)),
2429 K(high_wmark_pages(zone)), 2442 K(high_wmark_pages(zone)),
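
The __alloc_pages_direct_reclaim() hunk drops the unconditional order-based drain_all_pages() call in favour of a retry pattern: attempt the allocation first and, only if it fails, drain the per-CPU page lists once and try again. A sketch of that drain-once-then-retry shape with the allocator replaced by toy stand-ins (alloc_attempt() and drain_caches() are not kernel functions):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: the first attempt fails until the "cache" is drained. */
static int cache_pinned = 1;

static void *alloc_attempt(void)
{
	return cache_pinned ? NULL : malloc(64);
}

static void drain_caches(void)
{
	cache_pinned = 0;
}

/* Try the cheap path first; pay for the expensive drain only when the
 * first attempt fails, and never drain more than once -- the same shape
 * as the patched __alloc_pages_direct_reclaim(). */
static void *alloc_with_one_drain(void)
{
	void *obj;
	bool drained = false;

retry:
	obj = alloc_attempt();
	if (!obj && !drained) {
		drain_caches();
		drained = true;
		goto retry;
	}
	return obj;
}

int main(void)
{
	void *p = alloc_with_one_drain();

	printf("%s\n", p ? "allocated after drain" : "failed");
	free(p);
	return 0;
}
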
diff --git a/mm/percpu.c b/mm/percpu.c
index 58c572b18b07..c76ef3891e0d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1401,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1401 1401
1402 if (pcpu_first_unit_cpu == NR_CPUS) 1402 if (pcpu_first_unit_cpu == NR_CPUS)
1403 pcpu_first_unit_cpu = cpu; 1403 pcpu_first_unit_cpu = cpu;
1404 pcpu_last_unit_cpu = cpu;
1404 } 1405 }
1405 } 1406 }
1406 pcpu_last_unit_cpu = cpu;
1407 pcpu_nr_units = unit; 1407 pcpu_nr_units = unit;
1408 1408
1409 for_each_possible_cpu(cpu) 1409 for_each_possible_cpu(cpu)
diff --git a/mm/rmap.c b/mm/rmap.c
index f6f0d2dda2ea..92e6757f196e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
381unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 381unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
382{ 382{
383 if (PageAnon(page)) { 383 if (PageAnon(page)) {
384 if (vma->anon_vma->root != page_anon_vma(page)->root) 384 struct anon_vma *page__anon_vma = page_anon_vma(page);
385 /*
386 * Note: swapoff's unuse_vma() is more efficient with this
387 * check, and needs it to match anon_vma when KSM is active.
388 */
389 if (!vma->anon_vma || !page__anon_vma ||
390 vma->anon_vma->root != page__anon_vma->root)
385 return -EFAULT; 391 return -EFAULT;
386 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 392 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
387 if (!vma->vm_file || 393 if (!vma->vm_file ||
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
1564 struct vm_area_struct *vma, unsigned long address, int exclusive) 1570 struct vm_area_struct *vma, unsigned long address, int exclusive)
1565{ 1571{
1566 struct anon_vma *anon_vma = vma->anon_vma; 1572 struct anon_vma *anon_vma = vma->anon_vma;
1573
1567 BUG_ON(!anon_vma); 1574 BUG_ON(!anon_vma);
1568 if (!exclusive) { 1575
1569 struct anon_vma_chain *avc; 1576 if (PageAnon(page))
1570 avc = list_entry(vma->anon_vma_chain.prev, 1577 return;
1571 struct anon_vma_chain, same_vma); 1578 if (!exclusive)
1572 anon_vma = avc->anon_vma; 1579 anon_vma = anon_vma->root;
1573 } 1580
1574 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1581 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1575 page->mapping = (struct address_space *) anon_vma; 1582 page->mapping = (struct address_space *) anon_vma;
1576 page->index = linear_page_index(vma, address); 1583 page->index = linear_page_index(vma, address);
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page,
1581{ 1588{
1582 struct anon_vma *anon_vma = vma->anon_vma; 1589 struct anon_vma *anon_vma = vma->anon_vma;
1583 int first; 1590 int first;
1591
1592 BUG_ON(!PageLocked(page));
1584 BUG_ON(!anon_vma); 1593 BUG_ON(!anon_vma);
1585 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1594 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1586 first = atomic_inc_and_test(&page->_mapcount); 1595 first = atomic_inc_and_test(&page->_mapcount);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73a..7c703ff2f36f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
47long total_swap_pages; 47long total_swap_pages;
48static int least_priority; 48static int least_priority;
49 49
50static bool swap_for_hibernation;
51
52static const char Bad_file[] = "Bad swap file entry "; 50static const char Bad_file[] = "Bad swap file entry ";
53static const char Unused_file[] = "Unused swap file entry "; 51static const char Unused_file[] = "Unused swap file entry ";
54static const char Bad_offset[] = "Bad swap offset entry "; 52static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
141 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 139 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
142 if (nr_blocks) { 140 if (nr_blocks) {
143 err = blkdev_issue_discard(si->bdev, start_block, 141 err = blkdev_issue_discard(si->bdev, start_block,
144 nr_blocks, GFP_KERNEL, 142 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
145 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
146 if (err) 143 if (err)
147 return err; 144 return err;
148 cond_resched(); 145 cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
153 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 150 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
154 151
155 err = blkdev_issue_discard(si->bdev, start_block, 152 err = blkdev_issue_discard(si->bdev, start_block,
156 nr_blocks, GFP_KERNEL, 153 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
157 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
158 if (err) 154 if (err)
159 break; 155 break;
160 156
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
193 start_block <<= PAGE_SHIFT - 9; 189 start_block <<= PAGE_SHIFT - 9;
194 nr_blocks <<= PAGE_SHIFT - 9; 190 nr_blocks <<= PAGE_SHIFT - 9;
195 if (blkdev_issue_discard(si->bdev, start_block, 191 if (blkdev_issue_discard(si->bdev, start_block,
196 nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | 192 nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
197 BLKDEV_IFL_BARRIER))
198 break; 193 break;
199 } 194 }
200 195
@@ -320,10 +315,8 @@ checks:
320 if (offset > si->highest_bit) 315 if (offset > si->highest_bit)
321 scan_base = offset = si->lowest_bit; 316 scan_base = offset = si->lowest_bit;
322 317
323 /* reuse swap entry of cache-only swap if not hibernation. */ 318 /* reuse swap entry of cache-only swap if not busy. */
324 if (vm_swap_full() 319 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
325 && usage == SWAP_HAS_CACHE
326 && si->swap_map[offset] == SWAP_HAS_CACHE) {
327 int swap_was_freed; 320 int swap_was_freed;
328 spin_unlock(&swap_lock); 321 spin_unlock(&swap_lock);
329 swap_was_freed = __try_to_reclaim_swap(si, offset); 322 swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
453 spin_lock(&swap_lock); 446 spin_lock(&swap_lock);
454 if (nr_swap_pages <= 0) 447 if (nr_swap_pages <= 0)
455 goto noswap; 448 goto noswap;
456 if (swap_for_hibernation)
457 goto noswap;
458 nr_swap_pages--; 449 nr_swap_pages--;
459 450
460 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 451 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
487 return (swp_entry_t) {0}; 478 return (swp_entry_t) {0};
488} 479}
489 480
481/* The only caller of this function is now suspend routine */
482swp_entry_t get_swap_page_of_type(int type)
483{
484 struct swap_info_struct *si;
485 pgoff_t offset;
486
487 spin_lock(&swap_lock);
488 si = swap_info[type];
489 if (si && (si->flags & SWP_WRITEOK)) {
490 nr_swap_pages--;
491 /* This is called for allocating swap entry, not cache */
492 offset = scan_swap_map(si, 1);
493 if (offset) {
494 spin_unlock(&swap_lock);
495 return swp_entry(type, offset);
496 }
497 nr_swap_pages++;
498 }
499 spin_unlock(&swap_lock);
500 return (swp_entry_t) {0};
501}
502
490static struct swap_info_struct *swap_info_get(swp_entry_t entry) 503static struct swap_info_struct *swap_info_get(swp_entry_t entry)
491{ 504{
492 struct swap_info_struct *p; 505 struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
670 if (page_swapcount(page)) 683 if (page_swapcount(page))
671 return 0; 684 return 0;
672 685
686 /*
687 * Once hibernation has begun to create its image of memory,
688 * there's a danger that one of the calls to try_to_free_swap()
689 * - most probably a call from __try_to_reclaim_swap() while
690 * hibernation is allocating its own swap pages for the image,
691 * but conceivably even a call from memory reclaim - will free
692 * the swap from a page which has already been recorded in the
693 * image as a clean swapcache page, and then reuse its swap for
694 * another page of the image. On waking from hibernation, the
695 * original page might be freed under memory pressure, then
696 * later read back in from swap, now with the wrong data.
697 *
698 * Hibernation clears bits from gfp_allowed_mask to prevent
699 * memory reclaim from writing to disk, so check that here.
700 */
701 if (!(gfp_allowed_mask & __GFP_IO))
702 return 0;
703
673 delete_from_swap_cache(page); 704 delete_from_swap_cache(page);
674 SetPageDirty(page); 705 SetPageDirty(page);
675 return 1; 706 return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
746#endif 777#endif
747 778
748#ifdef CONFIG_HIBERNATION 779#ifdef CONFIG_HIBERNATION
749
750static pgoff_t hibernation_offset[MAX_SWAPFILES];
751/*
752 * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
753 * saved swap_map[] image to the disk will be an incomplete because it's
754 * changing without synchronization with hibernation snap shot.
755 * At resume, we just make swap_for_hibernation=false. We can forget
756 * used maps easily.
757 */
758void hibernation_freeze_swap(void)
759{
760 int i;
761
762 spin_lock(&swap_lock);
763
764 printk(KERN_INFO "PM: Freeze Swap\n");
765 swap_for_hibernation = true;
766 for (i = 0; i < MAX_SWAPFILES; i++)
767 hibernation_offset[i] = 1;
768 spin_unlock(&swap_lock);
769}
770
771void hibernation_thaw_swap(void)
772{
773 spin_lock(&swap_lock);
774 if (swap_for_hibernation) {
775 printk(KERN_INFO "PM: Thaw Swap\n");
776 swap_for_hibernation = false;
777 }
778 spin_unlock(&swap_lock);
779}
780
781/*
782 * Because updateing swap_map[] can make not-saved-status-change,
783 * we use our own easy allocator.
784 * Please see kernel/power/swap.c, Used swaps are recorded into
785 * RB-tree.
786 */
787swp_entry_t get_swap_for_hibernation(int type)
788{
789 pgoff_t off;
790 swp_entry_t val = {0};
791 struct swap_info_struct *si;
792
793 spin_lock(&swap_lock);
794
795 si = swap_info[type];
796 if (!si || !(si->flags & SWP_WRITEOK))
797 goto done;
798
799 for (off = hibernation_offset[type]; off < si->max; ++off) {
800 if (!si->swap_map[off])
801 break;
802 }
803 if (off < si->max) {
804 val = swp_entry(type, off);
805 hibernation_offset[type] = off + 1;
806 }
807done:
808 spin_unlock(&swap_lock);
809 return val;
810}
811
812void swap_free_for_hibernation(swp_entry_t ent)
813{
814 /* Nothing to do */
815}
816
817/* 780/*
818 * Find the swap type that corresponds to given device (if any). 781 * Find the swap type that corresponds to given device (if any).
819 * 782 *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2084 p->flags |= SWP_SOLIDSTATE; 2047 p->flags |= SWP_SOLIDSTATE;
2085 p->cluster_next = 1 + (random32() % p->highest_bit); 2048 p->cluster_next = 1 + (random32() % p->highest_bit);
2086 } 2049 }
2087 if (discard_swap(p) == 0) 2050 if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
2088 p->flags |= SWP_DISCARDABLE; 2051 p->flags |= SWP_DISCARDABLE;
2089 } 2052 }
2090 2053
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c391c320dbaf..c5dfabf25f11 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone,
1804 * If a zone is deemed to be full of pinned pages then just give it a light 1804 * If a zone is deemed to be full of pinned pages then just give it a light
1805 * scan then give up on it. 1805 * scan then give up on it.
1806 */ 1806 */
1807static bool shrink_zones(int priority, struct zonelist *zonelist, 1807static void shrink_zones(int priority, struct zonelist *zonelist,
1808 struct scan_control *sc) 1808 struct scan_control *sc)
1809{ 1809{
1810 struct zoneref *z; 1810 struct zoneref *z;
1811 struct zone *zone; 1811 struct zone *zone;
1812 bool all_unreclaimable = true;
1813 1812
1814 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1813 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1815 gfp_zone(sc->gfp_mask), sc->nodemask) { 1814 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
1827 } 1826 }
1828 1827
1829 shrink_zone(priority, zone, sc); 1828 shrink_zone(priority, zone, sc);
1830 all_unreclaimable = false;
1831 } 1829 }
1830}
1831
1832static bool zone_reclaimable(struct zone *zone)
1833{
1834 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1835}
1836
1837/*
1838 * As hibernation is going on, kswapd is frozen so that it can't mark
1839 * the zone into all_unreclaimable. It can't handle OOM during hibernation.
1840 * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
1841 */
1842static bool all_unreclaimable(struct zonelist *zonelist,
1843 struct scan_control *sc)
1844{
1845 struct zoneref *z;
1846 struct zone *zone;
1847 bool all_unreclaimable = true;
1848
1849 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1850 gfp_zone(sc->gfp_mask), sc->nodemask) {
1851 if (!populated_zone(zone))
1852 continue;
1853 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1854 continue;
1855 if (zone_reclaimable(zone)) {
1856 all_unreclaimable = false;
1857 break;
1858 }
1859 }
1860
1832 return all_unreclaimable; 1861 return all_unreclaimable;
1833} 1862}
1834 1863
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1852 struct scan_control *sc) 1881 struct scan_control *sc)
1853{ 1882{
1854 int priority; 1883 int priority;
1855 bool all_unreclaimable;
1856 unsigned long total_scanned = 0; 1884 unsigned long total_scanned = 0;
1857 struct reclaim_state *reclaim_state = current->reclaim_state; 1885 struct reclaim_state *reclaim_state = current->reclaim_state;
1858 struct zoneref *z; 1886 struct zoneref *z;
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1869 sc->nr_scanned = 0; 1897 sc->nr_scanned = 0;
1870 if (!priority) 1898 if (!priority)
1871 disable_swap_token(); 1899 disable_swap_token();
1872 all_unreclaimable = shrink_zones(priority, zonelist, sc); 1900 shrink_zones(priority, zonelist, sc);
1873 /* 1901 /*
1874 * Don't shrink slabs when reclaiming memory from 1902 * Don't shrink slabs when reclaiming memory from
1875 * over limit cgroups 1903 * over limit cgroups
@@ -1931,7 +1959,7 @@ out:
1931 return sc->nr_reclaimed; 1959 return sc->nr_reclaimed;
1932 1960
1933 /* top priority shrink_zones still had more to do? don't OOM, then */ 1961 /* top priority shrink_zones still had more to do? don't OOM, then */
1934 if (scanning_global_lru(sc) && !all_unreclaimable) 1962 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
1935 return 1; 1963 return 1;
1936 1964
1937 return 0; 1965 return 0;
@@ -2197,8 +2225,7 @@ loop_again:
2197 total_scanned += sc.nr_scanned; 2225 total_scanned += sc.nr_scanned;
2198 if (zone->all_unreclaimable) 2226 if (zone->all_unreclaimable)
2199 continue; 2227 continue;
2200 if (nr_slab == 0 && 2228 if (nr_slab == 0 && !zone_reclaimable(zone))
2201 zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
2202 zone->all_unreclaimable = 1; 2229 zone->all_unreclaimable = 1;
2203 /* 2230 /*
2204 * If we've done a decent amount of scanning and 2231 * If we've done a decent amount of scanning and
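
The vmscan.c rework moves the "is this zone still worth scanning?" decision into a zone_reclaimable() helper and re-checks it zonelist-wide in all_unreclaimable(), instead of threading a flag out of shrink_zones(). The helper itself is a simple ratio test; a sketch with invented page counts:

#include <stdbool.h>
#include <stdio.h>

/* A zone is still considered reclaimable while the pages scanned so far
 * stay under six times the reclaimable pages it holds -- the ratio used
 * by the new zone_reclaimable() helper. */
static bool zone_reclaimable(unsigned long pages_scanned,
			     unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	struct { unsigned long scanned, reclaimable; } zones[] = {
		{ 1000, 4000 },   /* barely touched: keep scanning */
		{ 30000, 4000 },  /* scanned 7.5x over: give up    */
	};

	for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		printf("zone %u: %s\n", i,
		       zone_reclaimable(zones[i].scanned, zones[i].reclaimable)
		       ? "reclaimable" : "all_unreclaimable");
	return 0;
}
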
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f389168f9a83..355a9e669aaa 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
138 int threshold; 138 int threshold;
139 139
140 for_each_populated_zone(zone) { 140 for_each_populated_zone(zone) {
141 unsigned long max_drift, tolerate_drift;
142
141 threshold = calculate_threshold(zone); 143 threshold = calculate_threshold(zone);
142 144
143 for_each_online_cpu(cpu) 145 for_each_online_cpu(cpu)
144 per_cpu_ptr(zone->pageset, cpu)->stat_threshold 146 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
145 = threshold; 147 = threshold;
148
149 /*
150 * Only set percpu_drift_mark if there is a danger that
151 * NR_FREE_PAGES reports the low watermark is ok when in fact
152 * the min watermark could be breached by an allocation
153 */
154 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
155 max_drift = num_online_cpus() * threshold;
156 if (max_drift > tolerate_drift)
157 zone->percpu_drift_mark = high_wmark_pages(zone) +
158 max_drift;
146 } 159 }
147} 160}
148 161
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
813 "\n scanned %lu" 826 "\n scanned %lu"
814 "\n spanned %lu" 827 "\n spanned %lu"
815 "\n present %lu", 828 "\n present %lu",
816 zone_page_state(zone, NR_FREE_PAGES), 829 zone_nr_free_pages(zone),
817 min_wmark_pages(zone), 830 min_wmark_pages(zone),
818 low_wmark_pages(zone), 831 low_wmark_pages(zone),
819 high_wmark_pages(zone), 832 high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
998 switch (action) { 1011 switch (action) {
999 case CPU_ONLINE: 1012 case CPU_ONLINE:
1000 case CPU_ONLINE_FROZEN: 1013 case CPU_ONLINE_FROZEN:
1014 refresh_zone_stat_thresholds();
1001 start_cpu_timer(cpu); 1015 start_cpu_timer(cpu);
1002 node_set_state(cpu_to_node(cpu), N_CPU); 1016 node_set_state(cpu_to_node(cpu), N_CPU);
1003 break; 1017 break;
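
The vmstat.c hunk sets zone->percpu_drift_mark only when the worst-case per-CPU counter drift (online CPUs times the stat threshold) could swallow the gap between the low and min watermarks; zone_nr_free_pages(), added in mm/mmzone.c earlier in this diff, then pays for an exact snapshot when the fast counter falls below that mark (the kswapd-awake check is omitted here). A back-of-the-envelope sketch of the same arithmetic with made-up page counts:

#include <stdio.h>

/* Decide whether a cheap, possibly-stale free-page counter is safe to
 * trust, using the same arithmetic as refresh_zone_stat_thresholds()
 * and zone_nr_free_pages(); all values below are invented. */
int main(void)
{
	unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;
	unsigned long stat_threshold = 64;   /* per-CPU counter slack */
	unsigned long online_cpus = 16;

	unsigned long tolerate_drift = low_wmark - min_wmark;
	unsigned long max_drift = online_cpus * stat_threshold;
	unsigned long drift_mark = 0;

	if (max_drift > tolerate_drift)
		drift_mark = high_wmark + max_drift;

	printf("max drift %lu pages, tolerated %lu -> drift mark %lu\n",
	       max_drift, tolerate_drift, drift_mark);

	/* Reading side: below the drift mark, take an exact snapshot
	 * instead of trusting the fast per-CPU-aggregated counter. */
	unsigned long fast_count = 2400;
	int need_snapshot = drift_mark && fast_count < drift_mark;
	printf("fast count %lu -> %s\n", fast_count,
	       need_snapshot ? "take exact snapshot" : "fast counter is fine");
	return 0;
}
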
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f26d023..9eb72505308f 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c)
331 } 331 }
332 } 332 }
333 333
334 if (c->tagpool) 334 if (c->tagpool) {
335 p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
335 p9_idpool_destroy(c->tagpool); 336 p9_idpool_destroy(c->tagpool);
337 }
336 338
337 /* free requests associated with tags */ 339 /* free requests associated with tags */
338 for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { 340 for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
944 int16_t nwqids, count; 946 int16_t nwqids, count;
945 947
946 err = 0; 948 err = 0;
949 wqids = NULL;
947 clnt = oldfid->clnt; 950 clnt = oldfid->clnt;
948 if (clone) { 951 if (clone) {
949 fid = p9_fid_create(clnt); 952 fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
994 else 997 else
995 fid->qid = oldfid->qid; 998 fid->qid = oldfid->qid;
996 999
1000 kfree(wqids);
997 return fid; 1001 return fid;
998 1002
999clunk_fid: 1003clunk_fid:
1004 kfree(wqids);
1000 p9_client_clunk(fid); 1005 p9_client_clunk(fid);
1001 fid = NULL; 1006 fid = NULL;
1002 1007
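
[Editor's note] The p9_client_walk() hunk initialises wqids to NULL up front and frees it on both the success path and the clunk_fid error path, so every exit releases the array exactly once. A self-contained sketch of that "NULL-initialise, free on every exit" pattern; walk_sketch and its flow are illustrative, not the 9p API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int walk_sketch(int simulate_error)
{
        int err = 0;
        int *wqids = NULL;              /* safe to free even if never allocated */

        wqids = calloc(16, sizeof(*wqids));
        if (!wqids) {
                err = -ENOMEM;
                goto out;
        }

        if (simulate_error) {           /* stands in for a failed walk RPC */
                err = -EIO;
                goto clunk;
        }

        free(wqids);                    /* success path releases it */
        return 0;

clunk:
        free(wqids);                    /* error path releases it too */
out:
        return err;
}

int main(void)
{
        printf("ok path:    %d\n", walk_sketch(0));
        printf("error path: %d\n", walk_sketch(1));
        return 0;
}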
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 0ea20c30466c..17c5ba7551a5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
426 426
427 /* Allocate an fcall for the reply */ 427 /* Allocate an fcall for the reply */
428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
429 if (!rpl_context) 429 if (!rpl_context) {
430 err = -ENOMEM;
430 goto err_close; 431 goto err_close;
432 }
431 433
432 /* 434 /*
433 * If the request has a buffer, steal it, otherwise 435 * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
445 } 447 }
446 rpl_context->rc = req->rc; 448 rpl_context->rc = req->rc;
447 if (!rpl_context->rc) { 449 if (!rpl_context->rc) {
448 kfree(rpl_context); 450 err = -ENOMEM;
449 goto err_close; 451 goto err_free2;
450 } 452 }
451 453
452 /* 454 /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
458 */ 460 */
459 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 461 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
460 err = post_recv(client, rpl_context); 462 err = post_recv(client, rpl_context);
461 if (err) { 463 if (err)
462 kfree(rpl_context->rc); 464 goto err_free1;
463 kfree(rpl_context);
464 goto err_close;
465 }
466 } else 465 } else
467 atomic_dec(&rdma->rq_count); 466 atomic_dec(&rdma->rq_count);
468 467
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
471 470
472 /* Post the request */ 471 /* Post the request */
473 c = kmalloc(sizeof *c, GFP_KERNEL); 472 c = kmalloc(sizeof *c, GFP_KERNEL);
474 if (!c) 473 if (!c) {
475 goto err_close; 474 err = -ENOMEM;
475 goto err_free1;
476 }
476 c->req = req; 477 c->req = req;
477 478
478 c->busa = ib_dma_map_single(rdma->cm_id->device, 479 c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
499 return ib_post_send(rdma->qp, &wr, &bad_wr); 500 return ib_post_send(rdma->qp, &wr, &bad_wr);
500 501
501 error: 502 error:
503 kfree(c);
504 kfree(rpl_context->rc);
505 kfree(rpl_context);
502 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 506 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
503 return -EIO; 507 return -EIO;
504 508 err_free1:
509 kfree(rpl_context->rc);
510 err_free2:
511 kfree(rpl_context);
505 err_close: 512 err_close:
506 spin_lock_irqsave(&rdma->req_lock, flags); 513 spin_lock_irqsave(&rdma->req_lock, flags);
507 if (rdma->state < P9_RDMA_CLOSING) { 514 if (rdma->state < P9_RDMA_CLOSING) {
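
[Editor's note] The rdma_request() rework replaces per-site kfree() calls with an ordered ladder of labels (err_free1 frees the reply fcall, err_free2 the context, err_close handles the connection) and makes every failure set err explicitly. A compact userspace sketch of that ladder with placeholder allocations; names and sizes are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Each later failure jumps to the label that frees everything allocated
 * so far; the labels fall through in reverse allocation order. */
static int request_sketch(int fail_at)
{
        int err = 0;
        char *ctx = NULL, *rc = NULL, *c = NULL;

        ctx = malloc(64);
        if (!ctx) {
                err = -ENOMEM;
                goto err_close;
        }

        rc = (fail_at == 1) ? NULL : malloc(64);
        if (!rc) {
                err = -ENOMEM;
                goto err_free2;         /* only ctx exists so far */
        }

        c = (fail_at == 2) ? NULL : malloc(64);
        if (!c) {
                err = -ENOMEM;
                goto err_free1;         /* rc and ctx both exist */
        }

        free(c);
        free(rc);
        free(ctx);
        return 0;

err_free1:
        free(rc);
err_free2:
        free(ctx);
err_close:
        return err;
}

int main(void)
{
        printf("%d %d %d\n", request_sketch(0), request_sketch(1), request_sketch(2));
        return 0;
}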
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 36cb66022a27..e9eaaf7d43c1 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
38static LIST_HEAD(cred_unused); 38static LIST_HEAD(cred_unused);
39static unsigned long number_cred_unused; 39static unsigned long number_cred_unused;
40 40
41#define MAX_HASHTABLE_BITS (10) 41#define MAX_HASHTABLE_BITS (14)
42static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) 42static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
43{ 43{
44 unsigned long num; 44 unsigned long num;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66bab2bb..12c485982814 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode)
745 struct rpc_inode *rpci = RPC_I(inode); 745 struct rpc_inode *rpci = RPC_I(inode);
746 struct gss_upcall_msg *gss_msg; 746 struct gss_upcall_msg *gss_msg;
747 747
748restart:
748 spin_lock(&inode->i_lock); 749 spin_lock(&inode->i_lock);
749 while (!list_empty(&rpci->in_downcall)) { 750 list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
750 751
751 gss_msg = list_entry(rpci->in_downcall.next, 752 if (!list_empty(&gss_msg->msg.list))
752 struct gss_upcall_msg, list); 753 continue;
753 gss_msg->msg.errno = -EPIPE; 754 gss_msg->msg.errno = -EPIPE;
754 atomic_inc(&gss_msg->count); 755 atomic_inc(&gss_msg->count);
755 __gss_unhash_msg(gss_msg); 756 __gss_unhash_msg(gss_msg);
756 spin_unlock(&inode->i_lock); 757 spin_unlock(&inode->i_lock);
757 gss_release_msg(gss_msg); 758 gss_release_msg(gss_msg);
758 spin_lock(&inode->i_lock); 759 goto restart;
759 } 760 }
760 spin_unlock(&inode->i_lock); 761 spin_unlock(&inode->i_lock);
761 762
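
[Editor's note] gss_pipe_release() now restarts the list walk from the head every time it drops inode->i_lock to release a message, instead of resuming an iteration whose cursor may be stale. A pthread-based sketch of the "unlock, act, goto restart" pattern; the singly linked list here is an illustrative stand-in for the kernel's list_head.

#include <pthread.h>
#include <stdio.h>

struct msg {
        struct msg *next;
        int queued;                     /* stands in for !list_empty(&msg.list) */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *head;

/* Never reuse a list cursor across an unlock: drop the lock, do the
 * work, then start the scan again from the head. */
static void release_all(void)
{
restart:
        pthread_mutex_lock(&lock);
        for (struct msg *m = head; m; m = m->next) {
                if (!m->queued)
                        continue;
                m->queued = 0;
                pthread_mutex_unlock(&lock);
                /* ...release work done without the lock held... */
                goto restart;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct msg a = { .queued = 1 }, b = { .next = &a, .queued = 1 };

        head = &b;
        release_all();
        printf("released: %d %d\n", a.queued == 0, b.queued == 0);
        return 0;
}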
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 032644610524..778e5dfc5144 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -237,6 +237,7 @@ get_key(const void *p, const void *end,
237 if (!supported_gss_krb5_enctype(alg)) { 237 if (!supported_gss_krb5_enctype(alg)) {
238 printk(KERN_WARNING "gss_kerberos_mech: unsupported " 238 printk(KERN_WARNING "gss_kerberos_mech: unsupported "
239 "encryption key algorithm %d\n", alg); 239 "encryption key algorithm %d\n", alg);
240 p = ERR_PTR(-EINVAL);
240 goto out_err; 241 goto out_err;
241 } 242 }
242 p = simple_get_netobj(p, end, &key); 243 p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
282 ctx->enctype = ENCTYPE_DES_CBC_RAW; 283 ctx->enctype = ENCTYPE_DES_CBC_RAW;
283 284
284 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); 285 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
285 if (ctx->gk5e == NULL) 286 if (ctx->gk5e == NULL) {
287 p = ERR_PTR(-EINVAL);
286 goto out_err; 288 goto out_err;
289 }
287 290
288 /* The downcall format was designed before we completely understood 291 /* The downcall format was designed before we completely understood
289 * the uses of the context fields; so it includes some stuff we 292 * the uses of the context fields; so it includes some stuff we
290 * just give some minimal sanity-checking, and some we ignore 293 * just give some minimal sanity-checking, and some we ignore
291 * completely (like the next twenty bytes): */ 294 * completely (like the next twenty bytes): */
292 if (unlikely(p + 20 > end || p + 20 < p)) 295 if (unlikely(p + 20 > end || p + 20 < p)) {
296 p = ERR_PTR(-EFAULT);
293 goto out_err; 297 goto out_err;
298 }
294 p += 20; 299 p += 20;
295 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 300 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
296 if (IS_ERR(p)) 301 if (IS_ERR(p))
@@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
619 if (ctx->seq_send64 != ctx->seq_send) { 624 if (ctx->seq_send64 != ctx->seq_send) {
620 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, 625 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
621 (long unsigned)ctx->seq_send64, ctx->seq_send); 626 (long unsigned)ctx->seq_send64, ctx->seq_send);
627 p = ERR_PTR(-EINVAL);
622 goto out_err; 628 goto out_err;
623 } 629 }
624 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); 630 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
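
[Editor's note] These gss_krb5_mech.c hunks make each early exit hand back an ERR_PTR()-encoded errno instead of whatever stale pointer p happened to hold, so callers that test IS_ERR(p) see a real error. A userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention; these are simplified reimplementations, not the kernel's err.h.

#include <errno.h>
#include <stdio.h>

/* Small negative errno values are folded into the top of the address
 * space so one pointer can carry either a result or an error. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *parse(int bad_algorithm)
{
        static int key = 42;

        if (bad_algorithm)
                return ERR_PTR(-EINVAL);    /* what the hunks above add */
        return &key;
}

int main(void)
{
        void *p = parse(1);

        if (IS_ERR(p))
                printf("error %ld\n", PTR_ERR(p));
        else
                printf("key %d\n", *(int *)p);
        return 0;
}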
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index dc3f1f5ed865..adade3d313f2 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
100 if (version != 1) { 100 if (version != 1) {
101 dprintk("RPC: unknown spkm3 token format: " 101 dprintk("RPC: unknown spkm3 token format: "
102 "obsolete nfs-utils?\n"); 102 "obsolete nfs-utils?\n");
103 p = ERR_PTR(-EINVAL);
103 goto out_err_free_ctx; 104 goto out_err_free_ctx;
104 } 105 }
105 106
@@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
135 if (IS_ERR(p)) 136 if (IS_ERR(p))
136 goto out_err_free_intg_alg; 137 goto out_err_free_intg_alg;
137 138
138 if (p != end) 139 if (p != end) {
140 p = ERR_PTR(-EFAULT);
139 goto out_err_free_intg_key; 141 goto out_err_free_intg_key;
142 }
140 143
141 ctx_id->internal_ctx_id = ctx; 144 ctx_id->internal_ctx_id = ctx;
142 145
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2388d83b68ff..fa5549079d79 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
226 goto out_no_principal; 226 goto out_no_principal;
227 } 227 }
228 228
229 kref_init(&clnt->cl_kref); 229 atomic_set(&clnt->cl_count, 1);
230 230
231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
232 if (err < 0) 232 if (err < 0)
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt)
390 if (new->cl_principal == NULL) 390 if (new->cl_principal == NULL)
391 goto out_no_principal; 391 goto out_no_principal;
392 } 392 }
393 kref_init(&new->cl_kref); 393 atomic_set(&new->cl_count, 1);
394 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 394 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
395 if (err != 0) 395 if (err != 0)
396 goto out_no_path; 396 goto out_no_path;
397 if (new->cl_auth) 397 if (new->cl_auth)
398 atomic_inc(&new->cl_auth->au_count); 398 atomic_inc(&new->cl_auth->au_count);
399 xprt_get(clnt->cl_xprt); 399 xprt_get(clnt->cl_xprt);
400 kref_get(&clnt->cl_kref); 400 atomic_inc(&clnt->cl_count);
401 rpc_register_client(new); 401 rpc_register_client(new);
402 rpciod_up(); 402 rpciod_up();
403 return new; 403 return new;
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
465 * Free an RPC client 465 * Free an RPC client
466 */ 466 */
467static void 467static void
468rpc_free_client(struct kref *kref) 468rpc_free_client(struct rpc_clnt *clnt)
469{ 469{
470 struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
471
472 dprintk("RPC: destroying %s client for %s\n", 470 dprintk("RPC: destroying %s client for %s\n",
473 clnt->cl_protname, clnt->cl_server); 471 clnt->cl_protname, clnt->cl_server);
474 if (!IS_ERR(clnt->cl_path.dentry)) { 472 if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@ out_free:
495 * Free an RPC client 493 * Free an RPC client
496 */ 494 */
497static void 495static void
498rpc_free_auth(struct kref *kref) 496rpc_free_auth(struct rpc_clnt *clnt)
499{ 497{
500 struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
501
502 if (clnt->cl_auth == NULL) { 498 if (clnt->cl_auth == NULL) {
503 rpc_free_client(kref); 499 rpc_free_client(clnt);
504 return; 500 return;
505 } 501 }
506 502
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref)
509 * release remaining GSS contexts. This mechanism ensures 505 * release remaining GSS contexts. This mechanism ensures
510 * that it can do so safely. 506 * that it can do so safely.
511 */ 507 */
512 kref_init(kref); 508 atomic_inc(&clnt->cl_count);
513 rpcauth_release(clnt->cl_auth); 509 rpcauth_release(clnt->cl_auth);
514 clnt->cl_auth = NULL; 510 clnt->cl_auth = NULL;
515 kref_put(kref, rpc_free_client); 511 if (atomic_dec_and_test(&clnt->cl_count))
512 rpc_free_client(clnt);
516} 513}
517 514
518/* 515/*
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt)
525 522
526 if (list_empty(&clnt->cl_tasks)) 523 if (list_empty(&clnt->cl_tasks))
527 wake_up(&destroy_wait); 524 wake_up(&destroy_wait);
528 kref_put(&clnt->cl_kref, rpc_free_auth); 525 if (atomic_dec_and_test(&clnt->cl_count))
526 rpc_free_auth(clnt);
529} 527}
530 528
531/** 529/**
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
588 if (clnt != NULL) { 586 if (clnt != NULL) {
589 rpc_task_release_client(task); 587 rpc_task_release_client(task);
590 task->tk_client = clnt; 588 task->tk_client = clnt;
591 kref_get(&clnt->cl_kref); 589 atomic_inc(&clnt->cl_count);
592 if (clnt->cl_softrtry) 590 if (clnt->cl_softrtry)
593 task->tk_flags |= RPC_TASK_SOFT; 591 task->tk_flags |= RPC_TASK_SOFT;
594 /* Add to the client's list of all tasks */ 592 /* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task)
931 task->tk_status = 0; 929 task->tk_status = 0;
932 if (status >= 0) { 930 if (status >= 0) {
933 if (task->tk_rqstp) { 931 if (task->tk_rqstp) {
934 task->tk_action = call_allocate; 932 task->tk_action = call_refresh;
935 return; 933 return;
936 } 934 }
937 935
@@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task)
966} 964}
967 965
968/* 966/*
969 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. 967 * 2. Bind and/or refresh the credentials
968 */
969static void
970call_refresh(struct rpc_task *task)
971{
972 dprint_status(task);
973
974 task->tk_action = call_refreshresult;
975 task->tk_status = 0;
976 task->tk_client->cl_stats->rpcauthrefresh++;
977 rpcauth_refreshcred(task);
978}
979
980/*
981 * 2a. Process the results of a credential refresh
982 */
983static void
984call_refreshresult(struct rpc_task *task)
985{
986 int status = task->tk_status;
987
988 dprint_status(task);
989
990 task->tk_status = 0;
991 task->tk_action = call_allocate;
992 if (status >= 0 && rpcauth_uptodatecred(task))
993 return;
994 switch (status) {
995 case -EACCES:
996 rpc_exit(task, -EACCES);
997 return;
998 case -ENOMEM:
999 rpc_exit(task, -ENOMEM);
1000 return;
1001 case -ETIMEDOUT:
1002 rpc_delay(task, 3*HZ);
1003 }
1004 task->tk_action = call_refresh;
1005}
1006
1007/*
1008 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
970 * (Note: buffer memory is freed in xprt_release). 1009 * (Note: buffer memory is freed in xprt_release).
971 */ 1010 */
972static void 1011static void
973call_allocate(struct rpc_task *task) 1012call_allocate(struct rpc_task *task)
974{ 1013{
975 unsigned int slack = task->tk_client->cl_auth->au_cslack; 1014 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
976 struct rpc_rqst *req = task->tk_rqstp; 1015 struct rpc_rqst *req = task->tk_rqstp;
977 struct rpc_xprt *xprt = task->tk_xprt; 1016 struct rpc_xprt *xprt = task->tk_xprt;
978 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1017 struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task)
980 dprint_status(task); 1019 dprint_status(task);
981 1020
982 task->tk_status = 0; 1021 task->tk_status = 0;
983 task->tk_action = call_refresh; 1022 task->tk_action = call_bind;
984 1023
985 if (req->rq_buffer) 1024 if (req->rq_buffer)
986 return; 1025 return;
@@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task)
1017 rpc_exit(task, -ERESTARTSYS); 1056 rpc_exit(task, -ERESTARTSYS);
1018} 1057}
1019 1058
1020/*
1021 * 2a. Bind and/or refresh the credentials
1022 */
1023static void
1024call_refresh(struct rpc_task *task)
1025{
1026 dprint_status(task);
1027
1028 task->tk_action = call_refreshresult;
1029 task->tk_status = 0;
1030 task->tk_client->cl_stats->rpcauthrefresh++;
1031 rpcauth_refreshcred(task);
1032}
1033
1034/*
1035 * 2b. Process the results of a credential refresh
1036 */
1037static void
1038call_refreshresult(struct rpc_task *task)
1039{
1040 int status = task->tk_status;
1041
1042 dprint_status(task);
1043
1044 task->tk_status = 0;
1045 task->tk_action = call_bind;
1046 if (status >= 0 && rpcauth_uptodatecred(task))
1047 return;
1048 switch (status) {
1049 case -EACCES:
1050 rpc_exit(task, -EACCES);
1051 return;
1052 case -ENOMEM:
1053 rpc_exit(task, -ENOMEM);
1054 return;
1055 case -ETIMEDOUT:
1056 rpc_delay(task, 3*HZ);
1057 }
1058 task->tk_action = call_refresh;
1059}
1060
1061static inline int 1059static inline int
1062rpc_task_need_encode(struct rpc_task *task) 1060rpc_task_need_encode(struct rpc_task *task)
1063{ 1061{
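
[Editor's note] The clnt.c changes convert cl_kref into a plain atomic count managed with atomic_set()/atomic_inc()/atomic_dec_and_test(), and move the credential refresh step ahead of buffer allocation in the RPC state machine. A C11 stdatomic sketch of the take/put half of that change; struct and function names here are illustrative, not the sunrpc API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct clnt_sketch {
        atomic_int count;
        /* ...rest of the client state... */
};

static struct clnt_sketch *clnt_new(void)
{
        struct clnt_sketch *c = malloc(sizeof(*c));

        if (c)
                atomic_init(&c->count, 1);  /* like atomic_set(&clnt->cl_count, 1) */
        return c;
}

static void clnt_get(struct clnt_sketch *c)
{
        atomic_fetch_add(&c->count, 1);     /* like atomic_inc(&clnt->cl_count) */
}

static void clnt_put(struct clnt_sketch *c)
{
        /* like atomic_dec_and_test(): free only when the last reference drops */
        if (atomic_fetch_sub(&c->count, 1) == 1) {
                printf("freeing client\n");
                free(c);
        }
}

int main(void)
{
        struct clnt_sketch *c = clnt_new();

        clnt_get(c);    /* a task takes a reference */
        clnt_put(c);    /* the task releases it */
        clnt_put(c);    /* creator's reference: last put frees */
        return 0;
}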
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 95ccbcf45d3e..8c8eef2b8f26 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
48 return; 48 return;
49 do { 49 do {
50 msg = list_entry(head->next, struct rpc_pipe_msg, list); 50 msg = list_entry(head->next, struct rpc_pipe_msg, list);
51 list_del(&msg->list); 51 list_del_init(&msg->list);
52 msg->errno = err; 52 msg->errno = err;
53 destroy_msg(msg); 53 destroy_msg(msg);
54 } while (!list_empty(head)); 54 } while (!list_empty(head));
@@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
208 if (msg != NULL) { 208 if (msg != NULL) {
209 spin_lock(&inode->i_lock); 209 spin_lock(&inode->i_lock);
210 msg->errno = -EAGAIN; 210 msg->errno = -EAGAIN;
211 list_del(&msg->list); 211 list_del_init(&msg->list);
212 spin_unlock(&inode->i_lock); 212 spin_unlock(&inode->i_lock);
213 rpci->ops->destroy_msg(msg); 213 rpci->ops->destroy_msg(msg);
214 } 214 }
@@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
268 if (res < 0 || msg->len == msg->copied) { 268 if (res < 0 || msg->len == msg->copied) {
269 filp->private_data = NULL; 269 filp->private_data = NULL;
270 spin_lock(&inode->i_lock); 270 spin_lock(&inode->i_lock);
271 list_del(&msg->list); 271 list_del_init(&msg->list);
272 spin_unlock(&inode->i_lock); 272 spin_unlock(&inode->i_lock);
273 rpci->ops->destroy_msg(msg); 273 rpci->ops->destroy_msg(msg);
274 } 274 }
@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v)
371static int 371static int
372rpc_info_open(struct inode *inode, struct file *file) 372rpc_info_open(struct inode *inode, struct file *file)
373{ 373{
374 struct rpc_clnt *clnt; 374 struct rpc_clnt *clnt = NULL;
375 int ret = single_open(file, rpc_show_info, NULL); 375 int ret = single_open(file, rpc_show_info, NULL);
376 376
377 if (!ret) { 377 if (!ret) {
378 struct seq_file *m = file->private_data; 378 struct seq_file *m = file->private_data;
379 mutex_lock(&inode->i_mutex); 379
380 clnt = RPC_I(inode)->private; 380 spin_lock(&file->f_path.dentry->d_lock);
381 if (clnt) { 381 if (!d_unhashed(file->f_path.dentry))
382 kref_get(&clnt->cl_kref); 382 clnt = RPC_I(inode)->private;
383 if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
384 spin_unlock(&file->f_path.dentry->d_lock);
383 m->private = clnt; 385 m->private = clnt;
384 } else { 386 } else {
387 spin_unlock(&file->f_path.dentry->d_lock);
385 single_release(inode, file); 388 single_release(inode, file);
386 ret = -EINVAL; 389 ret = -EINVAL;
387 } 390 }
388 mutex_unlock(&inode->i_mutex);
389 } 391 }
390 return ret; 392 return ret;
391} 393}
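
[Editor's note] rpc_info_open() now only exposes the client if it can still take a reference: it checks d_unhashed() under d_lock and then uses atomic_inc_not_zero(), which refuses to resurrect an object whose count already hit zero. A stdatomic sketch of inc-not-zero; the kernel helper is a single primitive, and this compare-exchange loop is the usual userspace emulation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if at least one reference still exists, so a
 * dying object is never revived. */
static bool inc_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_int live = 2, dead = 0;

        printf("live: %s (count %d)\n",
               inc_not_zero(&live) ? "got ref" : "refused", atomic_load(&live));
        printf("dead: %s (count %d)\n",
               inc_not_zero(&dead) ? "got ref" : "refused", atomic_load(&dead));
        return 0;
}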
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index ee03a4f0b64f..06473791c08a 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -24,6 +24,7 @@ static int __init example_init(void)
24{ 24{
25 int i; 25 int i;
26 unsigned int ret; 26 unsigned int ret;
27 unsigned int nents;
27 struct scatterlist sg[10]; 28 struct scatterlist sg[10];
28 29
29 printk(KERN_INFO "DMA fifo test start\n"); 30 printk(KERN_INFO "DMA fifo test start\n");
@@ -61,9 +62,9 @@ static int __init example_init(void)
61 * byte at the beginning, after the kfifo_skip(). 62 * byte at the beginning, after the kfifo_skip().
62 */ 63 */
63 sg_init_table(sg, ARRAY_SIZE(sg)); 64 sg_init_table(sg, ARRAY_SIZE(sg));
64 ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 65 nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
65 printk(KERN_INFO "DMA sgl entries: %d\n", ret); 66 printk(KERN_INFO "DMA sgl entries: %d\n", nents);
66 if (!ret) { 67 if (!nents) {
67 /* fifo is full and no sgl was created */ 68 /* fifo is full and no sgl was created */
68 printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); 69 printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
69 return -EIO; 70 return -EIO;
@@ -71,7 +72,7 @@ static int __init example_init(void)
71 72
72 /* receive data */ 73 /* receive data */
73 printk(KERN_INFO "scatterlist for receive:\n"); 74 printk(KERN_INFO "scatterlist for receive:\n");
74 for (i = 0; i < ARRAY_SIZE(sg); i++) { 75 for (i = 0; i < nents; i++) {
75 printk(KERN_INFO 76 printk(KERN_INFO
76 "sg[%d] -> " 77 "sg[%d] -> "
77 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", 78 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
@@ -91,16 +92,16 @@ static int __init example_init(void)
91 kfifo_dma_in_finish(&fifo, ret); 92 kfifo_dma_in_finish(&fifo, ret);
92 93
93 /* Prepare to transmit data, example: 8 bytes */ 94 /* Prepare to transmit data, example: 8 bytes */
94 ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 95 nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
95 printk(KERN_INFO "DMA sgl entries: %d\n", ret); 96 printk(KERN_INFO "DMA sgl entries: %d\n", nents);
96 if (!ret) { 97 if (!nents) {
97 /* no data was available and no sgl was created */ 98 /* no data was available and no sgl was created */
98 printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); 99 printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
99 return -EIO; 100 return -EIO;
100 } 101 }
101 102
102 printk(KERN_INFO "scatterlist for transmit:\n"); 103 printk(KERN_INFO "scatterlist for transmit:\n");
103 for (i = 0; i < ARRAY_SIZE(sg); i++) { 104 for (i = 0; i < nents; i++) {
104 printk(KERN_INFO 105 printk(KERN_INFO
105 "sg[%d] -> " 106 "sg[%d] -> "
106 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", 107 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
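
[Editor's note] The kfifo sample now keeps the return value of kfifo_dma_in_prepare()/kfifo_dma_out_prepare() in nents and iterates over only that many scatterlist entries rather than the whole fixed-size array. A tiny sketch of "loop over the count the prepare call actually returned"; the plain int array stands in for the scatterlist and prepare_sketch() is invented for illustration.

#include <stdio.h>

#define SG_MAX 10

/* Fills at most 'max' slots and returns how many it actually used,
 * possibly fewer than the array size. */
static unsigned int prepare_sketch(int *sg, unsigned int max, unsigned int want)
{
        unsigned int n = want < max ? want : max;

        for (unsigned int i = 0; i < n; i++)
                sg[i] = (int)(i + 1);
        return n;
}

int main(void)
{
        int sg[SG_MAX];
        unsigned int nents = prepare_sketch(sg, SG_MAX, 3);

        if (!nents) {
                fprintf(stderr, "nothing prepared\n");
                return 1;
        }
        /* iterate over nents, not ARRAY_SIZE(sg): the fix in the hunk above */
        for (unsigned int i = 0; i < nents; i++)
                printf("sg[%u] = %d\n", i, sg[i]);
        return 0;
}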
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index 79ab973fb43a..fc3b18d844af 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -34,12 +34,14 @@
34 * 34 *
35 */ 35 */
36 36
37#define _GNU_SOURCE
37#include <stdio.h> 38#include <stdio.h>
38#include <stdlib.h> 39#include <stdlib.h>
39#include <string.h> 40#include <string.h>
40#include <ctype.h> 41#include <ctype.h>
41#include <unistd.h> 42#include <unistd.h>
42#include <limits.h> 43#include <limits.h>
44#include <errno.h>
43#include <sys/types.h> 45#include <sys/types.h>
44#include <sys/wait.h> 46#include <sys/wait.h>
45 47
@@ -54,6 +56,7 @@ typedef void FILEONLY(char * file);
54FILEONLY *internalfunctions; 56FILEONLY *internalfunctions;
55FILEONLY *externalfunctions; 57FILEONLY *externalfunctions;
56FILEONLY *symbolsonly; 58FILEONLY *symbolsonly;
59FILEONLY *findall;
57 60
58typedef void FILELINE(char * file, char * line); 61typedef void FILELINE(char * file, char * line);
59FILELINE * singlefunctions; 62FILELINE * singlefunctions;
@@ -65,12 +68,30 @@ FILELINE * docsection;
65#define KERNELDOCPATH "scripts/" 68#define KERNELDOCPATH "scripts/"
66#define KERNELDOC "kernel-doc" 69#define KERNELDOC "kernel-doc"
67#define DOCBOOK "-docbook" 70#define DOCBOOK "-docbook"
71#define LIST "-list"
68#define FUNCTION "-function" 72#define FUNCTION "-function"
69#define NOFUNCTION "-nofunction" 73#define NOFUNCTION "-nofunction"
70#define NODOCSECTIONS "-no-doc-sections" 74#define NODOCSECTIONS "-no-doc-sections"
71 75
72static char *srctree, *kernsrctree; 76static char *srctree, *kernsrctree;
73 77
78static char **all_list = NULL;
79static int all_list_len = 0;
80
81static void consume_symbol(const char *sym)
82{
83 int i;
84
85 for (i = 0; i < all_list_len; i++) {
86 if (!all_list[i])
87 continue;
88 if (strcmp(sym, all_list[i]))
89 continue;
90 all_list[i] = NULL;
91 break;
92 }
93}
94
74static void usage (void) 95static void usage (void)
75{ 96{
76 fprintf(stderr, "Usage: docproc {doc|depend} file\n"); 97 fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type)
248 struct symfile * sym = &symfilelist[i]; 269 struct symfile * sym = &symfilelist[i];
249 for (j=0; j < sym->symbolcnt; j++) { 270 for (j=0; j < sym->symbolcnt; j++) {
250 vec[idx++] = type; 271 vec[idx++] = type;
272 consume_symbol(sym->symbollist[j].name);
251 vec[idx++] = sym->symbollist[j].name; 273 vec[idx++] = sym->symbollist[j].name;
252 } 274 }
253 } 275 }
@@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line)
287 vec[idx++] = &line[i]; 309 vec[idx++] = &line[i];
288 } 310 }
289 } 311 }
312 for (i = 0; i < idx; i++) {
313 if (strcmp(vec[i], FUNCTION))
314 continue;
315 consume_symbol(vec[i + 1]);
316 }
290 vec[idx++] = filename; 317 vec[idx++] = filename;
291 vec[idx] = NULL; 318 vec[idx] = NULL;
292 exec_kernel_doc(vec); 319 exec_kernel_doc(vec);
@@ -306,6 +333,10 @@ static void docsect(char *filename, char *line)
306 if (*s == '\n') 333 if (*s == '\n')
307 *s = '\0'; 334 *s = '\0';
308 335
336 asprintf(&s, "DOC: %s", line);
337 consume_symbol(s);
338 free(s);
339
309 vec[0] = KERNELDOC; 340 vec[0] = KERNELDOC;
310 vec[1] = DOCBOOK; 341 vec[1] = DOCBOOK;
311 vec[2] = FUNCTION; 342 vec[2] = FUNCTION;
@@ -315,6 +346,84 @@ static void docsect(char *filename, char *line)
315 exec_kernel_doc(vec); 346 exec_kernel_doc(vec);
316} 347}
317 348
349static void find_all_symbols(char *filename)
350{
351 char *vec[4]; /* kerneldoc -list file NULL */
352 pid_t pid;
353 int ret, i, count, start;
354 char real_filename[PATH_MAX + 1];
355 int pipefd[2];
356 char *data, *str;
357 size_t data_len = 0;
358
359 vec[0] = KERNELDOC;
360 vec[1] = LIST;
361 vec[2] = filename;
362 vec[3] = NULL;
363
364 if (pipe(pipefd)) {
365 perror("pipe");
366 exit(1);
367 }
368
369 switch (pid=fork()) {
370 case -1:
371 perror("fork");
372 exit(1);
373 case 0:
374 close(pipefd[0]);
375 dup2(pipefd[1], 1);
376 memset(real_filename, 0, sizeof(real_filename));
377 strncat(real_filename, kernsrctree, PATH_MAX);
378 strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
379 PATH_MAX - strlen(real_filename));
380 execvp(real_filename, vec);
381 fprintf(stderr, "exec ");
382 perror(real_filename);
383 exit(1);
384 default:
385 close(pipefd[1]);
386 data = malloc(4096);
387 do {
388 while ((ret = read(pipefd[0],
389 data + data_len,
390 4096)) > 0) {
391 data_len += ret;
392 data = realloc(data, data_len + 4096);
393 }
394 } while (ret == -EAGAIN);
395 if (ret != 0) {
396 perror("read");
397 exit(1);
398 }
399 waitpid(pid, &ret ,0);
400 }
401 if (WIFEXITED(ret))
402 exitstatus |= WEXITSTATUS(ret);
403 else
404 exitstatus = 0xff;
405
406 count = 0;
407 /* poor man's strtok, but with counting */
408 for (i = 0; i < data_len; i++) {
409 if (data[i] == '\n') {
410 count++;
411 data[i] = '\0';
412 }
413 }
414 start = all_list_len;
415 all_list_len += count;
416 all_list = realloc(all_list, sizeof(char *) * all_list_len);
417 str = data;
418 for (i = 0; i < data_len && start != all_list_len; i++) {
419 if (data[i] == '\0') {
420 all_list[start] = str;
421 str = data + i + 1;
422 start++;
423 }
424 }
425}
426
318/* 427/*
319 * Parse file, calling action specific functions for: 428 * Parse file, calling action specific functions for:
320 * 1) Lines containing !E 429 * 1) Lines containing !E
@@ -322,7 +431,8 @@ static void docsect(char *filename, char *line)
322 * 3) Lines containing !D 431 * 3) Lines containing !D
323 * 4) Lines containing !F 432 * 4) Lines containing !F
324 * 5) Lines containing !P 433 * 5) Lines containing !P
325 * 6) Default lines - lines not matching the above 434 * 6) Lines containing !C
435 * 7) Default lines - lines not matching the above
326 */ 436 */
327static void parse_file(FILE *infile) 437static void parse_file(FILE *infile)
328{ 438{
@@ -365,6 +475,12 @@ static void parse_file(FILE *infile)
365 s++; 475 s++;
366 docsection(line + 2, s); 476 docsection(line + 2, s);
367 break; 477 break;
478 case 'C':
479 while (*s && !isspace(*s)) s++;
480 *s = '\0';
481 if (findall)
482 findall(line+2);
483 break;
368 default: 484 default:
369 defaultline(line); 485 defaultline(line);
370 } 486 }
@@ -380,6 +496,7 @@ static void parse_file(FILE *infile)
380int main(int argc, char *argv[]) 496int main(int argc, char *argv[])
381{ 497{
382 FILE * infile; 498 FILE * infile;
499 int i;
383 500
384 srctree = getenv("SRCTREE"); 501 srctree = getenv("SRCTREE");
385 if (!srctree) 502 if (!srctree)
@@ -415,6 +532,7 @@ int main(int argc, char *argv[])
415 symbolsonly = find_export_symbols; 532 symbolsonly = find_export_symbols;
416 singlefunctions = noaction2; 533 singlefunctions = noaction2;
417 docsection = noaction2; 534 docsection = noaction2;
535 findall = find_all_symbols;
418 parse_file(infile); 536 parse_file(infile);
419 537
420 /* Rewind to start from beginning of file again */ 538 /* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@ int main(int argc, char *argv[])
425 symbolsonly = printline; 543 symbolsonly = printline;
426 singlefunctions = singfunc; 544 singlefunctions = singfunc;
427 docsection = docsect; 545 docsection = docsect;
546 findall = NULL;
428 547
429 parse_file(infile); 548 parse_file(infile);
549
550 for (i = 0; i < all_list_len; i++) {
551 if (!all_list[i])
552 continue;
553 fprintf(stderr, "Warning: didn't use docs for %s\n",
554 all_list[i]);
555 }
430 } 556 }
431 else if (strcmp("depend", argv[1]) == 0) 557 else if (strcmp("depend", argv[1]) == 0)
432 { 558 {
@@ -439,6 +565,7 @@ int main(int argc, char *argv[])
439 symbolsonly = adddep; 565 symbolsonly = adddep;
440 singlefunctions = adddep2; 566 singlefunctions = adddep2;
441 docsection = adddep2; 567 docsection = adddep2;
568 findall = adddep;
442 parse_file(infile); 569 parse_file(infile);
443 printf("\n"); 570 printf("\n");
444 } 571 }
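
[Editor's note] find_all_symbols() runs kernel-doc -list in a child, captures its stdout through a pipe, and splits the newline-separated output into all_list so unused documentation can be warned about later. A condensed, self-contained sketch of that fork + pipe + read-all pattern, using /bin/sh echo commands as a harmless stand-in for kernel-doc.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int pipefd[2];
        pid_t pid;
        char data[65536];
        size_t len = 0;
        ssize_t n;

        if (pipe(pipefd)) {
                perror("pipe");
                return 1;
        }
        pid = fork();
        if (pid < 0) {
                perror("fork");
                return 1;
        }
        if (pid == 0) {                 /* child: stdout goes into the pipe */
                close(pipefd[0]);
                dup2(pipefd[1], 1);
                execl("/bin/sh", "sh", "-c",
                      "echo sym_a; echo sym_b; echo DOC: sections", (char *)NULL);
                perror("exec");
                _exit(1);
        }

        close(pipefd[1]);               /* parent: slurp the child's output */
        while (len < sizeof(data) - 1 &&
               (n = read(pipefd[0], data + len, sizeof(data) - 1 - len)) > 0)
                len += (size_t)n;
        data[len] = '\0';
        waitpid(pid, NULL, 0);

        /* split on newlines, as find_all_symbols() does with in-place '\0's */
        for (char *line = strtok(data, "\n"); line; line = strtok(NULL, "\n"))
                printf("collected symbol: %s\n", line);
        return 0;
}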
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 102e1235fd5c..cdb6dc1f6458 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -44,12 +44,13 @@ use strict;
44# Note: This only supports 'c'. 44# Note: This only supports 'c'.
45 45
46# usage: 46# usage:
47# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ] 47# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
48# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile 48# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile
49# or 49# or
50# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile 50# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile
51# 51#
52# Set output format using one of -docbook -html -text or -man. Default is man. 52# Set output format using one of -docbook -html -text or -man. Default is man.
53# The -list format is for internal use by docproc.
53# 54#
54# -no-doc-sections 55# -no-doc-sections
55# Do not output DOC: sections 56# Do not output DOC: sections
@@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1",
210 $type_param, "\$1" ); 211 $type_param, "\$1" );
211my $blankline_text = ""; 212my $blankline_text = "";
212 213
214# list mode
215my %highlights_list = ( $type_constant, "\$1",
216 $type_func, "\$1",
217 $type_struct, "\$1",
218 $type_param, "\$1" );
219my $blankline_list = "";
213 220
214sub usage { 221sub usage {
215 print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n"; 222 print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
223 print " [ -no-doc-sections ]\n";
216 print " [ -function funcname [ -function funcname ...] ]\n"; 224 print " [ -function funcname [ -function funcname ...] ]\n";
217 print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; 225 print " [ -nofunction funcname [ -nofunction funcname ...] ]\n";
218 print " c source file(s) > outputfile\n"; 226 print " c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) {
318 $output_mode = "xml"; 326 $output_mode = "xml";
319 %highlights = %highlights_xml; 327 %highlights = %highlights_xml;
320 $blankline = $blankline_xml; 328 $blankline = $blankline_xml;
329 } elsif ($cmd eq "-list") {
330 $output_mode = "list";
331 %highlights = %highlights_list;
332 $blankline = $blankline_list;
321 } elsif ($cmd eq "-gnome") { 333 } elsif ($cmd eq "-gnome") {
322 $output_mode = "gnome"; 334 $output_mode = "gnome";
323 %highlights = %highlights_gnome; 335 %highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) {
1361 } 1373 }
1362} 1374}
1363 1375
1376## list mode output functions
1377
1378sub output_function_list(%) {
1379 my %args = %{$_[0]};
1380
1381 print $args{'function'} . "\n";
1382}
1383
1384# output enum in list
1385sub output_enum_list(%) {
1386 my %args = %{$_[0]};
1387 print $args{'enum'} . "\n";
1388}
1389
1390# output typedef in list
1391sub output_typedef_list(%) {
1392 my %args = %{$_[0]};
1393 print $args{'typedef'} . "\n";
1394}
1395
1396# output struct as list
1397sub output_struct_list(%) {
1398 my %args = %{$_[0]};
1399
1400 print $args{'struct'} . "\n";
1401}
1402
1403sub output_blockhead_list(%) {
1404 my %args = %{$_[0]};
1405 my ($parameter, $section);
1406
1407 foreach $section (@{$args{'sectionlist'}}) {
1408 print "DOC: $section\n";
1409 }
1410}
1411
1364## 1412##
1365# generic output function for all types (function, struct/union, typedef, enum); 1413# generic output function for all types (function, struct/union, typedef, enum);
1366# calls the generated, variable output_ function name based on 1414# calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) {
1679 foreach $px (0 .. $#prms) { 1727 foreach $px (0 .. $#prms) {
1680 $prm_clean = $prms[$px]; 1728 $prm_clean = $prms[$px];
1681 $prm_clean =~ s/\[.*\]//; 1729 $prm_clean =~ s/\[.*\]//;
1682 $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//; 1730 $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
1683 # ignore array size in a parameter string; 1731 # ignore array size in a parameter string;
1684 # however, the original param string may contain 1732 # however, the original param string may contain
1685 # spaces, e.g.: addr[6 + 2] 1733 # spaces, e.g.: addr[6 + 2]
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 3c88be946494..02baec732bb5 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@ struct aa_rlimit {
33}; 33};
34 34
35int aa_map_resource(int resource); 35int aa_map_resource(int resource);
36int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 36int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
37 struct rlimit *new_rlim); 37 unsigned int resource, struct rlimit *new_rlim);
38 38
39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); 39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
40 40
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 6e85cdb4303f..506d2baf6147 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
40 *ns_name = NULL; 40 *ns_name = NULL;
41 if (name[0] == ':') { 41 if (name[0] == ':') {
42 char *split = strchr(&name[1], ':'); 42 char *split = strchr(&name[1], ':');
43 *ns_name = skip_spaces(&name[1]);
43 if (split) { 44 if (split) {
44 /* overwrite ':' with \0 */ 45 /* overwrite ':' with \0 */
45 *split = 0; 46 *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
47 } else 48 } else
48 /* a ns name without a following profile is allowed */ 49 /* a ns name without a following profile is allowed */
49 name = NULL; 50 name = NULL;
50 *ns_name = &name[1];
51 } 51 }
52 if (name && *name == 0) 52 if (name && *name == 0)
53 name = NULL; 53 name = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f73e2c204218..cf1de4462ccd 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
614 int error = 0; 614 int error = 0;
615 615
616 if (!unconfined(profile)) 616 if (!unconfined(profile))
617 error = aa_task_setrlimit(profile, resource, new_rlim); 617 error = aa_task_setrlimit(profile, task, resource, new_rlim);
618 618
619 return error; 619 return error;
620} 620}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 19358dc14605..82396050f186 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
59{ 59{
60 struct path root, tmp; 60 struct path root, tmp;
61 char *res; 61 char *res;
62 int deleted, connected; 62 int connected, error = 0;
63 int error = 0;
64 63
65 /* Get the root we want to resolve too, released below */ 64 /* Get the root we want to resolve too, released below */
66 if (flags & PATH_CHROOT_REL) { 65 if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
74 } 73 }
75 74
76 spin_lock(&dcache_lock); 75 spin_lock(&dcache_lock);
77 /* There is a race window between path lookup here and the 76 tmp = root;
78 * need to strip the " (deleted) string that __d_path applies 77 res = __d_path(path, &tmp, buf, buflen);
79 * Detect the race and relookup the path
80 *
81 * The stripping of (deleted) is a hack that could be removed
82 * with an updated __d_path
83 */
84 do {
85 tmp = root;
86 deleted = d_unlinked(path->dentry);
87 res = __d_path(path, &tmp, buf, buflen);
88
89 } while (deleted != d_unlinked(path->dentry));
90 spin_unlock(&dcache_lock); 78 spin_unlock(&dcache_lock);
91 79
92 *name = res; 80 *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
98 *name = buf; 86 *name = buf;
99 goto out; 87 goto out;
100 } 88 }
101 if (deleted) {
102 /* On some filesystems, newly allocated dentries appear to the
103 * security_path hooks as a deleted dentry except without an
104 * inode allocated.
105 *
106 * Remove the appended deleted text and return as string for
107 * normal mediation, or auditing. The (deleted) string is
108 * guaranteed to be added in this case, so just strip it.
109 */
110 buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
111 89
112 if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { 90 /* Handle two cases:
91 * 1. A deleted dentry && profile is not allowing mediation of deleted
92 * 2. On some filesystems, newly allocated dentries appear to the
93 * security_path hooks as a deleted dentry except without an inode
94 * allocated.
95 */
96 if (d_unlinked(path->dentry) && path->dentry->d_inode &&
97 !(flags & PATH_MEDIATE_DELETED)) {
113 error = -ENOENT; 98 error = -ENOENT;
114 goto out; 99 goto out;
115 }
116 } 100 }
117 101
118 /* Determine if the path is connected to the expected root */ 102 /* Determine if the path is connected to the expected root */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 3cdc1ad0787e..52cc865f1464 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1151 /* released below */ 1151 /* released below */
1152 ns = aa_get_namespace(root); 1152 ns = aa_get_namespace(root);
1153 1153
1154 write_lock(&ns->lock);
1155 if (!name) { 1154 if (!name) {
1156 /* remove namespace - can only happen if fqname[0] == ':' */ 1155 /* remove namespace - can only happen if fqname[0] == ':' */
1156 write_lock(&ns->parent->lock);
1157 __remove_namespace(ns); 1157 __remove_namespace(ns);
1158 write_unlock(&ns->parent->lock);
1158 } else { 1159 } else {
1159 /* remove profile */ 1160 /* remove profile */
1161 write_lock(&ns->lock);
1160 profile = aa_get_profile(__lookup_profile(&ns->base, name)); 1162 profile = aa_get_profile(__lookup_profile(&ns->base, name));
1161 if (!profile) { 1163 if (!profile) {
1162 error = -ENOENT; 1164 error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1165 } 1167 }
1166 name = profile->base.hname; 1168 name = profile->base.hname;
1167 __remove_profile(profile); 1169 __remove_profile(profile);
1170 write_unlock(&ns->lock);
1168 } 1171 }
1169 write_unlock(&ns->lock);
1170 1172
1171 /* don't fail removal if audit fails */ 1173 /* don't fail removal if audit fails */
1172 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); 1174 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 4a368f1fd36d..a4136c10b1c6 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
72/** 72/**
73 * aa_task_setrlimit - test permission to set an rlimit 73 * aa_task_setrlimit - test permission to set an rlimit
74 * @profile - profile confining the task (NOT NULL) 74 * @profile - profile confining the task (NOT NULL)
75 * @task - task the resource is being set on
75 * @resource - the resource being set 76 * @resource - the resource being set
76 * @new_rlim - the new resource limit (NOT NULL) 77 * @new_rlim - the new resource limit (NOT NULL)
77 * 78 *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
79 * 80 *
80 * Returns: 0 or error code if setting resource failed 81 * Returns: 0 or error code if setting resource failed
81 */ 82 */
82int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 83int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
83 struct rlimit *new_rlim) 84 unsigned int resource, struct rlimit *new_rlim)
84{ 85{
85 int error = 0; 86 int error = 0;
86 87
87 if (profile->rlimits.mask & (1 << resource) && 88 /* TODO: extend resource control to handle other (non current)
88 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) 89 * processes. AppArmor rules currently have the implicit assumption
89 90 * that the task is setting the resource of the current process
90 error = audit_resource(profile, resource, new_rlim->rlim_max, 91 */
91 -EACCES); 92 if ((task != current->group_leader) ||
93 (profile->rlimits.mask & (1 << resource) &&
94 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
95 error = -EACCES;
92 96
93 return error; 97 return audit_resource(profile, resource, new_rlim->rlim_max, error);
94} 98}
95 99
96/** 100/**
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 16d100d3fc38..3fbcd1dda0ef 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) 35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
36 36
37/* set during initialization */ 37/* set during initialization */
38extern int iint_initialized;
38extern int ima_initialized; 39extern int ima_initialized;
39extern int ima_used_chip; 40extern int ima_used_chip;
40extern char *ima_hash; 41extern char *ima_hash;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 7625b85c2274..afba4aef812f 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -22,9 +22,10 @@
22 22
23RADIX_TREE(ima_iint_store, GFP_ATOMIC); 23RADIX_TREE(ima_iint_store, GFP_ATOMIC);
24DEFINE_SPINLOCK(ima_iint_lock); 24DEFINE_SPINLOCK(ima_iint_lock);
25
26static struct kmem_cache *iint_cache __read_mostly; 25static struct kmem_cache *iint_cache __read_mostly;
27 26
27int iint_initialized = 0;
28
28/* ima_iint_find_get - return the iint associated with an inode 29/* ima_iint_find_get - return the iint associated with an inode
29 * 30 *
30 * ima_iint_find_get gets a reference to the iint. Caller must 31 * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
141 iint_cache = 142 iint_cache =
142 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 143 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
143 SLAB_PANIC, init_once); 144 SLAB_PANIC, init_once);
145 iint_initialized = 1;
144 return 0; 146 return 0;
145} 147}
146security_initcall(ima_iintcache_init); 148security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f93641382e9f..e662b89d4079 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
148 struct ima_iint_cache *iint; 148 struct ima_iint_cache *iint;
149 int rc; 149 int rc;
150 150
151 if (!ima_initialized || !S_ISREG(inode->i_mode)) 151 if (!iint_initialized || !S_ISREG(inode->i_mode))
152 return; 152 return;
153 iint = ima_iint_find_get(inode); 153 iint = ima_iint_find_get(inode);
154 if (!iint) 154 if (!iint)
155 return; 155 return;
156 mutex_lock(&iint->mutex); 156 mutex_lock(&iint->mutex);
157 if (!ima_initialized)
158 goto out;
157 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); 159 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
158 if (rc < 0) 160 if (rc < 0)
159 goto out; 161 goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
213 struct inode *inode = file->f_dentry->d_inode; 215 struct inode *inode = file->f_dentry->d_inode;
214 struct ima_iint_cache *iint; 216 struct ima_iint_cache *iint;
215 217
216 if (!ima_initialized || !S_ISREG(inode->i_mode)) 218 if (!iint_initialized || !S_ISREG(inode->i_mode))
217 return; 219 return;
218 iint = ima_iint_find_get(inode); 220 iint = ima_iint_find_get(inode);
219 if (!iint) 221 if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
230{ 232{
231 struct inode *inode = file->f_dentry->d_inode; 233 struct inode *inode = file->f_dentry->d_inode;
232 struct ima_iint_cache *iint; 234 struct ima_iint_cache *iint;
233 int rc; 235 int rc = 0;
234 236
235 if (!ima_initialized || !S_ISREG(inode->i_mode)) 237 if (!ima_initialized || !S_ISREG(inode->i_mode))
236 return 0; 238 return 0;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index b2b0998d6abd..60924f6a52db 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void)
1272 keyring_r = NULL; 1272 keyring_r = NULL;
1273 1273
1274 me = current; 1274 me = current;
1275 rcu_read_lock();
1275 write_lock_irq(&tasklist_lock); 1276 write_lock_irq(&tasklist_lock);
1276 1277
1277 parent = me->real_parent; 1278 parent = me->real_parent;
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void)
1304 goto not_permitted; 1305 goto not_permitted;
1305 1306
1306 /* the keyrings must have the same UID */ 1307 /* the keyrings must have the same UID */
1307 if (pcred->tgcred->session_keyring->uid != mycred->euid || 1308 if ((pcred->tgcred->session_keyring &&
1309 pcred->tgcred->session_keyring->uid != mycred->euid) ||
1308 mycred->tgcred->session_keyring->uid != mycred->euid) 1310 mycred->tgcred->session_keyring->uid != mycred->euid)
1309 goto not_permitted; 1311 goto not_permitted;
1310 1312
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void)
1319 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); 1321 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
1320 1322
1321 write_unlock_irq(&tasklist_lock); 1323 write_unlock_irq(&tasklist_lock);
1324 rcu_read_unlock();
1322 if (oldcred) 1325 if (oldcred)
1323 put_cred(oldcred); 1326 put_cred(oldcred);
1324 return 0; 1327 return 0;
@@ -1327,6 +1330,7 @@ already_same:
1327 ret = 0; 1330 ret = 0;
1328not_permitted: 1331not_permitted:
1329 write_unlock_irq(&tasklist_lock); 1332 write_unlock_irq(&tasklist_lock);
1333 rcu_read_unlock();
1330 put_cred(cred); 1334 put_cred(cred);
1331 return ret; 1335 return ret;
1332 1336
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ef43995119a4..c668b447c725 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
1416 const pid_t gpid = task_pid_nr(current); 1416 const pid_t gpid = task_pid_nr(current);
1417 static const int tomoyo_buffer_len = 4096; 1417 static const int tomoyo_buffer_len = 4096;
1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); 1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
1419 pid_t ppid;
1419 if (!buffer) 1420 if (!buffer)
1420 return NULL; 1421 return NULL;
1421 do_gettimeofday(&tv); 1422 do_gettimeofday(&tv);
1423 rcu_read_lock();
1424 ppid = task_tgid_vnr(current->real_parent);
1425 rcu_read_unlock();
1422 snprintf(buffer, tomoyo_buffer_len - 1, 1426 snprintf(buffer, tomoyo_buffer_len - 1,
1423 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" 1427 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
1424 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" 1428 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
1425 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", 1429 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
1426 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, 1430 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
1427 (pid_t) sys_getpid(), (pid_t) sys_getppid(), 1431 task_tgid_vnr(current), ppid,
1428 current_uid(), current_gid(), current_euid(), 1432 current_uid(), current_gid(), current_euid(),
1429 current_egid(), current_suid(), current_sgid(), 1433 current_egid(), current_suid(), current_sgid(),
1430 current_fsuid(), current_fsgid()); 1434 current_fsuid(), current_fsgid());
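
[Editor's note] tomoyo_print_header() stops calling sys_getpid()/sys_getppid() and instead reads the parent's pid via task_tgid_vnr(current->real_parent) inside an rcu_read_lock()/rcu_read_unlock() pair, keeping the critical section to the pointer access and formatting afterwards. A userspace analogy of that shape using a rwlock instead of RCU; the lock choice and names are illustrative, not equivalent to RCU semantics.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t parent_lock = PTHREAD_RWLOCK_INITIALIZER;
static int parent_pid = 1;              /* stands in for current->real_parent */

/* Hold the read-side lock only long enough to copy the value out, then
 * do the slow snprintf() work without it. */
static void print_header(char *buf, size_t len)
{
        int ppid;

        pthread_rwlock_rdlock(&parent_lock);
        ppid = parent_pid;              /* short critical section */
        pthread_rwlock_unlock(&parent_lock);

        snprintf(buf, len, "task={ pid=%d ppid=%d }", 42, ppid);
}

int main(void)
{
        char buf[64];

        print_header(buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
}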
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 04454cb7b24a..7c66bd898782 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -689,9 +689,6 @@ struct tomoyo_profile {
689 689
690/********** Function prototypes. **********/ 690/********** Function prototypes. **********/
691 691
692extern asmlinkage long sys_getpid(void);
693extern asmlinkage long sys_getppid(void);
694
695/* Check whether the given string starts with the given keyword. */ 692/* Check whether the given string starts with the given keyword. */
696bool tomoyo_str_starts(char **src, const char *find); 693bool tomoyo_str_starts(char **src, const char *find);
697/* Get tomoyo_realpath() of current process. */ 694/* Get tomoyo_realpath() of current process. */
diff --git a/sound/core/control.c b/sound/core/control.c
index 070aab490191..45a818002d99 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -31,6 +31,7 @@
31 31
32/* max number of user-defined controls */ 32/* max number of user-defined controls */
33#define MAX_USER_CONTROLS 32 33#define MAX_USER_CONTROLS 32
34#define MAX_CONTROL_COUNT 1028
34 35
35struct snd_kctl_ioctl { 36struct snd_kctl_ioctl {
36 struct list_head list; /* list of all ioctls */ 37 struct list_head list; /* list of all ioctls */
@@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
195 196
196 if (snd_BUG_ON(!control || !control->count)) 197 if (snd_BUG_ON(!control || !control->count))
197 return NULL; 198 return NULL;
199
200 if (control->count > MAX_CONTROL_COUNT)
201 return NULL;
202
198 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); 203 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
199 if (kctl == NULL) { 204 if (kctl == NULL) {
200 snd_printk(KERN_ERR "Cannot allocate control instance\n"); 205 snd_printk(KERN_ERR "Cannot allocate control instance\n");
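
[Editor's note] snd_ctl_new() gains an upper bound on control->count before the value is multiplied into the kzalloc() size, closing the path where a huge user-supplied count wraps the allocation size. A sketch of the same guard in plain C; the MAX_CONTROL_COUNT cap is taken from the hunk, the struct and the extra SIZE_MAX check are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CONTROL_COUNT 1028          /* cap taken from the hunk above */

struct kctl_sketch {
        unsigned int count;
        unsigned char vd[];             /* per-control volatile data */
};

static struct kctl_sketch *ctl_new(unsigned int count, size_t per_ctl)
{
        if (count == 0 || count > MAX_CONTROL_COUNT)
                return NULL;            /* reject before the multiplication */
        if (per_ctl && count > (SIZE_MAX - sizeof(struct kctl_sketch)) / per_ctl)
                return NULL;            /* belt-and-braces overflow check */
        return calloc(1, sizeof(struct kctl_sketch) + count * per_ctl);
}

int main(void)
{
        struct kctl_sketch *ok = ctl_new(4, 64);
        struct kctl_sketch *bad = ctl_new(0x40000000u, 64);

        printf("count 4:          %s\n", ok ? "allocated" : "rejected");
        printf("count 0x40000000: %s\n", bad ? "allocated" : "rejected");
        free(ok);
        free(bad);
        return 0;
}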
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 204af48c5cc1..ac242a377aea 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -372,14 +372,17 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
372 struct snd_info_buffer *buffer) 372 struct snd_info_buffer *buffer)
373{ 373{
374 struct snd_pcm_substream *substream = entry->private_data; 374 struct snd_pcm_substream *substream = entry->private_data;
375 struct snd_pcm_runtime *runtime = substream->runtime; 375 struct snd_pcm_runtime *runtime;
376
377 mutex_lock(&substream->pcm->open_mutex);
378 runtime = substream->runtime;
376 if (!runtime) { 379 if (!runtime) {
377 snd_iprintf(buffer, "closed\n"); 380 snd_iprintf(buffer, "closed\n");
378 return; 381 goto unlock;
379 } 382 }
380 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { 383 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
381 snd_iprintf(buffer, "no setup\n"); 384 snd_iprintf(buffer, "no setup\n");
382 return; 385 goto unlock;
383 } 386 }
384 snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access)); 387 snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
385 snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format)); 388 snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -398,20 +401,25 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
398 snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames); 401 snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
399 } 402 }
400#endif 403#endif
404 unlock:
405 mutex_unlock(&substream->pcm->open_mutex);
401} 406}
402 407
403static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, 408static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
404 struct snd_info_buffer *buffer) 409 struct snd_info_buffer *buffer)
405{ 410{
406 struct snd_pcm_substream *substream = entry->private_data; 411 struct snd_pcm_substream *substream = entry->private_data;
407 struct snd_pcm_runtime *runtime = substream->runtime; 412 struct snd_pcm_runtime *runtime;
413
414 mutex_lock(&substream->pcm->open_mutex);
415 runtime = substream->runtime;
408 if (!runtime) { 416 if (!runtime) {
409 snd_iprintf(buffer, "closed\n"); 417 snd_iprintf(buffer, "closed\n");
410 return; 418 goto unlock;
411 } 419 }
412 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { 420 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
413 snd_iprintf(buffer, "no setup\n"); 421 snd_iprintf(buffer, "no setup\n");
414 return; 422 goto unlock;
415 } 423 }
416 snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode)); 424 snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
417 snd_iprintf(buffer, "period_step: %u\n", runtime->period_step); 425 snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -421,24 +429,29 @@ static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
421 snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold); 429 snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
422 snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size); 430 snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
423 snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary); 431 snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
432 unlock:
433 mutex_unlock(&substream->pcm->open_mutex);
424} 434}
425 435
426static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, 436static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
427 struct snd_info_buffer *buffer) 437 struct snd_info_buffer *buffer)
428{ 438{
429 struct snd_pcm_substream *substream = entry->private_data; 439 struct snd_pcm_substream *substream = entry->private_data;
430 struct snd_pcm_runtime *runtime = substream->runtime; 440 struct snd_pcm_runtime *runtime;
431 struct snd_pcm_status status; 441 struct snd_pcm_status status;
432 int err; 442 int err;
443
444 mutex_lock(&substream->pcm->open_mutex);
445 runtime = substream->runtime;
433 if (!runtime) { 446 if (!runtime) {
434 snd_iprintf(buffer, "closed\n"); 447 snd_iprintf(buffer, "closed\n");
435 return; 448 goto unlock;
436 } 449 }
437 memset(&status, 0, sizeof(status)); 450 memset(&status, 0, sizeof(status));
438 err = snd_pcm_status(substream, &status); 451 err = snd_pcm_status(substream, &status);
439 if (err < 0) { 452 if (err < 0) {
440 snd_iprintf(buffer, "error %d\n", err); 453 snd_iprintf(buffer, "error %d\n", err);
441 return; 454 goto unlock;
442 } 455 }
443 snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state)); 456 snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
444 snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid)); 457 snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid));
@@ -452,6 +465,8 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
452 snd_iprintf(buffer, "-----\n"); 465 snd_iprintf(buffer, "-----\n");
453 snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr); 466 snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr);
454 snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr); 467 snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr);
468 unlock:
469 mutex_unlock(&substream->pcm->open_mutex);
455} 470}
456 471
457#ifdef CONFIG_SND_PCM_XRUN_DEBUG 472#ifdef CONFIG_SND_PCM_XRUN_DEBUG
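
The three proc handlers above all gain the same shape: take pcm->open_mutex, re-read substream->runtime under the lock, and funnel every early exit through a single unlock label so the mutex is never left held and the runtime cannot be freed underneath the reader. A minimal user-space sketch of that lock/check/goto-unlock shape, using pthreads and invented struct names rather than the ALSA types:

#include <pthread.h>
#include <stdio.h>

struct runtime { int state; };

struct substream {
        pthread_mutex_t open_mutex;
        struct runtime *runtime;        /* NULL once the stream is closed */
};

static void proc_read(struct substream *s)
{
        struct runtime *rt;

        pthread_mutex_lock(&s->open_mutex);
        rt = s->runtime;                /* sample the pointer under the lock */
        if (!rt) {
                printf("closed\n");
                goto unlock;
        }
        printf("state: %d\n", rt->state);
unlock:
        pthread_mutex_unlock(&s->open_mutex);
}

int main(void)
{
        struct substream s = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct runtime rt = { 42 };

        proc_read(&s);                  /* prints "closed" instead of crashing */
        s.runtime = &rt;
        proc_read(&s);                  /* prints "state: 42" */
        return 0;
}
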
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 134fc6c2e08d..d4eb2ef80784 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1992,6 +1992,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
1992 substream->ops->close(substream); 1992 substream->ops->close(substream);
1993 substream->hw_opened = 0; 1993 substream->hw_opened = 0;
1994 } 1994 }
1995 if (pm_qos_request_active(&substream->latency_pm_qos_req))
1996 pm_qos_remove_request(&substream->latency_pm_qos_req);
1995 if (substream->pcm_release) { 1997 if (substream->pcm_release) {
1996 substream->pcm_release(substream); 1998 substream->pcm_release(substream);
1997 substream->pcm_release = NULL; 1999 substream->pcm_release = NULL;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index eb68326c37d4..a7868ad4d530 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
829 829
830 if (get_user(device, (int __user *)argp)) 830 if (get_user(device, (int __user *)argp))
831 return -EFAULT; 831 return -EFAULT;
832 if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
833 device = SNDRV_RAWMIDI_DEVICES - 1;
832 mutex_lock(&register_mutex); 834 mutex_lock(&register_mutex);
833 device = device < 0 ? 0 : device + 1; 835 device = device < 0 ? 0 : device + 1;
834 while (device < SNDRV_RAWMIDI_DEVICES) { 836 while (device < SNDRV_RAWMIDI_DEVICES) {
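
The rawmidi hunk clamps the requested index before the "next device" scan, so a value at or beyond SNDRV_RAWMIDI_DEVICES simply yields -1 instead of starting the walk out of range. A stand-alone sketch of that clamp-then-scan logic, with a made-up device table:

#include <stdio.h>

#define NDEVICES 8
static const int present[NDEVICES] = { 1, 0, 0, 1, 0, 0, 0, 0 };

static int next_device(int device)
{
        if (device >= NDEVICES)         /* out of range: "next" is -1 */
                device = NDEVICES - 1;
        device = device < 0 ? 0 : device + 1;
        while (device < NDEVICES) {
                if (present[device])
                        return device;
                device++;
        }
        return -1;
}

int main(void)
{
        /* expected output: 0 3 -1 */
        printf("%d %d %d\n", next_device(-1), next_device(0), next_device(100));
        return 0;
}
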
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 685712276ac9..69cd7b3c362d 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
281 return 0; 281 return 0;
282 282
283 _error: 283 _error:
284 snd_seq_oss_writeq_delete(dp->writeq);
285 snd_seq_oss_readq_delete(dp->readq);
286 snd_seq_oss_synth_cleanup(dp); 284 snd_seq_oss_synth_cleanup(dp);
287 snd_seq_oss_midi_cleanup(dp); 285 snd_seq_oss_midi_cleanup(dp);
288 delete_port(dp);
289 delete_seq_queue(dp->queue); 286 delete_seq_queue(dp->queue);
290 kfree(dp); 287 delete_port(dp);
291 288
292 return rc; 289 return rc;
293} 290}
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
350static int 347static int
351delete_port(struct seq_oss_devinfo *dp) 348delete_port(struct seq_oss_devinfo *dp)
352{ 349{
353 if (dp->port < 0) 350 if (dp->port < 0) {
351 kfree(dp);
354 return 0; 352 return 0;
353 }
355 354
356 debug_printk(("delete_port %i\n", dp->port)); 355 debug_printk(("delete_port %i\n", dp->port));
357 return snd_seq_event_port_detach(dp->cseq, dp->port); 356 return snd_seq_event_port_detach(dp->cseq, dp->port);
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index 1adb8a3c2b62..42d7844ecd0b 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak)
900 return 0; 900 return 0;
901} 901}
902#else /* !CONFIG_PROC_FS */ 902#else /* !CONFIG_PROC_FS */
903static int proc_init(struct snd_akm4xxx *ak) {} 903static int proc_init(struct snd_akm4xxx *ak) { return 0; }
904#endif 904#endif
905 905
906int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) 906int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
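
The ak4xxx change fixes a stub whose body did not match its return type: with CONFIG_PROC_FS disabled, proc_init() was declared int but returned nothing. A tiny compile-and-run illustration of keeping a disabled stub's signature honest (the CONFIG symbol here is only a stand-in):

#include <stdio.h>

/* #define CONFIG_PROC_FS */    /* stand-in for the real Kconfig option */

#ifdef CONFIG_PROC_FS
static int proc_init(void)
{
        printf("registering proc entry\n");
        return 0;
}
#else
/* the compiled-out stub must still return a value: callers check for errors */
static int proc_init(void) { return 0; }
#endif

int main(void)
{
        return proc_init() < 0 ? 1 : 0;
}
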
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 5f3e68401f90..91d6023a63e5 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
764static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; 764static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
765static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 765static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
766 766
767#ifndef MSND_CLASSIC
767static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 768static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
768 769
769#ifndef MSND_CLASSIC
770/* Extra Peripheral Configuration (Default: Disable) */ 770/* Extra Peripheral Configuration (Default: Disable) */
771static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 771static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
772static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 772static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
894 struct snd_card *card; 894 struct snd_card *card;
895 struct snd_msnd *chip; 895 struct snd_msnd *chip;
896 896
897 if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { 897 if (has_isapnp(idx)
898#ifndef MSND_CLASSIC
899 || cfg[idx] == SNDRV_AUTO_PORT
900#endif
901 ) {
898 printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); 902 printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
899 return -ENODEV; 903 return -ENODEV;
900 } 904 }
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3827092cc1d2..14829210ef0b 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
4536 cfg->hp_outs--; 4536 cfg->hp_outs--;
4537 memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, 4537 memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
4538 sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); 4538 sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
4539 memmove(sequences_hp + i - 1, sequences_hp + i, 4539 memmove(sequences_hp + i, sequences_hp + i + 1,
4540 sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); 4540 sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
4541 } 4541 }
4542 } 4542 }
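
The hda_codec hunk corrects a parallel-array removal: hp_pins and sequences_hp must both drop the entry at index i, so the second memmove has to start at i, not i - 1, or the pin/sequence pairing shifts out of step. A small self-contained reproduction of that element-removal pattern with plain int arrays:

#include <stdio.h>
#include <string.h>

static void remove_at(int *a, int *b, int *len, int i)
{
        (*len)--;
        memmove(a + i, a + i + 1, sizeof(a[0]) * (*len - i));
        memmove(b + i, b + i + 1, sizeof(b[0]) * (*len - i));
}

int main(void)
{
        int pins[] = { 10, 11, 12, 13 };
        int seqs[] = {  0,  1,  2,  3 };
        int n = 4;

        remove_at(pins, seqs, &n, 1);   /* drop the pair at index 1 */
        for (int i = 0; i < n; i++)
                printf("%d/%d ", pins[i], seqs[i]);
        printf("\n");                   /* 10/0 12/2 13/3 */
        return 0;
}
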
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1053fff4bd0a..34940a079051 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -126,6 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
126 "{Intel, ICH10}," 126 "{Intel, ICH10},"
127 "{Intel, PCH}," 127 "{Intel, PCH},"
128 "{Intel, CPT}," 128 "{Intel, CPT},"
129 "{Intel, PBG},"
129 "{Intel, SCH}," 130 "{Intel, SCH},"
130 "{ATI, SB450}," 131 "{ATI, SB450},"
131 "{ATI, SB600}," 132 "{ATI, SB600},"
@@ -2749,6 +2750,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2749 { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH }, 2750 { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
2750 /* CPT */ 2751 /* CPT */
2751 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, 2752 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
2753 /* PBG */
2754 { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
2752 /* SCH */ 2755 /* SCH */
2753 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, 2756 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
2754 /* ATI SB 450/600 */ 2757 /* ATI SB 450/600 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index b697fd2a6f8b..10bbbaf6ebc3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
3641 /* Lenovo Thinkpad T61/X61 */ 3641 /* Lenovo Thinkpad T61/X61 */
3642 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), 3642 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
3643 SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), 3643 SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
3644 SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
3644 {} 3645 {}
3645}; 3646};
3646 3647
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 4ef5efaaaef1..488fd9ade1ba 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
972 {} /* terminator */ 972 {} /* terminator */
973}; 973};
974 974
975/* Errata: CS4207 rev C0/C1/C2 Silicon
976 *
977 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
978 *
979 * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
980 * may be excessive (up to an additional 200 μA), which is most easily
981 * observed while the part is being held in reset (RESET# active low).
982 *
983 * Root Cause: At initial powerup of the device, the logic that drives
984 * the clock and write enable to the S/PDIF SRC RAMs is not properly
985 * initialized.
986 * Certain random patterns will cause a steady leakage current in those
987 * RAM cells. The issue will resolve once the SRCs are used (turned on).
988 *
989 * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
990 * blocks, which will alleviate the issue.
991 */
992
993static struct hda_verb cs_errata_init_verbs[] = {
994 {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
995 {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
996
997 {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
998 {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
999 {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
1000 {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
1001 {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
1002 {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
1003
1004 {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
1005 {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
1006
1007 {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
1008 {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
1009 {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
1010 {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
1011 {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
1012 {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
1013 {0x11, AC_VERB_SET_PROC_STATE, 0x00},
1014
1015 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
1016 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
1017 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
1018
1019 {} /* terminator */
1020};
1021
975/* SPDIF setup */ 1022/* SPDIF setup */
976static void init_digital(struct hda_codec *codec) 1023static void init_digital(struct hda_codec *codec)
977{ 1024{
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
991{ 1038{
992 struct cs_spec *spec = codec->spec; 1039 struct cs_spec *spec = codec->spec;
993 1040
1041 /* init_verb sequence for C0/C1/C2 errata*/
1042 snd_hda_sequence_write(codec, cs_errata_init_verbs);
1043
994 snd_hda_sequence_write(codec, cs_coef_init_verbs); 1044 snd_hda_sequence_write(codec, cs_coef_init_verbs);
995 1045
996 if (spec->gpio_mask) { 1046 if (spec->gpio_mask) {
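
The Cirrus errata workaround is expressed as a sentinel-terminated verb table that cs_init() replays before the usual coefficient setup. The sketch below shows only that table-plus-sequence-writer pattern; the node IDs and values are placeholders, not the real CS4207 verbs:

#include <stdio.h>

struct verb { unsigned nid, verb, param; };

/* placeholder entries; the real table toggles the S/PDIF SRC blocks */
static const struct verb errata_init_verbs[] = {
        { 0x01, 0x100, 0x0000 },
        { 0x11, 0x200, 0x9999 },
        { 0x11, 0x200, 0xa412 },
        { 0 }                   /* terminator, as in the table above */
};

static void sequence_write(const struct verb *v)
{
        for (; v->nid; v++)     /* stop at the all-zero terminator */
                printf("nid %#x verb %#x param %#x\n", v->nid, v->verb, v->param);
}

int main(void)
{
        sequence_write(errata_init_verbs);      /* replayed on every init */
        return 0;
}
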
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5cdb80edbd7f..972e7c453b3d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -116,6 +116,7 @@ struct conexant_spec {
116 unsigned int dell_vostro:1; 116 unsigned int dell_vostro:1;
117 unsigned int ideapad:1; 117 unsigned int ideapad:1;
118 unsigned int thinkpad:1; 118 unsigned int thinkpad:1;
119 unsigned int hp_laptop:1;
119 120
120 unsigned int ext_mic_present; 121 unsigned int ext_mic_present;
121 unsigned int recording; 122 unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
2299 } 2300 }
2300} 2301}
2301 2302
2303/* toggle input of built-in digital mic and mic jack appropriately */
2304static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
2305{
2306 unsigned int present;
2307
2308 present = snd_hda_jack_detect(codec, 0x1b);
2309 snd_printdd("CXT5066: external microphone present=%d\n", present);
2310 snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
2311 present ? 1 : 3);
2312}
2313
2314
2302/* toggle input of built-in digital mic and mic jack appropriately 2315/* toggle input of built-in digital mic and mic jack appropriately
2303 order is: external mic -> dock mic -> interal mic */ 2316 order is: external mic -> dock mic -> interal mic */
2304static void cxt5066_thinkpad_automic(struct hda_codec *codec) 2317static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
2408} 2421}
2409 2422
2410/* unsolicited event for jack sensing */ 2423/* unsolicited event for jack sensing */
2424static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
2425{
2426 snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
2427 switch (res >> 26) {
2428 case CONEXANT_HP_EVENT:
2429 cxt5066_hp_automute(codec);
2430 break;
2431 case CONEXANT_MIC_EVENT:
2432 cxt5066_hp_laptop_automic(codec);
2433 break;
2434 }
2435}
2436
2437/* unsolicited event for jack sensing */
2411static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) 2438static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
2412{ 2439{
2413 snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); 2440 snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
2989 { } /* end */ 3016 { } /* end */
2990}; 3017};
2991 3018
3019
3020static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
3021 {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
3022 {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
3023 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
3024 { } /* end */
3025};
3026
2992/* initialize jack-sensing, too */ 3027/* initialize jack-sensing, too */
2993static int cxt5066_init(struct hda_codec *codec) 3028static int cxt5066_init(struct hda_codec *codec)
2994{ 3029{
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
3004 cxt5066_ideapad_automic(codec); 3039 cxt5066_ideapad_automic(codec);
3005 else if (spec->thinkpad) 3040 else if (spec->thinkpad)
3006 cxt5066_thinkpad_automic(codec); 3041 cxt5066_thinkpad_automic(codec);
3042 else if (spec->hp_laptop)
3043 cxt5066_hp_laptop_automic(codec);
3007 } 3044 }
3008 cxt5066_set_mic_boost(codec); 3045 cxt5066_set_mic_boost(codec);
3009 return 0; 3046 return 0;
@@ -3031,6 +3068,7 @@ enum {
3031 CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ 3068 CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */
3032 CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ 3069 CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
3033 CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ 3070 CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
3071 CXT5066_HP_LAPTOP, /* HP Laptop */
3034 CXT5066_MODELS 3072 CXT5066_MODELS
3035}; 3073};
3036 3074
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
3041 [CXT5066_DELL_VOSTO] = "dell-vostro", 3079 [CXT5066_DELL_VOSTO] = "dell-vostro",
3042 [CXT5066_IDEAPAD] = "ideapad", 3080 [CXT5066_IDEAPAD] = "ideapad",
3043 [CXT5066_THINKPAD] = "thinkpad", 3081 [CXT5066_THINKPAD] = "thinkpad",
3082 [CXT5066_HP_LAPTOP] = "hp-laptop",
3044}; 3083};
3045 3084
3046static struct snd_pci_quirk cxt5066_cfg_tbl[] = { 3085static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,11 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3052 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), 3091 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
3053 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), 3092 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
3054 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3093 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3094 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3095 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3055 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3096 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3056 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), 3097 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3098 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
3057 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), 3099 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
3058 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), 3100 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
3059 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), 3101 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3158,23 @@ static int patch_cxt5066(struct hda_codec *codec)
3116 spec->num_init_verbs++; 3158 spec->num_init_verbs++;
3117 spec->dell_automute = 1; 3159 spec->dell_automute = 1;
3118 break; 3160 break;
3161 case CXT5066_HP_LAPTOP:
3162 codec->patch_ops.init = cxt5066_init;
3163 codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
3164 spec->init_verbs[spec->num_init_verbs] =
3165 cxt5066_init_verbs_hp_laptop;
3166 spec->num_init_verbs++;
3167 spec->hp_laptop = 1;
3168 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
3169 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
3170 /* no S/PDIF out */
3171 spec->multiout.dig_out_nid = 0;
3172 /* input source automatically selected */
3173 spec->input_mux = NULL;
3174 spec->port_d_mode = 0;
3175 spec->mic_boost = 3; /* default 30dB gain */
3176 break;
3177
3119 case CXT5066_OLPC_XO_1_5: 3178 case CXT5066_OLPC_XO_1_5:
3120 codec->patch_ops.init = cxt5066_olpc_init; 3179 codec->patch_ops.init = cxt5066_olpc_init;
3121 codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; 3180 codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 69b950d527c3..baa108b9d6aa 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -84,7 +84,7 @@ static struct hda_verb nvhdmi_basic_init_7x[] = {
84#else 84#else
85/* support all rates and formats */ 85/* support all rates and formats */
86#define SUPPORTED_RATES \ 86#define SUPPORTED_RATES \
87 (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ 87 (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
88 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\ 88 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
89 SNDRV_PCM_RATE_192000) 89 SNDRV_PCM_RATE_192000)
90#define SUPPORTED_MAXBPS 24 90#define SUPPORTED_MAXBPS 24
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 627bf9963368..a432e6efd19b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
1594 } 1594 }
1595 1595
1596 if (spec->autocfg.dig_in_pin) { 1596 if (spec->autocfg.dig_in_pin) {
1597 hda_nid_t dig_nid; 1597 dig_nid = codec->start_nid;
1598 err = snd_hda_get_connections(codec, 1598 for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
1599 spec->autocfg.dig_in_pin, 1599 unsigned int wcaps = get_wcaps(codec, dig_nid);
1600 &dig_nid, 1); 1600 if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
1601 if (err > 0) 1601 continue;
1602 spec->dig_in_nid = dig_nid; 1602 if (!(wcaps & AC_WCAP_DIGITAL))
1603 continue;
1604 if (!(wcaps & AC_WCAP_CONN_LIST))
1605 continue;
1606 err = get_connection_index(codec, dig_nid,
1607 spec->autocfg.dig_in_pin);
1608 if (err >= 0) {
1609 spec->dig_in_nid = dig_nid;
1610 break;
1611 }
1612 }
1603 } 1613 }
1604} 1614}
1605 1615
@@ -5334,6 +5344,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
5334 5344
5335static struct snd_pci_quirk beep_white_list[] = { 5345static struct snd_pci_quirk beep_white_list[] = {
5336 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), 5346 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
5347 SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
5337 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), 5348 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
5338 {} 5349 {}
5339}; 5350};
@@ -14452,6 +14463,7 @@ static void alc269_auto_init(struct hda_codec *codec)
14452 14463
14453enum { 14464enum {
14454 ALC269_FIXUP_SONY_VAIO, 14465 ALC269_FIXUP_SONY_VAIO,
14466 ALC269_FIXUP_DELL_M101Z,
14455}; 14467};
14456 14468
14457static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { 14469static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,11 +14475,20 @@ static const struct alc_fixup alc269_fixups[] = {
14463 [ALC269_FIXUP_SONY_VAIO] = { 14475 [ALC269_FIXUP_SONY_VAIO] = {
14464 .verbs = alc269_sony_vaio_fixup_verbs 14476 .verbs = alc269_sony_vaio_fixup_verbs
14465 }, 14477 },
14478 [ALC269_FIXUP_DELL_M101Z] = {
14479 .verbs = (const struct hda_verb[]) {
14480 /* Enables internal speaker */
14481 {0x20, AC_VERB_SET_COEF_INDEX, 13},
14482 {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
14483 {}
14484 }
14485 },
14466}; 14486};
14467 14487
14468static struct snd_pci_quirk alc269_fixup_tbl[] = { 14488static struct snd_pci_quirk alc269_fixup_tbl[] = {
14469 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14489 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14470 SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14490 SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14491 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
14471 {} 14492 {}
14472}; 14493};
14473 14494
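
The Realtek change stops asking the digital-in pin for its connection list and instead scans every widget for a digital audio-input node that lists the pin among its sources. A compact stand-alone version of that capability-filtered scan, over an invented widget table:

#include <stdio.h>

#define CAP_DIGITAL  0x1
#define CAP_AUD_IN   0x2

struct widget { int nid; unsigned caps; int conn[4]; int nconn; };

static const struct widget widgets[] = {
        { 0x08, CAP_AUD_IN,               { 0x18 }, 1 },  /* analogue ADC */
        { 0x0a, CAP_AUD_IN | CAP_DIGITAL, { 0x1f }, 1 },  /* S/PDIF in    */
};

static int find_dig_in(int pin)
{
        for (unsigned i = 0; i < sizeof(widgets) / sizeof(widgets[0]); i++) {
                const struct widget *w = &widgets[i];

                if (!(w->caps & CAP_AUD_IN) || !(w->caps & CAP_DIGITAL))
                        continue;
                for (int c = 0; c < w->nconn; c++)
                        if (w->conn[c] == pin)
                                return w->nid;
        }
        return -1;
}

int main(void)
{
        printf("dig_in_nid = %#x\n", find_dig_in(0x1f));  /* 0xa */
        return 0;
}
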
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4dacfc7..6c0a11adb2a8 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
543 chip->model.suspend = claro_suspend; 543 chip->model.suspend = claro_suspend;
544 chip->model.resume = claro_resume; 544 chip->model.resume = claro_resume;
545 chip->model.set_adc_params = set_ak5385_params; 545 chip->model.set_adc_params = set_ak5385_params;
546 chip->model.device_config = PLAYBACK_0_TO_I2S |
547 PLAYBACK_1_TO_SPDIF |
548 CAPTURE_0_FROM_I2S_2 |
549 CAPTURE_1_FROM_SPDIF;
546 break; 550 break;
547 } 551 }
548 if (id->driver_data == MODEL_MERIDIAN || 552 if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 6147216af744..a3409edcfb50 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
155int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); 155int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
156int oxygen_pci_resume(struct pci_dev *pci); 156int oxygen_pci_resume(struct pci_dev *pci);
157#endif 157#endif
158void oxygen_pci_shutdown(struct pci_dev *pci);
158 159
159/* oxygen_mixer.c */ 160/* oxygen_mixer.c */
160 161
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index fad03d64e3ad..7e93cf884437 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
519 } 519 }
520} 520}
521 521
522static void oxygen_card_free(struct snd_card *card) 522static void oxygen_shutdown(struct oxygen *chip)
523{ 523{
524 struct oxygen *chip = card->private_data;
525
526 spin_lock_irq(&chip->reg_lock); 524 spin_lock_irq(&chip->reg_lock);
527 chip->interrupt_mask = 0; 525 chip->interrupt_mask = 0;
528 chip->pcm_running = 0; 526 chip->pcm_running = 0;
529 oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); 527 oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
530 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); 528 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
531 spin_unlock_irq(&chip->reg_lock); 529 spin_unlock_irq(&chip->reg_lock);
530}
531
532static void oxygen_card_free(struct snd_card *card)
533{
534 struct oxygen *chip = card->private_data;
535
536 oxygen_shutdown(chip);
532 if (chip->irq >= 0) 537 if (chip->irq >= 0)
533 free_irq(chip->irq, chip); 538 free_irq(chip->irq, chip);
534 flush_scheduled_work(); 539 flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
778} 783}
779EXPORT_SYMBOL(oxygen_pci_resume); 784EXPORT_SYMBOL(oxygen_pci_resume);
780#endif /* CONFIG_PM */ 785#endif /* CONFIG_PM */
786
787void oxygen_pci_shutdown(struct pci_dev *pci)
788{
789 struct snd_card *card = pci_get_drvdata(pci);
790 struct oxygen *chip = card->private_data;
791
792 oxygen_shutdown(chip);
793 chip->model.cleanup(chip);
794}
795EXPORT_SYMBOL(oxygen_pci_shutdown);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index f03a2f2cffee..06c863e86e3d 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
95 .suspend = oxygen_pci_suspend, 95 .suspend = oxygen_pci_suspend,
96 .resume = oxygen_pci_resume, 96 .resume = oxygen_pci_resume,
97#endif 97#endif
98 .shutdown = oxygen_pci_shutdown,
98}; 99};
99 100
100static int __init alsa_card_xonar_init(void) 101static int __init alsa_card_xonar_init(void)
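
The oxygen changes pull the register-quiescing code out of oxygen_card_free() into a helper so the new PCI .shutdown hook can silence DMA and interrupts on reboot without tearing the card down. A sketch of that shared-shutdown refactor with simulated hardware state (names are illustrative):

#include <stdio.h>

struct chip { int irq_enabled; int dma_running; };

static void chip_shutdown(struct chip *c)
{
        c->dma_running = 0;     /* stop DMA */
        c->irq_enabled = 0;     /* mask interrupts */
        printf("hardware quiesced\n");
}

static void card_free(struct chip *c)
{
        chip_shutdown(c);       /* normal teardown reuses the helper */
        printf("irq released, card freed\n");
}

static void pci_shutdown_hook(struct chip *c)
{
        chip_shutdown(c);       /* same quiescing on reboot, no freeing */
}

int main(void)
{
        struct chip c = { 1, 1 };

        pci_shutdown_hook(&c);
        card_free(&c);
        return 0;
}
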
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index dbc4b89d74e4..b82c1cfa96f5 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
53 struct xonar_generic generic; 53 struct xonar_generic generic;
54 u16 wm8776_regs[0x17]; 54 u16 wm8776_regs[0x17];
55 u16 wm8766_regs[0x10]; 55 u16 wm8766_regs[0x10];
56 struct snd_kcontrol *line_adcmux_control;
57 struct snd_kcontrol *mic_adcmux_control;
56 struct snd_kcontrol *lc_controls[13]; 58 struct snd_kcontrol *lc_controls[13];
57}; 59};
58 60
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
193static void xonar_ds_cleanup(struct oxygen *chip) 195static void xonar_ds_cleanup(struct oxygen *chip)
194{ 196{
195 xonar_disable_output(chip); 197 xonar_disable_output(chip);
198 wm8776_write(chip, WM8776_RESET, 0);
196} 199}
197 200
198static void xonar_ds_suspend(struct oxygen *chip) 201static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
603{ 606{
604 struct oxygen *chip = ctl->private_data; 607 struct oxygen *chip = ctl->private_data;
605 struct xonar_wm87x6 *data = chip->model_data; 608 struct xonar_wm87x6 *data = chip->model_data;
609 struct snd_kcontrol *other_ctl;
606 unsigned int mux_bit = ctl->private_value; 610 unsigned int mux_bit = ctl->private_value;
607 u16 reg; 611 u16 reg;
608 int changed; 612 int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
610 mutex_lock(&chip->mutex); 614 mutex_lock(&chip->mutex);
611 reg = data->wm8776_regs[WM8776_ADCMUX]; 615 reg = data->wm8776_regs[WM8776_ADCMUX];
612 if (value->value.integer.value[0]) { 616 if (value->value.integer.value[0]) {
613 reg &= ~0x003;
614 reg |= mux_bit; 617 reg |= mux_bit;
618 /* line-in and mic-in are exclusive */
619 mux_bit ^= 3;
620 if (reg & mux_bit) {
621 reg &= ~mux_bit;
622 if (mux_bit == 1)
623 other_ctl = data->line_adcmux_control;
624 else
625 other_ctl = data->mic_adcmux_control;
626 snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
627 &other_ctl->id);
628 }
615 } else 629 } else
616 reg &= ~mux_bit; 630 reg &= ~mux_bit;
617 changed = reg != data->wm8776_regs[WM8776_ADCMUX]; 631 changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
963 err = snd_ctl_add(chip->card, ctl); 977 err = snd_ctl_add(chip->card, ctl);
964 if (err < 0) 978 if (err < 0)
965 return err; 979 return err;
980 if (!strcmp(ctl->id.name, "Line Capture Switch"))
981 data->line_adcmux_control = ctl;
982 else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
983 data->mic_adcmux_control = ctl;
966 } 984 }
985 if (!data->line_adcmux_control || !data->mic_adcmux_control)
986 return -ENXIO;
967 BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); 987 BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
968 for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { 988 for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
969 ctl = snd_ctl_new1(&lc_controls[i], chip); 989 ctl = snd_ctl_new1(&lc_controls[i], chip);
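
The WM87x6 hunk makes line-in and mic-in mutually exclusive: switching one on clears the other and emits a change notification for the control that was cleared, so user space stays in sync. A minimal sketch of that notify-the-other-control behaviour, with printf standing in for snd_ctl_notify():

#include <stdio.h>
#include <stdbool.h>

static bool line_on, mic_on;

static void notify(const char *name)
{
        printf("notify: %s changed\n", name);
}

static void set_switch(bool *self, bool *other, const char *other_name, bool on)
{
        *self = on;
        if (on && *other) {
                *other = false;
                notify(other_name);     /* the cleared control is reported too */
        }
}

int main(void)
{
        set_switch(&mic_on, &line_on, "Line Capture Switch", true);
        set_switch(&line_on, &mic_on, "Mic Capture Switch", true);
        printf("line=%d mic=%d\n", line_on, mic_on);    /* line=1 mic=0 */
        return 0;
}
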
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef8e81e..d6fa7bfd9aa1 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
4609 if (err < 0) 4609 if (err < 0)
4610 return err; 4610 return err;
4611 4611
4612 memset(&info, 0, sizeof(info));
4612 spin_lock_irqsave(&hdsp->lock, flags); 4613 spin_lock_irqsave(&hdsp->lock, flags);
4613 info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); 4614 info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
4614 info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); 4615 info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713d7204..0c98ef9156d8 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
4127 4127
4128 case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: 4128 case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
4129 4129
4130 memset(&info, 0, sizeof(info));
4130 spin_lock_irq(&hdspm->lock); 4131 spin_lock_irq(&hdspm->lock);
4131 info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); 4132 info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
4132 info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); 4133 info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
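
Both RME hunks add a memset() of the info structure before it is partially filled and copied out, so padding and unwritten fields cannot leak kernel stack contents to user space. The same habit in a small user-space form:

#include <stdio.h>
#include <string.h>

struct info {
        unsigned char pref_sync_ref;
        unsigned char wordclock_sync_check;
        unsigned char reserved[6];      /* never written explicitly */
};

static void fill_info(struct info *out)
{
        struct info info;

        memset(&info, 0, sizeof(info)); /* nothing left to chance */
        info.pref_sync_ref = 1;
        info.wordclock_sync_check = 0;
        *out = info;                    /* stands in for copy_to_user() */
}

int main(void)
{
        struct info i;

        fill_info(&i);
        printf("%u %u %u\n", i.pref_sync_ref, i.wordclock_sync_check, i.reserved[0]);
        return 0;
}
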
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 2f12da4da561..581a670e8261 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -579,7 +579,7 @@ static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
579 rate * delay_ms / 1000) 579 rate * delay_ms / 1000)
580 * substream->runtime->channels; 580 * substream->runtime->channels;
581 581
582 pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n", 582 pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
583 __func__, 583 __func__,
584 delay_ms, 584 delay_ms,
585 rate, 585 rate,
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c
index 1b61c23ff300..f1b1bc4bacfb 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/s3c24xx/s3c-dma.c
@@ -94,8 +94,7 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
94 94
95 if ((pos + len) > prtd->dma_end) { 95 if ((pos + len) > prtd->dma_end) {
96 len = prtd->dma_end - pos; 96 len = prtd->dma_end - pos;
97 pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n", 97 pr_debug("%s: corrected dma len %ld\n", __func__, len);
98 __func__, len);
99 } 98 }
100 99
101 ret = s3c2410_dma_enqueue(prtd->params->channel, 100 ret = s3c2410_dma_enqueue(prtd->params->channel,
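
The snd_ps3 and s3c-dma fixes drop KERN_* arguments from pr_debug(): the macro already supplies its own level, so an extra prefix just becomes literal text inside the message. A stand-alone demonstration with stand-in level macros (the ## variadic trick mirrors the kernel's own macro and needs gcc/clang):

#include <stdio.h>

#define KERN_DEBUG "<7>"
#define KERN_ERR   "<3>"
#define pr_debug(fmt, ...) printf(KERN_DEBUG fmt, ##__VA_ARGS__)

int main(void)
{
        pr_debug(KERN_ERR "oops, doubled level\n");     /* "<7><3>oops, ..." */
        pr_debug("clean message\n");                    /* "<7>clean message" */
        return 0;
}
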
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index b823a5c9b9bc..87e2b7fcbf17 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -12,6 +12,7 @@
12#include <linux/firmware.h> 12#include <linux/firmware.h>
13#include <linux/module.h> 13#include <linux/module.h>
14 14
15#include <asm/clkdev.h>
15#include <asm/clock.h> 16#include <asm/clock.h>
16 17
17#include <cpu/sh7722.h> 18#include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
40}; 41};
41 42
42static struct clk siumckb_clk = { 43static struct clk siumckb_clk = {
43 .name = "siumckb_clk",
44 .id = -1,
45 .ops = &siumckb_clk_ops, 44 .ops = &siumckb_clk_ops,
46 .rate = 0, /* initialised at run-time */ 45 .rate = 0, /* initialised at run-time */
47}; 46};
48 47
48static struct clk_lookup *siumckb_lookup;
49
49static int migor_hw_params(struct snd_pcm_substream *substream, 50static int migor_hw_params(struct snd_pcm_substream *substream,
50 struct snd_pcm_hw_params *params) 51 struct snd_pcm_hw_params *params)
51{ 52{
@@ -180,6 +181,13 @@ static int __init migor_init(void)
180 if (ret < 0) 181 if (ret < 0)
181 return ret; 182 return ret;
182 183
184 siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
185 if (!siumckb_lookup) {
186 ret = -ENOMEM;
187 goto eclkdevalloc;
188 }
189 clkdev_add(siumckb_lookup);
190
183 /* Port number used on this machine: port B */ 191 /* Port number used on this machine: port B */
184 migor_snd_device = platform_device_alloc("soc-audio", 1); 192 migor_snd_device = platform_device_alloc("soc-audio", 1);
185 if (!migor_snd_device) { 193 if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
200epdevadd: 208epdevadd:
201 platform_device_put(migor_snd_device); 209 platform_device_put(migor_snd_device);
202epdevalloc: 210epdevalloc:
211 clkdev_drop(siumckb_lookup);
212eclkdevalloc:
203 clk_unregister(&siumckb_clk); 213 clk_unregister(&siumckb_clk);
204 return ret; 214 return ret;
205} 215}
206 216
207static void __exit migor_exit(void) 217static void __exit migor_exit(void)
208{ 218{
219 clkdev_drop(siumckb_lookup);
209 clk_unregister(&siumckb_clk); 220 clk_unregister(&siumckb_clk);
210 platform_device_unregister(migor_snd_device); 221 platform_device_unregister(migor_snd_device);
211} 222}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index adbc68ce9050..f6b0d2829ea9 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
203 data[1] = (value >> 8) & 0xff; 203 data[1] = (value >> 8) & 0xff;
204 data[2] = value & 0xff; 204 data[2] = value & 0xff;
205 205
206 if (!snd_soc_codec_volatile_register(codec, reg)) 206 if (!snd_soc_codec_volatile_register(codec, reg)
207 reg_cache[reg] = value; 207 && reg < codec->reg_cache_size)
208 reg_cache[reg] = value;
208 209
209 if (codec->cache_only) { 210 if (codec->cache_only) {
210 codec->cache_sync = 1; 211 codec->cache_sync = 1;
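
The soc-cache fix only writes the value back into reg_cache[] when the register index actually fits inside the cache. A stripped-down version of that bounds check:

#include <stdio.h>

#define CACHE_SIZE 8
static unsigned short reg_cache[CACHE_SIZE];

static void cache_write(unsigned reg, unsigned short value)
{
        if (reg < CACHE_SIZE)           /* out-of-range registers are skipped */
                reg_cache[reg] = value;
}

int main(void)
{
        cache_write(3, 0xabcd);
        cache_write(200, 0xffff);       /* ignored instead of overflowing */
        printf("%#x\n", reg_cache[3]);
        return 0;
}
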
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9feb00c831a0..4eabafa5b037 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
126 for (idx = 0; idx < 2; idx++) { 126 for (idx = 0; idx < 2; idx++) {
127 subs = &as->substream[idx]; 127 subs = &as->substream[idx];
128 if (!subs->num_formats) 128 if (!subs->num_formats)
129 return; 129 continue;
130 snd_usb_release_substream_urbs(subs, 1); 130 snd_usb_release_substream_urbs(subs, 1);
131 subs->interface = -1; 131 subs->interface = -1;
132 } 132 }
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
216 } 216 }
217 217
218 switch (protocol) { 218 switch (protocol) {
219 default:
220 snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
221 protocol);
222 /* fall through */
223
219 case UAC_VERSION_1: { 224 case UAC_VERSION_1: {
220 struct uac1_ac_header_descriptor *h1 = control_header; 225 struct uac1_ac_header_descriptor *h1 = control_header;
221 226
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
253 258
254 break; 259 break;
255 } 260 }
256
257 default:
258 snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
259 return -EINVAL;
260 } 261 }
261 262
262 return 0; 263 return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
465 goto __error; 466 goto __error;
466 } 467 }
467 468
468 chip->ctrl_intf = alts; 469 /*
470 * For devices with more than one control interface, we assume the
471 * first contains the audio controls. We might need a more specific
472 * check here in the future.
473 */
474 if (!chip->ctrl_intf)
475 chip->ctrl_intf = alts;
469 476
470 if (err > 0) { 477 if (err > 0) {
471 /* create normal USB audio interfaces */ 478 /* create normal USB audio interfaces */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index b853f8df794f..7754a1034545 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
295 295
296 switch (altsd->bInterfaceProtocol) { 296 switch (altsd->bInterfaceProtocol) {
297 case UAC_VERSION_1: 297 case UAC_VERSION_1:
298 default:
298 return set_sample_rate_v1(chip, iface, alts, fmt, rate); 299 return set_sample_rate_v1(chip, iface, alts, fmt, rate);
299 300
300 case UAC_VERSION_2: 301 case UAC_VERSION_2:
301 return set_sample_rate_v2(chip, iface, alts, fmt, rate); 302 return set_sample_rate_v2(chip, iface, alts, fmt, rate);
302 } 303 }
303
304 return -EINVAL;
305} 304}
306 305
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 1a701f1e8f50..ef0a07e34844 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
275 275
276 /* get audio formats */ 276 /* get audio formats */
277 switch (protocol) { 277 switch (protocol) {
278 default:
279 snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
280 dev->devnum, iface_no, altno, protocol);
281 protocol = UAC_VERSION_1;
282 /* fall through */
283
278 case UAC_VERSION_1: { 284 case UAC_VERSION_1: {
279 struct uac1_as_header_descriptor *as = 285 struct uac1_as_header_descriptor *as =
280 snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); 286 snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
336 dev->devnum, iface_no, altno, as->bTerminalLink); 342 dev->devnum, iface_no, altno, as->bTerminalLink);
337 continue; 343 continue;
338 } 344 }
339
340 default:
341 snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
342 dev->devnum, iface_no, altno, protocol);
343 continue;
344 } 345 }
345 346
346 /* get format type */ 347 /* get format type */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 3a1375459c06..69148212aa70 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
49 u64 pcm_formats; 49 u64 pcm_formats;
50 50
51 switch (protocol) { 51 switch (protocol) {
52 case UAC_VERSION_1: { 52 case UAC_VERSION_1:
53 default: {
53 struct uac_format_type_i_discrete_descriptor *fmt = _fmt; 54 struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
54 sample_width = fmt->bBitResolution; 55 sample_width = fmt->bBitResolution;
55 sample_bytes = fmt->bSubframeSize; 56 sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
64 format <<= 1; 65 format <<= 1;
65 break; 66 break;
66 } 67 }
67
68 default:
69 return -EINVAL;
70 } 68 }
71 69
72 pcm_formats = 0; 70 pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
384 * audio class v2 uses class specific EP0 range requests for that. 382 * audio class v2 uses class specific EP0 range requests for that.
385 */ 383 */
386 switch (protocol) { 384 switch (protocol) {
385 default:
386 snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
387 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
388 /* fall through */
387 case UAC_VERSION_1: 389 case UAC_VERSION_1:
388 fp->channels = fmt->bNrChannels; 390 fp->channels = fmt->bNrChannels;
389 ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); 391 ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
392 /* fp->channels is already set in this case */ 394 /* fp->channels is already set in this case */
393 ret = parse_audio_format_rates_v2(chip, fp); 395 ret = parse_audio_format_rates_v2(chip, fp);
394 break; 396 break;
395 default:
396 snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
397 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
398 return -EINVAL;
399 } 397 }
400 398
401 if (fp->channels < 1) { 399 if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
438 fp->channels = 1; 436 fp->channels = 1;
439 437
440 switch (protocol) { 438 switch (protocol) {
439 default:
440 snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
441 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
442 /* fall through */
441 case UAC_VERSION_1: { 443 case UAC_VERSION_1: {
442 struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; 444 struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
443 brate = le16_to_cpu(fmt->wMaxBitRate); 445 brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
456 ret = parse_audio_format_rates_v2(chip, fp); 458 ret = parse_audio_format_rates_v2(chip, fp);
457 break; 459 break;
458 } 460 }
459 default:
460 snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
461 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
462 return -EINVAL;
463 } 461 }
464 462
465 return ret; 463 return ret;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c166db0057d3..3ed3901369ce 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
2175 } 2175 }
2176 2176
2177 host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; 2177 host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
2178 mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; 2178 switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
2179 case UAC_VERSION_1:
2180 default:
2181 mixer->protocol = UAC_VERSION_1;
2182 break;
2183 case UAC_VERSION_2:
2184 mixer->protocol = UAC_VERSION_2;
2185 break;
2186 }
2179 2187
2180 if ((err = snd_usb_mixer_controls(mixer)) < 0 || 2188 if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
2181 (err = snd_usb_mixer_status_create(mixer)) < 0) 2189 (err = snd_usb_mixer_status_create(mixer)) < 0)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 3634cedf9306..3b5135c93062 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
173 173
174 switch (altsd->bInterfaceProtocol) { 174 switch (altsd->bInterfaceProtocol) {
175 case UAC_VERSION_1: 175 case UAC_VERSION_1:
176 default:
176 return init_pitch_v1(chip, iface, alts, fmt); 177 return init_pitch_v1(chip, iface, alts, fmt);
177 178
178 case UAC_VERSION_2: 179 case UAC_VERSION_2:
179 return init_pitch_v2(chip, iface, alts, fmt); 180 return init_pitch_v2(chip, iface, alts, fmt);
180 } 181 }
181
182 return -EINVAL;
183} 182}
184 183
185/* 184/*
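
Across the USB-audio files above the pattern is the same: an unrecognised bInterfaceProtocol is logged and handled like UAC version 1 (the default label falls through to, or shares, the v1 case) instead of failing the whole probe with -EINVAL. A compact sketch of that fallback switch, using stand-in constants:

#include <stdio.h>

#define UAC_VERSION_1 0x00
#define UAC_VERSION_2 0x20

static int init_pitch(int protocol)
{
        switch (protocol) {
        default:
                printf("unknown protocol %#x, assuming v1\n", protocol);
                /* fall through */
        case UAC_VERSION_1:
                return 1;       /* v1 path */
        case UAC_VERSION_2:
                return 2;       /* v2 path */
        }
}

int main(void)
{
        /* expected: 1 2 1 */
        printf("%d %d %d\n", init_pitch(UAC_VERSION_1),
               init_pitch(UAC_VERSION_2), init_pitch(0x99));
        return 0;
}
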
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 4f1fa77c1feb..1950e19af1cf 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -1017,7 +1017,7 @@ builtin-revert.o wt-status.o: wt-status.h
1017# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So 1017# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
1018# we depend the various files onto their directories. 1018# we depend the various files onto their directories.
1019DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 1019DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
1020$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) 1020$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
1021# In the second step, we make a rule to actually create these directories 1021# In the second step, we make a rule to actually create these directories
1022$(sort $(dir $(DIRECTORY_DEPS))): 1022$(sort $(dir $(DIRECTORY_DEPS))):
1023 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null 1023 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 624a96c636fd..6de4313924fb 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
50 INIT_LIST_HEAD(&node->children); 50 INIT_LIST_HEAD(&node->children);
51 INIT_LIST_HEAD(&node->val); 51 INIT_LIST_HEAD(&node->val);
52 52
53 node->children_hit = 0;
53 node->parent = NULL; 54 node->parent = NULL;
54 node->hit = 0; 55 node->hit = 0;
55} 56}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e72f05c3bef0..fcc16e4349df 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
1539 goto error; 1539 goto error;
1540 } 1540 }
1541 tev->point.offset = pev->point.offset; 1541 tev->point.offset = pev->point.offset;
1542 tev->point.retprobe = pev->point.retprobe;
1542 tev->nargs = pev->nargs; 1543 tev->nargs = pev->nargs;
1543 if (tev->nargs) { 1544 if (tev->nargs) {
1544 tev->args = zalloc(sizeof(struct probe_trace_arg) 1545 tev->args = zalloc(sizeof(struct probe_trace_arg)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 525136684d4e..32b81f707ff5 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
686 char buf[32], *ptr; 686 char buf[32], *ptr;
687 int ret, nscopes; 687 int ret, nscopes;
688 688
689 if (!is_c_varname(pf->pvar->var)) {
690 /* Copy raw parameters */
691 pf->tvar->value = strdup(pf->pvar->var);
692 if (pf->tvar->value == NULL)
693 return -ENOMEM;
694 if (pf->pvar->type) {
695 pf->tvar->type = strdup(pf->pvar->type);
696 if (pf->tvar->type == NULL)
697 return -ENOMEM;
698 }
699 if (pf->pvar->name) {
700 pf->tvar->name = strdup(pf->pvar->name);
701 if (pf->tvar->name == NULL)
702 return -ENOMEM;
703 } else
704 pf->tvar->name = NULL;
705 return 0;
706 }
707
689 if (pf->pvar->name) 708 if (pf->pvar->name)
690 pf->tvar->name = strdup(pf->pvar->name); 709 pf->tvar->name = strdup(pf->pvar->name);
691 else { 710 else {
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
700 if (pf->tvar->name == NULL) 719 if (pf->tvar->name == NULL)
701 return -ENOMEM; 720 return -ENOMEM;
702 721
703 if (!is_c_varname(pf->pvar->var)) {
704 /* Copy raw parameters */
705 pf->tvar->value = strdup(pf->pvar->var);
706 if (pf->tvar->value == NULL)
707 return -ENOMEM;
708 if (pf->pvar->type) {
709 pf->tvar->type = strdup(pf->pvar->type);
710 if (pf->tvar->type == NULL)
711 return -ENOMEM;
712 }
713 return 0;
714 }
715
716 pr_debug("Searching '%s' variable in context.\n", 722 pr_debug("Searching '%s' variable in context.\n",
717 pf->pvar->var); 723 pf->pvar->var);
718 /* Search child die for local variables and parameters. */ 724 /* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
783 /* This function has no name. */ 789 /* This function has no name. */
784 tev->point.offset = (unsigned long)pf->addr; 790 tev->point.offset = (unsigned long)pf->addr;
785 791
792 /* Return probe must be on the head of a subprogram */
793 if (pf->pev->point.retprobe) {
794 if (tev->point.offset != 0) {
795 pr_warning("Return probe must be on the head of"
796 " a real function\n");
797 return -EINVAL;
798 }
799 tev->point.retprobe = true;
800 }
801
786 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, 802 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
787 tev->point.offset); 803 tev->point.offset);
788 804
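
The probe-finder change propagates the retprobe flag and refuses a return probe that does not sit at offset 0 of a function. A trimmed-down sketch of that conversion check, with the structures reduced to the two fields involved:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct probe_point { unsigned long offset; bool retprobe; };

static int convert_point(const struct probe_point *req, struct probe_point *out)
{
        out->offset = req->offset;
        if (req->retprobe) {
                if (out->offset != 0) {
                        fprintf(stderr, "Return probe must be on the head of a real function\n");
                        return -EINVAL;
                }
                out->retprobe = true;
        }
        return 0;
}

int main(void)
{
        struct probe_point entry = { 0, true };         /* at function entry: ok */
        struct probe_point inner = { 0x10, true };      /* at an offset: rejected */
        struct probe_point out = { 0, false };

        printf("%d %d\n", convert_point(&entry, &out), convert_point(&inner, &out));
        return 0;
}
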
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1a367734e016..b2f5ae97f33d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str,
2268 2268
2269int symbol__init(void) 2269int symbol__init(void)
2270{ 2270{
2271 if (symbol_conf.initialized)
2272 return 0;
2273
2271 elf_version(EV_CURRENT); 2274 elf_version(EV_CURRENT);
2272 if (symbol_conf.sort_by_name) 2275 if (symbol_conf.sort_by_name)
2273 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 2276 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2296,7 @@ int symbol__init(void)
2293 symbol_conf.sym_list_str, "symbol") < 0) 2296 symbol_conf.sym_list_str, "symbol") < 0)
2294 goto out_free_comm_list; 2297 goto out_free_comm_list;
2295 2298
2299 symbol_conf.initialized = true;
2296 return 0; 2300 return 0;
2297 2301
2298out_free_dso_list: 2302out_free_dso_list:
@@ -2304,11 +2308,14 @@ out_free_comm_list:
2304 2308
2305void symbol__exit(void) 2309void symbol__exit(void)
2306{ 2310{
2311 if (!symbol_conf.initialized)
2312 return;
2307 strlist__delete(symbol_conf.sym_list); 2313 strlist__delete(symbol_conf.sym_list);
2308 strlist__delete(symbol_conf.dso_list); 2314 strlist__delete(symbol_conf.dso_list);
2309 strlist__delete(symbol_conf.comm_list); 2315 strlist__delete(symbol_conf.comm_list);
2310 vmlinux_path__exit(); 2316 vmlinux_path__exit();
2311 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2317 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2318 symbol_conf.initialized = false;
2312} 2319}
2313 2320
2314int machines__create_kernel_maps(struct rb_root *self, pid_t pid) 2321int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b7a8da4af5a0..ea95c2756f05 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -69,7 +69,8 @@ struct symbol_conf {
69 show_nr_samples, 69 show_nr_samples,
70 use_callchain, 70 use_callchain,
71 exclude_other, 71 exclude_other,
72 show_cpu_utilization; 72 show_cpu_utilization,
73 initialized;
73 const char *vmlinux_name, 74 const char *vmlinux_name,
74 *source_prefix, 75 *source_prefix,
75 *field_sep; 76 *field_sep;
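
The symbol.c/symbol.h change guards symbol__init() and symbol__exit() with an initialized flag, so repeated init calls are no-ops and exit never frees what was not set up. A small stand-alone version of that idempotent init/exit guard:

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

static bool initialized;
static char *sym_list;

static int symbol_init(void)
{
        if (initialized)
                return 0;               /* second call: nothing to do */
        sym_list = malloc(64);
        if (!sym_list)
                return -1;
        initialized = true;
        return 0;
}

static void symbol_exit(void)
{
        if (!initialized)
                return;                 /* never initialised, or already torn down */
        free(sym_list);
        sym_list = NULL;
        initialized = false;
}

int main(void)
{
        symbol_init();
        symbol_init();                  /* safe: guarded */
        symbol_exit();
        symbol_exit();                  /* safe: guarded */
        return 0;
}
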
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 7ea983acfaea..f7af2fca965d 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -97,7 +97,7 @@ void setup_python_scripting(void)
97 register_python_scripting(&python_scripting_unsupported_ops); 97 register_python_scripting(&python_scripting_unsupported_ops);
98} 98}
99#else 99#else
100struct scripting_ops python_scripting_ops; 100extern struct scripting_ops python_scripting_ops;
101 101
102void setup_python_scripting(void) 102void setup_python_scripting(void)
103{ 103{
@@ -158,7 +158,7 @@ void setup_perl_scripting(void)
158 register_perl_scripting(&perl_scripting_unsupported_ops); 158 register_perl_scripting(&perl_scripting_unsupported_ops);
159} 159}
160#else 160#else
161struct scripting_ops perl_scripting_ops; 161extern struct scripting_ops perl_scripting_ops;
162 162
163void setup_perl_scripting(void) 163void setup_perl_scripting(void)
164{ 164{
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index dafdf6775d77..6866aa4c41e0 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -773,7 +773,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
773 773
774 switch (key) { 774 switch (key) {
775 case 'a': 775 case 'a':
776 if (browser->selection->map == NULL && 776 if (browser->selection->map == NULL ||
777 browser->selection->map->dso->annotate_warned) 777 browser->selection->map->dso->annotate_warned)
778 continue; 778 continue;
779 goto do_annotate; 779 goto do_annotate;
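
The hists browser fix swaps && for ||: with &&, a NULL map was still dereferenced by the second operand, whereas || lets the NULL test short-circuit. A tiny reproduction of why the operator matters:

#include <stdio.h>
#include <stdbool.h>

struct dso { bool annotate_warned; };
struct map { struct dso *dso; };

static bool skip_annotation(const struct map *map)
{
        /* with && instead of ||, map->dso would be read even when map is NULL */
        return map == NULL || map->dso->annotate_warned;
}

int main(void)
{
        struct dso d = { false };
        struct map m = { &d };

        printf("%d %d\n", skip_annotation(NULL), skip_annotation(&m));  /* 1 0 */
        return 0;
}
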
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b510b1..c1f1e3c62984 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
218 events = file->f_op->poll(file, &irqfd->pt); 218 events = file->f_op->poll(file, &irqfd->pt);
219 219
220 list_add_tail(&irqfd->list, &kvm->irqfds.items); 220 list_add_tail(&irqfd->list, &kvm->irqfds.items);
221 spin_unlock_irq(&kvm->irqfds.lock);
222 221
223 /* 222 /*
224 * Check if there was an event already pending on the eventfd 223 * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
227 if (events & POLLIN) 226 if (events & POLLIN)
228 schedule_work(&irqfd->inject); 227 schedule_work(&irqfd->inject);
229 228
229 spin_unlock_irq(&kvm->irqfds.lock);
230
230 /* 231 /*
231 * do not drop the file until the irqfd is fully initialized, otherwise 232 * do not drop the file until the irqfd is fully initialized, otherwise
232 * we might race against the POLLHUP 233 * we might race against the POLLHUP
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b78b794c1039..5186e728c53e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1958 cpu); 1958 cpu);
1959 hardware_disable(NULL); 1959 hardware_disable(NULL);
1960 break; 1960 break;
1961 case CPU_ONLINE: 1961 case CPU_STARTING:
1962 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 1962 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
1963 cpu); 1963 cpu);
1964 smp_call_function_single(cpu, hardware_enable, NULL, 1); 1964 hardware_enable(NULL);
1965 break; 1965 break;
1966 } 1966 }
1967 return NOTIFY_OK; 1967 return NOTIFY_OK;
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1970 1970
1971asmlinkage void kvm_handle_fault_on_reboot(void) 1971asmlinkage void kvm_handle_fault_on_reboot(void)
1972{ 1972{
1973 if (kvm_rebooting) 1973 if (kvm_rebooting) {
1974 /* spin while reset goes on */ 1974 /* spin while reset goes on */
1975 local_irq_enable();
1975 while (true) 1976 while (true)
1976 ; 1977 ;
1978 }
1977 /* Fault while not rebooting. We want the trace. */ 1979 /* Fault while not rebooting. We want the trace. */
1978 BUG(); 1980 BUG();
1979} 1981}
@@ -2096,7 +2098,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2096 2098
2097static struct notifier_block kvm_cpu_notifier = { 2099static struct notifier_block kvm_cpu_notifier = {
2098 .notifier_call = kvm_cpu_hotplug, 2100 .notifier_call = kvm_cpu_hotplug,
2099 .priority = 20, /* must be > scheduler priority */
2100}; 2101};
2101 2102
2102static int vm_stat_get(void *_offset, u64 *val) 2103static int vm_stat_get(void *_offset, u64 *val)